diff --git a/.circleci/config.yml b/.circleci/config.yml index dc07605fc5..7e64d59226 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -268,7 +268,11 @@ commands: --set consensusnode.persistence.enabled=true \ --set node.serviceMonitor.enabled=true \ --set bootnode.serviceMonitor.enabled=false \ - --set consensusnode.serviceMonitor.enabled=true + --set consensusnode.serviceMonitor.enabled=true \ + --set node.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set bootnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set consensusnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set node.indexer.enabled=true fi cleanup_prnet_chart: @@ -568,6 +572,8 @@ jobs: https://explorer-pr-<>.prnet.taraxa.io \ RPC>.prnet.taraxa.io >\ https://rpc-pr-<>.prnet.taraxa.io \ + Indexer>.prnet.taraxa.io >\ + https://indexer-pr-<>.prnet.taraxa.io \ \

Boot Nodes

\ \ diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 9a7c8168ae..4f7a0b22bb 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -11,7 +11,8 @@ else () --error-exitcode=1 --enable=all --suppress=missingInclude - --suppress=useStlAlgorithm + # find_if - useless here + --suppress=useStlAlgorithm:${PROJECT_SOURCE_DIR}/*/pbft_sync_packet_handler.cpp --suppress=noExplicitConstructor --suppress=unknownMacro # false positive @@ -27,8 +28,16 @@ else () # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/final_chain.cpp + # exclude graphql generated + -i ${PROJECT_SOURCE_DIR}/libraries/core_libs/network/graphql/gen/ + # messy files + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h + #not an issue here --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp - # Only show found errors "--quiet" diff --git a/charts/taraxa-node/.gitignore b/charts/taraxa-node/.gitignore new file mode 100644 index 0000000000..7d251af7f5 --- /dev/null +++ b/charts/taraxa-node/.gitignore @@ -0,0 +1,6 @@ +# do not include into git chart dependencies +charts/*.tgz + +# Helm stuff +requirements.lock +Chart.lock \ No newline at end of file diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index d5896d3506..f923c637a9 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,53 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+## v0.3.9 + +### Major changes + +* Rename / restructure manifest files +* Added light nodes + +### Minor changes + +* Removed "@channel" from slack notifications + +## v0.3.8 + +### Minor changes + +* Added port for scraping metrics from sidecar of rpc-nodes ([taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer)) + +## v0.3.7 + +### Minor changes + +* Adds transaction generating service to replace the explorer faucet + +## v0.3.6 + +### Minor changes + +* Added labels into `StatefulSets` for [kube-monkey](https://github.com/asobti/kube-monkey) + +## v0.3.5 + +### Minor changes + +* Changed `db_path` to `data_dir` for taraxa-indexer + +## v0.3.4 + +### Minor changes + +* Enabled CORS on `Ingress` of indexer + +## v0.3.3 + +### Major changes + +* Added sidecar container to RPC nodes with [taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer) + ## v0.3.2 ### Minor changes @@ -21,6 +68,13 @@ numbering uses [semantic versioning](http://semver.org). * Separate config for genesis +## v0.2.5 + +### Minor changes + +* Allow for different images in `StatefulSet`s for boot, rpc and consensus nodes + + ## v0.2.4 ### Minor changes diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index d0a045aaa9..b638b10387 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.2 +version: 0.3.9 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/templates/_helpers.tpl b/charts/taraxa-node/templates/_helpers.tpl index a9855a8bc1..02d1c805b1 100644 --- a/charts/taraxa-node/templates/_helpers.tpl +++ b/charts/taraxa-node/templates/_helpers.tpl @@ -87,6 +87,19 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{- end -}} +{{/* +Create a default fully qualified indexer name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "taraxa-node.indexerName" -}} +{{- if .Values.indexerNameOverride -}} +{{- .Values.indexerNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s.%s" "indexer" .Release.Name .Values.domain | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified graphql websocket. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). diff --git a/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml deleted file mode 100644 index ed6379ba82..0000000000 --- a/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.bootnode.enabled }} -{{ if .Values.bootnode.loadBalancer.enabled }} -{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} - {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - labels: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/initconfig-boot-node.yaml b/charts/taraxa-node/templates/bootnode-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-boot-node.yaml rename to charts/taraxa-node/templates/bootnode-configmap.yaml diff --git a/charts/taraxa-node/templates/bootnode-service.yaml b/charts/taraxa-node/templates/bootnode-service.yaml new file mode 100644 index 0000000000..5e2f2c52fd --- /dev/null +++ b/charts/taraxa-node/templates/bootnode-service.yaml @@ -0,0 +1,59 @@ +{{ if .Values.bootnode.enabled }} +{{- if .Values.bootnode.service.ports }} +# Note: This is a headless service +apiVersion: v1 +kind: Service +metadata: + name: {{ include "taraxa-boot-node.fullname" . }} + labels: + name: boot-node + app.kubernetes.io/name: boot-node + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + selector: + app: boot-node + app.kubernetes.io/name: {{ .Release.Name }}-boot-node + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + {{- range $port := .Values.bootnode.service.ports }} + - name: {{ $port.name | default $port.port }} + port: {{ $port.port }} + targetPort: {{ $port.targetPort | default $port.port }} + {{- if $port.protocol }} + protocol: {{ $port.protocol }} + {{- end }} + {{- end }} +{{- end }} + +{{ if .Values.bootnode.loadBalancer.enabled }} +{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} + {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP +{{- end }} +{{- end }} + +{{- end }} diff --git a/charts/taraxa-node/templates/boot-node-servicemonitor.yaml b/charts/taraxa-node/templates/bootnode-servicemonitor.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-node-servicemonitor.yaml rename to charts/taraxa-node/templates/bootnode-servicemonitor.yaml diff --git a/charts/taraxa-node/templates/boot-node.yaml b/charts/taraxa-node/templates/bootnode-statefulset.yaml similarity index 76% rename from charts/taraxa-node/templates/boot-node.yaml rename to charts/taraxa-node/templates/bootnode-statefulset.yaml index f73004d58b..0b848666ec 100644 --- a/charts/taraxa-node/templates/boot-node.yaml +++ b/charts/taraxa-node/templates/bootnode-statefulset.yaml @@ 
-9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} serviceName: {{ include "taraxa-boot-node.fullname" . }} @@ -29,13 +36,29 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-boot-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." 
spec: initContainers: - name: config-adapter + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + + {{- if .Values.bootnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -54,8 +77,17 @@ spec: mountPath: /root/.taraxa containers: - name: boot-node + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + + {{- if .Values.bootnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} args: {{- toYaml .Values.bootnode.args | nindent 12 }} - --chain-id diff --git a/charts/taraxa-node/templates/initconfig-consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-configmap.yaml diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml new file mode 100644 index 0000000000..b1fa283c97 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -0,0 +1,178 @@ +{{ if .Values.consensusnodeLight.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-consensus-node-light-init-script + labels: + app: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + genconfig.py: |- + import json + import sys + import subprocess + + def get_vrf_public(vrf_prv_key): + process = subprocess.Popen(['taraxad', '--command', 'vrf', vrf_prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + for line in process.stdout: + l = line.decode("utf-8") + + if "vrf_public" in l: + vrf_public = l.split(':')[1].replace("\"", "").strip() + return f'0x{vrf_public}' + + def get_addr(prv_key): + process = subprocess.Popen(['taraxad', '--command', 'account', prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + for line in process.stdout: + l = line.decode("utf-8") + + if "node_address" in l: + addr = l.split(':')[1].replace("\"", "").strip() + return f'0x{addr}' + + def main(config): + keys = [] + vrfs = [] + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + keys = [ + {{- range $key, $value := .Values.config.consensusnode.keys }} + "{{ $value }}", + {{- end }} + ] + {{- end }} + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + vrfs = [ + {{- range $key, $value := .Values.config.consensusnode.vrfs }} + "{{ $value }}", + {{- end }} + ] + {{- end }} + + with open(config) as f: + data = json.load(f) + + initial_validators = data['dpos']['initial_validators'] + + # get delegations from 1st default validator + delegations = initial_validators[0]['delegations'] + + validators = [] + for idx, key in enumerate(keys): + + validator = { + 'address': '', + 'commission': '0x0', + 'delegations': {}, + 'description': 'Taraxa validator', + 'endpoint': '', + 'owner': '' + } + + addr = get_addr(key) + validator['address'] = addr + validator['owner'] = addr + validator['delegations'] = delegations + validator['vrf_key'] = get_vrf_public(vrfs[idx]) + validators.append(validator) + + 
data['dpos']['initial_validators'] = validators + print(json.dumps(data)) + + if __name__ == "__main__": + config_file_name = sys.argv[1] + main(config_file_name) + + entrypoint.sh: |- + #!/bin/bash + DATA_PATH=/root/.taraxa + CONFIG_PATH=$DATA_PATH/conf_taraxa.json + GENESIS_PATH=$DATA_PATH/genesis_taraxa.json + WALLET_PATH=$DATA_PATH/wallet_taraxa.json + + echo "Cleaning up old config..." + rm -rf $CONFIG_PATH + rm -rf $GENESIS_PATH + + echo "Generating config" + INDEX=${HOSTNAME##*-} + KEY="CONSENSUS_NODE_LIGHT_KEY_${INDEX}" + VRF="CONSENSUS_NODE_LIGHT_VRF_${INDEX}" + + KEY="${!KEY}" + VRF="${!VRF}" + + if [ -z "$KEY" ] + then + if [ ! -f "$WALLET_PATH" ] + then + echo "No predifined keys. Generating new wallet..." + KEY=$(taraxad --command account | grep node_secret | cut -d\ -f3- | tr -d \") + VRF=$(taraxad --command vrf | grep vrf_secret | cut -d\ -f3 | tr -d \") + {{ if .Values.explorer.enabled }} + NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \") + echo "New wallet: 0x${NODE_ADDRESS}" + + SIG=$(taraxa-sign sign --key 0x${EXPLORER_DELEGATION_PRIVATE_KEY} 0x${NODE_ADDRESS}) + + curl --silent http://{{ .Release.Name }}-explorer/api/delegate/0x${NODE_ADDRESS}?sig=${SIG} + {{- end }} + else + echo "Found wallet file." 
+ KEY=$(cat "$WALLET_PATH" | jq -r .node_secret) + VRF=$(cat "$WALLET_PATH" | jq -r .vrf_secret) + fi + fi + + {{ if .Values.explorer.enabled }} + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \") + curl --silent http://{{ .Release.Name }}-explorer/api/faucet/0x${NODE_ADDRESS} + {{- end }} + {{- end }} + + taraxad --command config \ + --chain-id {{ .Values.config.network }} \ + --node-secret ${KEY} \ + --vrf-secret ${VRF} \ + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{- $lbEnabled := .Values.bootnode.loadBalancer.enabled -}} + {{- $lbIPs := .Values.bootnode.loadBalancer.addresses -}} + {{- range $key, $value := .Values.config.bootnode.addresses }} + --boot-nodes {{ if $lbEnabled }}{{ index $lbIPs $key }}{{- else }}{{ include "taraxa-boot-node.fullname" $ }}-{{ $key }}.{{ include "taraxa-boot-node.fullname" $ }}.{{$.Release.Namespace}}{{- end }}:10002/{{ $value }} \ + {{- end }} + {{- end }} + {{- if .Values.config.extraArgs }} + {{ join " " .Values.config.extraArgs }} \ + {{- end }} + --config $CONFIG_PATH \ + --genesis $GENESIS_PATH \ + --wallet $WALLET_PATH \ + --data-dir $DATA_PATH + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + python3 /bin/genconfig.py $GENESIS_PATH > $GENESIS_PATH.tmp && mv $GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{ if .Values.explorer.enabled }} + export FAUCET_ADDRESS=$(taraxad --command account ${EXPLORER_FAUCET_PRIVATE_KEY} | grep node_address | cut -d\ -f3 | tr -d \") + cat $GENESIS_PATH | jq '.initial_balances += ({("0x"+env.FAUCET_ADDRESS): "0x1027e72f1f12813088000000"})' > $GENESIS_PATH.tmp && mv 
$GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + {{- end }} + + echo "***** $CONFIG_PATH *****" + cat $CONFIG_PATH + echo "***** $CONFIG_PATH *****" + + echo "***** $GENESIS_PATH *****" + cat $GENESIS_PATH + echo "***** $GENESIS_PATH *****" +{{- end }} diff --git a/charts/taraxa-node/templates/boot-node-service.yaml b/charts/taraxa-node/templates/consensus-node-light-service.yaml similarity index 61% rename from charts/taraxa-node/templates/boot-node-service.yaml rename to charts/taraxa-node/templates/consensus-node-light-service.yaml index 0f6f1ab71f..724a7e02df 100644 --- a/charts/taraxa-node/templates/boot-node-service.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-service.yaml @@ -1,13 +1,13 @@ -{{ if .Values.bootnode.enabled }} -{{- if .Values.bootnode.service.ports }} +{{ if .Values.consensusnodeLight.enabled }} +{{- if .Values.consensusnodeLight.service.ports }} # Note: This is a headless service apiVersion: v1 kind: Service metadata: - name: {{ include "taraxa-boot-node.fullname" . }} + name: {{ include "taraxa-consensus-node.fullname" . }}-light labels: - name: boot-node - app.kubernetes.io/name: boot-node + name: consensus-node-light + app.kubernetes.io/name: consensus-node-light helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} @@ -15,11 +15,11 @@ spec: type: ClusterIP clusterIP: None selector: - app: boot-node - app.kubernetes.io/name: {{ .Release.Name }}-boot-node + app: consensus-node-light + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light app.kubernetes.io/instance: {{ .Release.Name }} ports: - {{- range $port := .Values.bootnode.service.ports }} + {{- range $port := .Values.consensusnodeLight.service.ports }} - name: {{ $port.name | default $port.port }} port: {{ $port.port }} targetPort: {{ $port.targetPort | default $port.port }} diff --git a/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml new file mode 100644 index 0000000000..730f84f570 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml @@ -0,0 +1,26 @@ +{{ if .Values.consensusnodeLight.enabled }} +{{- if .Values.consensusnodeLight.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "taraxa-consensus-node.fullname" . }}-light + labels: + name: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ $.Release.Namespace | quote }} + endpoints: + - honorLabels: true + path: /metrics + port: metrics +{{- end }} +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml new file mode 100644 index 0000000000..91fcda21a2 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml @@ -0,0 +1,235 @@ +{{ if .Values.consensusnodeLight.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "taraxa-consensus-node.fullname" . }}-light + labels: + app: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} +spec: + replicas: {{ .Values.consensusnodeLight.replicaCount }} + serviceName: {{ include "taraxa-consensus-node.fullname" . }}-light + # to launch or terminate all Pods in parallel. 
+ # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: consensus-node-light + labels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + initContainers: + {{ if .Values.explorer.enabled }} + - name: wait-for-explorer + image: dwdraju/alpine-curl-jq:latest + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: explorer-check + mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + {{- end }} + - name: config-adapter + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnodeLight.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + envFrom: + - secretRef: + name: {{ .Release.Name }} + env: + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: initconfig + 
mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + - name: initconfig + mountPath: /bin/genconfig.py + readOnly: true + subPath: genconfig.py + - name: data + mountPath: /root/.taraxa + containers: + {{- if .Values.slack.enabled }} + - name: status + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: SLACK_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: SLACK_TOKEN + - name: SLACK_CHANNEL + value: {{ .Values.slack.channel }} + - name: K8S_CLUSTER + value: {{ .Values.slack.k8s_cluster }} + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/status.py" ] + volumeMounts: + - name: status-requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: status-script + mountPath: /app/status.py + readOnly: true + subPath: status.py + {{- end }} + - name: consensus-node-light + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnodeLight.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + args: + {{- toYaml .Values.consensusnodeLight.args | nindent 12 }} + env: + - name: DEBUG + value: "{{ .Values.consensusnodeLight.debug }}" + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if not .Values.consensusnodeLight.probes.enabled }} + - name: TARAXA_SLEEP_DIAGNOSE + value: "true" + {{- end }} + ports: + {{- toYaml .Values.consensusnodeLight.ports | nindent 12 }} + {{- if .Values.consensusnodeLight.probes.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + - "ps -A | grep taraxad" + initialDelaySeconds: 10 + 
periodSeconds: 5 + readinessProbe: + exec: + command: + - curl + - -X + - POST + - -H + - "'Content-Type: application/json'" + - -d + - "'{\"jsonrpc\":\"2.0\",\"method\":\"taraxa_protocolVersion\",\"params\": [],\"id\":1}'" + - http://127.0.0.1:7777 + initialDelaySeconds: 10 + periodSeconds: 5 + {{- end }} + resources: + {{- toYaml .Values.consensusnodeLight.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: /root/.taraxa + securityContext: + capabilities: + add: + - SYS_PTRACE + {{- with .Values.consensusnodeLight.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: initconfig + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-consensus-node-light-init-script + {{ if .Values.explorer.enabled }} + - name: explorer-check + configMap: + defaultMode: 0700 + name: {{ include "taraxa-node.fullname" . 
}}-explorer-check + {{- end }} + {{- if .Values.slack.enabled }} + - name: status-requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + - name: status-script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + {{- end }} + {{- if not .Values.consensusnodeLight.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.consensusnodeLight.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + annotations: + {{- if .Values.consensusnodeLight.persistence.annotations}} + {{- toYaml .Values.consensusnodeLight.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.consensusnodeLight.persistence.accessMode | quote }} + {{- if .Values.consensusnodeLight.persistence.storageClass }} + {{- if (eq "-" .Values.consensusnodeLight.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.consensusnodeLight.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.consensusnodeLight.persistence.size }}" + {{- end }} +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-statefulset.yaml similarity index 81% rename from charts/taraxa-node/templates/consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-statefulset.yaml index df2608e935..920a688eb0 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node-statefulset.yaml @@ -9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . 
}} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} serviceName: {{ include "taraxa-consensus-node.fullname" . }} @@ -29,6 +36,13 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-consensus-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: @@ -44,8 +58,16 @@ spec: subPath: entrypoint.sh {{- end }} - name: config-adapter + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -94,8 +116,16 @@ spec: subPath: status.py {{- end }} - name: consensus-node + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ 
.Values.image.pullPolicy }} + {{- end }} args: {{- toYaml .Values.consensusnode.args | nindent 12 }} env: diff --git a/charts/taraxa-node/templates/explorer-check.yaml b/charts/taraxa-node/templates/explorer-check-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/explorer-check.yaml rename to charts/taraxa-node/templates/explorer-check-configmap.yaml diff --git a/charts/taraxa-node/templates/port-check.yaml b/charts/taraxa-node/templates/port-check.yaml deleted file mode 100644 index 2a754fa05b..0000000000 --- a/charts/taraxa-node/templates/port-check.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- $fullName := include "taraxa-node.fullname" . -}} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $fullName }}-port-check - labels: - helm.sh/chart: {{ include "taraxa-node.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - entrypoint.sh: |- - #!/bin/sh - set -e - echo "Checking host $1" - while [ $(nc -z -w5 $1 $2 > /dev/null 2>&1; echo $?) -ne 0 ]; do - sleep 5 - echo "Waiting for $1:$2..." 
- done diff --git a/charts/taraxa-node/templates/secrets.yaml b/charts/taraxa-node/templates/secrets.yaml index aa7631a87f..7d2f50167b 100644 --- a/charts/taraxa-node/templates/secrets.yaml +++ b/charts/taraxa-node/templates/secrets.yaml @@ -27,6 +27,12 @@ data: {{- range $key, $value := .Values.config.consensusnode.vrfs }} CONSENSUS_NODE_VRF_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.keys }} + CONSENSUS_NODE_LIGHT_KEY_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.vrfs }} + CONSENSUS_NODE_LIGHT_VRF_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} {{- range $key, $value := .Values.config.bootnode.keys }} BOOT_NODE_KEY_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} @@ -36,5 +42,6 @@ data: SLACK_TOKEN: {{ .Values.slack.token | b64enc | quote }} EXPLORER_DELEGATION_PRIVATE_KEY: {{ .Values.config.consensusnode.explorerDelegationPrivateKey | b64enc | quote }} EXPLORER_FAUCET_PRIVATE_KEY: {{ .Values.explorer.faucet.privKey | b64enc | quote }} + TRANSACTION_GENERATION_PRIVATE_KEY: {{ .Values.transactionGeneration.privateKey | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/node-status-script.yaml b/charts/taraxa-node/templates/status-script-configmap.yaml similarity index 93% rename from charts/taraxa-node/templates/node-status-script.yaml rename to charts/taraxa-node/templates/status-script-configmap.yaml index 185546ce2a..01fa04ac0c 100644 --- a/charts/taraxa-node/templates/node-status-script.yaml +++ b/charts/taraxa-node/templates/status-script-configmap.yaml @@ -118,8 +118,8 @@ data: switcher = { "UP": ":white_check_mark: {} ({}) node is up and running :white_check_mark:".format(HOSTNAME, K8S_CLUSTER), - "DOWN": "@channel :fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), - "DOWN_NP": "@channel :fire: {} ({}) node is down (no network progress). 
Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), + "DOWN": ":fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), + "DOWN_NP": ":fire: {} ({}) node is down (no network progress). Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), } message = switcher.get(current_status) diff --git a/charts/taraxa-node/templates/initconfig-node.yaml b/charts/taraxa-node/templates/taraxa-node-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-node.yaml rename to charts/taraxa-node/templates/taraxa-node-configmap.yaml diff --git a/charts/taraxa-node/templates/taraxa-node-ingress.yaml b/charts/taraxa-node/templates/taraxa-node-ingress.yaml index 927e068001..e42f1f868b 100644 --- a/charts/taraxa-node/templates/taraxa-node-ingress.yaml +++ b/charts/taraxa-node/templates/taraxa-node-ingress.yaml @@ -1,5 +1,6 @@ {{ if .Values.node.enabled }} {{- if .Values.node.ingress.enabled -}} + {{- $fullName := include "taraxa-node.fullname" . -}} {{- $apiIsStable := eq (include "taraxa-node.ingress.isStable" .) "true" -}} {{- $ingressSupportsPathType := eq (include "taraxa-node.ingress.supportsPathType" .) 
"true" -}} @@ -9,11 +10,14 @@ {{- $servicePortRpcWs := 8777 -}} {{- $servicePortGraphQl := 9777 -}} {{- $servicePortGraphQlWs := 6777 -}} +{{- $servicePortHttp := 8080 -}} + {{- range .Values.node.service.ports }} {{ if eq .name "rest"}} {{ $servicePortRpc = .port }} {{ end }} {{ if eq .name "ws"}} {{ $servicePortRpcWs = .port }} {{ end }} {{ if eq .name "graphql"}} {{ $servicePortGraphQl = .port }} {{ end }} {{ if eq .name "graphql-ws"}} {{ $servicePortGraphQlWs = .port }} {{ end }} + {{ if eq .name "http-indexer"}} {{ $servicePortHttp = .port }} {{ end }} {{- end }} {{- $pathType := .Values.node.ingress.pathType | default "ImplementationSpecific" -}} @@ -197,5 +201,51 @@ spec: serviceName: {{ $serviceName }} servicePort: {{ $servicePortGraphQlWs }} {{- end }} + +--- +apiVersion: {{ include "taraxa-node.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }}-indexer + labels: + app: taraxa-node + app.kubernetes.io/name: {{ include "taraxa-node.name" . }} + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.node.ingress.annotationsIndexer }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if $apiIsStable }} +{{- if .Values.node.ingress.ingressClassName }} + ingressClassName: {{ .Values.node.ingress.ingressClassName }} +{{- end }} +{{- end }} + {{- if .Values.wildCertDomainSecret }} + tls: + - hosts: + - {{ include "taraxa-node.indexerName" . | quote }} + secretName: {{ .Values.wildCertDomainSecret }} + {{- end }} + rules: + - host: {{ include "taraxa-node.indexerName" . 
| quote }} + http: + paths: + - path: / + {{- if and $pathType $ingressSupportsPathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePortHttp }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePortHttp }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node-service.yaml b/charts/taraxa-node/templates/taraxa-node-service.yaml index b9a6653e1c..7b3dab0832 100644 --- a/charts/taraxa-node/templates/taraxa-node-service.yaml +++ b/charts/taraxa-node/templates/taraxa-node-service.yaml @@ -52,4 +52,74 @@ spec: protocol: {{ $port.protocol }} {{- end }} {{- end }} + +{{ if .Values.node.loadBalancer.enabled }} +{{- range $key, $value := .Values.node.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: tcp-listen-port + port: 10002 + targetPort: 10002 + protocol: TCP +{{- end }} +{{- end }} + +{{ if .Values.node.nodePort.enabled }} +{{- range $key, $value := .Values.node.nodePort.ports }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} + labels: + name: {{ $.Release.Name 
}}-taraxa-node-discovery-{{ $key }} +spec: + type: NodePort + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: UDP + - name: tcp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} + {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml index 0ee519890d..ed7d6aaa2a 100644 --- a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml +++ b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml @@ -22,5 +22,8 @@ spec: - honorLabels: true path: /metrics port: metrics + - honorLabels: true + path: /metrics + port: metrics-indexer {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node-statefulset.yaml similarity index 71% rename from charts/taraxa-node/templates/taraxa-node.yaml rename to charts/taraxa-node/templates/taraxa-node-statefulset.yaml index 49494c0e2f..b462673a3a 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node-statefulset.yaml @@ -10,6 +10,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} spec: replicas: {{ .Values.node.replicaCount }} # to launch or terminate all Pods in parallel. 
@@ -30,13 +37,28 @@ spec: partition: a app.kubernetes.io/name: {{ include "taraxa-node.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: initContainers: - name: config-adapter + {{- if and .Values.node.image.repository .Values.node.image.tag }} + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.node.image.pullPolicy }} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -90,9 +112,31 @@ spec: readOnly: true subPath: status.py {{- end }} + {{- if .Values.node.indexer.enabled }} + - name: taraxa-indexer + image: "{{ .Values.node.indexer.image.repository }}:{{ .Values.node.indexer.image.tag }}" + imagePullPolicy: {{ .Values.node.indexer.image.pullPolicy }} + command: ["/taraxa-indexer"] + args: + - -data_dir + - {{ .Values.node.indexer.persistence.mountPoint }} + - -blockchain_ws + - 'ws://localhost:8777' + volumeMounts: + - name: indexer-data + mountPath: /data + {{- end }} - name: taraxa-node + {{- if and .Values.node.image.repository .Values.node.image.tag }} + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.node.image.pullPolicy }} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- 
end }} args: {{- toYaml .Values.node.args | nindent 12 }} env: @@ -208,5 +252,26 @@ spec: resources: requests: storage: "{{ .Values.node.persistence.size }}" + {{- if .Values.node.indexer.enabled }} + - metadata: + name: indexer-data + annotations: + {{- if .Values.node.indexer.persistence.annotations}} + {{- toYaml .Values.node.indexer.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.node.indexer.persistence.accessMode | quote }} + {{- if .Values.node.indexer.persistence.storageClass }} + {{- if (eq "-" .Values.node.indexer.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.node.indexer.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.node.indexer.persistence.size }}" + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml deleted file mode 100644 index 818c98b107..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.loadBalancer.enabled }} -{{- range $key, $value := .Values.node.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP - ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - 
externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: tcp-listen-port - port: 10002 - targetPort: 10002 - protocol: TCP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml deleted file mode 100644 index 51a5e78fab..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.nodePort.enabled }} -{{- range $key, $value := .Values.node.nodePort.ports }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} -spec: - type: NodePort - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: UDP - - name: tcp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: TCP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml b/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml new file mode 100644 index 0000000000..316d45bbc5 --- /dev/null +++ b/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml @@ -0,0 +1,127 @@ +{{- if .Values.transactionGeneration.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: node + app.kubernetes.io/name: node + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + requirements.txt: |- + aiohttp==3.8.4 + aiosignal==1.3.1 + async-timeout==4.0.2 + attrs==22.2.0 + base58==2.1.1 + bitarray==2.7.3 + certifi==2022.12.7 + charset-normalizer==3.1.0 + coloredlogs==15.0.1 + cytoolz==0.12.1 + eth-abi==2.2.0 + eth-account==0.5.9 + eth-hash==0.5.1 + eth-keyfile==0.5.1 + eth-keys==0.3.4 + eth-rlp==0.2.1 + eth-typing==2.3.0 + eth-utils==1.9.5 + frozenlist==1.3.3 + hexbytes==0.3.0 + humanfriendly==10.0 + idna==3.4 + ipfshttpclient==0.8.0a2 + jsonschema==4.17.3 + lru-dict==1.1.8 + multiaddr==0.0.9 + multidict==6.0.4 + netaddr==0.8.0 + parsimonious==0.8.1 + protobuf==3.19.5 + pycryptodome==3.17 + pyrsistent==0.19.3 + python-dotenv==1.0.0 + requests==2.28.2 + rlp==2.0.1 + six==1.16.0 + toolz==0.12.0 + urllib3==1.26.15 + varint==1.0.2 + web3==5.31.4 + websockets==9.1 + yarl==1.8.2 + transactions.py: |- + import logging + import coloredlogs + import time + from dotenv import load_dotenv + from os import getenv + from web3 import Web3 + + load_dotenv() + + LOG_LEVEL = getenv('LOG_LEVEL', 'INFO') + PROVIDER_URL = getenv('PROVIDER_URL') + PRIVATE_KEY = getenv('PRIVATE_KEY') + PENDING_TRANSACTIONS_THRESHOLD = 1000 + + logger = logging.getLogger() + coloredlogs.install(level=LOG_LEVEL, logger=logger) + + provider = Web3.HTTPProvider(PROVIDER_URL) + chain_id = provider.make_request('net_version', []) + chain_id = int(chain_id['result']) + logger.info(f'Got chain ID: {chain_id}') + + node_config = provider.make_request('taraxa_getConfig', []) + initial_validators = list( + map(lambda x: Web3.toChecksumAddress(x['address']), node_config['result']['dpos']['initial_validators'])) + logger.info(f'Got initial validators: {initial_validators}') + + web3 = Web3(provider) + logger.info(f'Connected to Taraxa node: {PROVIDER_URL}') + + last_block = web3.eth.getBlock('latest') + logger.info(f'Last block: #{last_block.number}') + + account = 
web3.eth.account.from_key(PRIVATE_KEY) + logger.info(f'Account: {account.address}') + + transaction_count = int(web3.eth.get_transaction_count(account.address)) + logger.info(f'Transaction count for address: {transaction_count}') + + while True: + pending_transactions = web3.eth.get_block_transaction_count('pending') + logger.info(f'Number of pending transactions: {pending_transactions}') + if pending_transactions > PENDING_TRANSACTIONS_THRESHOLD: + logger.info( + f'Number of pending transactions is above threshold, sleeping for 10 seconds') + time.sleep(10) + continue + + logger.info(f'Sending transactions to initial validators') + + for initial_validator in initial_validators: + logger.info( + f'Sending transaction #{transaction_count} to {initial_validator}') + + transaction = { + 'from': account.address, + 'to': initial_validator, + 'value': 1, + 'gas': 21000, + 'gasPrice': 1, + 'nonce': transaction_count, + 'chainId': chain_id, + } + logger.debug(f'Transaction {transaction}') + signed_transaction = account.sign_transaction(transaction) + web3.eth.send_raw_transaction(signed_transaction.rawTransaction) + transaction_count = transaction_count+1 + + time.sleep(1) +{{- end }} diff --git a/charts/taraxa-node/templates/transaction-generation-statefulset.yaml b/charts/taraxa-node/templates/transaction-generation-statefulset.yaml new file mode 100644 index 0000000000..c620d5b3fb --- /dev/null +++ b/charts/taraxa-node/templates/transaction-generation-statefulset.yaml @@ -0,0 +1,73 @@ +{{ if .Values.transactionGeneration.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: transaction-generation + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + serviceName: {{ .Release.Name }}-transaction-generation + # to launch or terminate all Pods in parallel. + # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + containers: + - name: transaction-generation + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: TRANSACTION_GENERATION_PRIVATE_KEY + - name: PROVIDER_URL + value: http://{{ include "taraxa-node.fullname" . }}-head:7777 + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/transactions.py" ] + volumeMounts: + - name: requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: script + mountPath: /app/transactions.py + readOnly: true + subPath: transactions.py + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation + - name: script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation +{{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index f62ff08f9a..aa49074ef8 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -16,7 +16,7 @@ image: pullPolicy: IfNotPresent config: - # integer, 1=Mainnet, 2=Testnet, 3=Devnet) + # integer, 841=Mainnet, 842=Testnet, 843=Devnet # 100 for default helm test network: "100" extraArgs: [] @@ -33,6 +33,11 @@ config: - "badf7196e18f653130564fd2f27419bff36194094057a69053bbe3a83a97b4fe" vrfs: - "c7c32f136cf4529471905a6b775ad82a076a5b5d3160b76ba683c743b8a852cff06560102e3dbab2e8b62082611dfc378c90336d01c0a7fd2a1a7bb88fb63478" + consensusnodeLight: + keys: + - "a48867f0133acd5e10dd980c4ad824da69c6c1947d2fb6c2b576f41cccf5e782" + vrfs: + - "6441cd427dcad51d7a2054d777237e1e53f6cb280eebfed6a6647a5c15fd0808d24dab2ffe1c32b4b608bdadf657f82f1871fa8dc19faeef3833bb3e42bb65ec" bootnode: keys: - "45dc56636faf97230f557e16345055f5839dad25f4b3f6f88a02add24b4a00fc" @@ -50,6 +55,10 @@ slack: channel: channel k8s_cluster: taraxa +transactionGeneration: + enabled: true + privateKey: "" + nameOverride: "" fullnameOverride: "" @@ -65,6 +74,7 @@ affinity: {} node: enabled: true + image: {} replicaCount: 20 loadBalancer: enabled: false @@ -94,6 +104,16 @@ node: annotationsRpcWS: {} annotationsGraphQl: {} annotationsGraphQlWS: {} + annotationsIndexer: + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/session-cookie-name: "stickounet" + nginx.ingress.kubernetes.io/session-cookie-expires: "172800" + nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" + nginx.ingress.kubernetes.io/affinity-mode: persistent + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-origin: 
"*" + nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS, DELETE" + nginx.ingress.kubernetes.io/cors-allow-headers: "Authorization,Range,Content-Range,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Access-Control-Allow-Origin" hosts: [] tls: [] ports: @@ -111,6 +131,9 @@ node: - name: udp-listen-port containerPort: 10002 protocol: UDP + - name: http-indexer + containerPort: 8080 + protocol: TCP service: ports: - name: rest @@ -130,6 +153,12 @@ node: - name: metrics port: 8888 protocol: TCP + - name: metrics-indexer + port: 2112 + protocol: TCP + - name: http-indexer + port: 8080 + protocol: TCP serviceMonitor: enabled: false resources: {} @@ -140,10 +169,23 @@ node: size: 30Gi storageClass: annotations: {} - + indexer: + enabled: false + image: + repository: gcr.io/jovial-meridian-249123/taraxa-indexer + tag: latest + pullPolicy: Always + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + mountPoint: /data bootnode: enabled: true + image: {} replicaCount: 1 loadBalancer: enabled: false @@ -187,6 +229,7 @@ bootnode: consensusnode: enabled: true + image: {} replicaCount: 1 probes: enabled: true @@ -236,6 +279,61 @@ consensusnode: storageClass: annotations: {} +consensusnodeLight: + enabled: false + image: {} + replicaCount: 1 + probes: + enabled: true + debug: 0 + args: + - "taraxad" + - "--config" + - "/root/.taraxa/conf_taraxa.json" + - "--genesis" + - "/root/.taraxa/genesis_taraxa.json" + - "--wallet" + - "/root/.taraxa/wallet_taraxa.json" + - "--light" + ports: + - name: rest + containerPort: 7777 + - name: ws + containerPort: 8777 + - name: tcp-listen-port + containerPort: 10002 + protocol: TCP + - name: udp-listen-port + containerPort: 10002 + protocol: UDP + service: + ports: + - name: rest + port: 7777 + - name: ws + port: 8777 + - name: tcp-listen-port + port: 10002 + protocol: TCP + - name: udp-listen-port + port: 10002 + protocol: UDP + - 
name: metrics + port: 8888 + protocol: TCP + serviceMonitor: + enabled: false + resources: {} + nodeSelector: {} + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + + + explorer: enabled: false @@ -260,3 +358,9 @@ test: repository: gcr.io/jovial-meridian-249123/python tag: latest pullPolicy: IfNotPresent + +kubemonkey: + enabled: false + mtbf: 2 + killMode: "fixed" + killValue: '1' \ No newline at end of file diff --git a/conanfile.py b/conanfile.py index ca76325458..cf82c04941 100644 --- a/conanfile.py +++ b/conanfile.py @@ -13,11 +13,11 @@ class TaraxaConan(ConanFile): generators = "cmake" def requirements(self): - self.requires("boost/1.80.0") - self.requires("cppcheck/2.7.5") - self.requires("openssl/1.1.1s") + self.requires("boost/1.81.0") + self.requires("cppcheck/2.10") + self.requires("openssl/1.1.1t") self.requires("cryptopp/8.7.0") - self.requires("gtest/1.12.1") + self.requires("gtest/1.13.0") self.requires("lz4/1.9.4") self.requires("rocksdb/6.29.5") self.requires("prometheus-cpp/1.1.0") @@ -67,13 +67,9 @@ def configure(self): # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False - # mpir is z3 dependency and it couldn't be built for arm - if (self.settings.arch == "armv8"): - self.options["cppcheck"].with_z3 = False - def _configure_cmake(self): cmake = CMake(self) - # set find path to clang utils dowloaded by that script + # set find path to clang utils downloaded by that script cmake.configure() return cmake diff --git a/doc/RPC.md b/doc/RPC.md index 5e80b6fa49..5713106b93 100644 --- a/doc/RPC.md +++ b/doc/RPC.md @@ -448,9 +448,6 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i "0x0274cfffea9fa850e54c93a23042f12a87358a82": "0x141e8d17", "0x111f91441efc8c6c0edf6534970cc887e2fabaa8": "0x24048ce3d" }, - "hardforks": { - "fix_genesis_fork_block": "0x102ca0" - } }, "pbft": { "committee_size": "0x3e8", @@ -481,6 
+478,39 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i } ``` +### taraxa_getChainStats + +Returns current chain stats with count of transactions, PBFT blocks and DAG blocks + +#### Parameters + +none + +#### Returns + +`OBJECT` - current chain stats object +* `pbft_period`: `QUANTITY` - current PBFT period +* `dag_blocks_executed`: `QUANTITY` - count of executed(finalized) DAG blocks +* `transactions_executed`: `QUANTITY` - count of executed transactions + +#### Example + +```json +// Request +curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getChainStats","params":[],"id":1}' + +// Result +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "pbft_period": 50, + "dag_blocks_executed": 100, + "transactions_executed": 200 + } +} +``` + ## Test API ### get_sortition_change diff --git a/doc/building.md b/doc/building.md index b6434a9927..584658f761 100644 --- a/doc/building.md +++ b/doc/building.md @@ -40,7 +40,7 @@ will build out of the box without further effort: sudo python3 -m pip install conan==1.59.0 # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -127,7 +127,7 @@ will build out of the box without further effort: rm -f llvm.sh # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository diff --git a/libraries/aleth/libdevcore/CommonData.h b/libraries/aleth/libdevcore/CommonData.h index 4b7177a828..56733d8601 100644 --- a/libraries/aleth/libdevcore/CommonData.h +++ b/libraries/aleth/libdevcore/CommonData.h @@ -73,19 +73,19 @@ static bool isHash(std::string const& _hash) { /// Converts byte array to a string containing the same (binary) data. Unless /// the byte array happens to contain ASCII data, this won't be printable. 
inline std::string asString(bytes const& _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast(_b.data()), reinterpret_cast(_b.data() + _b.size())); } /// Converts byte array ref to a string containing the same (binary) data. /// Unless the byte array happens to contain ASCII data, this won't be /// printable. inline std::string asString(bytesConstRef _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast(_b.data()), reinterpret_cast(_b.data() + _b.size())); } /// Converts a string to a byte array containing the string's (byte) data. inline bytes asBytes(std::string const& _b) { - return bytes((::byte const*)_b.data(), (::byte const*)(_b.data() + _b.size())); + return bytes(reinterpret_cast<::byte const*>(_b.data()), reinterpret_cast<::byte const*>(_b.data() + _b.size())); } /// Converts a string into the big-endian base-16 stream of integers (NOT diff --git a/libraries/aleth/libdevcore/FixedHash.h b/libraries/aleth/libdevcore/FixedHash.h index 645142ef3e..63c60f3f55 100644 --- a/libraries/aleth/libdevcore/FixedHash.h +++ b/libraries/aleth/libdevcore/FixedHash.h @@ -209,7 +209,8 @@ class FixedHash { /// Populate with random data. template void randomize(Engine& _eng) { - for (auto& i : m_data) i = (uint8_t)std::uniform_int_distribution(0, 255)(_eng); + std::generate(m_data.begin(), m_data.end(), + [&]() { return (uint8_t)std::uniform_int_distribution(0, 255)(_eng); }); } /// @returns a random valued object. @@ -404,8 +405,8 @@ class SecureFixedHash : private FixedHash { /// Fast equality operator for h256. 
template <> inline bool FixedHash<32>::operator==(FixedHash<32> const& _other) const { - const uint64_t* hash1 = (const uint64_t*)data(); - const uint64_t* hash2 = (const uint64_t*)_other.data(); + const uint64_t* hash1 = reinterpret_cast(data()); + const uint64_t* hash2 = reinterpret_cast(_other.data()); return (hash1[0] == hash2[0]) && (hash1[1] == hash2[1]) && (hash1[2] == hash2[2]) && (hash1[3] == hash2[3]); } diff --git a/libraries/aleth/libdevcore/RLP.cpp b/libraries/aleth/libdevcore/RLP.cpp index e85aff99a2..a8a34966cb 100644 --- a/libraries/aleth/libdevcore/RLP.cpp +++ b/libraries/aleth/libdevcore/RLP.cpp @@ -115,27 +115,27 @@ size_t RLP::length() const { if (m_data.size() <= size_t(n - c_rlpDataIndLenZero)) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - unsigned lengthSize = n - c_rlpDataIndLenZero; - if (lengthSize > sizeof(ret)) + const unsigned length_size = n - c_rlpDataIndLenZero; + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. BOOST_THROW_EXCEPTION(UndersizeRLP()); // No leading zeroes. if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; // Must be greater than the limit. 
if (ret < c_rlpListStart - c_rlpDataImmLenStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } else if (n <= c_rlpListIndLenZero) return n - c_rlpListStart; else { - unsigned lengthSize = n - c_rlpListIndLenZero; - if (m_data.size() <= lengthSize) BOOST_THROW_EXCEPTION(BadRLP()); + const unsigned length_size = n - c_rlpListIndLenZero; + if (m_data.size() <= length_size) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - if (lengthSize > sizeof(ret)) + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. BOOST_THROW_EXCEPTION(UndersizeRLP()); if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; if (ret < 0x100 - c_rlpListStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } // We have to be able to add payloadOffset to length without overflow. diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index 98fef745c2..15f897f344 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -84,7 +84,7 @@ class RLP { /// Construct a node to read RLP data in the string. explicit RLP(std::string const& _s, Strictness _st = VeryStrict) - : RLP(bytesConstRef((::byte const*)_s.data(), _s.size()), _st) {} + : RLP(bytesConstRef(reinterpret_cast<::byte const*>(_s.data()), _s.size()), _st) {} /// The bare data of the RLP. 
bytesConstRef data() const { return m_data; } @@ -251,7 +251,8 @@ class RLP { std::vector ret; if (isList()) { ret.reserve(itemCount()); - for (auto const i : *this) ret.push_back(i.convert(_flags)); + std::transform((*this).begin(), (*this).end(), std::back_inserter(ret), + [_flags](const auto i) { return i.template convert(_flags); }); } else if (_flags & ThrowOnFail) BOOST_THROW_EXCEPTION(BadCast()); return ret; diff --git a/libraries/aleth/libdevcrypto/Common.h b/libraries/aleth/libdevcrypto/Common.h index a3fe92e29c..83c23b1324 100644 --- a/libraries/aleth/libdevcrypto/Common.h +++ b/libraries/aleth/libdevcrypto/Common.h @@ -34,7 +34,7 @@ struct SignatureStruct { SignatureStruct() = default; SignatureStruct(Signature const& _s) { *(h520*)this = _s; } SignatureStruct(h256 const& _r, h256 const& _s, byte _v) : r(_r), s(_s), v(_v) {} - operator Signature() const { return *(h520 const*)this; } + operator Signature() const { return *reinterpret_cast(this); } /// @returns true if r,s,v values are valid, otherwise false bool isValid() const noexcept; diff --git a/libraries/aleth/libdevcrypto/CryptoPP.cpp b/libraries/aleth/libdevcrypto/CryptoPP.cpp index da5b5ff5f3..42ebf71d89 100644 --- a/libraries/aleth/libdevcrypto/CryptoPP.cpp +++ b/libraries/aleth/libdevcrypto/CryptoPP.cpp @@ -119,7 +119,7 @@ bool Secp256k1PP::decryptECIES(Secret const& _k, bytesConstRef _sharedMacData, b return false; Secret z; - if (!ecdh::agree(_k, *(Public*)(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. + if (!ecdh::agree(_k, *reinterpret_cast(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. 
auto key = ecies::kdf(z, bytes(), 64); bytesConstRef eKey = bytesConstRef(&key).cropped(0, 16); bytesRef mKeyMaterial = bytesRef(&key).cropped(16, 16); diff --git a/libraries/aleth/libp2p/UPnP.cpp b/libraries/aleth/libp2p/UPnP.cpp index 0451c91970..bcda5f2c0d 100644 --- a/libraries/aleth/libp2p/UPnP.cpp +++ b/libraries/aleth/libp2p/UPnP.cpp @@ -56,7 +56,7 @@ UPnP::UPnP() #elif MINIUPNPC_API_VERSION >= 9 descXML = (char*)miniwget(dev->descURL, &descXMLsize, 0); #else - descXML = (char*)miniwget(dev->descURL, &descXMLsize); + descXML = static_cast(miniwget(dev->descURL, &descXMLsize)); #endif if (descXML) { parserootdesc(descXML, descXMLsize, m_data.get()); diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 781aa879e9..f86fc92377 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -13,8 +13,8 @@ "delegation_locking_period": "0x5", "eligibility_balance_threshold": "0xd3c21bcecceda1000000", "vote_eligibility_balance_step": "0x152d02c7e14af6800000", - "validator_maximum_stake": "0x84595161401484A000000", - "minimum_deposit": "0x0", + "validator_maximum_stake": "0x84595161401484a000000", + "minimum_deposit": "0x3635c9adc5dea00000", "max_block_author_reward": "0x5", "dag_proposers_reward": "0x32", "commission_change_delta": "0x0", @@ -28,7 +28,7 @@ "endpoint": "", "description": "Taraxa devnet validator 1", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "05fe580fd2d461ee5f762a33bbe669403bb04a851f2e9ed8d2579a9c9b77c3ec" }, @@ -39,7 +39,7 @@ "endpoint": "", "description": "Taraxa devnet validator 2", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": 
"0xd3c21bcecceda1000000" }, "vrf_key": "70d34c86787e5f7bd0f266cad291cb521e23176fa37c6efc034858a1620ac69e" }, @@ -50,7 +50,7 @@ "endpoint": "", "description": "Taraxa devnet validator 3", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "f8d5c00ce9fa3058341e051b36a1e6ccf69df81fb865568b2bf1507d085691e2" }, @@ -61,7 +61,7 @@ "endpoint": "", "description": "Taraxa devnet validator 4", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "aa12507d00c992b95e65d80b21fd2db5b48c4f7ff4393064828d1adc930710b4" }, @@ -72,7 +72,7 @@ "endpoint": "", "description": "Taraxa devnet validator 5", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "bd34898ae0080187c408b5724f05682855c4425fda61d332f5f9d746d4eb753a" }, @@ -83,7 +83,7 @@ "endpoint": "", "description": "Taraxa devnet validator 6", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "25d35fed93989c40b4e8685d9d7ee02213230221ea9efcbe8cfccfc788670dba" }, @@ -94,7 +94,7 @@ "endpoint": "", "description": "Taraxa devnet validator 7", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "55c0bd1af84fb793a5dd7b960e330248d8a0acde566922b3e210f43592700dad" }, @@ -105,7 +105,7 @@ "endpoint": "", "description": "Taraxa devnet validator 8", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": 
"33131367e7279ee51c0f26c6f9b6627848f822d134abef21a88be467dfbaae7b" }, @@ -116,7 +116,7 @@ "endpoint": "", "description": "Taraxa devnet validator 9", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "da63de37c69a59cb3ebbcfb79ef8d561b18b448b544a14438c62cd56bc0a29f5" }, @@ -127,7 +127,7 @@ "endpoint": "", "description": "Taraxa devnet validator 10", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "337178752602a5ca38928bf0d8d434ec653505c92b280b0edab6c39d5e79f4fd" }, @@ -138,7 +138,7 @@ "endpoint": "", "description": "Taraxa devnet validator 11", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "ac08e4ca5f1bcdd61dbefa7551ab839bdd4545e59ee8a4ab5d3aabb71104ab73" }, @@ -149,7 +149,7 @@ "endpoint": "", "description": "Taraxa devnet validator 12", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "189b05cca0a816a36f977f0541ef7585218b2087f04b23444ab58d0c755adecc" }, @@ -160,7 +160,7 @@ "endpoint": "", "description": "Taraxa devnet validator 13", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "8e95172f90b68ee753132bf6342ee00b398e2417312f610d58c34729ab0608ee" }, @@ -171,7 +171,7 @@ "endpoint": "", "description": "Taraxa devnet validator 14", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": 
"959551740ff948d9714f15a2bfb2183c4ead897dd79775a0a18488aa8936e2ba" }, @@ -182,7 +182,7 @@ "endpoint": "", "description": "Taraxa devnet validator 15", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "56b7831cb3e35c1d6d1e3f661de2068d6feeaa54074b3e02709a87d7f0d6c72a" }, @@ -193,7 +193,7 @@ "endpoint": "", "description": "Taraxa devnet validator 16", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "e774c519814cbc04008aa958932e7adb82ebbbd6ca69089c0a1458ea34fb4299" }, @@ -204,7 +204,7 @@ "endpoint": "", "description": "Taraxa devnet validator 17", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "1b15b7bef6a1dbe9aeb2792f2e38d6222d31f8c6c15cff1152f258013d70d933" }, @@ -215,7 +215,7 @@ "endpoint": "", "description": "Taraxa devnet validator 18", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "37bf145ac98e7de7db6e5b933e72737fbf190fd4fb1d193b15cf8b00db30ba30" } diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 65f9b02e75..7c19bcbafa 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -214,11 +214,6 @@ Config::Config(int argc, const char* argv[]) { auto default_genesis_json = tools::getGenesis((Config::ChainIdType)chain_id); // override hardforks data with one from default json addNewHardforks(genesis_json, default_genesis_json); - // add vote_eligibility_balance_step field if it is missing in the config - if (genesis_json["dpos"]["vote_eligibility_balance_step"].isNull()) { - 
genesis_json["dpos"]["vote_eligibility_balance_step"] = - default_genesis_json["dpos"]["vote_eligibility_balance_step"]; - } write_config_and_wallet_files(); } // Override config values with values from CLI diff --git a/libraries/common/include/common/encoding_rlp.hpp b/libraries/common/include/common/encoding_rlp.hpp index e973d9decd..41a4c2c917 100644 --- a/libraries/common/include/common/encoding_rlp.hpp +++ b/libraries/common/include/common/encoding_rlp.hpp @@ -9,7 +9,7 @@ #include "common/range_view.hpp" #include "common/util.hpp" -namespace taraxa::util::encoding_rlp { +namespace taraxa::util { using dev::RLP; using RLPEncoderRef = dev::RLPStream&; @@ -153,13 +153,12 @@ void __dec_rlp_tuple_body__(RLP::iterator& i, RLP::iterator const& end, RLP::Str } } -struct InvalidEncodingSize : std::invalid_argument { - uint expected, actual; +struct InvalidEncodingSize : dev::RLPException { + dev::bigint expected, actual; - InvalidEncodingSize(uint expected, uint actual) - : invalid_argument(fmt("Invalid rlp list size; expected: %s, actual: %s", expected, actual)), - expected(expected), - actual(actual) {} + InvalidEncodingSize(uint e, uint a) : expected(e), actual(a) { + RLPException() << dev::errinfo_comment("Invalid rlp list size") << dev::RequirementError(expected, actual); + } }; template @@ -196,34 +195,16 @@ bytes rlp_enc(T const& obj) { return std::move(s.invalidate()); } -} // namespace taraxa::util::encoding_rlp - -#define HAS_RLP_FIELDS \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding); \ - void rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const; +} // namespace taraxa::util -#define RLP_FIELDS_DEFINE(_class_, ...) 
\ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define HAS_RLP_FIELDS \ + void rlp(::taraxa::util::RLPDecoderRef encoding); \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const; -#define RLP_FIELDS_DEFINE_INPLACE(...) \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define RLP_FIELDS_DEFINE(_class_, ...) \ + void _class_::rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void _class_::rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } -namespace taraxa::util { -using encoding_rlp::InvalidEncodingSize; -using encoding_rlp::rlp; -using encoding_rlp::rlp_dec; -using encoding_rlp::rlp_enc; -using encoding_rlp::rlp_tuple; -using encoding_rlp::RLPDecoderRef; -using encoding_rlp::RLPEncoderRef; -} // namespace taraxa::util +#define RLP_FIELDS_DEFINE_INPLACE(...) 
\ + void rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } diff --git a/libraries/common/src/vrf_wrapper.cpp b/libraries/common/src/vrf_wrapper.cpp index 66606b7ad1..fcf01f4103 100644 --- a/libraries/common/src/vrf_wrapper.cpp +++ b/libraries/common/src/vrf_wrapper.cpp @@ -5,23 +5,25 @@ namespace taraxa::vrf_wrapper { std::pair getVrfKeyPair() { vrf_sk_t sk; vrf_pk_t pk; - crypto_vrf_keypair((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_keypair(pk.data(), sk.data()); return {pk, sk}; } vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk) { vrf_pk_t pk; - crypto_vrf_sk_to_pk((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_sk_to_pk(pk.data(), const_cast(sk.data())); return pk; } -bool isValidVrfPublicKey(vrf_pk_t const &pk) { return crypto_vrf_is_valid_key((unsigned char *)pk.data()) == 1; } +bool isValidVrfPublicKey(vrf_pk_t const &pk) { + return crypto_vrf_is_valid_key(const_cast(pk.data())) == 1; +} std::optional getVrfProof(vrf_sk_t const &sk, bytes const &msg) { vrf_proof_t proof; // crypto_vrf_prove return 0 on success! - if (!crypto_vrf_prove((unsigned char *)proof.data(), (const unsigned char *)sk.data(), - (const unsigned char *)msg.data(), msg.size())) { + if (!crypto_vrf_prove(proof.data(), const_cast(sk.data()), const_cast(msg.data()), + msg.size())) { return proof; } return {}; @@ -30,8 +32,9 @@ std::optional getVrfProof(vrf_sk_t const &sk, bytes const &msg) { std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg) { vrf_output_t output; // crypto_vrf_verify return 0 on success! 
- if (!crypto_vrf_verify((unsigned char *)output.data(), (const unsigned char *)pk.data(), - (const unsigned char *)proof.data(), (const unsigned char *)msg.data(), msg.size())) { + if (!crypto_vrf_verify(output.data(), const_cast(pk.data()), + const_cast(proof.data()), const_cast(msg.data()), + msg.size())) { return output; } return {}; diff --git a/libraries/config/CMakeLists.txt b/libraries/config/CMakeLists.txt index 5df88e6af7..75616c110c 100644 --- a/libraries/config/CMakeLists.txt +++ b/libraries/config/CMakeLists.txt @@ -7,7 +7,7 @@ set(HEADERS include/config/dag_config.hpp include/config/pbft_config.hpp include/config/state_config.hpp - include/config/hardfork.hpp + # include/config/hardfork.hpp ) set(SOURCES @@ -18,7 +18,7 @@ set(SOURCES src/dag_config.cpp src/pbft_config.cpp src/state_config.cpp - src/hardfork.cpp + # src/hardfork.cpp ) # Configure file with version diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp index 6cd844094f..1cbda1b401 100644 --- a/libraries/config/include/config/state_config.hpp +++ b/libraries/config/include/config/state_config.hpp @@ -6,7 +6,7 @@ #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "common/vrf_wrapper.hpp" -#include "config/hardfork.hpp" +// #include "config/hardfork.hpp" namespace taraxa::state_api { diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index f4325fd9dc..3f4a83a73b 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -137,7 +137,7 @@ void dec_json(const Json::Value &json, NetworkConfig &network) { getConfigDataAsUInt(json, {"deep_syncing_threshold"}, true, network.deep_syncing_threshold); network.ddos_protection = dec_ddos_protection_config_json(getConfigData(json, {"ddos_protection"})); - for (auto &item : json["boot_nodes"]) { + for (const auto &item : json["boot_nodes"]) { network.boot_nodes.push_back(dec_json(item)); } auto listen_ip = 
boost::asio::ip::address::from_string(network.listen_ip); diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 0d6f111446..30e97a429d 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -87,17 +87,6 @@ class FinalChain { * @return BlockHash h256 */ virtual std::optional block_hash(std::optional n = {}) const = 0; - struct TransactionHashes { - TransactionHashes() = default; - virtual ~TransactionHashes() = default; - TransactionHashes(const TransactionHashes&) = default; - TransactionHashes(TransactionHashes&&) = default; - TransactionHashes& operator=(const TransactionHashes&) = default; - TransactionHashes& operator=(TransactionHashes&&) = default; - - virtual size_t count() const = 0; - virtual h256 get(size_t i) const = 0; - }; /** * @brief Needed if we are changing params with hardfork and it affects Go part of code. For example DPOS contract diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 5597de9bdf..0661f3a0cb 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -205,7 +205,7 @@ class PbftManager : public std::enable_shared_from_this { * @brief Get PBFT lambda. PBFT lambda is a timer clock * @return PBFT lambda */ - std::chrono::milliseconds getPbftInitialLambda() const { return LAMBDA_ms_MIN; } + std::chrono::milliseconds getPbftInitialLambda() const { return kMinLambda; } /** * @brief Calculate DAG blocks ordering hash @@ -253,11 +253,6 @@ class PbftManager : public std::enable_shared_from_this { */ void resume(); - /** - * @brief Resume PBFT daemon on single state. 
Only to be used for unit tests - */ - void resumeSingleState(); - /** * @brief Get a proposed PBFT block based on specified period and block hash * @param period @@ -336,16 +331,6 @@ class PbftManager : public std::enable_shared_from_this { */ void sleep_(); - /** - * @brief Go to next PBFT state. Only to be used for unit tests - */ - void doNextState_(); - - /** - * @brief Set next PBFT state - */ - void setNextState_(); - /** * @brief Set PBFT filter state */ @@ -552,9 +537,8 @@ class PbftManager : public std::enable_shared_from_this { const addr_t node_addr_; const secret_t node_sk_; - const std::chrono::milliseconds LAMBDA_ms_MIN; - std::chrono::milliseconds LAMBDA_ms{0}; - uint64_t LAMBDA_backoff_multiple = 1; + const std::chrono::milliseconds kMinLambda; // [ms] + std::chrono::milliseconds lambda_{0}; // [ms] const std::chrono::milliseconds kMaxLambda{60000}; // in ms, max lambda is 1 minutes const uint32_t kBroadcastVotesLambdaTime = 20; @@ -564,8 +548,6 @@ class PbftManager : public std::enable_shared_from_this { uint32_t broadcast_reward_votes_counter_ = 1; uint32_t rebroadcast_reward_votes_counter_ = 1; - std::default_random_engine random_engine_{std::random_device{}()}; - PbftStates state_ = value_proposal_state; std::atomic round_ = 1; PbftStep step_ = 1; diff --git a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp index 36f64a1291..ea3b24250b 100644 --- a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp @@ -19,6 +19,12 @@ struct VerifiedVotes { // Step votes std::map step_votes; + + // Greatest step, for which there is at least t+1 next votes - it is used for lambda exponential backoff: Usually + // when network gets stalled it is due to lack of 2t+1 voting power and steps keep increasing. 
When new node joins + // the network, it should catch up with the rest of nodes asap so we dont start exponentially backing of its lambda + // if it's current step is far behind network_t_plus_one_step (at least 1 third of network is at this step) + PbftStep network_t_plus_one_step{0}; }; } // namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 8d904c22e0..ab7a1a03ca 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -96,13 +96,16 @@ class VoteManager { std::optional determineNewRound(PbftPeriod current_pbft_period, PbftRound current_pbft_round); /** - * @brief Replace current reward votes info with new period, round & block hash based on vote + * @brief Replace current reward votes with new period, round & block hash based on vote * * @param period * @param round + * @param step * @param block_hash + * @param batch */ - void resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash); + void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, + DbStorage::Batch& batch); /** * @brief Check reward votes for specified pbft block @@ -115,11 +118,11 @@ class VoteManager { bool copy_votes); /** - * @brief Get reward votes from reward_votes_ with the round during which was the previous block pushed + * @brief Get reward votes with the round during which was the previous block pushed * * @return vector of reward votes */ - std::vector> getProposeRewardVotes(); + std::vector> getRewardVotes(); /** * @brief Get current reward votes pbft block period @@ -197,23 +200,19 @@ class VoteManager { * @param period * @param round * @param type - * @param peer_filter if specified, get only votes that are unknown for peer * @return vector of votes if 2t+1 voted 
block votes found, otherwise empty vector */ - std::vector> getTwoTPlusOneVotedBlockVotes( - PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type, - const std::shared_ptr& peer_filter = {}) const; + std::vector> getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, + TwoTPlusOneVotedBlockType type) const; /** * Get all 2t+1 voted block next votes(both for null block as well as specific block) for specific period and round * * @param period * @param round - * @param peer_filter if specified, get only votes that are unknown for peer * @return vector of next votes if 2t+1 voted block votes found, otherwise empty vector */ - std::vector> getAllTwoTPlusOneNextVotes( - PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter = {}) const; + std::vector> getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const; /** * @brief Sets current pbft period & round. It also checks if we dont alredy have 2t+1 vote bundles(pf any type) for @@ -224,6 +223,17 @@ class VoteManager { */ void setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound pbft_round); + /** + * @brief Returns greatest step (in specified period & round), for which there is at least t+1 voting power + * from all nodes + * @note It is used for triggering lambda exponential backoff + * + * @param period + * @param round + * @return greatest network 2t+1 next voting step + */ + PbftStep getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const; + private: /** * @param vote @@ -273,6 +283,7 @@ class VoteManager { blk_hash_t reward_votes_block_hash_; PbftRound reward_votes_period_; PbftRound reward_votes_round_; + std::vector extra_reward_votes_; mutable std::shared_mutex reward_votes_info_mutex_; // Cache for current 2T+1 - > diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index c7d397b5b9..416a8f6906 100644 --- 
a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -316,10 +316,7 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, trx_hashes.push_back(trx->getHash()); } - uint64_t block_estimation = 0; - for (const auto& e : estimations) { - block_estimation += e; - } + const uint64_t block_estimation = std::accumulate(estimations.begin(), estimations.end(), 0); // If number of tips is over the limit filter by producer and level if (frontier.tips.size() > kDagBlockMaxTips || (frontier.tips.size() + 1) > kPbftGasLimit / kDagGasLimit) { diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 9c2c73e0ad..b62b8d7e4b 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -20,9 +20,11 @@ class FinalChainImpl final : public FinalChain { const bool kLightNode = false; const uint64_t kLightNodeHistory = 0; const uint32_t kMaxLevelsPerPeriod; + const uint64_t kLightNodePruneOffset = 0; // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; + boost::asio::thread_pool prune_thread_{1}; std::atomic num_executed_dag_blk_ = 0; std::atomic num_executed_trx_ = 0; @@ -59,6 +61,10 @@ class FinalChainImpl final : public FinalChain { kLightNode(config.is_light_node), kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), + // This will provide a speific random offset based on node address for each node to prevent all light nodes + // performing prune at the same block height + kLightNodePruneOffset((*reinterpret_cast(node_addr.asBytes().data())) % + std::max(config.light_node_history, (uint64_t)1)), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), @@ -121,7 +127,10 @@ class FinalChainImpl final : public FinalChain { delegation_delay_ = config.genesis.state.dpos.delegation_delay; } - void stop() override { executor_thread_.join(); } + void stop() override { + executor_thread_.join(); + prune_thread_.join(); + } std::future> finalize( PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, @@ -181,9 +190,9 @@ class FinalChainImpl final : public FinalChain { for (auto const& r : exec_results) { LogEntries logs; logs.reserve(r.logs.size()); - for (auto const& l : r.logs) { - logs.emplace_back(LogEntry{l.address, l.topics, l.data}); - } + std::transform(r.logs.cbegin(), r.logs.cend(), std::back_inserter(logs), [](const auto& l) { + return LogEntry{l.address, l.topics, l.data}; + }); receipts.emplace_back(TransactionReceipt{ r.code_err.empty() && r.consensus_err.empty(), r.gas_used, @@ -239,7 +248,7 @@ class FinalChainImpl final : public FinalChain { if (kLightNode) { // Actual history size will be between 100% and 105% of light_node_history_ to avoid deleting on every period - if (((blk_header->number % 
(std::max(kLightNodeHistory / 20, (uint64_t)1)) == 0)) && + if ((((blk_header->number + kLightNodePruneOffset) % (std::max(kLightNodeHistory / 20, (uint64_t)1)) == 0)) && blk_header->number > kLightNodeHistory) { prune(blk_header->number - kLightNodeHistory); } @@ -248,9 +257,9 @@ class FinalChainImpl final : public FinalChain { } void prune(EthBlockNumber blk_n) override { - std::vector state_root_to_prune; const auto last_block_to_keep = get_block_header(blk_n); if (last_block_to_keep) { + std::vector state_root_to_prune; LOG(log_nf_) << "Pruning data older than " << blk_n; auto block_to_prune = get_block_header(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { @@ -261,10 +270,13 @@ class FinalChainImpl final : public FinalChain { block_to_prune = get_block_header(block_to_prune->number - 1); } - state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); db_->compactColumn(DB::Columns::final_chain_blk_by_number); db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); + + boost::asio::post(prune_thread_, [this, last_block_to_keep, state_root_to_prune]() { + state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); + }); } } @@ -309,16 +321,8 @@ class FinalChainImpl final : public FinalChain { chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); } - TransactionLocation tl{blk_header.number}; - for (auto const& trx : transactions) { - db_->insert(batch, DB::Columns::final_chain_transaction_location_by_hash, trx->getHash(), - util::rlp_enc(rlp_strm, tl)); - ++tl.index; - } db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, blk_header.number, - TransactionHashesImpl::serialize_from_transactions(transactions)); - 
db_->insert(batch, DB::Columns::final_chain_transaction_count_by_blk_number, blk_header.number, - transactions.size()); + dev::rlp(hashes_from_transactions(transactions))); db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); db_->insert(batch, DB::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); @@ -343,14 +347,12 @@ class FinalChainImpl final : public FinalChain { return block_headers_cache_.get(*n); } - std::optional transaction_location(h256 const& trx_hash) const override { - auto raw = db_->lookup(trx_hash, DB::Columns::final_chain_transaction_location_by_hash); - if (raw.empty()) { + std::optional transaction_location(const h256& trx_hash) const override { + const auto period = db_->getTransactionPeriod(trx_hash); + if (!period) { return {}; } - TransactionLocation ret; - ret.rlp(dev::RLP(raw)); - return ret; + return TransactionLocation{period->first, period->second}; } std::optional transaction_receipt(h256 const& trx_h) const override { @@ -364,8 +366,7 @@ class FinalChainImpl final : public FinalChain { } uint64_t transactionCount(std::optional n = {}) const override { - return db_->lookup_int(last_if_absent(n), DB::Columns::final_chain_transaction_count_by_blk_number) - .value_or(0); + return db_->getTransactionCount(last_if_absent(n)); } std::shared_ptr transaction_hashes(std::optional n = {}) const override { @@ -449,17 +450,18 @@ class FinalChainImpl final : public FinalChain { } private: - std::shared_ptr get_transaction_hashes(std::optional n = {}) const { - return make_shared( - db_->lookup(last_if_absent(n), DB::Columns::final_chain_transaction_hashes_by_blk_number)); + std::shared_ptr get_transaction_hashes(std::optional n = {}) const { + auto res = db_->lookup(last_if_absent(n), DB::Columns::final_chain_transaction_hashes_by_blk_number); + + return 
std::make_shared(util::rlp_dec(dev::RLP(res))); } const SharedTransactions get_transactions(std::optional n = {}) const { SharedTransactions ret; auto hashes = transaction_hashes(n); - ret.reserve(hashes->count()); + ret.reserve(hashes->size()); for (size_t i = 0; i < ret.capacity(); ++i) { - auto trx = db_->getTransaction(hashes->get(i)); + auto trx = db_->getTransaction(hashes->at(i)); assert(trx); ret.emplace_back(trx); } @@ -525,31 +527,6 @@ class FinalChainImpl final : public FinalChain { } return ret; } - - struct TransactionHashesImpl : TransactionHashes { - string serialized_; - size_t count_; - - explicit TransactionHashesImpl(string serialized) - : serialized_(std::move(serialized)), count_(serialized_.size() / h256::size) {} - - static bytes serialize_from_transactions(SharedTransactions const& transactions) { - bytes serialized; - serialized.reserve(transactions.size() * h256::size); - for (auto const& trx : transactions) { - for (auto b : trx->getHash()) { - serialized.push_back(b); - } - } - return serialized; - } - - h256 get(size_t i) const override { - return h256((uint8_t*)(serialized_.data() + i * h256::size), h256::ConstructFromPointer); - } - - size_t count() const override { return count_; } - }; }; std::shared_ptr NewFinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index cbf46340e7..62fe456160 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -21,7 +21,7 @@ void from_rlp(taraxa_evm_Bytes b, Result& result) { util::rlp(dev::RLP(map_bytes(b), 0), result); } -void to_str(taraxa_evm_Bytes b, string& result) { result = {(char*)b.Data, b.Len}; } +void to_str(taraxa_evm_Bytes b, string& result) { result = {reinterpret_cast(b.Data), b.Len}; } void to_bytes(taraxa_evm_Bytes b, bytes& result) { 
result.assign(b.Data, b.Data + b.Len); } @@ -31,7 +31,7 @@ template taraxa_evm_BytesCallback decoder_cb_c(Result& res) { return { &res, - [](auto receiver, auto b) { decode(b, *(Result*)receiver); }, + [](auto receiver, auto b) { decode(b, *static_cast(receiver)); }, }; } diff --git a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp index fe1464e31c..220707fc2f 100644 --- a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp +++ b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp @@ -19,7 +19,7 @@ std::shared_ptr KeyManager::get(EthBlockNumber blk_n, con std::unique_lock lock(mutex_); return key_map_.insert_or_assign(addr, std::make_shared(std::move(key))).first->second; } - } catch (state_api::ErrFutureBlock& e) { + } catch (state_api::ErrFutureBlock&) { return nullptr; } diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 6cf167aecc..127fcf91b8 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -41,7 +41,7 @@ PbftManager::PbftManager(const PbftConfig &conf, const blk_hash_t &dag_genesis_b final_chain_(std::move(final_chain)), node_addr_(std::move(node_addr)), node_sk_(std::move(node_sk)), - LAMBDA_ms_MIN(conf.lambda_ms), + kMinLambda(conf.lambda_ms), dag_genesis_block_hash_(dag_genesis_block_hash), config_(conf), proposed_blocks_(db_) { @@ -124,65 +124,6 @@ void PbftManager::resume() { daemon_ = std::make_unique([this]() { run(); }); } -// Only to be used for tests... 
-void PbftManager::resumeSingleState() { - if (!stopped_.load()) daemon_->join(); - stopped_ = false; - - if (step_ == 1) { - state_ = value_proposal_state; - } else if (step_ == 2) { - state_ = filter_state; - } else if (step_ == 3) { - state_ = certify_state; - } else if (step_ % 2 == 0) { - state_ = finish_state; - } else { - state_ = finish_polling_state; - } - - doNextState_(); -} - -// Only to be used for tests... -void PbftManager::doNextState_() { - auto initial_state = state_; - - while (!stopped_ && state_ == initial_state) { - if (stateOperations_()) { - continue; - } - - // PBFT states - switch (state_) { - case value_proposal_state: - proposeBlock_(); - break; - case filter_state: - identifyBlock_(); - break; - case certify_state: - certifyBlock_(); - break; - case finish_state: - firstFinish_(); - break; - case finish_polling_state: - secondFinish_(); - break; - default: - LOG(log_er_) << "Unknown PBFT state " << state_; - assert(false); - } - - setNextState_(); - if (state_ != initial_state) { - return; - } - sleep_(); - } -} - /* When a node starts up it has to sync to the current phase (type of block * being generated) and step (within the block generation round) * Five step loop for block generation over three phases of blocks @@ -200,25 +141,41 @@ void PbftManager::run() { switch (state_) { case value_proposal_state: proposeBlock_(); + setFilterState_(); break; case filter_state: identifyBlock_(); + setCertifyState_(); break; case certify_state: certifyBlock_(); + if (go_finish_state_) { + setFinishState_(); + } else { + next_step_time_ms_ += kPollingIntervalMs; + } break; case finish_state: firstFinish_(); + setFinishPollingState_(); break; case finish_polling_state: secondFinish_(); + if (loop_back_finish_state_) { + loopBackFinishState_(); + + // Print voting summary for current round + printVotingSummary(); + } else { + next_step_time_ms_ += kPollingIntervalMs; + } break; default: LOG(log_er_) << "Unknown PBFT state " << state_; 
assert(false); } - setNextState_(); + LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_; sleep_(); } } @@ -288,31 +245,31 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { db_->savePbftMgrField(PbftMgrField::Step, pbft_step); step_ = pbft_step; - if (step_ > kMaxSteps && LAMBDA_backoff_multiple < 8) { - // Note: We calculate the lambda for a step independently of prior steps - // in case missed earlier steps. - std::uniform_int_distribution distribution(0, step_ - kMaxSteps); - auto lambda_random_count = distribution(random_engine_); - LAMBDA_backoff_multiple = 2 * LAMBDA_backoff_multiple; - LAMBDA_ms = LAMBDA_ms_MIN * (LAMBDA_backoff_multiple + lambda_random_count); - if (LAMBDA_ms > kMaxLambda) { - LAMBDA_ms = kMaxLambda; + // Increase lambda only for odd steps (second finish steps) after node reached kMaxSteps steps + if (step_ > kMaxSteps && step_ % 2) { + const auto [round, period] = getPbftRoundAndPeriod(); + const auto network_next_voting_step = vote_mgr_->getNetworkTplusOneNextVotingStep(period, round); + + // Node is still >= kMaxSteps steps behind the rest (at least 1/3) of the network - keep lambda at the standard + // value so node can catch up with the rest of the nodes + if (network_next_voting_step > step_ && network_next_voting_step - step_ >= kMaxSteps) { + lambda_ = kMinLambda; + } else if (lambda_ < kMaxLambda) { + // Node is < kMaxSteps steps behind the rest (at least 1/3) of the network - start exponentially backing off + // lambda until it reaches kMaxLambda + // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps. 
+ lambda_ *= 2; + if (lambda_ > kMaxLambda) { + lambda_ = kMaxLambda; + } } - - LOG(log_dg_) << "Surpassed max steps, exponentially backing off lambda to " << LAMBDA_ms.count() << " ms in round " - << getPbftRound() << ", step " << step_; - } else { - LAMBDA_ms = LAMBDA_ms_MIN; - LAMBDA_backoff_multiple = 1; } } void PbftManager::resetStep() { step_ = 1; startingStepInRound_ = 1; - - LAMBDA_ms = LAMBDA_ms_MIN; - LAMBDA_backoff_multiple = 1; + lambda_ = kMinLambda; } bool PbftManager::tryPushCertVotesBlock() { @@ -460,7 +417,7 @@ void PbftManager::initialState() { // Initial PBFT state // Time constants... - LAMBDA_ms = LAMBDA_ms_MIN; + lambda_ = kMinLambda; const auto current_pbft_period = getPbftPeriod(); const auto current_pbft_round = db_->getPbftMgrField(PbftMgrField::Round); @@ -539,55 +496,23 @@ void PbftManager::initialState() { : "no value"); } -void PbftManager::setNextState_() { - switch (state_) { - case value_proposal_state: - setFilterState_(); - break; - case filter_state: - setCertifyState_(); - break; - case certify_state: - if (go_finish_state_) { - setFinishState_(); - } else { - next_step_time_ms_ += kPollingIntervalMs; - } - break; - case finish_state: - setFinishPollingState_(); - break; - case finish_polling_state: - if (loop_back_finish_state_) { - loopBackFinishState_(); - } else { - next_step_time_ms_ += kPollingIntervalMs; - } - break; - default: - LOG(log_er_) << "Unknown PBFT state " << state_; - assert(false); - } - LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_; -} - void PbftManager::setFilterState_() { state_ = filter_state; setPbftStep(step_ + 1); - next_step_time_ms_ = 2 * LAMBDA_ms; + next_step_time_ms_ = 2 * lambda_; } void PbftManager::setCertifyState_() { state_ = certify_state; setPbftStep(step_ + 1); - next_step_time_ms_ = 2 * LAMBDA_ms; + next_step_time_ms_ = 2 * lambda_; } void PbftManager::setFinishState_() { LOG(log_dg_) << "Will go to first finish State"; state_ = finish_state; 
setPbftStep(step_ + 1); - next_step_time_ms_ = 4 * LAMBDA_ms; + next_step_time_ms_ = 4 * lambda_; } void PbftManager::setFinishPollingState_() { @@ -600,6 +525,7 @@ void PbftManager::setFinishPollingState_() { already_next_voted_value_ = false; already_next_voted_null_block_hash_ = false; second_finish_step_start_datetime_ = std::chrono::system_clock::now(); + next_step_time_ms_ += kPollingIntervalMs; } void PbftManager::loopBackFinishState_() { @@ -613,9 +539,6 @@ void PbftManager::loopBackFinishState_() { already_next_voted_null_block_hash_ = false; assert(step_ >= startingStepInRound_); next_step_time_ms_ += kPollingIntervalMs; - - // Print voting summary for current round - printVotingSummary(); } void PbftManager::broadcastSoftAndNextVotes(bool rebroadcast) { @@ -634,6 +557,14 @@ void PbftManager::broadcastSoftAndNextVotes(bool rebroadcast) { rebroadcast); } + // Broadcast reward votes - previous round 2t+1 cert votes + auto reward_votes = vote_mgr_->getRewardVotes(); + if (!reward_votes.empty()) { + LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round; + net->getSpecificHandler()->onNewPbftVotesBundle(std::move(reward_votes), + rebroadcast); + } + // Broadcast previous round 2t+1 next votes if (round > 1) { if (auto next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(period, round - 1); !next_votes.empty()) { @@ -653,7 +584,7 @@ void PbftManager::broadcastRewardVotes(bool rebroadcast) { auto [round, period] = getPbftRoundAndPeriod(); // Broadcast reward votes - previous round 2t+1 cert votes - auto reward_votes = vote_mgr_->getProposeRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); if (!reward_votes.empty()) { LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round; net->getSpecificHandler()->onNewPbftVotesBundle(std::move(reward_votes), @@ -688,23 +619,23 @@ bool PbftManager::stateOperations_() { const auto round_elapsed_time = 
elapsedTimeInMs(current_round_start_datetime_); const auto period_elapsed_time = elapsedTimeInMs(current_period_start_datetime_); - if (round_elapsed_time / LAMBDA_ms_MIN > kRebroadcastVotesLambdaTime * rebroadcast_soft_next_votes_counter_) { + if (round_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_soft_next_votes_counter_) { broadcastSoftAndNextVotes(true); rebroadcast_soft_next_votes_counter_++; // If there was a rebroadcast no need to do next broadcast either broadcast_soft_next_votes_counter_++; - } else if (round_elapsed_time / LAMBDA_ms_MIN > kBroadcastVotesLambdaTime * broadcast_soft_next_votes_counter_) { + } else if (round_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_soft_next_votes_counter_) { broadcastSoftAndNextVotes(false); broadcast_soft_next_votes_counter_++; } // Reward votes need to be broadcast even if we are advancing rounds but unable to advance a period - if (period_elapsed_time / LAMBDA_ms_MIN > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) { + if (period_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) { broadcastRewardVotes(true); rebroadcast_reward_votes_counter_++; // If there was a rebroadcast no need to do next broadcast either broadcast_reward_votes_counter_++; - } else if (period_elapsed_time / LAMBDA_ms_MIN > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) { + } else if (period_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) { broadcastRewardVotes(false); broadcast_reward_votes_counter_++; } @@ -929,14 +860,14 @@ void PbftManager::certifyBlock_() { LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; const auto elapsed_time_in_round = elapsedTimeInMs(current_round_start_datetime_); - go_finish_state_ = elapsed_time_in_round > 4 * LAMBDA_ms - kPollingIntervalMs; + go_finish_state_ = elapsed_time_in_round > 4 * lambda_ - kPollingIntervalMs; if 
(go_finish_state_) { LOG(log_dg_) << "Step 3 expired, will go to step 4 in period " << period << ", round " << round; return; } // Should not happen, add log here for safety checking - if (elapsed_time_in_round < 2 * LAMBDA_ms) { + if (elapsed_time_in_round < 2 * lambda_) { LOG(log_er_) << "PBFT Reached step 3 too quickly after only " << elapsed_time_in_round.count() << " [ms] in period " << period << ", round " << round; return; @@ -1106,7 +1037,7 @@ void PbftManager::secondFinish_() { // Try to next vote 2t+1 next voted null block from previous round next_vote_null_block(); - loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (LAMBDA_ms - kPollingIntervalMs); + loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (lambda_ - kPollingIntervalMs); } std::optional, std::vector>>> PbftManager::generatePbftBlock( @@ -1114,7 +1045,7 @@ std::optional, std::vectorgetProposeRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); if (propose_period > 1) [[likely]] { assert(!reward_votes.empty()); if (reward_votes[0]->getPeriod() != propose_period - 1) { @@ -1505,7 +1436,6 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // While iterating over transactions, account_nonce will keep the last nonce for the account std::unordered_map account_nonce; - std::unordered_map>> account_nonce_transactions; // Find accounts that need reordering and place in account_reverse_order set for (uint32_t i = 0; i < transactions.size(); i++) { @@ -1526,6 +1456,7 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // If account_reverse_order size is 0, there is no need to reorder transactions if (account_reverse_order.size() > 0) { + std::unordered_map>> account_nonce_transactions; // Keep the order for all transactions that do not need reordering for (uint32_t i = 0; i < transactions.size(); i++) { const auto &t = transactions[i]; @@ -1601,8 +1532,8 @@ bool 
PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectorsavePeriodData(period_data, batch); // Replace current reward votes - vote_mgr_->resetRewardVotesInfo(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getBlockHash()); - db_->replaceRewardVotes(cert_votes, batch); + vote_mgr_->resetRewardVotes(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getStep(), + cert_votes[0]->getBlockHash(), batch); // pass pbft with dag blocks and transactions to adjust difficulty if (period_data.pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { @@ -1851,10 +1782,9 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } bool PbftManager::checkBlockWeight(const std::vector &dag_blocks) const { - u256 total_weight = 0; - for (const auto &dag_block : dag_blocks) { - total_weight += dag_block.getGasEstimation(); - } + const u256 total_weight = + std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), + [](u256 value, const auto &dag_block) { return value + dag_block.getGasEstimation(); }); if (total_weight > config_.gas_limit) { return false; } diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 74b28f5058..2cc98476a3 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -32,12 +32,20 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, auto db_votes = db_->getAllTwoTPlusOneVotes(); auto loadVotesFromDb = [this](const std::vector>& votes) { + bool reward_votes_info_set = false; for (const auto& vote : votes) { // Check if votes are unique per round, step & voter if (!isUniqueVote(vote).first) { continue; } + if (!reward_votes_info_set && vote->getType() == PbftVoteTypes::cert_vote) { + 
reward_votes_info_set = true; + reward_votes_block_hash_ = vote->getBlockHash(); + reward_votes_period_ = vote->getPeriod(); + reward_votes_round_ = vote->getRound(); + } + addVerifiedVote(vote); LOG(log_dg_) << "Vote " << vote->getHash() << " loaded from db to memory"; } @@ -45,11 +53,9 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, loadVotesFromDb(db_->getAllTwoTPlusOneVotes()); loadVotesFromDb(db_->getOwnVerifiedVotes()); - - if (const auto reward_votes = db_->getRewardVotes(); !reward_votes.empty()) { - loadVotesFromDb(reward_votes); - resetRewardVotesInfo(reward_votes[0]->getPeriod(), reward_votes[0]->getRound(), reward_votes[0]->getBlockHash()); - } + auto reward_votes = db_->getRewardVotes(); + for (const auto& vote : reward_votes) extra_reward_votes_.emplace_back(vote->getHash()); + loadVotesFromDb(reward_votes); } void VoteManager::setNetwork(std::weak_ptr network) { network_ = std::move(network); } @@ -81,9 +87,9 @@ uint64_t VoteManager::getVerifiedVotesSize() const { for (auto const& period : verified_votes_) { for (auto const& round : period.second) { for (auto const& step : round.second.step_votes) { - for (auto const& voted_value : step.second.votes) { - size += voted_value.second.second.size(); - } + size += std::accumulate( + step.second.votes.begin(), step.second.votes.end(), 0, + [](uint64_t value, const auto& voted_value) { return value + voted_value.second.second.size(); }); } } } @@ -142,6 +148,22 @@ void VoteManager::setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound } } +PbftStep VoteManager::getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const { + std::shared_lock lock(verified_votes_access_); + + const auto found_period_it = verified_votes_.find(period); + if (found_period_it == verified_votes_.end()) { + return 0; + } + + const auto found_round_it = found_period_it->second.find(round); + if (found_round_it == found_period_it->second.end()) { + return 0; + } + + return 
found_round_it->second.network_t_plus_one_step; +} + bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { assert(vote->getWeight().has_value()); const auto hash = vote->getHash(); @@ -211,16 +233,30 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { LOG(log_nf_) << "Added verified vote: " << hash; LOG(log_dg_) << "Added verified vote: " << *vote; - // Save in db only those reward votes that have the same round as round during which we pushed the block into chain - if (is_valid_potential_reward_vote && reward_votes_round_ == vote->getRound()) { - db_->saveRewardVote(vote); + if (is_valid_potential_reward_vote) { + extra_reward_votes_.emplace_back(vote->getHash()); + db_->saveExtraRewardVote(vote); } const auto total_weight = (found_voted_value_it->second.first += weight); - // Not enough votes - do not set 2t+1 voted block for period,round and step + // Unable to get 2t+1 const auto two_t_plus_one = getPbftTwoTPlusOne(vote->getPeriod() - 1, vote->getType()); - if (total_weight < two_t_plus_one) { + if (!two_t_plus_one.has_value()) [[unlikely]] { + LOG(log_er_) << "Cannot set(or not) 2t+1 voted block as 2t+1 threshold is unavailable, vote " << vote->getHash(); + return true; + } + + // Calculate t+1 + const auto t_plus_one = ((*two_t_plus_one - 1) / 2) + 1; + // Set network_t_plus_one_step - used for triggering exponential backoff + if (vote->getType() == PbftVoteTypes::next_vote && total_weight >= t_plus_one && + vote->getStep() > found_round_it->second.network_t_plus_one_step) { + found_round_it->second.network_t_plus_one_step = vote->getStep(); + } + + // Not enough votes - do not set 2t+1 voted block for period,round and step + if (total_weight < *two_t_plus_one) { return true; } @@ -249,7 +285,9 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { {two_plus_one_voted_block_type, std::make_pair(vote->getBlockHash(), vote->getStep())}); // Save only current pbft period & round 2t+1 votes bundles into db - if 
(vote->getPeriod() == current_pbft_period_ && vote->getRound() == current_pbft_round_) { + // Cert votes are saved once the pbft block is pushed in the chain + if (vote->getType() != PbftVoteTypes::cert_vote && vote->getPeriod() == current_pbft_period_ && + vote->getRound() == current_pbft_round_) { std::vector> votes; votes.reserve(found_voted_value_it->second.second.size()); for (const auto& tmp_vote : found_voted_value_it->second.second) { @@ -511,22 +549,70 @@ PbftPeriod VoteManager::getRewardVotesPbftBlockPeriod() { return reward_votes_period_; } -void VoteManager::resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash) { +void VoteManager::resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, + DbStorage::Batch& batch) { + // Save 2t+1 cert votes to database, remove old reward votes { std::scoped_lock lock(reward_votes_info_mutex_); - reward_votes_block_hash_ = block_hash; reward_votes_period_ = period; reward_votes_round_ = round; } + std::scoped_lock lock(verified_votes_access_); + auto found_period_it = verified_votes_.find(period); + if (found_period_it == verified_votes_.end()) { + LOG(log_er_) << "resetRewardVotes missing period"; + assert(false); + return; + } + auto found_round_it = found_period_it->second.find(round); + if (found_round_it == found_period_it->second.end()) { + LOG(log_er_) << "resetRewardVotes missing round" << round; + assert(false); + return; + } + auto found_step_it = found_round_it->second.step_votes.find(step); + if (found_step_it == found_round_it->second.step_votes.end()) { + LOG(log_er_) << "resetRewardVotes missing step" << step; + assert(false); + return; + } + auto found_two_t_plus_one_voted_block = + found_round_it->second.two_t_plus_one_voted_blocks_.find(TwoTPlusOneVotedBlockType::CertVotedBlock); + if (found_two_t_plus_one_voted_block == found_round_it->second.two_t_plus_one_voted_blocks_.end()) { + LOG(log_er_) << "resetRewardVotes missing cert 
voted block"; + assert(false); + return; + } + if (found_two_t_plus_one_voted_block->second.first != block_hash) { + LOG(log_er_) << "resetRewardVotes incorrect block " << found_two_t_plus_one_voted_block->second.first + << " expected " << block_hash; + assert(false); + return; + } + auto found_voted_value_it = found_step_it->second.votes.find(block_hash); + if (found_voted_value_it == found_step_it->second.votes.end()) { + LOG(log_er_) << "resetRewardVotes missing vote block " << block_hash; + assert(false); + return; + } + std::vector> votes; + votes.reserve(found_voted_value_it->second.second.size()); + for (const auto& tmp_vote : found_voted_value_it->second.second) { + votes.push_back(tmp_vote.second); + } + + db_->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes, batch); + db_->removeExtraRewardVotes(extra_reward_votes_, batch); + extra_reward_votes_.clear(); + LOG(log_dg_) << "Reward votes info reset to: block_hash: " << block_hash << ", period: " << period << ", round: " << round; } bool VoteManager::isValidRewardVote(const std::shared_ptr& vote) const { std::shared_lock lock(reward_votes_info_mutex_); - if (vote->getType() != PbftVoteTypes::cert_vote) { LOG(log_tr_) << "Invalid reward vote: type " << static_cast(vote->getType()) << " is different from cert type"; @@ -607,19 +693,27 @@ std::pair>> VoteManager::checkRewardVote } }; - std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + blk_hash_t reward_votes_block_hash; + PbftRound reward_votes_period; + PbftRound reward_votes_round; + { + std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + reward_votes_block_hash = reward_votes_block_hash_; + reward_votes_period = reward_votes_period_; + reward_votes_round = reward_votes_round_; + } std::shared_lock verified_votes_lock(verified_votes_access_); - const auto found_period_it = verified_votes_.find(reward_votes_period_); + const auto found_period_it = verified_votes_.find(reward_votes_period); if 
(found_period_it == verified_votes_.end()) { - LOG(log_er_) << "No reward votes found for period " << reward_votes_period_; + LOG(log_er_) << "No reward votes found for period " << reward_votes_period; assert(false); return {false, {}}; } - const auto found_round_it = found_period_it->second.find(reward_votes_round_); + const auto found_round_it = found_period_it->second.find(reward_votes_round); if (found_round_it == found_period_it->second.end()) { - LOG(log_er_) << "No reward votes found for round " << reward_votes_round_; + LOG(log_er_) << "No reward votes found for round " << reward_votes_round; assert(false); return {false, {}}; } @@ -627,7 +721,7 @@ std::pair>> VoteManager::checkRewardVote const auto reward_votes_hashes = pbft_block->getRewardVotes(); // Most of the time we should get the reward votes based on reward_votes_period_ and reward_votes_round_ - auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes); + auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes); if (reward_votes.first) [[likely]] { return {true, std::move(reward_votes.second)}; } @@ -636,13 +730,13 @@ std::pair>> VoteManager::checkRewardVote // and when they included the reward votes in new block, these votes have different round than what saved in // reward_votes_round_ -> therefore we have to iterate over all rounds and find the correct round for (auto round_it = found_period_it->second.begin(); round_it != found_period_it->second.end(); round_it++) { - const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes); + const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes); if (!tmp_reward_votes.first) { LOG(log_dg_) << "No (or not enough) reward votes found for block " << pbft_block->getBlockHash() << ", period: " << pbft_block->getPeriod() << ", prev. 
block hash: " << pbft_block->getPrevBlockHash() - << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << round_it->first - << ", reward_votes_block_hash_: " << reward_votes_block_hash_; + << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round_: " << round_it->first + << ", reward_votes_block_hash: " << reward_votes_block_hash; continue; } @@ -651,19 +745,28 @@ std::pair>> VoteManager::checkRewardVote LOG(log_er_) << "No (or not enough) reward votes found for block " << pbft_block->getBlockHash() << ", period: " << pbft_block->getPeriod() << ", prev. block hash: " << pbft_block->getPrevBlockHash() - << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << reward_votes_round_ - << ", reward_votes_block_hash_: " << reward_votes_block_hash_; + << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round_: " << reward_votes_round + << ", reward_votes_block_hash: " << reward_votes_block_hash; return {false, {}}; } -std::vector> VoteManager::getProposeRewardVotes() { - std::shared_lock lock(reward_votes_info_mutex_); - const auto reward_votes = getTwoTPlusOneVotedBlockVotes(reward_votes_period_, reward_votes_round_, - TwoTPlusOneVotedBlockType::CertVotedBlock); +std::vector> VoteManager::getRewardVotes() { + blk_hash_t reward_votes_block_hash; + PbftRound reward_votes_period; + PbftRound reward_votes_round; + { + std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + reward_votes_block_hash = reward_votes_block_hash_; + reward_votes_period = reward_votes_period_; + reward_votes_round = reward_votes_round_; + } + std::shared_lock lock(verified_votes_access_); + auto reward_votes = + getTwoTPlusOneVotedBlockVotes(reward_votes_period, reward_votes_round, TwoTPlusOneVotedBlockType::CertVotedBlock); - if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != reward_votes_block_hash_) { + if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != 
reward_votes_block_hash) { // This should never happen - LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash_ " << reward_votes_block_hash_ + LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash " << reward_votes_block_hash << ", reward_votes[0]->getBlockHash() " << reward_votes[0]->getBlockHash(); assert(false); return {}; @@ -871,11 +974,9 @@ std::optional VoteManager::getTwoTPlusOneVotedBlock(PbftPeriod perio return two_t_plus_one_voted_block_it->second.first; } -std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( - PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type, - const std::shared_ptr& peer_filter) const { +std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, + TwoTPlusOneVotedBlockType type) const { std::shared_lock lock(verified_votes_access_); - const auto found_period_it = verified_votes_.find(period); if (found_period_it == verified_votes_.end()) { return {}; @@ -909,23 +1010,17 @@ std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( std::vector> votes; votes.reserve(found_verified_votes_it->second.second.size()); for (const auto& vote : found_verified_votes_it->second.second) { - if (peer_filter && peer_filter->isVoteKnown(vote.first)) { - continue; - } - votes.push_back(vote.second); } return votes; } -std::vector> VoteManager::getAllTwoTPlusOneNextVotes( - PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter) const { - auto next_votes = - getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock, peer_filter); +std::vector> VoteManager::getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const { + auto next_votes = getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock); auto null_block_next_vote = - getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedNullBlock, peer_filter); + 
getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedNullBlock); if (!null_block_next_vote.empty()) { next_votes.reserve(next_votes.size() + null_block_next_vote.size()); next_votes.insert(next_votes.end(), std::make_move_iterator(null_block_next_vote.begin()), diff --git a/libraries/core_libs/network/graphql/src/query.cpp b/libraries/core_libs/network/graphql/src/query.cpp index a71874823b..7606de867b 100644 --- a/libraries/core_libs/network/graphql/src/query.cpp +++ b/libraries/core_libs/network/graphql/src/query.cpp @@ -1,5 +1,7 @@ #include "graphql/query.hpp" +#include + #include "graphql/account.hpp" #include "graphql/block.hpp" #include "graphql/log.hpp" diff --git a/libraries/core_libs/network/graphql/src/types/dag_block.cpp b/libraries/core_libs/network/graphql/src/types/dag_block.cpp index e093a3acc8..7124be5fd2 100644 --- a/libraries/core_libs/network/graphql/src/types/dag_block.cpp +++ b/libraries/core_libs/network/graphql/src/types/dag_block.cpp @@ -1,5 +1,7 @@ #include "graphql/types/dag_block.hpp" +#include + #include "graphql/account.hpp" #include "graphql/transaction.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp index 7f140fc72d..0ba977ef2c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp @@ -63,14 +63,6 @@ class ExtVotesPacketHandler : public PacketHandler { const std::shared_ptr& peer, bool validate_max_round_step); - /** - * @brief Common validation for all types of votes - * - * @param vote to be validated - * @return vote validation passed, otherwise - */ - std::pair validateVote(const std::shared_ptr& vote) const; - /** * @brief Validates provided vote if voted 
value == provided block * diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp index c503066bbb..856bc52eca 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp @@ -5,6 +5,7 @@ namespace taraxa { class PbftChain; class DbStorage; +class VoteManager; } // namespace taraxa namespace taraxa::network::tarcap { @@ -16,7 +17,8 @@ class GetPbftSyncPacketHandler final : public PacketHandler { GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, - std::shared_ptr db, const addr_t& node_addr); + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr); void sendPbftBlocks(dev::p2p::NodeID const& peer_id, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced); @@ -30,6 +32,7 @@ class GetPbftSyncPacketHandler final : public PacketHandler { std::shared_ptr pbft_syncing_state_; std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; std::shared_ptr db_; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp index d205ec2470..cd71341cf5 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp @@ -27,8 +27,7 @@ class TransactionPacketHandler final : public PacketHandler { * @param transactions serialized transactions * */ - void sendTransactions(std::shared_ptr const& peer, - 
std::vector>&& transactions); + void sendTransactions(std::shared_ptr peer, std::vector>&& transactions); /** * @brief Sends batch of transactions to all connected peers diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 7dd18406cc..0fcd33dbb9 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -49,11 +49,11 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const std::string_view& request) = 0; - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); - void newDagBlock(DagBlock const& blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(Json::Value const& payload); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const Json::Value& payload); + void newPendingTransaction(const trx_hash_t& trx_hash); bool is_closed() const { return closed_; } bool is_normal(const beast::error_code& ec) const; LOG_OBJECTS_DEFINE @@ -90,11 +90,11 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); - void newDagBlock(DagBlock const& blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(PbftBlock const& sche_blk, std::vector const& finalized_dag_blk_hashes); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void 
newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); + void newPendingTransaction(const trx_hash_t& trx_hash); virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; diff --git a/libraries/core_libs/network/rpc/CMakeLists.txt b/libraries/core_libs/network/rpc/CMakeLists.txt index 08056792fa..23273a886a 100644 --- a/libraries/core_libs/network/rpc/CMakeLists.txt +++ b/libraries/core_libs/network/rpc/CMakeLists.txt @@ -1,20 +1,20 @@ # Note: run make gen_rpc_stubs to re-generate rpc classes -include(EthDependencies) -include(EthExecutableHelper) +# include(EthDependencies) +# include(EthExecutableHelper) -find_program(ETH_JSON_RPC_STUB jsonrpcstub) +# find_program(ETH_JSON_RPC_STUB jsonrpcstub) -set(CPP_NAMESPACE taraxa::net) -file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") -add_custom_target(gen_rpc_stubs) -foreach (api_def_file ${API_DEF_FILES}) - get_filename_component(api_name ${api_def_file} NAME_WE) - jsonrpcstub_create( - gen_rpc_stubs ${api_name}.jsonrpc.json - ${CPP_NAMESPACE}::${api_name}Face - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face - ${CPP_NAMESPACE}::${api_name}Client - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client - ) -endforeach () +# set(CPP_NAMESPACE taraxa::net) +# file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") +# add_custom_target(gen_rpc_stubs) +# foreach (api_def_file ${API_DEF_FILES}) +# get_filename_component(api_name ${api_def_file} NAME_WE) +# jsonrpcstub_create( +# gen_rpc_stubs ${api_name}.jsonrpc.json +# ${CPP_NAMESPACE}::${api_name}Face +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face +# ${CPP_NAMESPACE}::${api_name}Client +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client +# ) +# endforeach () diff --git a/libraries/core_libs/network/rpc/Eth.jsonrpc.json b/libraries/core_libs/network/rpc/Eth.jsonrpc.json index eb1efbe28e..77d724669c 100644 --- 
a/libraries/core_libs/network/rpc/Eth.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Eth.jsonrpc.json @@ -33,7 +33,7 @@ "name": "eth_getBalance", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -43,7 +43,7 @@ "params": [ "", "", - "" + {} ], "order": [], "returns": "" @@ -61,7 +61,7 @@ "name": "eth_getTransactionCount", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -102,7 +102,7 @@ "name": "eth_getCode", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -111,7 +111,7 @@ "name": "eth_call", "params": [ {}, - "" + {} ], "order": [], "returns": "" diff --git a/libraries/core_libs/network/rpc/EthClient.h b/libraries/core_libs/network/rpc/EthClient.h index 0e9a220298..47869dfe8b 100644 --- a/libraries/core_libs/network/rpc/EthClient.h +++ b/libraries/core_libs/network/rpc/EthClient.h @@ -59,7 +59,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getBalance(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getBalance(const std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -70,7 +70,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getStorageAt(const std::string& param1, const std::string& param2, - const std::string& param3) throw(jsonrpc::JsonRpcException) { + const Json::Value& param3) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -93,7 +93,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getTransactionCount(const std::string& param1, - const std::string& param2) 
throw(jsonrpc::JsonRpcException) { + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -139,7 +139,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getCode(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getCode(const std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -149,7 +149,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_call(const Json::Value& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_call(const Json::Value& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); diff --git a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index d050fad608..d3d415636c 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -9,6 +9,8 @@ namespace taraxa { namespace net { +// Please read README +const int JSON_ANY = 0; class EthFace : public ServerInterface { public: EthFace() { @@ -25,24 +27,24 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_blockNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_blockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBalanceI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getStorageAt", 
jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, "param3", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageAtI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getStorageRoot", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageRootI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getTransactionCount", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getTransactionCountI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByNumber", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getUncleCountByBlockHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -51,16 +53,16 @@ class EthFace : public ServerInterface { jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getUncleCountByBlockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getCode", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), 
&taraxa::net::EthFace::eth_getCodeI); this->bindAndAddMethod(jsonrpc::Procedure("eth_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_callI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), + "param1", JSON_ANY, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -69,10 +71,9 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_getTransactionByBlockHashAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getTransactionByBlockHashAndIndexI); - this->bindAndAddMethod( - jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), - &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); + this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, "param2", JSON_ANY, NULL), + &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionReceipt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), 
&taraxa::net::EthFace::eth_getTransactionReceiptI); @@ -138,16 +139,16 @@ class EthFace : public ServerInterface { response = this->eth_blockNumber(); } inline virtual void eth_getBalanceI(const Json::Value &request, Json::Value &response) { - response = this->eth_getBalance(request[0u].asString(), request[1u].asString()); + response = this->eth_getBalance(request[0u].asString(), request[1u]); } inline virtual void eth_getStorageAtI(const Json::Value &request, Json::Value &response) { - response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u].asString()); + response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u]); } inline virtual void eth_getStorageRootI(const Json::Value &request, Json::Value &response) { response = this->eth_getStorageRoot(request[0u].asString(), request[1u].asString()); } inline virtual void eth_getTransactionCountI(const Json::Value &request, Json::Value &response) { - response = this->eth_getTransactionCount(request[0u].asString(), request[1u].asString()); + response = this->eth_getTransactionCount(request[0u].asString(), request[1u]); } inline virtual void eth_getBlockTransactionCountByHashI(const Json::Value &request, Json::Value &response) { response = this->eth_getBlockTransactionCountByHash(request[0u].asString()); @@ -162,10 +163,10 @@ class EthFace : public ServerInterface { response = this->eth_getUncleCountByBlockNumber(request[0u].asString()); } inline virtual void eth_getCodeI(const Json::Value &request, Json::Value &response) { - response = this->eth_getCode(request[0u].asString(), request[1u].asString()); + response = this->eth_getCode(request[0u].asString(), request[1u]); } inline virtual void eth_callI(const Json::Value &request, Json::Value &response) { - response = this->eth_call(request[0u], request[1u].asString()); + response = this->eth_call(request[0u], request[1u]); } inline virtual void eth_getBlockByHashI(const Json::Value &request, Json::Value 
&response) { response = this->eth_getBlockByHash(request[0u].asString(), request[1u].asBool()); @@ -233,17 +234,17 @@ class EthFace : public ServerInterface { virtual std::string eth_gasPrice() = 0; virtual Json::Value eth_accounts() = 0; virtual std::string eth_blockNumber() = 0; - virtual std::string eth_getBalance(const std::string ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getBalance(const std::string ¶m1, const Json::Value ¶m2) = 0; virtual std::string eth_getStorageAt(const std::string ¶m1, const std::string ¶m2, - const std::string ¶m3) = 0; + const Json::Value ¶m3) = 0; virtual std::string eth_getStorageRoot(const std::string ¶m1, const std::string ¶m2) = 0; - virtual std::string eth_getTransactionCount(const std::string ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getTransactionCount(const std::string ¶m1, const Json::Value ¶m2) = 0; virtual Json::Value eth_getBlockTransactionCountByHash(const std::string ¶m1) = 0; virtual Json::Value eth_getBlockTransactionCountByNumber(const std::string ¶m1) = 0; virtual Json::Value eth_getUncleCountByBlockHash(const std::string ¶m1) = 0; virtual Json::Value eth_getUncleCountByBlockNumber(const std::string ¶m1) = 0; - virtual std::string eth_getCode(const std::string ¶m1, const std::string ¶m2) = 0; - virtual std::string eth_call(const Json::Value ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getCode(const std::string ¶m1, const Json::Value ¶m2) = 0; + virtual std::string eth_call(const Json::Value ¶m1, const Json::Value ¶m2) = 0; virtual Json::Value eth_getBlockByHash(const std::string ¶m1, bool param2) = 0; virtual Json::Value eth_getBlockByNumber(const std::string ¶m1, bool param2) = 0; virtual Json::Value eth_getTransactionByHash(const std::string ¶m1) = 0; diff --git a/libraries/core_libs/network/rpc/README.md b/libraries/core_libs/network/rpc/README.md new file mode 100644 index 0000000000..761ae481ff --- /dev/null +++ b/libraries/core_libs/network/rpc/README.md @@ -0,0 
+1,9 @@ +# DISABLED !!!! How to generate new API interface +``` +sudo apt install libjsonrpccpp-tools +make gen_rpc_stubs +make clang-format +``` + +# PLEASE READ +As libjsonrpccpp doesn't support function arguments overload I had to made HACK and introduce `JSON_ANY`. `jsonrpc::Procedure` is created to check validation of passed arguments, but as it is not class enum we can used anything above 7 or number 0 to disable this check more https://github.com/cinemast/libjson-rpc-cpp/blob/d5ede2277d849f1a9d2dc111c4ec3ea652bd31ec/src/jsonrpccpp/common/specification.h#L29 . That's why generation via `gen_rpc_stubs` is disabled, because it would overwrite my hack in `EthFace.h` \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 87267ce200..7fe42bc23d 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -137,4 +137,18 @@ Json::Value Taraxa::taraxa_getDagBlockByLevel(const string& _blockLevel, bool _i } Json::Value Taraxa::taraxa_getConfig() { return enc_json(tryGetNode()->getConfig().genesis); } + +Json::Value Taraxa::taraxa_getChainStats() { + Json::Value res; + try { + if (auto node = full_node_.lock()) { + res["pbft_period"] = Json::UInt64(node->getPbftChain()->getPbftChainSize()); + res["dag_blocks_executed"] = Json::UInt64(node->getDB()->getNumBlockExecuted()); + res["transactions_executed"] = Json::UInt64(node->getDB()->getNumTransactionExecuted()); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index cfb774a28e..39b32cca34 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -28,6 +28,7 @@ class Taraxa : public TaraxaFace { virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string& 
_period) override; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; + virtual Json::Value taraxa_getChainStats() override; protected: std::weak_ptr full_node_; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index 48dca28de3..15e5b2135c 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -53,6 +53,12 @@ "order": [], "returns": {} }, + { + "name": "taraxa_getChainStats", + "params": [], + "order": [], + "returns": {} + }, { "name": "taraxa_pbftBlockHashByPeriod", "params": [""], diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index 91d49c94dd..a8a3c105a1 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -88,6 +88,15 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value taraxa_getChainStats() throw(jsonrpc::JsonRpcException) { + Json::Value p; + p = Json::nullValue; + Json::Value result = this->CallMethod("taraxa_getChainStats", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } std::string taraxa_pbftBlockHashByPeriod(const std::string& param1) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index bd53cf7b08..acf4d29c0d 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -38,6 +38,9 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getConfig", 
jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::TaraxaFace::taraxa_getConfigI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_getChainStats", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), + &taraxa::net::TaraxaFace::taraxa_getChainStatsI); this->bindAndAddMethod(jsonrpc::Procedure("taraxa_pbftBlockHashByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); @@ -72,6 +75,10 @@ class TaraxaFace : public ServerInterface { (void)request; response = this->taraxa_getConfig(); } + inline virtual void taraxa_getChainStatsI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_getChainStats(); + } inline virtual void taraxa_pbftBlockHashByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_pbftBlockHashByPeriod(request[0u].asString()); } @@ -83,6 +90,7 @@ class TaraxaFace : public ServerInterface { virtual std::string taraxa_dagBlockPeriod() = 0; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string ¶m1) = 0; virtual Json::Value taraxa_getConfig() = 0; + virtual Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; }; diff --git a/libraries/core_libs/network/rpc/Test.cpp b/libraries/core_libs/network/rpc/Test.cpp index cb43a78b3d..0448abd06a 100644 --- a/libraries/core_libs/network/rpc/Test.cpp +++ b/libraries/core_libs/network/rpc/Test.cpp @@ -77,9 +77,8 @@ Json::Value Test::send_coin_transactions(const Json::Value ¶m1) { auto gas = dev::jsToInt(param1["gas"].asString()); auto transactions_count = param1["transaction_count"].asUInt64(); std::vector receivers; - for (auto rec : param1["receiver"]) { - receivers.emplace_back(addr_t(rec.asString())); - } + std::transform(param1["receiver"].begin(), param1["receiver"].end(), std::back_inserter(receivers), + [](const auto rec) { 
return addr_t(rec.asString()); }); for (uint32_t i = 0; i < transactions_count; i++) { auto trx = std::make_shared(nonce, value, gas_price, gas, bytes(), sk, receivers[i % receivers.size()], kChainId); diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 87cc70bfef..3f796586c7 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -8,11 +8,106 @@ #include "LogFilter.hpp" +using namespace std; +using namespace dev; +using namespace taraxa::final_chain; +using namespace taraxa::state_api; + namespace taraxa::net::rpc::eth { -using namespace ::std; -using namespace ::dev; -using namespace ::taraxa::final_chain; -using namespace ::taraxa::state_api; +void add(Json::Value& obj, const optional& info) { + obj["blockNumber"] = info ? toJS(info->blk_n) : Json::Value(); + obj["blockHash"] = info ? toJS(info->blk_h) : Json::Value(); + obj["transactionIndex"] = info ? toJS(info->index) : Json::Value(); +} + +void add(Json::Value& obj, const ExtendedTransactionLocation& info) { + add(obj, static_cast(info)); + obj["transactionHash"] = toJS(info.trx_hash); +} + +Json::Value toJson(const Transaction& trx, const optional& loc) { + Json::Value res(Json::objectValue); + add(res, loc); + res["hash"] = toJS(trx.getHash()); + res["input"] = toJS(trx.getData()); + res["to"] = toJson(trx.getReceiver()); + res["from"] = toJS(trx.getSender()); + res["gas"] = toJS(trx.getGas()); + res["gasPrice"] = toJS(trx.getGasPrice()); + res["nonce"] = toJS(trx.getNonce()); + res["value"] = toJS(trx.getValue()); + const auto& vrs = trx.getVRS(); + res["r"] = toJS(vrs.r); + res["s"] = toJS(vrs.s); + res["v"] = toJS(vrs.v); + return res; +} + +Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } + +Json::Value toJson(const BlockHeader& obj) { + Json::Value res(Json::objectValue); + res["parentHash"] = toJS(obj.parent_hash); + res["sha3Uncles"] = 
toJS(BlockHeader::uncles_hash()); + res["stateRoot"] = toJS(obj.state_root); + res["transactionsRoot"] = toJS(obj.transactions_root); + res["receiptsRoot"] = toJS(obj.receipts_root); + res["number"] = toJS(obj.number); + res["gasUsed"] = toJS(obj.gas_used); + res["gasLimit"] = toJS(obj.gas_limit); + res["extraData"] = toJS(obj.extra_data); + res["logsBloom"] = toJS(obj.log_bloom); + res["timestamp"] = toJS(obj.timestamp); + res["author"] = toJS(obj.author); + res["mixHash"] = toJS(BlockHeader::mix_hash()); + res["nonce"] = toJS(BlockHeader::nonce()); + res["uncles"] = Json::Value(Json::arrayValue); + res["hash"] = toJS(obj.hash); + res["difficulty"] = "0x0"; + res["totalDifficulty"] = "0x0"; + res["totalReward"] = toJS(obj.total_reward); + return res; +} + +Json::Value toJson(const LocalisedLogEntry& lle) { + Json::Value res(Json::objectValue); + add(res, lle.trx_loc); + res["removed"] = false; + res["data"] = toJS(lle.le.data); + res["address"] = toJS(lle.le.address); + res["logIndex"] = toJS(lle.position_in_receipt); + auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); + for (const auto& t : lle.le.topics) { + topics_json.append(toJS(t)); + } + return res; +} + +Json::Value toJson(const LocalisedTransactionReceipt& ltr) { + Json::Value res(Json::objectValue); + add(res, ltr.trx_loc); + res["from"] = toJS(ltr.trx_from); + res["to"] = toJson(ltr.trx_to); + res["status"] = toJS(ltr.r.status_code); + res["gasUsed"] = toJS(ltr.r.gas_used); + res["cumulativeGasUsed"] = toJS(ltr.r.cumulative_gas_used); + res["contractAddress"] = toJson(ltr.r.new_contract_address); + res["logsBloom"] = toJS(ltr.r.bloom()); + auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); + uint log_i = 0; + for (const auto& le : ltr.r.logs) { + logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); + } + return res; +} + +Json::Value toJson(const SyncStatus& obj) { + Json::Value res(Json::objectValue); + res["startingBlock"] = toJS(obj.starting_block); + 
res["currentBlock"] = toJS(obj.current_block); + res["highestBlock"] = toJS(obj.highest_block); + return res; +} class EthImpl : public Eth, EthParams { Watches watches_; @@ -32,96 +127,98 @@ class EthImpl : public Eth, EthParams { string eth_blockNumber() override { return toJS(final_chain->last_block_number()); } - string eth_getBalance(string const& _address, string const& _blockNumber) override { - return toJS( - final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)).value_or(ZeroAccount).balance); + string eth_getBalance(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account(toAddress(_address), block_number).value_or(ZeroAccount).balance); } - string eth_getStorageAt(string const& _address, string const& _position, string const& _blockNumber) override { - return toJS( - final_chain->get_account_storage(toAddress(_address), jsToU256(_position), parse_blk_num(_blockNumber))); + string eth_getStorageAt(const string& _address, const string& _position, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account_storage(toAddress(_address), jsToU256(_position), block_number)); } - string eth_getStorageRoot(string const& _address, string const& _blockNumber) override { + string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { return toJS(final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)) .value_or(ZeroAccount) .storage_root_eth()); } - string eth_getCode(string const& _address, string const& _blockNumber) override { - return toJS(final_chain->get_code(toAddress(_address), parse_blk_num(_blockNumber))); + string eth_getCode(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_code(toAddress(_address), 
block_number)); } - string eth_call(Json::Value const& _json, string const& _blockNumber) override { + string eth_call(const Json::Value& _json, const Json::Value& _jsonBlock) override { + const auto block_number = get_block_number_from_json(_jsonBlock); auto t = toTransactionSkeleton(_json); - auto blk_n = parse_blk_num(_blockNumber); - prepare_transaction_for_call(t, blk_n); - return toJS(call(blk_n, t).code_retval); + prepare_transaction_for_call(t, block_number); + return toJS(call(block_number, t).code_retval); } - string eth_estimateGas(Json::Value const& _json) override { + string eth_estimateGas(const Json::Value& _json) override { auto t = toTransactionSkeleton(_json); auto blk_n = final_chain->last_block_number(); prepare_transaction_for_call(t, blk_n); return toJS(call(blk_n, t).gas_used); } - string eth_getTransactionCount(string const& _address, string const& _blockNumber) override { - return toJS(transaction_count(parse_blk_num(_blockNumber), toAddress(_address))); + string eth_getTransactionCount(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(transaction_count(block_number, toAddress(_address))); } - Json::Value eth_getBlockTransactionCountByHash(string const& _blockHash) override { - return toJson(transactionCount(jsToFixed<32>(_blockHash))); + Json::Value eth_getBlockTransactionCountByHash(const string& _blockHash) override { + return toJS(transactionCount(jsToFixed<32>(_blockHash))); } - Json::Value eth_getBlockTransactionCountByNumber(string const& _blockNumber) override { + Json::Value eth_getBlockTransactionCountByNumber(const string& _blockNumber) override { return toJS(final_chain->transactionCount(parse_blk_num(_blockNumber))); } - Json::Value eth_getUncleCountByBlockHash(string const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockHash(const string&) override { return toJS(0); } - Json::Value eth_getUncleCountByBlockNumber(string 
const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockNumber(const string&) override { return toJS(0); } - string eth_sendRawTransaction(string const& _rlp) override { + string eth_sendRawTransaction(const string& _rlp) override { auto trx = std::make_shared(jsToBytes(_rlp, OnFailed::Throw), true); send_trx(trx); return toJS(trx->getHash()); } - Json::Value eth_getBlockByHash(string const& _blockHash, bool _includeTransactions) override { + Json::Value eth_getBlockByHash(const string& _blockHash, bool _includeTransactions) override { if (auto blk_n = final_chain->block_number(jsToFixed<32>(_blockHash)); blk_n) { return get_block_by_number(*blk_n, _includeTransactions); } return Json::Value(); } - Json::Value eth_getBlockByNumber(string const& _blockNumber, bool _includeTransactions) override { + Json::Value eth_getBlockByNumber(const string& _blockNumber, bool _includeTransactions) override { return get_block_by_number(parse_blk_num(_blockNumber), _includeTransactions); } - Json::Value eth_getTransactionByHash(string const& _transactionHash) override { + Json::Value eth_getTransactionByHash(const string& _transactionHash) override { return toJson(get_transaction(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getTransactionByBlockHashAndIndex(string const& _blockHash, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockHashAndIndex(const string& _blockHash, + const string& _transactionIndex) override { return toJson(get_transaction(jsToFixed<32>(_blockHash), jsToInt(_transactionIndex))); } - Json::Value eth_getTransactionByBlockNumberAndIndex(string const& _blockNumber, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockNumberAndIndex(const string& _blockNumber, + const string& _transactionIndex) override { return toJson(get_transaction(jsToInt(_transactionIndex), parse_blk_num(_blockNumber))); } - Json::Value eth_getTransactionReceipt(string const& _transactionHash) 
override { + Json::Value eth_getTransactionReceipt(const string& _transactionHash) override { return toJson(get_transaction_receipt(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getUncleByBlockHashAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockHashAndIndex(const string&, const string&) override { return Json::Value(); } - Json::Value eth_getUncleByBlockNumberAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockNumberAndIndex(const string&, const string&) override { return Json::Value(); } - string eth_newFilter(Json::Value const& _json) override { + string eth_newFilter(const Json::Value& _json) override { return toJS(watches_.logs_.install_watch(parse_log_filter(_json))); } @@ -129,26 +226,26 @@ class EthImpl : public Eth, EthParams { string eth_newPendingTransactionFilter() override { return toJS(watches_.new_transactions_.install_watch()); } - bool eth_uninstallFilter(string const& _filterId) override { + bool eth_uninstallFilter(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch && watch->uninstall_watch(watch_id); }); } - Json::Value eth_getFilterChanges(string const& _filterId) override { + Json::Value eth_getFilterChanges(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch ? 
toJsonArray(watch->poll(watch_id)) : Json::Value(Json::arrayValue); }); } - Json::Value eth_getFilterLogs(string const& _filterId) override { + Json::Value eth_getFilterLogs(const string& _filterId) override { if (auto filter = watches_.logs_.get_watch_params(jsToInt(_filterId))) { return toJsonArray(filter->match_all(*final_chain)); } return Json::Value(Json::arrayValue); } - Json::Value eth_getLogs(Json::Value const& _json) override { + Json::Value eth_getLogs(const Json::Value& _json) override { return toJsonArray(parse_log_filter(_json).match_all(*final_chain)); } @@ -159,8 +256,8 @@ class EthImpl : public Eth, EthParams { Json::Value eth_chainId() override { return chain_id ? Json::Value(toJS(chain_id)) : Json::Value(); } - void note_block_executed(BlockHeader const& blk_header, SharedTransactions const& trxs, - TransactionReceipts const& receipts) override { + void note_block_executed(const BlockHeader& blk_header, const SharedTransactions& trxs, + const TransactionReceipts& receipts) override { watches_.new_blocks_.process_update(blk_header.hash); ExtendedTransactionLocation trx_loc{{{blk_header.number}, blk_header.hash}}; for (; trx_loc.index < trxs.size(); ++trx_loc.index) { @@ -170,7 +267,7 @@ class EthImpl : public Eth, EthParams { } } - void note_pending_transaction(h256 const& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } + void note_pending_transaction(const h256& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } Json::Value get_block_by_number(EthBlockNumber blk_n, bool include_transactions) { auto blk_header = final_chain->block_header(blk_n); @@ -183,20 +280,18 @@ class EthImpl : public Eth, EthParams { ExtendedTransactionLocation loc; loc.blk_n = blk_header->number; loc.blk_h = blk_header->hash; - for (auto const& t : final_chain->transactions(blk_n)) { + for (const auto& t : final_chain->transactions(blk_n)) { trxs_json.append(toJson(*t, loc)); ++loc.index; } } else { auto hashes = 
final_chain->transaction_hashes(blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trxs_json.append(toJson(hashes->get(i))); - } + trxs_json = toJsonArray(*hashes); } return ret; } - optional get_transaction(h256 const& h) const { + optional get_transaction(const h256& h) const { auto trx = get_trx(h); if (!trx) { return {}; @@ -213,11 +308,11 @@ class EthImpl : public Eth, EthParams { optional get_transaction(uint64_t trx_pos, EthBlockNumber blk_n) const { auto hashes = final_chain->transaction_hashes(blk_n); - if (hashes->count() <= trx_pos) { + if (hashes->size() <= trx_pos) { return {}; } return LocalisedTransaction{ - get_trx(hashes->get(trx_pos)), + get_trx(hashes->at(trx_pos)), TransactionLocationWithBlockHash{ {blk_n, trx_pos}, *final_chain->block_hash(blk_n), @@ -225,18 +320,18 @@ class EthImpl : public Eth, EthParams { }; } - optional get_transaction(h256 const& blk_h, uint64_t _i) const { + optional get_transaction(const h256& blk_h, uint64_t _i) const { auto blk_n = final_chain->block_number(blk_h); return blk_n ? get_transaction(_i, *blk_n) : nullopt; } - optional get_transaction_receipt(h256 const& trx_h) const { + optional get_transaction_receipt(const h256& trx_h) const { auto r = final_chain->transaction_receipt(trx_h); if (!r) { return {}; } auto loc_trx = get_transaction(trx_h); - auto const& trx = loc_trx->trx; + const auto& trx = loc_trx->trx; return LocalisedTransactionReceipt{ *r, ExtendedTransactionLocation{*loc_trx->trx_loc, trx_h}, @@ -245,16 +340,16 @@ class EthImpl : public Eth, EthParams { }; } - uint64_t transactionCount(h256 const& block_hash) const { + uint64_t transactionCount(const h256& block_hash) const { auto n = final_chain->block_number(block_hash); return n ? 
final_chain->transactionCount(n) : 0; } - trx_nonce_t transaction_count(EthBlockNumber n, Address const& addr) { + trx_nonce_t transaction_count(EthBlockNumber n, const Address& addr) { return final_chain->get_account(addr, n).value_or(ZeroAccount).nonce; } - state_api::ExecutionResult call(EthBlockNumber blk_n, TransactionSkeleton const& trx) { + state_api::ExecutionResult call(EthBlockNumber blk_n, const TransactionSkeleton& trx) { const auto result = final_chain->call( { trx.from, @@ -290,7 +385,7 @@ class EthImpl : public Eth, EthParams { } DEV_SIMPLE_EXCEPTION(InvalidAddress); - static Address toAddress(string const& s) { + static Address toAddress(const string& s) { try { if (auto b = fromHex(s.substr(0, 2) == "0x" ? s.substr(2) : s, WhenError::Throw); b.size() == Address::size) { return Address(b); @@ -300,7 +395,7 @@ class EthImpl : public Eth, EthParams { BOOST_THROW_EXCEPTION(InvalidAddress()); } - static TransactionSkeleton toTransactionSkeleton(Json::Value const& _json) { + static TransactionSkeleton toTransactionSkeleton(const Json::Value& _json) { TransactionSkeleton ret; if (!_json.isObject() || _json.empty()) { return ret; @@ -332,45 +427,60 @@ class EthImpl : public Eth, EthParams { return ret; } - static optional parse_blk_num_specific(string const& blk_num_str) { - if (blk_num_str == "latest" || blk_num_str == "pending") { + static optional parse_blk_num_specific(const string& blk_num_str) { + if (blk_num_str == "latest" || blk_num_str == "pending" || blk_num_str == "safe" || blk_num_str == "finalized") { return std::nullopt; } return blk_num_str == "earliest" ? 0 : jsToInt(blk_num_str); } - EthBlockNumber parse_blk_num(string const& blk_num_str) { + EthBlockNumber parse_blk_num(const string& blk_num_str) { auto ret = parse_blk_num_specific(blk_num_str); return ret ? 
*ret : final_chain->last_block_number(); } - LogFilter parse_log_filter(Json::Value const& json) { + EthBlockNumber get_block_number_from_json(const Json::Value& json) { + if (json.isObject()) { + if (!json["blockNumber"].empty()) { + return parse_blk_num(json["blockNumber"].asString()); + } + if (!json["blockHash"].empty()) { + if (auto ret = final_chain->block_number(jsToFixed<32>(json["blockHash"].asString()))) { + return *ret; + } + throw std::runtime_error("Resource not found"); + } + } + return parse_blk_num(json.asString()); + } + + LogFilter parse_log_filter(const Json::Value& json) { EthBlockNumber from_block; optional to_block; AddressSet addresses; LogFilter::Topics topics; - if (auto const& fromBlock = json["fromBlock"]; !fromBlock.empty()) { + if (const auto& fromBlock = json["fromBlock"]; !fromBlock.empty()) { from_block = parse_blk_num(fromBlock.asString()); } else { from_block = final_chain->last_block_number(); } - if (auto const& toBlock = json["toBlock"]; !toBlock.empty()) { + if (const auto& toBlock = json["toBlock"]; !toBlock.empty()) { to_block = parse_blk_num_specific(toBlock.asString()); } - if (auto const& address = json["address"]; !address.empty()) { + if (const auto& address = json["address"]; !address.empty()) { if (address.isArray()) { - for (auto const& obj : address) { + for (const auto& obj : address) { addresses.insert(toAddress(obj.asString())); } } else { addresses.insert(toAddress(address.asString())); } } - if (auto const& topics_json = json["topics"]; !topics_json.empty()) { + if (const auto& topics_json = json["topics"]; !topics_json.empty()) { for (uint32_t i = 0; i < topics_json.size(); i++) { - auto const& topic_json = topics_json[i]; + const auto& topic_json = topics_json[i]; if (topic_json.isArray()) { - for (auto const& t : topic_json) { + for (const auto& t : topic_json) { if (!t.isNull()) { topics[i].insert(jsToFixed<32>(t.asString())); } @@ -382,124 +492,8 @@ class EthImpl : public Eth, EthParams { } return 
LogFilter(from_block, to_block, std::move(addresses), std::move(topics)); } - - static void add(Json::Value& obj, optional const& info) { - obj["blockNumber"] = info ? toJson(info->blk_n) : Json::Value(); - obj["blockHash"] = info ? toJson(info->blk_h) : Json::Value(); - obj["transactionIndex"] = info ? toJson(info->index) : Json::Value(); - } - - static void add(Json::Value& obj, ExtendedTransactionLocation const& info) { - add(obj, static_cast(info)); - obj["transactionHash"] = toJson(info.trx_hash); - } - - static Json::Value toJson(Transaction const& trx, optional const& loc) { - Json::Value res(Json::objectValue); - add(res, loc); - res["hash"] = toJson(trx.getHash()); - res["input"] = toJson(trx.getData()); - res["to"] = toJson(trx.getReceiver()); - res["from"] = toJson(trx.getSender()); - res["gas"] = toJson(trx.getGas()); - res["gasPrice"] = toJson(trx.getGasPrice()); - res["nonce"] = toJson(trx.getNonce()); - res["value"] = toJson(trx.getValue()); - auto const& vrs = trx.getVRS(); - res["r"] = toJson(vrs.r); - res["s"] = toJson(vrs.s); - res["v"] = toJson(vrs.v); - return res; - } - - static Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } - - static Json::Value toJson(BlockHeader const& obj) { - Json::Value res(Json::objectValue); - res["parentHash"] = toJson(obj.parent_hash); - res["sha3Uncles"] = toJson(BlockHeader::uncles_hash()); - res["stateRoot"] = toJson(obj.state_root); - res["transactionsRoot"] = toJson(obj.transactions_root); - res["receiptsRoot"] = toJson(obj.receipts_root); - res["number"] = toJson(obj.number); - res["gasUsed"] = toJson(obj.gas_used); - res["gasLimit"] = toJson(obj.gas_limit); - res["extraData"] = toJson(obj.extra_data); - res["logsBloom"] = toJson(obj.log_bloom); - res["timestamp"] = toJson(obj.timestamp); - res["author"] = toJson(obj.author); - res["mixHash"] = toJson(BlockHeader::mix_hash()); - res["nonce"] = toJson(BlockHeader::nonce()); - res["uncles"] = 
Json::Value(Json::arrayValue); - res["hash"] = toJson(obj.hash); - res["difficulty"] = "0x0"; - res["totalDifficulty"] = "0x0"; - res["totalReward"] = toJson(obj.total_reward); - return res; - } - - static Json::Value toJson(LocalisedLogEntry const& lle) { - Json::Value res(Json::objectValue); - add(res, lle.trx_loc); - res["removed"] = false; - res["data"] = toJson(lle.le.data); - res["address"] = toJson(lle.le.address); - res["logIndex"] = toJson(lle.position_in_receipt); - auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); - for (auto const& t : lle.le.topics) { - topics_json.append(toJson(t)); - } - return res; - } - - static Json::Value toJson(LocalisedTransactionReceipt const& ltr) { - Json::Value res(Json::objectValue); - add(res, ltr.trx_loc); - res["from"] = toJson(ltr.trx_from); - res["to"] = toJson(ltr.trx_to); - res["status"] = toJson(ltr.r.status_code); - res["gasUsed"] = toJson(ltr.r.gas_used); - res["cumulativeGasUsed"] = toJson(ltr.r.cumulative_gas_used); - res["contractAddress"] = toJson(ltr.r.new_contract_address); - res["logsBloom"] = toJson(ltr.r.bloom()); - auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); - uint log_i = 0; - for (auto const& le : ltr.r.logs) { - logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); - } - return res; - } - - static Json::Value toJson(SyncStatus const& obj) { - Json::Value res(Json::objectValue); - res["startingBlock"] = toJS(obj.starting_block); - res["currentBlock"] = toJS(obj.current_block); - res["highestBlock"] = toJS(obj.highest_block); - return res; - } - - template - static Json::Value toJson(T const& t) { - return toJS(t); - } - - template - static Json::Value toJsonArray(vector const& _es) { - Json::Value res(Json::arrayValue); - for (auto const& e : _es) { - res.append(toJson(e)); - } - return res; - } - - template - static Json::Value toJson(optional const& t) { - return t ? 
toJson(*t) : Json::Value(); - } }; -Json::Value toJson(BlockHeader const& obj) { return EthImpl::toJson(obj); } - shared_ptr NewEth(EthParams&& prerequisites) { return make_shared(std::move(prerequisites)); } } // namespace taraxa::net::rpc::eth \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index 6a256cae83..acf361e0b7 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -1,18 +1,48 @@ #pragma once +#include "data.hpp" #include "final_chain/final_chain.hpp" #include "network/rpc/EthFace.h" #include "watches.hpp" namespace taraxa::net::rpc::eth { +void add(Json::Value& obj, const std::optional& info); +void add(Json::Value& obj, const ExtendedTransactionLocation& info); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const Transaction& trx, const std::optional& loc); +Json::Value toJson(const LocalisedTransaction& lt); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const LocalisedLogEntry& lle); +Json::Value toJson(const LocalisedTransactionReceipt& ltr); +Json::Value toJson(const SyncStatus& obj); + +template +Json::Value toJson(const T& t) { + return toJS(t); +} + +template +Json::Value toJsonArray(const std::vector& _es) { + Json::Value res(Json::arrayValue); + for (const auto& e : _es) { + res.append(toJson(e)); + } + return res; +} + +template +Json::Value toJson(const std::optional& t) { + return t ? 
toJson(*t) : Json::Value(); +} + struct EthParams { Address address; uint64_t chain_id = 0; uint64_t gas_limit = ((uint64_t)1 << 53) - 1; std::shared_ptr final_chain; - std::function(h256 const&)> get_trx; - std::function const& trx)> send_trx; + std::function(const h256&)> get_trx; + std::function& trx)> send_trx; std::function gas_pricer = [] { return u256(0); }; std::function()> syncing_probe = [] { return std::nullopt; }; WatchesConfig watches_cfg; @@ -29,13 +59,11 @@ struct Eth : virtual ::taraxa::net::EthFace { ::taraxa::net::EthFace::operator=(std::move(rhs)); return *this; } - virtual void note_block_executed(final_chain::BlockHeader const&, SharedTransactions const&, - final_chain::TransactionReceipts const&) = 0; - virtual void note_pending_transaction(h256 const& trx_hash) = 0; + virtual void note_block_executed(const final_chain::BlockHeader&, const SharedTransactions&, + const final_chain::TransactionReceipts&) = 0; + virtual void note_pending_transaction(const h256& trx_hash) = 0; }; std::shared_ptr NewEth(EthParams&&); -Json::Value toJson(final_chain::BlockHeader const& obj); - } // namespace taraxa::net::rpc::eth diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 1158486a5a..3969a89ae0 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -8,19 +8,14 @@ LogFilter::LogFilter(EthBlockNumber from_block, std::optional to if (!addresses_.empty()) { return; } - for (auto const& t : topics_) { - if (!t.empty()) { - return; - } - } - is_range_only_ = true; + is_range_only_ = std::all_of(topics_.cbegin(), topics_.cend(), [](const auto& t) { return t.empty(); }); } std::vector LogFilter::bloomPossibilities() const { // return combination of each of the addresses/topics std::vector ret; // | every address with every topic - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { // 1st case, there are 
addresses and topics // // m_addresses = [a0, a1]; @@ -31,12 +26,12 @@ std::vector LogFilter::bloomPossibilities() const { // a1 | t0, a1 | t1a | t1b // ] // - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto b = LogBloom().shiftBloom<3>(sha3(i)); - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -51,9 +46,8 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [a0, a1]; // if (ret.empty()) { - for (auto const& i : addresses_) { - ret.push_back(LogBloom().shiftBloom<3>(sha3(i))); - } + std::transform(addresses_.cbegin(), addresses_.cend(), std::back_inserter(ret), + [](const auto& i) { return LogBloom().shiftBloom<3>(sha3(i)); }); } // 3rd case, there are no addresses, at least create blooms from topics @@ -64,10 +58,10 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [t0, t1a | t1b]; // if (addresses_.empty()) { - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.size()) { LogBloom b; - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -80,7 +74,7 @@ std::vector LogFilter::bloomPossibilities() const { bool LogFilter::matches(LogBloom b) const { if (!addresses_.empty()) { auto ok = false; - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { if (b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -90,12 +84,12 @@ bool LogFilter::matches(LogBloom b) const { return false; } } - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto ok = false; - for (auto const& i : t) { + for (const auto& i : t) { if (b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -108,12 +102,12 @@ bool LogFilter::matches(LogBloom b) const { return true; } -void LogFilter::match_one(TransactionReceipt const& r, std::function const& cb) const { +void LogFilter::match_one(const TransactionReceipt& r, const 
std::function& cb) const { if (!matches(r.bloom())) { return; } for (size_t log_i = 0; log_i < r.logs.size(); ++log_i) { - auto const& e = r.logs[log_i]; + const auto& e = r.logs[log_i]; if (!addresses_.empty() && !addresses_.count(e.address)) { continue; } @@ -134,8 +128,8 @@ bool LogFilter::blk_number_matches(EthBlockNumber blk_n) const { return from_block_ <= blk_n && (!to_block_ || blk_n <= *to_block_); } -void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, TransactionReceipt const& r, - std::function const& cb) const { +void LogFilter::match_one(const ExtendedTransactionLocation& trx_loc, const TransactionReceipt& r, + const std::function& cb) const { if (!blk_number_matches(trx_loc.blk_n)) { return; } @@ -149,15 +143,14 @@ void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, Transactio } } -std::vector LogFilter::match_all(FinalChain const& final_chain) const { +std::vector LogFilter::match_all(const FinalChain& final_chain) const { std::vector ret; auto action = [&, this](EthBlockNumber blk_n) { ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.block_hash(blk_n)}}; auto hashes = final_chain.transaction_hashes(trx_loc.blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trx_loc.trx_hash = hashes->get(i); - match_one(trx_loc, *final_chain.transaction_receipt(trx_loc.trx_hash), - [&](auto const& lle) { ret.push_back(lle); }); + for (const auto& hash : *hashes) { + trx_loc.trx_hash = hash; + match_one(trx_loc, *final_chain.transaction_receipt(hash), [&](const auto& lle) { ret.push_back(lle); }); ++trx_loc.index; } }; @@ -169,7 +162,7 @@ std::vector LogFilter::match_all(FinalChain const& final_chai return ret; } std::set matchingBlocks; - for (auto const& bloom : bloomPossibilities()) { + for (const auto& bloom : bloomPossibilities()) { for (auto blk_n : final_chain.withBlockBloom(bloom, from_block_, to_blk_n)) { matchingBlocks.insert(blk_n); } diff --git a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp 
b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp index 50de2ee673..269d196ba6 100644 --- a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp +++ b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp @@ -56,8 +56,8 @@ std::string JsonRpcWsSession::processRequest(const std::string_view &request) { auto handler = ws_server->GetHandler(); if (handler != NULL) { try { - LOG(log_tr_) << "WS Read: " << (char *)buffer_.data().data(); - handler->HandleRequest((char *)buffer_.data().data(), response); + LOG(log_tr_) << "WS Read: " << static_cast(buffer_.data().data()); + handler->HandleRequest(static_cast(buffer_.data().data()), response); } catch (std::exception const &e) { LOG(log_er_) << "Exception " << e.what(); auto &res_json_error = json_response["error"] = Json::Value(Json::objectValue); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index 5ddd8bacab..d9725c4b9d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -30,7 +30,7 @@ bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, const return false; } - // Validate vote's period, roun and step min/max values + // Validate vote's period, round and step min/max values if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; return false; @@ -85,7 +85,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { // request PBFT chain sync from this node sealAndSend(peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); @@ -113,7 +114,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // Trigger votes(round) syncing only if we are in sync in terms of period if (current_pbft_period == vote->getPeriod()) { // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { // request round votes sync from this node requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); last_votes_sync_request_time_ = std::chrono::system_clock::now(); @@ -140,21 +142,6 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( return {true, ""}; } -std::pair ExtVotesPacketHandler::validateVote(const std::shared_ptr &vote) const { - // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote - // (for a value that isn't NBH) per period, round & step - if (auto unique_vote_validation = vote_mgr_->isUniqueVote(vote); !unique_vote_validation.first) { - return unique_vote_validation; - } - - const auto vote_valid = 
vote_mgr_->validateVote(vote); - if (!vote_valid.first) { - LOG(log_er_) << "Vote \"dpos\" validation failed: " << vote_valid.second; - } - - return vote_valid; -} - bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, const std::shared_ptr &pbft_block) const { if (pbft_block->getBlockHash() != vote->getBlockHash()) { @@ -180,15 +167,15 @@ bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vo bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - // Previous round next vote - if (vote->getPeriod() == current_pbft_period && (current_pbft_round - 1) == vote->getRound() && - vote->getType() == PbftVoteTypes::next_vote) { + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote return true; - } else if (vote->getPeriod() >= current_pbft_period) { - // Standard vote + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote return true; } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { - // Previous round cert vote - potential reward vote + // Previous period cert vote - potential reward vote return true; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp index ed1406e50f..c295494e6a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp @@ -3,6 +3,7 @@ #include "dag/dag.hpp" #include "network/tarcap/packets_handlers/common/ext_syncing_packet_handler.hpp" #include 
"network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ void DagSyncPacketHandler::process(const PacketData& packet_data, const std::sha auto trx = std::make_shared(tx_rlp); peer->markTransactionAsKnown(trx->getHash()); transactions.emplace_back(std::move(trx)); - } catch (const Transaction::InvalidSignature& e) { + } catch (const Transaction::InvalidTransaction& e) { throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp index 6a6baa27ca..3b9ddb71c3 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp @@ -33,8 +33,7 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - std::vector> next_votes = - vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1, peer); + std::vector> next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1); // In edge case this could theoretically happen due to race condition when we moved to the next period or round // right before calling getAllTwoTPlusOneNextVotes with specific period & round if (next_votes.empty()) { @@ -52,13 +51,27 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1, peer); + next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1); if (next_votes.empty()) { LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round 
- 1; return; } } + std::vector> next_votes_to_send; + next_votes_to_send.reserve(next_votes.size()); + for (const auto &vote : next_votes) { + if (!peer->isVoteKnown(vote->getHash())) { + next_votes_to_send.emplace_back(vote); + } + } + + if (next_votes_to_send.empty()) { + LOG(log_dg_) << "Votes already gossiped, no need to send votes sync packet for " << pbft_period << ", round " + << pbft_round - 1; + return; + } + LOG(log_nf_) << "Next votes sync packet with " << next_votes.size() << " votes sent to " << peer->getId(); sendPbftVotesBundle(peer, std::move(next_votes)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp index 2a0de430d9..c5b0113f89 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp @@ -4,17 +4,20 @@ #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" #include "vote/vote.hpp" +#include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, - std::shared_ptr pbft_chain, std::shared_ptr db, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t &node_addr) : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, "GET_PBFT_SYNC_PH"), pbft_syncing_state_(std::move(pbft_syncing_state)), pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} void GetPbftSyncPacketHandler::validatePacketRlpFormat(const PacketData &packet_data) const { @@ -80,7 +83,7 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, P s << last_block; s.appendRaw(data); // Latest
finalized block cert votes are saved in db as reward votes for new blocks - const auto votes = db_->getRewardVotes(); + const auto votes = vote_mgr_->getRewardVotes(); s.appendList(votes.size()); for (const auto &vote : votes) { s.appendRaw(vote->rlp(true)); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp index 01ba610915..79a9dca4bc 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp @@ -60,7 +60,7 @@ void PbftSyncPacketHandler::process(const PacketData &packet_data, const std::sh PeriodData period_data; try { period_data = PeriodData(packet_data.rlp_[1]); - } catch (const Transaction::InvalidSignature &e) { + } catch (const Transaction::InvalidTransaction &e) { throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp index 0b7d5387fc..6132d0091c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp @@ -3,6 +3,7 @@ #include #include "network/tarcap/shared_states/test_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ inline void TransactionPacketHandler::process(const PacketData &packet_data, con try { transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); received_transactions.emplace_back(trx_hash); - } catch (const Transaction::InvalidSignature &e) { + } catch (const Transaction::InvalidTransaction &e) { throw 
MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } @@ -126,11 +127,7 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra std::vector> peers_with_transactions_to_send; auto peers = peers_state_->getAllPeers(); - std::string transactions_to_log; std::string peers_to_log; - for (auto const &trx : transactions) { - transactions_to_log += trx->getHash().abridged(); - } for (const auto &peer : peers) { // Confirm that status messages were exchanged otherwise message might be ignored and node would // incorrectly markTransactionAsKnown @@ -149,6 +146,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { + auto transactions_to_log = + std::accumulate(transactions.begin(), transactions.end(), std::string{}, + [](const auto &r, const auto &trx) { return r + trx->getHash().abridged(); }); LOG(log_tr_) << "Sending Transactions " << transactions_to_log << " to " << peers_to_log; // Sending it in same order favours some peers over others, always start with a different position uint32_t start_with = rand() % peers_to_send_count; @@ -159,8 +159,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } } -void TransactionPacketHandler::sendTransactions(std::shared_ptr const &peer, +void TransactionPacketHandler::sendTransactions(std::shared_ptr peer, std::vector> &&transactions) { + if (!peer) return; const auto peer_id = peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.size() << " to " << peer_id; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index bf494f7e32..7c4f3aa6cd 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ 
b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -63,14 +63,16 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); } - processVote(vote, pbft_block, peer, true); + if (!processVote(vote, pbft_block, peer, true)) { + return; + } // Do not mark it before, as peers have small caches of known votes. Only mark gossiping votes peer->markVoteAsKnown(vote_hash); onNewPbftVote(vote, pbft_block); // Update peer's max chain size - if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + if (peer_chain_size.has_value() && vote->getVoter() == peer->getId() && *peer_chain_size > peer->pbft_chain_size_) { peer->pbft_chain_size_ = *peer_chain_size; } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index bdd4d32660..3f38145c36 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -113,9 +113,15 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries // for round and step to actually being able to sync the current round in case network is stalled - bool check_max_round_step = votes_bundle_votes_type == PbftVoteTypes::next_vote ? 
false : true; - if (votes_bundle_votes_type == PbftVoteTypes::cert_vote) check_max_round_step = false; - processVote(vote, nullptr, peer, check_max_round_step); + bool check_max_round_step = true; + if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { + check_max_round_step = false; + } + + if (!processVote(vote, nullptr, peer, check_max_round_step)) { + continue; + } + votes.push_back(std::move(vote)); } diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index c031e3114e..4744e12bdd 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -237,7 +237,7 @@ void TaraxaCapability::registerPacketHandlers( // TODO there is additional logic, that should be moved outside process function packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, - pbft_chain, db, node_addr); + pbft_chain, vote_mgr, db, node_addr); packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, pbft_chain, pbft_mgr, dag_mgr, vote_mgr, diff --git a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp index 67a4259c65..220a0131f8 100644 --- a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp @@ -114,13 +114,7 @@ std::optional PriorityQueue::pop() { } bool PriorityQueue::empty() const { - for (const auto& queue : packets_queues_) { - if (!queue.empty()) { - return false; - } - } - - return true; + return std::all_of(packets_queues_.cbegin(), packets_queues_.cend(), [](const auto& queue) { return queue.empty(); }); } void PriorityQueue::updateDependenciesStart(const PacketData& packet) { diff --git 
a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index 2f85fb1c1e..03f0bc7da5 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -51,7 +51,7 @@ void WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { return close(is_normal(ec)); } - LOG(log_tr_) << "WS READ " << ((char *)buffer_.data().data()); + LOG(log_tr_) << "WS READ " << (static_cast(buffer_.data().data())); const std::string_view str_view(static_cast(buffer_.data().data()), buffer_.size()); const auto response = processRequest(str_view); @@ -89,12 +89,13 @@ void WsSession::on_write_no_read(beast::error_code ec, std::size_t bytes_transfe } } -void WsSession::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsSession::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { if (new_heads_subscription_ != 0) { Json::Value res, params; res["jsonrpc"] = "2.0"; res["method"] = "eth_subscription"; params["result"] = rpc::eth::toJson(payload); + params["result"]["transactions"] = rpc::eth::toJsonArray(trx_hashes); params["subscription"] = dev::toJS(new_heads_subscription_); res["params"] = params; auto response = util::to_string(res); @@ -318,10 +319,10 @@ void WsServer::newPbftBlockExecuted(PbftBlock const &pbft_blk, } } -void WsServer::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsServer::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { boost::shared_lock lock(sessions_mtx_); for (auto const &session : sessions) { - if (!session->is_closed()) session->newEthBlock(payload); + if (!session->is_closed()) session->newEthBlock(payload, trx_hashes); } } diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 09f8452c33..ecf04206c9 100644 --- a/libraries/core_libs/node/src/node.cpp +++ 
b/libraries/core_libs/node/src/node.cpp @@ -234,7 +234,7 @@ void FullNode::start() { _eth_json_rpc->note_block_executed(*res->final_chain_blk, res->trxs, res->trx_receipts); } if (auto _ws = ws.lock()) { - _ws->newEthBlock(*res->final_chain_blk); + _ws->newEthBlock(*res->final_chain_blk, hashes_from_transactions(res->trxs)); if (auto _db = db.lock()) { auto pbft_blk = _db->getPbftBlock(res->hash); if (const auto &hash = pbft_blk->getPivotDagBlockHash(); hash != kNullBlockHash) { @@ -302,30 +302,6 @@ void FullNode::start() { }, subscription_pool_); - // Subscription to process hardforks - // final_chain_->block_applying_.subscribe([&](uint64_t block_num) { - // // TODO: should have only common hardfork code calling hardfork executor - // auto &state_conf = conf_.genesis.state; - // if (state_conf.hardforks.fix_genesis_fork_block == block_num) { - // for (auto &e : state_conf.dpos->genesis_state) { - // for (auto &b : e.second) { - // b.second *= kOneTara; - // } - // } - // for (auto &b : state_conf.initial_balances) { - // b.second *= kOneTara; - // } - // // we are multiplying it by TARA precision - // state_conf.dpos->eligibility_balance_threshold *= kOneTara; - // // amount of stake per vote should be 10 times smaller than eligibility threshold - // state_conf.dpos->vote_eligibility_balance_step.assign(state_conf.dpos->eligibility_balance_threshold); - // state_conf.dpos->eligibility_balance_threshold *= 10; - // // if this part of code will be needed we need to overwrite genesis json here - // // conf_.overwrite_chain_config_in_file(); - // final_chain_->update_state_config(state_conf); - // } - // }); - vote_mgr_->setNetwork(network_); pbft_mgr_->setNetwork(network_); dag_mgr_->setNetwork(network_); @@ -371,8 +347,8 @@ void FullNode::rebuildDb() { // Read pbft blocks one by one PbftPeriod period = 1; std::shared_ptr period_data, next_period_data; - std::vector> cert_votes; while (true) { + std::vector> cert_votes; if (next_period_data != nullptr) { period_data 
= next_period_data; } else { @@ -383,8 +359,11 @@ void FullNode::rebuildDb() { auto data = old_db_->getPeriodDataRaw(period + 1); if (data.size() == 0) { next_period_data = nullptr; - // Latest finalized block cert votes are saved in db as reward votes for new blocks - cert_votes = old_db_->getRewardVotes(); + // Latest finalized block cert votes are saved in db as 2t+1 cert votes + auto votes = old_db_->getAllTwoTPlusOneVotes(); + for (auto v : votes) { + if (v->getType() == PbftVoteTypes::cert_vote) cert_votes.push_back(v); + } } else { next_period_data = std::make_shared(std::move(data)); cert_votes = next_period_data->previous_block_cert_votes; diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 5f3ae3fbd1..992fb61b51 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -105,16 +105,13 @@ class DbStorage : public std::enable_shared_from_this { COLUMN(pbft_head); COLUMN(latest_round_own_votes); // own votes of any type for the latest round COLUMN(latest_round_two_t_plus_one_votes); // 2t+1 votes bundles of any type for the latest round - COLUMN(latest_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from + COLUMN(extra_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from // latest_round_two_t_plus_one_votes COLUMN(pbft_block_period); COLUMN(dag_block_period); COLUMN_W_COMP(proposal_period_levels_map, getIntComparator()); COLUMN(final_chain_meta); - COLUMN(final_chain_transaction_location_by_hash); - COLUMN(final_chain_replay_protection); COLUMN(final_chain_transaction_hashes_by_blk_number); - COLUMN(final_chain_transaction_count_by_blk_number); COLUMN(final_chain_blk_by_number); COLUMN(final_chain_blk_hash_by_number); COLUMN(final_chain_blk_number_by_hash); @@ -231,6 +228,7 @@ class DbStorage : public std::enable_shared_from_this { void 
addTransactionPeriodToBatch(Batch& write_batch, trx_hash_t const& trx, PbftPeriod period, uint32_t position); std::optional> getTransactionPeriod(trx_hash_t const& hash) const; std::unordered_map getAllTransactionPeriod(); + uint64_t getTransactionCount(PbftPeriod period) const; // PBFT manager uint32_t getPbftMgrField(PbftMgrField field); @@ -271,11 +269,13 @@ class DbStorage : public std::enable_shared_from_this { // 2t+1 votes bundles for the latest round void replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, const std::vector>& votes); + void replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, const std::vector>& votes, + Batch& write_batch); std::vector> getAllTwoTPlusOneVotes(); // Reward votes - cert votes for the latest finalized block - void replaceRewardVotes(const std::vector>& votes, Batch& write_batch); - void saveRewardVote(const std::shared_ptr& vote); + void removeExtraRewardVotes(const std::vector& votes, Batch& write_batch); + void saveExtraRewardVote(const std::shared_ptr& vote); std::vector> getRewardVotes(); // period_pbft_block diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index fd4d10ee94..d3bd850c93 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include "config/version.hpp" @@ -105,7 +106,11 @@ void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::vector handles; handles.reserve(column_families.size()); std::transform(column_families.begin(), column_families.end(), std::back_inserter(descriptors), [](const auto& name) { - return rocksdb::ColumnFamilyDescriptor(name, rocksdb::ColumnFamilyOptions()); + const auto it = std::find_if(Columns::all.begin(), Columns::all.end(), + [&name](const Column& col) { return col.name() == name; }); + auto options = rocksdb::ColumnFamilyOptions(); + if (it != Columns::all.end() && it->comparator_) 
options.comparator = it->comparator_; + return rocksdb::ColumnFamilyDescriptor(name, options); }); rocksdb::DB* db_ptr = nullptr; checkStatus(rocksdb::DB::Open(options, db_path_.string(), descriptors, &handles, &db_ptr)); @@ -448,10 +453,9 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { auto trx_hashes_raw = lookup(period, DB::Columns::final_chain_transaction_hashes_by_blk_number); auto hashes_count = trx_hashes_raw.size() / trx_hash_t::size; for (uint32_t i = 0; i < hashes_count; i++) { - auto hash = - trx_hash_t((uint8_t*)(trx_hashes_raw.data() + i * trx_hash_t::size), trx_hash_t::ConstructFromPointer); + auto hash = trx_hash_t(reinterpret_cast(trx_hashes_raw.data() + i * trx_hash_t::size), + trx_hash_t::ConstructFromPointer); remove(write_batch, Columns::final_chain_receipt_by_trx_hash, hash); - remove(write_batch, Columns::final_chain_transaction_location_by_hash, hash); } remove(write_batch, Columns::final_chain_transaction_hashes_by_blk_number, EthBlockNumber(period)); if ((period - start_period + 1) % max_batch_delete == 0) { @@ -466,7 +470,6 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { // data in the database and free disk space db_->CompactRange({}, handle(Columns::period_data), &start_slice, &end_slice); db_->CompactRange({}, handle(Columns::final_chain_receipt_by_trx_hash), nullptr, nullptr); - db_->CompactRange({}, handle(Columns::final_chain_transaction_location_by_hash), nullptr, nullptr); db_->CompactRange({}, handle(Columns::final_chain_transaction_hashes_by_blk_number), nullptr, nullptr); } } @@ -608,6 +611,15 @@ std::shared_ptr DbStorage::getTransaction(trx_hash_t const& hash) { return nullptr; } +uint64_t DbStorage::getTransactionCount(PbftPeriod period) const { + auto period_data = getPeriodDataRaw(period); + if (period_data.size()) { + auto period_data_rlp = dev::RLP(period_data); + return period_data_rlp[TRANSACTIONS_POS_IN_PERIOD_DATA].itemCount(); + } + return 0; +} + std::pair, trx_hash_t> 
DbStorage::getFinalizedTransactions( std::vector const& trx_hashes) const { // Map of period to position of transactions within a period @@ -838,6 +850,17 @@ void DbStorage::replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, insert(Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); } +void DbStorage::replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, + const std::vector>& votes, Batch& write_batch) { + remove(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type)); + + dev::RLPStream s(votes.size()); + for (const auto& vote : votes) { + s.appendRaw(vote->rlp(true, true)); + } + insert(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); +} + std::vector> DbStorage::getAllTwoTPlusOneVotes() { std::vector> votes; auto load_db_votes = [this, &votes](TwoTPlusOneVotedBlockType type) { @@ -858,29 +881,20 @@ std::vector> DbStorage::getAllTwoTPlusOneVotes() { return votes; } -void DbStorage::replaceRewardVotes(const std::vector>& votes, Batch& write_batch) { - // TODO: deletion could be optimized if we save votes in memory - // Remove existing reward votes - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); - for (it->SeekToFirst(); it->Valid(); it->Next()) { - const auto vote = std::make_shared(asBytes(it->value().ToString())); - remove(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes()); - } - - // Add new reward votes - for (const auto& vote : votes) { - insert(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void DbStorage::removeExtraRewardVotes(const std::vector& votes, Batch& write_batch) { + for (const auto& v : votes) { + remove(write_batch, Columns::extra_reward_votes, v.asBytes()); } } -void DbStorage::saveRewardVote(const std::shared_ptr& vote) { - insert(Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void 
DbStorage::saveExtraRewardVote(const std::shared_ptr& vote) { + insert(Columns::extra_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); } std::vector> DbStorage::getRewardVotes() { std::vector> votes; - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); + auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::extra_reward_votes))); for (it->SeekToFirst(); it->Valid(); it->Next()) { votes.emplace_back(std::make_shared(asBytes(it->value().ToString()))); } diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index ccfcd6b8a3..afd95c02ba 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -7,7 +7,6 @@ #include "common/types.hpp" #include "dag/dag_block.hpp" -#include "transaction/transaction.hpp" #include "vote/vote.hpp" namespace taraxa { diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index 04e6f6c014..e05cfa3f08 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -10,8 +10,16 @@ namespace taraxa { struct Transaction { - struct InvalidSignature : std::runtime_error { - explicit InvalidSignature(std::string const &msg) : runtime_error("invalid signature:\n" + msg) {} + struct InvalidTransaction : std::runtime_error { + explicit InvalidTransaction(const std::string &msg) : runtime_error("invalid transaction - " + msg) {} + }; + + struct InvalidSignature : InvalidTransaction { + explicit InvalidSignature(const std::string &msg) : InvalidTransaction("signature:\n" + msg) {} + }; + + struct InvalidFormat : InvalidTransaction { + explicit InvalidFormat(const std::string &msg) : InvalidTransaction("rlp format:\n" + msg) {} }; private: @@ -70,7 +78,10 @@ 
struct Transaction { }; using SharedTransaction = std::shared_ptr; -using Transactions = ::std::vector; -using SharedTransactions = ::std::vector; +using Transactions = std::vector; +using SharedTransactions = std::vector; +using TransactionHashes = std::vector; + +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions); } // namespace taraxa diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index b6f7bc797b..79f25ba161 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -3,6 +3,7 @@ #include +#include #include #include @@ -14,11 +15,19 @@ using namespace dev; uint64_t toChainID(u256 const &val) { if (val == 0 || std::numeric_limits::max() < val) { - BOOST_THROW_EXCEPTION(Transaction::InvalidSignature("eip-155 chain id must be in the open interval: (0, 2^64)")); + BOOST_THROW_EXCEPTION(Transaction::InvalidTransaction("eip-155 chain id must be in the open interval: (0, 2^64)")); } return static_cast(val); } +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions) { + TransactionHashes trx_hashes; + trx_hashes.reserve(transactions.size()); + std::transform(transactions.cbegin(), transactions.cend(), std::back_inserter(trx_hashes), + [](const auto &trx) { return trx->getHash(); }); + return trx_hashes; +} + Transaction::Transaction(const trx_nonce_t &nonce, const val_t &value, const val_t &gas_price, gas_t gas, bytes data, const secret_t &sk, const optional &receiver, uint64_t chain_id) : nonce_(nonce), @@ -71,7 +80,7 @@ void Transaction::fromRLP(const dev::RLP &_rlp, bool verify_strict, const h256 & if (36 < v) { chain_id_ = toChainID((v - 35) / 2); } else if (v != 27 && v != 28) { - BOOST_THROW_EXCEPTION(InvalidSignature( + BOOST_THROW_EXCEPTION(InvalidFormat( "only values 27 and 28 are allowed for non-replay protected transactions for the 'v' signature field")); } vrs_.v = chain_id_ ? 
byte{v - (u256{chain_id_} * 2 + 35)} : byte{v - 27}; diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 18af5586e6..cc8639e427 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 18af5586e6852c419ab05d2a4107ff98968f1bdf +Subproject commit cc8639e4275171fd5804986d1705256d5b736df4 diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 93c965100c..b5b9bc2bf3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -37,13 +37,6 @@ add_executable(full_node_test full_node_test.cpp) target_link_libraries(full_node_test test_util) add_test(full_node_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/full_node_test) -# add_executable(hardfork_test hardfork_test.cpp) -# target_link_libraries(hardfork_test -# core_libs -# CONAN_PKG::gtest -# ) -# add_test(hardfork_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/hardfork_test) - add_executable(network_test network_test.cpp) target_link_libraries(network_test test_util) add_test(network_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/network_test) diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 9b78ed321b..daaa0b45b9 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -45,17 +45,16 @@ struct FinalChainTest : WithDataDir { SUT = nullptr; SUT = NewFinalChain(db, cfg); std::vector trx_hashes; - int pos = 0; + ++expected_blk_num; for (const auto& trx : trxs) { - db->saveTransactionPeriod(trx->getHash(), 1, pos++); trx_hashes.emplace_back(trx->getHash()); } DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, secret_t::random()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = - std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, 1, addr_t::random(), - dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, expected_blk_num, + addr_t::random(), dev::KeyPair::create().secret(), 
std::move(reward_votes_hashes)); std::vector> votes; PeriodData period_data(pbft_block, votes); period_data.dag_blocks.push_back(dag_blk); @@ -67,7 +66,6 @@ struct FinalChainTest : WithDataDir { db->commitWriteBatch(batch); auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); - ++expected_blk_num; const auto& blk_h = *result->final_chain_blk; EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header(blk_h.number))); EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header())); @@ -181,12 +179,6 @@ TEST_F(FinalChainTest, initial_balances) { init(); } -// TEST_F(FinalChainTest, update_state_config) { -// init(); -// cfg.genesis.state.hardforks.fix_genesis_fork_block = 2222222; -// SUT->update_state_config(cfg.genesis.state); -// } - TEST_F(FinalChainTest, contract) { auto sender_keys = dev::KeyPair::create(); const auto& addr = sender_keys.address(); diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index e889fd3da7..16e00a70f6 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -285,9 +285,7 @@ TEST_F(FullNodeTest, db_test) { } EXPECT_TRUE(db.getRewardVotes().empty()); - batch = db.createWriteBatch(); - db.replaceRewardVotes(verified_votes, batch); - db.commitWriteBatch(batch); + for (auto v : verified_votes) db.saveExtraRewardVote(v); const auto db_reward_votes = db.getRewardVotes(); EXPECT_EQ(db_reward_votes.size(), verified_votes_map.size()); @@ -297,7 +295,7 @@ TEST_F(FullNodeTest, db_test) { const auto new_reward_vote = genVote(PbftVoteTypes::cert_vote, 10, 10, 3); verified_votes_map[new_reward_vote->getHash()] = new_reward_vote; - db.saveRewardVote(new_reward_vote); + db.saveExtraRewardVote(new_reward_vote); const auto new_db_reward_votes = db.getRewardVotes(); EXPECT_EQ(new_db_reward_votes.size(), verified_votes_map.size()); @@ -306,7 +304,12 @@ TEST_F(FullNodeTest, db_test) { } batch = db.createWriteBatch(); - db.replaceRewardVotes({}, batch); + + std::vector verified_votes_hashes, 
new_db_reward_votes_hashes; + for (const auto &v : verified_votes) verified_votes_hashes.emplace_back(v->getHash()); + for (const auto &v : new_db_reward_votes) new_db_reward_votes_hashes.emplace_back(v->getHash()); + db.removeExtraRewardVotes(verified_votes_hashes, batch); + db.removeExtraRewardVotes(new_db_reward_votes_hashes, batch); db.commitWriteBatch(batch); EXPECT_TRUE(db.getRewardVotes().empty()); @@ -1657,7 +1660,7 @@ TEST_F(FullNodeTest, graphql_test) { block = service::ScalarArgument::require("block", data); auto transactionAt = service::ScalarArgument::require("transactionAt", block); const auto hash2 = service::StringArgument::require("hash", transactionAt); - EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->get(0).toString(), hash2); + EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->at(0).toString(), hash2); } } // namespace taraxa::core_tests diff --git a/tests/hardfork_test.cpp b/tests/hardfork_test.cpp deleted file mode 100644 index e48784ed19..0000000000 --- a/tests/hardfork_test.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "cli/config.hpp" -#include "cli/tools.hpp" -#include "dag/dag.hpp" -#include "logger/logger.hpp" -#include "node/node.hpp" -#include "string" -#include "test_util/samples.hpp" -#include "transaction/transaction_manager.hpp" - -namespace taraxa::core_tests { - -// We need separate fixture for this tests because hardfork is overwriting config file. 
But we can't change config -// stored in global variable because values will change for next test cases -struct HardforkTest : WithDataDir { - FullNodeConfig node_cfg; - - HardforkTest() { - // creating config this way to prevent config files overwriting - auto cfg_filename = std::string("conf_taraxa1.json"); - auto p = DIR_CONF / cfg_filename; - auto w = DIR_CONF / std::string("wallet1.json"); - Json::Value test_node_wallet_json; - std::ifstream(w.string(), std::ifstream::binary) >> test_node_wallet_json; - node_cfg = FullNodeConfig(p.string(), test_node_wallet_json, data_dir / cfg_filename); - - fs::remove_all(node_cfg.data_path); - fs::create_directories(node_cfg.data_path); - - auto data_path_cfg = node_cfg.data_path / fs::path(node_cfg.json_file_name).filename(); - fs::copy_file(node_cfg.json_file_name, data_path_cfg); - node_cfg.json_file_name = data_path_cfg; - - addr_t root_node_addr("de2b1203d72d3549ee2f733b00b2789414c7cea5"); - node_cfg.genesis.state.initial_balances[root_node_addr] = 9007199254740991; - auto &dpos = *node_cfg.genesis.state.dpos; - dpos.genesis_state[root_node_addr][root_node_addr] = dpos.eligibility_balance_threshold; - // speed up block production - { - node_cfg.genesis.sortition.vrf.threshold_upper = 0xffff; - node_cfg.genesis.sortition.vdf.difficulty_min = 0; - node_cfg.genesis.sortition.vdf.difficulty_max = 3; - node_cfg.genesis.sortition.vdf.difficulty_stale = 3; - node_cfg.genesis.sortition.vdf.lambda_bound = 100; - // PBFT config - node_cfg.genesis.pbft.lambda_ms /= 20; - node_cfg.network.transaction_interval_ms /= 20; - } - } - - ~HardforkTest() { fs::remove_all(node_cfg.data_path); } - - HardforkTest(const HardforkTest &) = delete; - HardforkTest(HardforkTest &&) = delete; - HardforkTest &operator=(const HardforkTest &) = delete; - HardforkTest &operator=(HardforkTest &&) = delete; -}; - -TEST_F(HardforkTest, hardfork_override) { - auto default_json = cli::tools::getConfig(cli::Config::DEFAULT_CHAIN_ID); - auto default_hardforks 
= default_json["genesis"]["hardforks"]; - Json::Value config = default_json; - auto &state_cfg = config["genesis"]; - state_cfg["hardforks"].removeMember("fix_genesis_fork_block"); - - EXPECT_TRUE(state_cfg["hardforks"]["fix_genesis_fork_block"].isNull()); - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); - - state_cfg.removeMember("hardforks"); - EXPECT_TRUE(state_cfg["hardforks"].isNull()); - - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); -} - -TEST_F(HardforkTest, fix_genesis_fork_block_is_zero) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 0; - auto node = launch_nodes({node_cfg}).front(); - - auto dummy_trx = std::make_shared(1, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() <= cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - EXPECT_EQ(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); -} - -TEST_F(HardforkTest, hardfork) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 10; - cfg.state.dpos->eligibility_balance_threshold = 100000; - cfg.state.dpos->vote_eligibility_balance_step.assign(cfg.state.dpos->eligibility_balance_threshold); - cfg.state.dpos->delegation_delay = 5; - cfg.state.dpos->delegation_locking_period = 5; - - auto random_node = addr_t::random(); - auto random_votes = 3; - for (auto &gb : cfg.state.initial_balances) { - gb.second = 110000000; - } - for (auto &gs : cfg.state.dpos->genesis_state) { - for (auto &b : gs.second) { - b.second = 1100000; - std::cout << b.first << ": " << b.second << std::endl; - } - gs.second.emplace(random_node, random_votes * 
cfg.state.dpos->vote_eligibility_balance_step); - } - - auto node = launch_nodes({node_cfg}).front(); - auto nonce = 0; - auto dummy_trx = [&nonce, node]() { - auto dummy_trx = std::make_shared(nonce++, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - }; - dummy_trx(); - node->getFinalChain()->block_finalized_.subscribe([&](const std::shared_ptr &res) { - const auto block_num = res->final_chain_blk->number; - if (cfg.state.hardforks.fix_genesis_fork_block == block_num) { - return; - } - dummy_trx(); - dummy_trx(); - }); - std::map balances_before; - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance = node->getFinalChain()->get_account(b.first)->balance; - balances_before.emplace(b.first, balance); - } - auto votes_count = 11; - EXPECT_EQ(votes_count + random_votes, node->getFinalChain()->dpos_eligible_total_vote_count(0)); - EXPECT_EQ(random_votes, node->getFinalChain()->dpos_eligible_vote_count(0, random_node)); - - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() < cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - - u256 dpos_genesis_sum = 0; - // Verify DPOS initial balances increasing - for (const auto &gs : node->getConfig().genesis.state.dpos->genesis_state) { - for (const auto &b : gs.second) { - EXPECT_EQ(b.second, node->getFinalChain()->get_staking_balance(b.first)); - dpos_genesis_sum += b.second; - } - } - - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance_after = node->getFinalChain()->get_account(b.first)->balance; - auto res = b.second - dpos_genesis_sum; - EXPECT_EQ(res, balance_after); - } - - auto block = node->getFinalChain()->last_block_number(); - EXPECT_EQ(votes_count, node->getFinalChain()->dpos_eligible_total_vote_count(block)); - EXPECT_EQ(0, node->getFinalChain()->dpos_eligible_vote_count(block, 
random_node)); - - // check for dpos_query method - { - const auto &genesis_sender = cfg.state.dpos->genesis_state.begin()->first; - - state_api::DPOSQuery::AccountQuery acc_q; - acc_q.with_staking_balance = true; - acc_q.with_outbound_deposits = true; - acc_q.with_inbound_deposits = true; - state_api::DPOSQuery q; - q.with_eligible_count = true; - q.account_queries[genesis_sender] = acc_q; - - // auto q_res = node->getFinalChain()->dpos_query(q); - auto res = q_res.account_results[genesis_sender]; - EXPECT_EQ(res.inbound_deposits.size(), 1); - EXPECT_EQ(res.inbound_deposits.begin()->first, genesis_sender); - EXPECT_EQ(res.inbound_deposits.begin()->second, res.staking_balance); - } - - EXPECT_EQ(cfg.state.dpos->vote_eligibility_balance_step * kOneTara, - node->getConfig().genesis.state.dpos->vote_eligibility_balance_step); - EXPECT_NE(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); - EXPECT_NE(cfg.state.dpos->eligibility_balance_threshold, - node->getConfig().genesis.state.dpos->eligibility_balance_threshold); -} - -} // namespace taraxa::core_tests diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 2e6c744455..4441728508 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -73,8 +73,7 @@ TEST_F(NetworkTest, transfer_block) { ASSERT_EQ(1, num_received); } -// Test creates two Network setup and verifies sending blocks between is successfull -// This test can not work anymore as we are marking other nodes as malicous becasue of invalid dag blocks +// Test creates two Network setup and verifies sending blocks between is successful TEST_F(NetworkTest, transfer_lot_of_blocks) { auto node_cfgs = make_node_cfgs(2, 1, 20); auto nodes = launch_nodes(node_cfgs); @@ -91,9 +90,8 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { const auto nw1 = node1->getNetwork(); const auto nw2 = node2->getNetwork(); - const auto trxs = samples::createSignedTrxSamples(0, 1500, g_secret); + auto 
trxs = samples::createSignedTrxSamples(0, 1500, g_secret); const auto estimation = node1->getTransactionManager()->estimateTransactionGas(trxs[0], {}); - const std::vector estimations(trxs.size(), estimation); // node1 add one valid block const auto proposal_level = 1; @@ -106,9 +104,9 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); - auto block_hash = blk.getHash(); + const auto block_hash = blk.getHash(); + dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; - dag_blocks.emplace_back(std::make_shared(std::move(blk))); // creating lot of blocks just for size std::vector trx_hashes; @@ -120,15 +118,20 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { trx_hashes.push_back(trx->getHash()); verified_transactions.push_back(trx); } - - for (int i = 0; i < 100; ++i) { + { const auto proposal_period = *db1->getProposalPeriodForDagLevel(proposal_level + 1); const auto period_block_hash = db1->getPeriodBlockHash(proposal_period); const auto sortition_params = dag_mgr1->sortitionParamsManager().getSortitionParams(proposal_period); - vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), - VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); - DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i + 1]->getHash()}, {}, vdf, node1->getSecretKey()); - dag_blocks.emplace_back(std::make_shared(blk)); + + for (int i = 0; i < 100; ++i) { + vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), + VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); + dev::bytes vdf_msg = DagManager::getVdfMessage(block_hash, {trxs[i + 1]}); + vdf.computeVdfSolution(sortition_params, vdf_msg, false); + DagBlock blk(block_hash, proposal_level + 1, {}, 
{trxs[i + 1]->getHash()}, estimation, vdf, + node1->getSecretKey()); + dag_blocks.emplace_back(std::make_shared(blk)); + } } for (auto trx : verified_transactions) @@ -138,16 +141,13 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); } wait({1s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr1->getDagBlock(block_hash), nullptr) }); - - taraxa::thisThreadSleepForSeconds(1); const auto node1_period = node1->getPbftChain()->getPbftChainSize(); const auto node2_period = node2->getPbftChain()->getPbftChainSize(); std::cout << "node1 period " << node1_period << ", node2 period " << node2_period << std::endl; nw1->getSpecificHandler()->sendBlocks( - nw2->getNodeId(), std::move(dag_blocks), {}, node2_period, node1_period); - + nw2->getNodeId(), std::move(dag_blocks), std::move(trxs), node2_period, node1_period); std::cout << "Waiting Sync ..." << std::endl; - wait({30s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); + wait({120s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); } TEST_F(NetworkTest, update_peer_chainsize) { @@ -327,7 +327,7 @@ TEST_F(NetworkTest, transfer_transaction) { nw1->start(); nw2->start(); - EXPECT_HAPPENS({10s, 200ms}, [&](auto& ctx) { + EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -336,15 +336,18 @@ TEST_F(NetworkTest, transfer_transaction) { auto nw1_nodeid = nw1->getNodeId(); auto nw2_nodeid = nw2->getNodeId(); - EXPECT_NE(nw1->getPeer(nw2_nodeid), nullptr); - EXPECT_NE(nw2->getPeer(nw1_nodeid), nullptr); + + const auto peer2 = nw1->getPeer(nw2_nodeid); + const auto peer1 = nw2->getPeer(nw1_nodeid); + EXPECT_NE(peer2, nullptr); + EXPECT_NE(peer1, nullptr); SharedTransactions transactions; transactions.push_back(g_signed_trx_samples[0]); transactions.push_back(g_signed_trx_samples[1]); 
transactions.push_back(g_signed_trx_samples[2]); - nw2->getSpecificHandler()->sendTransactions(nw2->getPeer(nw1_nodeid), + nw2->getSpecificHandler()->sendTransactions(peer1, std::move(transactions)); EXPECT_HAPPENS({2s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, nw1->getReceivedTransactionsCount(), 3) }); @@ -612,7 +615,7 @@ TEST_F(NetworkTest, node_pbft_sync) { beneficiary, node1->getSecretKey(), {}); std::vector> votes_for_pbft_blk2; votes_for_pbft_blk2.emplace_back( - node1->getVoteManager()->generateVote(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 2, 3)); + node1->getVoteManager()->generateVoteWithWeight(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 1, 3)); std::cout << "Generate 1 vote for second PBFT block" << std::endl; // node1 put block2 into pbft chain and store into DB // Add cert votes in DB @@ -626,7 +629,9 @@ TEST_F(NetworkTest, node_pbft_sync) { period_data2.transactions.push_back(g_signed_trx_samples[3]); db1->savePeriodData(period_data2, batch); - db1->replaceRewardVotes(votes_for_pbft_blk2, batch); + node1->getVoteManager()->addVerifiedVote(votes_for_pbft_blk2[0]); + db1->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes_for_pbft_blk2, batch); + node1->getVoteManager()->resetRewardVotes(2, 1, 3, pbft_block2.getBlockHash(), batch); // Update pbft chain pbft_chain1->updatePbftChain(pbft_block2.getBlockHash(), pbft_block2.getPivotDagBlockHash()); diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 95f38f2ef0..adcaa4b20f 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -192,61 +192,6 @@ struct PbftManagerTest : NodesTest { } }; -// Test that after some amount of elapsed time will not continue soft voting for same value -TEST_F(PbftManagerTest, terminate_soft_voting_pbft_block) { - auto node_cfgs = make_node_cfgs(1, 1, 20); - makeNodesWithNonces(node_cfgs); - - auto pbft_mgr = nodes[0]->getPbftManager(); - auto vote_mgr = 
nodes[0]->getVoteManager(); - pbft_mgr->stop(); - std::cout << "PBFT manager stopped" << std::endl; - - // Generate bogus votes - auto stale_block_hash = blk_hash_t("0000000100000000000000000000000000000000000000000000000000000000"); - auto propose_vote = vote_mgr->generateVote(stale_block_hash, PbftVoteTypes::propose_vote, 2, 2, 1); - propose_vote->calculateWeight(1, 1, 1); - vote_mgr->addVerifiedVote(propose_vote); - - // uint64_t time_till_stale_ms = 1000; - // std::cout << "Set max wait for soft voted value to " << time_till_stale_ms << "ms..." << std::endl; - // pbft_mgr->setMaxWaitForSoftVotedBlock_ms(time_till_stale_ms); - // pbft_mgr->setMaxWaitForNextVotedBlock_ms(std::numeric_limits::max()); - - auto sleep_time = 1100; - std::cout << "Sleep " << sleep_time << "ms so that last soft voted value of " << stale_block_hash.abridged() - << " becomes stale..." << std::endl; - taraxa::thisThreadSleepForMilliSeconds(sleep_time); - - std::cout << "Initialize PBFT manager at round 2 step 2" << std::endl; - pbft_mgr->setPbftRound(2); - pbft_mgr->setPbftStep(2); - pbft_mgr->resumeSingleState(); - std::cout << "Into cert voted state in round 2..." << std::endl; - EXPECT_EQ(pbft_mgr->getPbftRound(), 2); - EXPECT_EQ(pbft_mgr->getPbftStep(), 3); - - std::cout << "Check did not soft vote for stale soft voted value of " << stale_block_hash.abridged() << "..." - << std::endl; - bool skipped_soft_voting = true; - auto votes = vote_mgr->getVerifiedVotes(); - for (const auto &v : votes) { - if (PbftVoteTypes::soft_vote == v->getType()) { - if (v->getBlockHash() == stale_block_hash) { - skipped_soft_voting = false; - } - std::cout << "Found soft voted value of " << v->getBlockHash().abridged() << " in round 2" << std::endl; - } - } - EXPECT_EQ(skipped_soft_voting, true); - - auto start_round = pbft_mgr->getPbftRound(); - pbft_mgr->resume(); - - std::cout << "Wait ensure node is still advancing in rounds... 
" << std::endl; - EXPECT_HAPPENS({60s, 50ms}, [&](auto &ctx) { WAIT_EXPECT_NE(ctx, start_round, pbft_mgr->getPbftRound()) }); -} - // Test that after some amount of elapsed time will give up on the next voting value if corresponding DAG blocks can't // be found @@ -870,54 +815,6 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data.dag_blocks)); } -TEST_F(PbftManagerWithDagCreation, DISABLED_pbft_block_is_overweighted) { - auto node_cfgs = make_node_cfgs(1, 5, true); - node_cfgs.front().genesis.dag.gas_limit = 500000; - node_cfgs.front().genesis.pbft.gas_limit = 600000; - makeNode(); - deployContract(); - node->getDagBlockProposer()->stop(); - generateAndApplyInitialDag(); - - EXPECT_HAPPENS({10s, 500ms}, - [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nonce, node->getDB()->getNumTransactionExecuted() + 1); }); - - node->getPbftManager()->stop(); - // create pbft block - auto chain_size_before = node->getPbftChain()->getPbftChainSize(); - { - auto blocks_with_txs = generateDagBlocks(10, 3, 1); - insertBlocks(blocks_with_txs); - auto dag_block_hash = blocks_with_txs.back().blk.getHash(); - - // get DAG block and transaction order - const auto propose_period = node->getPbftChain()->getPbftChainSize() + 1; - auto dag_block_order = node->getDagManager()->getDagBlockOrder(dag_block_hash, propose_period); - ASSERT_TRUE(!dag_block_order.empty()); - - std::vector trx_hashes; - for (const auto &bt : blocks_with_txs) { - std::transform(bt.trxs.begin(), bt.trxs.end(), std::back_inserter(trx_hashes), - [](const auto &t) { return t->getHash(); }); - } - auto order_hash = node->getPbftManager()->calculateOrderHash(dag_block_order); - - const auto &last_hash = node->getPbftChain()->getLastPbftBlockHash(); - auto reward_votes = node->getDB()->getRewardVotes(); - std::vector reward_votes_hashes; - std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), - [](const auto 
&v) { return v->getHash(); }); - const auto pbft_block = - std::make_shared(last_hash, dag_block_hash, order_hash, kNullBlockHash, propose_period, - node->getAddress(), node->getSecretKey(), std::move(reward_votes_hashes)); - // node->getPbftChain()->pushUnverifiedPbftBlock(pbft_block); - } - - EXPECT_HAPPENS({60s, 500ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getPbftChain()->getPbftChainSize(), chain_size_before + 1); - }); -} - TEST_F(PbftManagerWithDagCreation, proposed_blocks) { auto db = std::make_shared(data_dir); ProposedBlocks proposed_blocks(db); diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index 3675644792..a104432dfb 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "network/rpc/eth/Eth.h" #include "test_util/gtest.hpp" @@ -232,6 +233,27 @@ TEST_F(RPCTest, eth_getBlock) { EXPECT_EQ(4, dev::jsToU256(block["number"].asString())); EXPECT_GT(dev::jsToU256(block["totalReward"].asString()), 0); } + +TEST_F(RPCTest, eip_1898) { + auto node_cfg = make_node_cfgs(1); + auto nodes = launch_nodes(node_cfg); + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = node_cfg.front().genesis.chain_id; + eth_rpc_params.gas_limit = node_cfg.front().genesis.dag.gas_limit; + eth_rpc_params.final_chain = nodes.front()->getFinalChain(); + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + const auto from = dev::toHex(dev::toAddress(node_cfg.front().node_secret)); + + Json::Value zero_block(Json::objectValue); + zero_block["blockNumber"] = dev::toJS(0); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, zero_block)); + + Json::Value genesis_block(Json::objectValue); + genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->block_hash(0)); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, genesis_block)); +} + } // namespace taraxa::core_tests using namespace 
taraxa; diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 6c49267725..49ce8fbe72 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -59,7 +59,6 @@ TEST_F(StateAPITest, DISABLED_dpos_integration) { // dpos_cfg.eligibility_balance_threshold = 1000; // dpos_cfg.vote_eligibility_balance_step = 1000; // addr_1_bal_expected -= dpos_cfg.genesis_state[make_addr(1)][make_addr(1)] = dpos_cfg.eligibility_balance_threshold; - // chain_cfg.hardforks.fix_genesis_fork_block = 0; // uint64_t curr_blk = 0; // StateAPI SUT([&](auto /*n*/) -> h256 { assert(false); }, // diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 061bde4c2a..53637d89a0 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -76,7 +76,7 @@ TEST_F(TransactionTest, sig) { ASSERT_THROW(Transaction(dev::jsToBytes("0xf84980808080808024a01404adc97c8b58fef303b2862d0e72378" "4fb635e7237e0e8d3ea33bbea19c36ca0229e80d57ba91a0f347686" "30fd21ad86e4c403b307de9ac4550d0ccc81c90fe3")), - Transaction::InvalidSignature); + Transaction::InvalidFormat); std::vector> valid_cases{ {0, "0xf647d1d47ce927ce2fb9f57e4e2a3c32b037c5e544b44611077f5cc6980b0bc2"}, {1, "0x49c1cb845df5d3ed238ca37ad25ca96f417e4f22d7911224cf3c2a725985e7ff"}, @@ -112,7 +112,7 @@ TEST_F(TransactionTest, sig) { } } ASSERT_NE(Transaction(with_modified_payload.out()).getSender(), sender); - ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), Transaction::InvalidSignature); + ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), Transaction::InvalidTransaction); } } } @@ -211,8 +211,6 @@ TEST_F(TransactionTest, transaction_low_nonce) { SharedTransactions trxs{trx_1, trx_2}; period_data.transactions = trxs; auto batch = db->createWriteBatch(); - db->saveTransactionPeriod(trx_1->getHash(), 1, 0); - db->saveTransactionPeriod(trx_2->getHash(), 1, 0); db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); 
final_chain->finalize(std::move(period_data), {dag_blk.getHash()}).get(); diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index 20cd913912..b6c8dab36e 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -58,30 +58,29 @@ TEST_F(VoteTest, verified_votes) { TEST_F(VoteTest, round_determine_from_next_votes) { auto node = create_nodes(1, true /*start*/).front(); - // stop PBFT manager, that will place vote - node->getPbftManager()->stop(); + auto pbft_mgr = node->getPbftManager(); + auto vote_mgr = node->getVoteManager(); + // stop PBFT manager, that will place vote + pbft_mgr->stop(); clearAllVotes({node}); - auto vote_mgr = node->getVoteManager(); - size_t two_t_plus_one = 2; + const auto [current_round, current_period] = pbft_mgr->getPbftRoundAndPeriod(); - // Generate votes in 3 rounds, 2 steps, each step have 3 votes + // Generate votes for a few future rounds blk_hash_t voted_block_hash(1); PbftVoteTypes type = PbftVoteTypes::next_vote; - for (int i = 10; i <= 12; i++) { - for (int j = 4; j <= 5; j++) { - PbftPeriod period = i; - PbftRound round = i; - PbftStep step = j; - auto vote = vote_mgr->generateVote(voted_block_hash, type, period, round, step); - vote->calculateWeight(3, 3, 3); - vote_mgr->addVerifiedVote(vote); - } + const PbftRound kMaxRound = current_round + 3; + PbftStep step = 5; + for (PbftRound round = current_round; round <= kMaxRound; round++) { + auto vote = vote_mgr->generateVote(voted_block_hash, type, current_period, round, step); + vote->calculateWeight(3, 3, 3); + vote_mgr->addVerifiedVote(vote); } - auto new_round = vote_mgr->determineNewRound(12, two_t_plus_one); - EXPECT_EQ(*new_round, 13); + auto new_round = vote_mgr->determineNewRound(current_period, kMaxRound); + EXPECT_EQ(new_round.has_value(), true); + EXPECT_EQ(*new_round, kMaxRound + 1); } TEST_F(VoteTest, reconstruct_votes) { @@ -168,7 +167,6 @@ TEST_F(VoteTest, vote_broadcast) { WAIT_EXPECT_EQ(ctx, vote_mgr2->getVerifiedVotesSize(), 1) WAIT_EXPECT_EQ(ctx, 
vote_mgr3->getVerifiedVotesSize(), 1) }); - EXPECT_EQ(vote_mgr1->getVerifiedVotesSize(), 0); } TEST_F(VoteTest, two_t_plus_one_votes) {