Skip to content

Commit

Permalink
Release66: P2P and Mining Improvements
Browse files Browse the repository at this point in the history
  • Loading branch information
CaptainDero committed Apr 4, 2022
1 parent a66d3d5 commit b2fe65c
Show file tree
Hide file tree
Showing 7 changed files with 83 additions and 18 deletions.
10 changes: 6 additions & 4 deletions cmd/derod/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ var command_line string = `derod
DERO : A secure, private blockchain with smart-contracts
Usage:
derod [--help] [--version] [--testnet] [--debug] [--sync-node] [--timeisinsync] [--fastsync] [--socks-proxy=<socks_ip:port>] [--data-dir=<directory>] [--p2p-bind=<0.0.0.0:18089>] [--add-exclusive-node=<ip:port>]... [--add-priority-node=<ip:port>]... [--min-peers=<11>] [--rpc-bind=<127.0.0.1:9999>] [--getwork-bind=<0.0.0.0:18089>] [--node-tag=<unique name>] [--prune-history=<50>] [--integrator-address=<address>] [--clog-level=1] [--flog-level=1]
derod [--help] [--version] [--testnet] [--debug] [--sync-node] [--timeisinsync] [--fastsync] [--socks-proxy=<socks_ip:port>] [--data-dir=<directory>] [--p2p-bind=<0.0.0.0:18089>] [--add-exclusive-node=<ip:port>]... [--add-priority-node=<ip:port>]... [--min-peers=<11>] [--max-peers=<100>] [--rpc-bind=<127.0.0.1:9999>] [--getwork-bind=<0.0.0.0:18089>] [--node-tag=<unique name>] [--prune-history=<50>] [--integrator-address=<address>] [--clog-level=1] [--flog-level=1]
derod -h | --help
derod --version
Expand All @@ -82,6 +82,8 @@ Options:
--sync-node Sync node automatically with the seeds nodes. This option is for rare use.
--node-tag=<unique name> Unique name of node, visible to everyone
  --integrator-address If this node mines a block, integrator rewards will be given to this address. Default is the dev's address.
  --min-peers=<31> Node will try to maintain at least this many connections to peers
  --max-peers=<101> Node will maintain at most this many connections to peers and will stop accepting further connections
--prune-history=<50> prunes blockchain history until the specific topo_height
`
Expand Down Expand Up @@ -886,7 +888,7 @@ restart_loop:

case strings.ToLower(line) == "peer_list": // print peer list
p2p.PeerList_Print()
case strings.ToLower(line) == "sync_info": // print active connections
case strings.ToLower(line) == "syncinfo", strings.ToLower(line) == "sync_info": // print active connections
p2p.Connection_Print()
case strings.ToLower(line) == "bye":
fallthrough
Expand Down Expand Up @@ -1108,7 +1110,7 @@ func usage(w io.Writer) {
io.WriteString(w, "\t\033[1mprint_tx\033[0m\tPrint transaction, print_tx <transaction_hash>\n")
io.WriteString(w, "\t\033[1mstatus\033[0m\t\tShow general information\n")
io.WriteString(w, "\t\033[1mpeer_list\033[0m\tPrint peer list\n")
io.WriteString(w, "\t\033[1msync_info\033[0m\tPrint information about connected peers and their state\n")
io.WriteString(w, "\t\033[1msyncinfo\033[0m\tPrint information about connected peers and their state\n")
io.WriteString(w, "\t\033[1mbye\033[0m\t\tQuit the daemon\n")
io.WriteString(w, "\t\033[1mban\033[0m\t\tBan specific ip from making any connections\n")
io.WriteString(w, "\t\033[1munban\033[0m\t\tRevoke restrictions on previously banned ips\n")
Expand Down Expand Up @@ -1146,7 +1148,7 @@ var completer = readline.NewPrefixCompleter(
// readline.PcItem("print_tx"),
readline.PcItem("setintegratoraddress"),
readline.PcItem("status"),
readline.PcItem("sync_info"),
readline.PcItem("syncinfo"),
readline.PcItem("version"),
readline.PcItem("bye"),
readline.PcItem("exit"),
Expand Down
35 changes: 33 additions & 2 deletions cmd/derod/rpc/websocket_getwork_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"net/http"
"os"

"time"

Expand Down Expand Up @@ -65,14 +66,19 @@ type user_session struct {
var client_list_mutex sync.Mutex
var client_list = map[*websocket.Conn]*user_session{}

var miners_count int

// CountMiners returns the number of currently connected miner sessions.
// It also refreshes the package-level miners_count cache while holding
// the client list lock, so the getwork job-dispatch loop can poll a
// cheap snapshot instead of taking the lock on every iteration.
// NOTE(review): miners_count is later read outside this lock (the
// dispatch goroutine) — confirm a slightly stale value is acceptable
// there, or switch the cache to sync/atomic.
func CountMiners() int {
	client_list_mutex.Lock()
	defer client_list_mutex.Unlock()
	miners_count = len(client_list)
	return miners_count
}

func SendJob() {

defer globals.Recover(1)

var params rpc.GetBlockTemplate_Result

// get a block template, and then we will fill the address here as optimization
Expand Down Expand Up @@ -282,7 +288,32 @@ func Getwork_server() {
memPool.Put(b)
})

globals.Cron.AddFunc("@every 2s", SendJob) // if daemon restart automaticaly send job
	//globals.Cron.AddFunc("@every 2s", SendJob) // if daemon restarts, automatically send job
go func() { // try to be as optimized as possible to lower hash wastage
		sleeptime, _ := time.ParseDuration(os.Getenv("JOB_SEND_TIME_DELAY")) // this will hopefully never need to be changed
if sleeptime.Milliseconds() < 40 {
sleeptime = 500 * time.Millisecond
}
logger_getwork.Info("Job will be dispatched every", "time", sleeptime)
old_mini_count := 0
old_time := time.Now()
old_height := int64(0)
for {
if miners_count > 0 {
current_mini_count := chain.MiniBlocks.Count()
current_height := chain.Get_Height()
if old_mini_count != current_mini_count || old_height != current_height || time.Now().Sub(old_time) > sleeptime {
old_mini_count = current_mini_count
old_height = current_height
SendJob()
old_time = time.Now()
}
} else {

}
time.Sleep(10 * time.Millisecond)
}
}()

if err = svr.Start(); err != nil {
logger_getwork.Error(err, "nbio.Start failed.")
Expand Down
1 change: 1 addition & 0 deletions config/seed_nodes.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ var Mainnet_seed_nodes = []string{
"45.82.66.54:8080",
"185.107.69.12:11011",
"89.38.97.110:11011",
"45.82.66.55:11011",
}

// some seed node for testnet
Expand Down
2 changes: 1 addition & 1 deletion config/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,4 +20,4 @@ import "github.com/blang/semver/v4"

// right now it has to be manually changed
// do we need to include git commitsha??
var Version = semver.MustParse("3.4.141-62.DEROHE.STARGATE+26022022")
var Version = semver.MustParse("3.4.141-66.DEROHE.STARGATE+26022022")
19 changes: 10 additions & 9 deletions p2p/connection_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,15 @@ package p2p
* this will also ensure that a single IP is connected only once
*
*/
import "os"
import "fmt"
import "net"
import "math"
import "sync"
import "sort"
import "time"
import "strings"
import "strconv"
import "context"
import "sync/atomic"
import "runtime/debug"
Expand Down Expand Up @@ -405,8 +407,12 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
return connections[i].Latency < connections[j].Latency
})

bw_factor, _ := strconv.Atoi(os.Getenv("BW_FACTOR"))
if bw_factor < 1 {
bw_factor = 1
}

	for { // we must send all blocks at least once; once we are done, break out
old_count := count
for _, v := range connections {
select {
case <-Exit_Event:
Expand All @@ -415,16 +421,16 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
}
if atomic.LoadUint32(&v.State) != HANDSHAKE_PENDING && PeerID != v.Peer_ID && v.Peer_ID != GetPeerID() { // skip pre-handshake connections

// if the other end is > 50 blocks behind, do not broadcast block to hime
			// if the other end is > 2 blocks behind, do not broadcast the block to it
// this is an optimisation, since if the other end is syncing
// every peer will keep on broadcasting and thus making it more lagging
// due to overheads
peer_height := atomic.LoadInt64(&v.Height)
if (our_height - peer_height) > 25 {
if (our_height - peer_height) > 2 {
continue
}

if count > chunk_count {
			if count > len(unique_map) && count > bw_factor*chunk_count { // every connected peer should get at least one chunk
goto done
}

Expand All @@ -450,11 +456,6 @@ func broadcast_Block_Coded(cbl *block.Complete_Block, PeerID uint64, first_seen
count++
}
}
if old_count == count { // exit the loop
break
}
old_count = count

}

done:
Expand Down
27 changes: 25 additions & 2 deletions p2p/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ var backoff = map[string]int64{} // if server receives a connection, then it wil
var backoff_mutex = sync.Mutex{}

var Min_Peers = int64(31) // we need to expose this to be modifieable at runtime without taking daemon offline
var Max_Peers = int64(101)

// return true if we should back off else we can connect
func shouldwebackoff(ip string) bool {
Expand Down Expand Up @@ -111,6 +112,14 @@ func P2P_Init(params map[string]interface{}) error {
logger.Info("P2P is in turbo mode")
}

if os.Getenv("BW_FACTOR") != "" {
bw_factor, _ := strconv.Atoi(os.Getenv("BW_FACTOR"))
if bw_factor <= 0 {
bw_factor = 1
}
logger.Info("", "BW_FACTOR", bw_factor)
}

// permanently unban any seed nodes
if globals.IsMainnet() {
for i := range config.Mainnet_seed_nodes {
Expand Down Expand Up @@ -419,7 +428,21 @@ func maintain_connection_to_peers() {
Min_Peers = i
}
}
logger.Info("Min outgoing peers", "min-peers", Min_Peers)
logger.Info("Min peers", "min-peers", Min_Peers)
}

if _, ok := globals.Arguments["--max-peers"]; ok && globals.Arguments["--max-peers"] != nil { // user specified a limit, use it if possible
i, err := strconv.ParseInt(globals.Arguments["--max-peers"].(string), 10, 64)
if err != nil {
logger.Error(err, "Error Parsing --max-peers")
} else {
if i < Min_Peers {
logger.Error(fmt.Errorf("--max-peers should be positive and more than --min-peers"), "")
} else {
Max_Peers = i
}
}
logger.Info("Max peers", "max-peers", Max_Peers)
}

delay := time.NewTicker(200 * time.Millisecond)
Expand Down Expand Up @@ -481,7 +504,7 @@ func P2P_Server_v2() {

in, out := Peer_Direction_Count()

if int64(in+out) > Min_Peers { // do not allow incoming ddos
if int64(in+out) > Max_Peers { // do not allow incoming ddos
connection.exit()
return
}
Expand Down
7 changes: 7 additions & 0 deletions p2p/peer_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,7 @@ func Peer_SetSuccess(address string) {
p.ConnectAfter = 0
p.Whitelist = true
p.LastConnected = uint64(time.Now().UTC().Unix()) // set time when last connected

// logger.Infof("Setting peer as white listed")
}

Expand Down Expand Up @@ -316,6 +317,12 @@ func get_peer_list() (peers []Peer_Info) {
peer_mutex.Lock()
defer peer_mutex.Unlock()

for _, v := range peer_map { // trim the white list
if v.Whitelist && !IsAddressConnected(ParseIPNoError(v.Address)) {
delete(peer_map, ParseIPNoError(v.Address))
}
}

for _, v := range peer_map {
if v.Whitelist {
peers = append(peers, Peer_Info{Addr: v.Address})
Expand Down

0 comments on commit b2fe65c

Please sign in to comment.