Spec compliant merge shares #261

Merged
merged 35 commits on Apr 6, 2021
Changes from all commits
35 commits
494db15
start spec compliant share merging
evan-forbes Mar 31, 2021
0d15df4
refactor and finish unit testing
evan-forbes Apr 1, 2021
3f0d808
whoops
evan-forbes Apr 1, 2021
8535122
linter gods
evan-forbes Apr 1, 2021
8bae88b
fix initial changes and use constants
evan-forbes Apr 1, 2021
cbb5d76
use constant
evan-forbes Apr 1, 2021
b523831
more polish
evan-forbes Apr 1, 2021
f0f38a6
docs fix
evan-forbes Apr 1, 2021
014a00e
review feedback: docs and out of range panic protection
evan-forbes Apr 1, 2021
0896a3b
review feedback: add panic protection from empty input
evan-forbes Apr 1, 2021
96eafc7
use constant instead of recalculating `ShareSize`
evan-forbes Apr 1, 2021
c3d897a
don't redeclare existing var
evan-forbes Apr 1, 2021
28f9769
be more explicit with returned nil
evan-forbes Apr 1, 2021
9ff16f5
use constant instead of recalculating `ShareSize`
evan-forbes Apr 1, 2021
39ce26a
review feedback: use consistent capitalization
evan-forbes Apr 1, 2021
8794411
stop accepting reserved namespaces as normal messages
evan-forbes Apr 1, 2021
2b28c59
use a descriptive var name for message length
evan-forbes Apr 1, 2021
d118716
linter and comparison fix
evan-forbes Apr 1, 2021
3ae1f79
reorg tests, add test for parse delimiter, DataFromBlock and fix evid…
evan-forbes Apr 2, 2021
dfe2a07
catch error for linter
evan-forbes Apr 3, 2021
e9f3a2e
update test MakeShares to include length delimiters for the SHARE_RES…
evan-forbes Apr 3, 2021
221b0fb
minor iteration change
evan-forbes Apr 3, 2021
37316e8
refactor share splitting to fix bug
evan-forbes Apr 3, 2021
e3736d6
fix all bugs with third and final refactor
evan-forbes Apr 4, 2021
632bb37
Merge branch 'master' into evan/merge-shares
evan-forbes Apr 4, 2021
3ca2afa
fix conflict
evan-forbes Apr 4, 2021
0f930fd
revert unnecessary changes
evan-forbes Apr 4, 2021
91c3989
review feedback: better docs
evan-forbes Apr 5, 2021
043812d
review feedback: add comment for safeLen
evan-forbes Apr 5, 2021
76d7a4b
review feedback: remove unnecessary comments
evan-forbes Apr 5, 2021
1cb4030
review feedback: split up share merging and splitting into their own …
evan-forbes Apr 5, 2021
a88db3b
review feedback: more descriptive var names
evan-forbes Apr 5, 2021
2aad8fd
fix accidental change
evan-forbes Apr 5, 2021
887aa08
add some constant docs
evan-forbes Apr 5, 2021
40a57c1
spelling error
evan-forbes Apr 5, 2021
20 changes: 5 additions & 15 deletions types/block.go
@@ -1626,23 +1626,13 @@ func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceList) error {
 func (data *EvidenceData) splitIntoShares() NamespacedShares {
 	rawDatas := make([][]byte, 0, len(data.Evidence))
 	for _, ev := range data.Evidence {
-		var rawData []byte
-		var err error
-		switch cev := ev.(type) {
-		case *DuplicateVoteEvidence:
-			rawData, err = protoio.MarshalDelimited(cev.ToProto())
-		case *LightClientAttackEvidence:
-			pcev, iErr := cev.ToProto()
-			if iErr != nil {
-				err = iErr
-				break
-			}
-			rawData, err = protoio.MarshalDelimited(pcev)
-		default:
-			panic(fmt.Sprintf("unknown evidence included in evidence pool (don't know how to encode this) %#v", ev))
+		pev, err := EvidenceToProto(ev)
+		if err != nil {
+			panic("failure to convert evidence to equivalent proto type")
 		}
+		rawData, err := protoio.MarshalDelimited(pev)
 		if err != nil {
-			panic(fmt.Sprintf("evidence included in evidence pool that can not be encoded %#v, err: %v", ev, err))
+			panic(err)
 		}
 		rawDatas = append(rawDatas, rawData)
 	}
19 changes: 16 additions & 3 deletions types/consts.go
@@ -33,10 +33,23 @@ const (
 )

 var (
-	TxNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 1}
+	// See spec for further details on the types of available data
+	// https://github.com/lazyledger/lazyledger-specs/blob/master/specs/consensus.md#reserved-namespace-ids
+	// https://github.com/lazyledger/lazyledger-specs/blob/de5f4f74f56922e9fa735ef79d9e6e6492a2bad1/specs/data_structures.md#availabledata
+
+	// TxNamespaceID is the namespace reserved for transaction data
+	TxNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 1}
+	// IntermediateStateRootsNamespaceID is the namespace reserved for
+	// intermediate state root data
 	IntermediateStateRootsNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 2}
-	EvidenceNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 3}
-
+	// EvidenceNamespaceID is the namespace reserved for evidence
+	EvidenceNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 3}
+	// MaxReservedNamespace is the lexicographically largest namespace that is
+	// reserved for protocol use. It is derived from NAMESPACE_ID_MAX_RESERVED
+	// https://github.com/lazyledger/lazyledger-specs/blob/master/specs/consensus.md#constants
+	MaxReservedNamespace = namespace.ID{0, 0, 0, 0, 0, 0, 0, 255}
+	// TailPaddingNamespaceID is the namespace ID for tail padding. All data
+	// with this namespace will not be
 	TailPaddingNamespaceID = namespace.ID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE}
 	ParitySharesNamespaceID = namespace.ID{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}

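These reserved namespace IDs are what the new DataFromSquare code below keys on when classifying shares. As a minimal, self-contained sketch of that check (not part of this PR): namespace IDs are treated as plain byte slices, the values are copied from the constants above, and the classify helper is invented purely for illustration.

package main

import (
	"bytes"
	"fmt"
)

// stand-ins for the constants above; namespace.ID is treated as a plain
// []byte so the sketch stays self-contained
var (
	txNamespaceID        = []byte{0, 0, 0, 0, 0, 0, 0, 1}
	maxReservedNamespace = []byte{0, 0, 0, 0, 0, 0, 0, 255}
	tailPaddingNamespace = []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE}
)

// classify mirrors the namespace switch used when merging shares (only the
// transaction case is shown here): reserved IDs map to block data types,
// tail padding is skipped, and anything above the reserved range is an
// ordinary message.
func classify(nid []byte) string {
	switch {
	case bytes.Equal(nid, txNamespaceID):
		return "transaction share"
	case bytes.Equal(nid, tailPaddingNamespace):
		return "tail padding (ignored)"
	case bytes.Compare(nid, maxReservedNamespace) < 1:
		return "reserved but unused (ignored)"
	default:
		return "message share"
	}
}

func main() {
	fmt.Println(classify([]byte{0, 0, 0, 0, 0, 0, 0, 1})) // transaction share
	fmt.Println(classify([]byte{0, 0, 0, 0, 0, 0, 0, 9})) // reserved but unused (ignored)
	fmt.Println(classify([]byte{1, 2, 3, 4, 5, 6, 7, 8})) // message share
}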
332 changes: 332 additions & 0 deletions types/share_merging.go
@@ -0,0 +1,332 @@
package types

import (
"bytes"
"encoding/binary"
"errors"

"github.com/gogo/protobuf/proto"
tmbytes "github.com/lazyledger/lazyledger-core/libs/bytes"
tmproto "github.com/lazyledger/lazyledger-core/proto/tendermint/types"
"github.com/lazyledger/rsmt2d"
)

// DataFromSquare extracts block data from an extended data square.
func DataFromSquare(eds *rsmt2d.ExtendedDataSquare) (Data, error) {
originalWidth := eds.Width() / 2

// sort block data shares by namespace
var (
sortedTxShares [][]byte
sortedISRShares [][]byte
sortedEvdShares [][]byte
sortedMsgShares [][]byte
)

// iterate over each row index
for x := uint(0); x < originalWidth; x++ {
// iterate over each col index
for y := uint(0); y < originalWidth; y++ {
// sort the data in each share by its namespace
share := eds.Cell(x, y)
nid := share[:NamespaceSize]
switch {
case bytes.Equal(TxNamespaceID, nid):
sortedTxShares = append(sortedTxShares, share)

case bytes.Equal(IntermediateStateRootsNamespaceID, nid):
sortedISRShares = append(sortedISRShares, share)

case bytes.Equal(EvidenceNamespaceID, nid):
sortedEvdShares = append(sortedEvdShares, share)

case bytes.Equal(TailPaddingNamespaceID, nid):
continue

// ignore unused but reserved namespaces
case bytes.Compare(nid, MaxReservedNamespace) < 1:
continue

// every other namespaceID should be a message
default:
sortedMsgShares = append(sortedMsgShares, share)
}
}
}

// pass the raw share data to their respective parsers
txs, err := parseTxs(sortedTxShares)
if err != nil {
return Data{}, err
}

isrs, err := parseISRs(sortedISRShares)
if err != nil {
return Data{}, err
}

evd, err := parseEvd(sortedEvdShares)
if err != nil {
return Data{}, err
}

msgs, err := parseMsgs(sortedMsgShares)
if err != nil {
return Data{}, err
}

return Data{
Txs: txs,
IntermediateStateRoots: isrs,
Evidence: evd,
Messages: msgs,
}, nil
}

// parseTxs collects all of the transactions from the shares provided
func parseTxs(shares [][]byte) (Txs, error) {
// parse the shares
rawTxs, err := processContiguousShares(shares)
if err != nil {
return nil, err
}

// convert to the Tx type
txs := make(Txs, len(rawTxs))
for i := 0; i < len(txs); i++ {
txs[i] = Tx(rawTxs[i])
}

return txs, nil
}

// parseISRs collects all the intermediate state roots from the shares provided
func parseISRs(shares [][]byte) (IntermediateStateRoots, error) {
rawISRs, err := processContiguousShares(shares)
if err != nil {
return IntermediateStateRoots{}, err
}

ISRs := make([]tmbytes.HexBytes, len(rawISRs))
for i := 0; i < len(ISRs); i++ {
ISRs[i] = rawISRs[i]
}

return IntermediateStateRoots{RawRootsList: ISRs}, nil
}

// parseEvd collects all evidence from the shares provided.
func parseEvd(shares [][]byte) (EvidenceData, error) {
// the raw data returned does not have length delimiters or namespaces and
// is ready to be unmarshaled
rawEvd, err := processContiguousShares(shares)
if err != nil {
return EvidenceData{}, err
}

evdList := make(EvidenceList, len(rawEvd))

// parse into protobuf bytes
for i := 0; i < len(rawEvd); i++ {
// unmarshal the evidence
var protoEvd tmproto.Evidence
err := proto.Unmarshal(rawEvd[i], &protoEvd)
if err != nil {
return EvidenceData{}, err
}
evd, err := EvidenceFromProto(&protoEvd)
if err != nil {
return EvidenceData{}, err
}

evdList[i] = evd
}

return EvidenceData{Evidence: evdList}, nil
}

// parseMsgs collects all messages from the shares provided
func parseMsgs(shares [][]byte) (Messages, error) {
msgList, err := parseMsgShares(shares)
if err != nil {
return MessagesEmpty, err
}

return Messages{
MessagesList: msgList,
}, nil
}

// processContiguousShares takes raw shares and extracts out transactions,
// intermediate state roots, or evidence. The returned [][]byte do not have
// namespaces or length delimiters and are ready to be unmarshalled
func processContiguousShares(shares [][]byte) (txs [][]byte, err error) {
if len(shares) == 0 {
return nil, nil
}

ss := newShareStack(shares)
return ss.resolve()
}

// shareStack holds variables for peel
type shareStack struct {
shares [][]byte
txLen uint64
txs [][]byte
cursor int
}

func newShareStack(shares [][]byte) *shareStack {
return &shareStack{shares: shares}
}

func (ss *shareStack) resolve() ([][]byte, error) {
if len(ss.shares) == 0 {
return nil, nil
}
err := ss.peel(ss.shares[0][NamespaceSize+ShareReservedBytes:], true)
return ss.txs, err
}

// peel recursively parses each chunk of data (either a transaction,
// intermediate state root, or evidence) and adds it to the underlying slice of data.
func (ss *shareStack) peel(share []byte, delimited bool) (err error) {
if delimited {
var txLen uint64
share, txLen, err = parseDelimiter(share)
if err != nil {
return err
}
if txLen == 0 {
return nil
}
ss.txLen = txLen
}
// safeLen describes the point in the share where it can be safely split. If
// split beyond this point, it is possible to break apart a length
// delimiter, which will result in incorrect share merging
safeLen := len(share) - binary.MaxVarintLen64
if safeLen < 0 {
safeLen = 0
}
if ss.txLen <= uint64(safeLen) {
ss.txs = append(ss.txs, share[:ss.txLen])
share = share[ss.txLen:]
return ss.peel(share, true)
}
// add the next share to the current share to continue merging if possible
if len(ss.shares) > ss.cursor+1 {
ss.cursor++
share := append(share, ss.shares[ss.cursor][NamespaceSize+ShareReservedBytes:]...)
return ss.peel(share, false)
}
// collect any remaining data
if ss.txLen <= uint64(len(share)) {
ss.txs = append(ss.txs, share[:ss.txLen])
share = share[ss.txLen:]
return ss.peel(share, true)
}
return errors.New("failure to parse block data: transaction length exceeded data length")
}
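To make the split-then-peel round trip concrete, here is a small, self-contained sketch (not from this PR): it length-delimits one oversized transaction, cuts it into toy shares, and stitches it back together the same way shareStack.peel does. The constants are deliberately tiny stand-ins rather than the real NamespaceSize, ShareReservedBytes, and ShareSize values.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// toy stand-ins for the real constants, deliberately small so that a single
// transaction is forced to span two shares
const (
	nsSize       = 8  // stand-in for NamespaceSize
	reservedByte = 1  // stand-in for ShareReservedBytes
	shareSize    = 32 // stand-in for ShareSize
)

func main() {
	// a single 40-byte "transaction" that cannot fit in one toy share
	tx := bytes.Repeat([]byte{0xAA}, 40)

	// length-delimit the transaction, as the splitting side does before
	// chunking the data into shares
	delim := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(delim, uint64(len(tx)))
	raw := append(delim[:n], tx...)

	// cut the delimited data into share-sized chunks, each prefixed with a
	// namespace and a reserved byte (their contents don't matter here)
	var shares [][]byte
	for len(raw) > 0 {
		chunk := shareSize - nsSize - reservedByte
		if chunk > len(raw) {
			chunk = len(raw)
		}
		prefix := make([]byte, nsSize+reservedByte)
		shares = append(shares, append(prefix, raw[:chunk]...))
		raw = raw[chunk:]
	}

	// merge: strip the prefixes, concatenate, read the delimiter back, and
	// slice out exactly txLen bytes, which is what peel does recursively
	var joined []byte
	for _, s := range shares {
		joined = append(joined, s[nsSize+reservedByte:]...)
	}
	txLen, used := binary.Uvarint(joined)
	recovered := joined[used : used+int(txLen)]

	fmt.Printf("%d shares, recovered %d bytes, equal: %v\n",
		len(shares), len(recovered), bytes.Equal(recovered, tx))
}

Running it prints "2 shares, recovered 40 bytes, equal: true".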

// parseMsgShares iterates through raw shares and separates the contiguous chunks
// of data. It is only used for Messages, i.e. shares with a non-reserved namespace.
func parseMsgShares(shares [][]byte) ([]Message, error) {
if len(shares) == 0 {
return nil, nil
}

// set the first nid and current share
nid := shares[0][:NamespaceSize]
currentShare := shares[0][NamespaceSize:]

// find and remove the msg len delimiter
currentShare, msgLen, err := parseDelimiter(currentShare)
if err != nil {
return nil, err
}

var msgs []Message
for cursor := uint64(0); cursor < uint64(len(shares)); {
var msg Message
currentShare, nid, cursor, msgLen, msg, err = nextMsg(
shares,
currentShare,
nid,
cursor,
msgLen,
)
if err != nil {
return nil, err
}
if msg.Data != nil {
msgs = append(msgs, msg)
}
}

return msgs, nil
}

func nextMsg(
shares [][]byte,
current,
nid []byte,
cursor,
msgLen uint64,
) ([]byte, []byte, uint64, uint64, Message, error) {
switch {
// the message uses all of the current share data and at least some of the
// next share
case msgLen > uint64(len(current)):
// add the next share to the current one and try again
cursor++
current = append(current, shares[cursor][NamespaceSize:]...)
return nextMsg(shares, current, nid, cursor, msgLen)

// the msg we're looking for is contained in the current share
case msgLen <= uint64(len(current)):
msg := Message{nid, current[:msgLen]}
cursor++

// call it a day if the work is done
if cursor >= uint64(len(shares)) {
return nil, nil, cursor, 0, msg, nil
}

nextNid := shares[cursor][:NamespaceSize]
next, msgLen, err := parseDelimiter(shares[cursor][NamespaceSize:])
return next, nextNid, cursor, msgLen, msg, err
}
// this code is unreachable but the compiler doesn't know that
return nil, nil, 0, 0, MessageEmpty, nil
}

// parseDelimiter finds and returns the length delimiter of the message provided
// while also removing the delimiter bytes from the input
func parseDelimiter(input []byte) ([]byte, uint64, error) {
if len(input) == 0 {
return input, 0, nil
}

l := binary.MaxVarintLen64
if len(input) < binary.MaxVarintLen64 {
l = len(input)
}

delimiter := zeroPadIfNecessary(input[:l], binary.MaxVarintLen64)

// read the length of the message
r := bytes.NewBuffer(delimiter)
msgLen, err := binary.ReadUvarint(r)
if err != nil {
return nil, 0, err
}

// calculate the number of bytes used by the delimiter
lenBuf := make([]byte, binary.MaxVarintLen64)
n := binary.PutUvarint(lenBuf, msgLen)

// return the input without the length delimiter
return input[n:], msgLen, nil
}
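As a standalone illustration of the delimiter handling above (not the PR's code): the prefix is zero-padded out to binary.MaxVarintLen64, mimicking zeroPadIfNecessary (defined elsewhere in the package and not shown in this diff), so ReadUvarint always has enough bytes to work with; the varint's true width is then recomputed so that only the delimiter is stripped from the input. The stripDelimiter helper and its error-free signature are simplified for the sketch.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// stripDelimiter reads the uvarint length prefix from the front of a share's
// data and returns the remaining bytes along with the decoded length
func stripDelimiter(input []byte) (rest []byte, msgLen uint64) {
	// zero-pad short inputs so ReadUvarint never runs out of bytes
	padded := make([]byte, binary.MaxVarintLen64)
	copy(padded, input)

	msgLen, _ = binary.ReadUvarint(bytes.NewBuffer(padded))

	// recompute how many bytes the delimiter actually occupied
	n := binary.PutUvarint(make([]byte, binary.MaxVarintLen64), msgLen)
	return input[n:], msgLen
}

func main() {
	// a 300-byte message needs a two-byte uvarint delimiter
	payload := bytes.Repeat([]byte{0x01}, 300)
	delim := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(delim, uint64(len(payload)))

	share := append(delim[:n], payload...)
	rest, msgLen := stripDelimiter(share)
	fmt.Println(msgLen, len(rest)) // prints: 300 300
}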