Skip to content

Commit

Permalink
feat!: write PFB txs to their own namespace (#1228)
Browse files Browse the repository at this point in the history
Closes #1173,
#1237
Opens #1243
  • Loading branch information
rootulp committed Jan 18, 2023
1 parent a8c8849 commit b6e2651
Show file tree
Hide file tree
Showing 13 changed files with 125 additions and 68 deletions.
41 changes: 26 additions & 15 deletions app/estimate_square_size.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ import (
// NOTE: The estimation process does not have to be perfect. We can overestimate
// because the cost of padding is limited.
func estimateSquareSize(txs []parsedTx) (squareSize uint64, nonreserveStart int) {
txSharesUsed := estimateTxShares(appconsts.DefaultMaxSquareSize, txs)
txSharesUsed := estimateTxSharesUsed(txs)
pfbTxSharesUsed := estimatePFBTxSharesUsed(appconsts.DefaultMaxSquareSize, txs)
blobSharesUsed := 0

for _, ptx := range txs {
Expand All @@ -33,7 +34,7 @@ func estimateSquareSize(txs []parsedTx) (squareSize uint64, nonreserveStart int)
//
// TODO: use a more precise estimation that doesn't over
// estimate as much
totalSharesUsed := uint64(txSharesUsed + blobSharesUsed)
totalSharesUsed := uint64(txSharesUsed + pfbTxSharesUsed + blobSharesUsed)
totalSharesUsed *= 2
minSize := uint64(math.Sqrt(float64(totalSharesUsed)))
squareSize = shares.RoundUpPowerOfTwo(minSize)
Expand All @@ -44,27 +45,37 @@ func estimateSquareSize(txs []parsedTx) (squareSize uint64, nonreserveStart int)
squareSize = appconsts.DefaultMinSquareSize
}

return squareSize, txSharesUsed
return squareSize, txSharesUsed + pfbTxSharesUsed
}

// estimateTxShares estimates the number of shares used by transactions.
func estimateTxShares(squareSize uint64, ptxs []parsedTx) int {
maxWTxOverhead := maxIndexWrapperOverhead(squareSize)
maxIndexOverhead := maxIndexOverhead(squareSize)
txbytes := 0
// estimateTxSharesUsed estimates the number of shares used by ordinary
// transactions (i.e. all transactions that aren't PFBs).
func estimateTxSharesUsed(ptxs []parsedTx) int {
txBytes := 0
for _, pTx := range ptxs {
if len(pTx.normalTx) != 0 {
if pTx.isNormalTx() {
txLen := len(pTx.normalTx)
txLen += shares.DelimLen(uint64(txLen))
txbytes += txLen
continue
txBytes += txLen
}
txLen := len(pTx.blobTx.Tx) + maxWTxOverhead + (maxIndexOverhead * len(pTx.blobTx.Blobs))
txLen += shares.DelimLen(uint64(txLen))
txbytes += txLen
}
return shares.CompactSharesNeeded(txBytes)
}

return shares.CompactSharesNeeded(txbytes)
// estimatePFBTxSharesUsed estimates the number of compact shares used by PFB
// (blob) transactions, including the worst-case index-wrapper overhead for the
// given square size.
func estimatePFBTxSharesUsed(squareSize uint64, ptxs []parsedTx) int {
	wrapperOverhead := maxIndexWrapperOverhead(squareSize)
	indexOverhead := maxIndexOverhead(squareSize)

	totalBytes := 0
	for _, ptx := range ptxs {
		// Ordinary (non-blob) transactions are counted by estimateTxSharesUsed.
		if !ptx.isBlobTx() {
			continue
		}
		size := len(ptx.blobTx.Tx) + wrapperOverhead + indexOverhead*len(ptx.blobTx.Blobs)
		size += shares.DelimLen(uint64(size))
		totalBytes += size
	}
	return shares.CompactSharesNeeded(totalBytes)
}

// maxIndexWrapperOverhead calculates the maximum amount of overhead introduced by
Expand Down
51 changes: 33 additions & 18 deletions app/estimate_square_size_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/celestiaorg/celestia-app/testutil/blobfactory"
"github.com/celestiaorg/celestia-app/testutil/namespace"
"github.com/celestiaorg/celestia-app/testutil/testfactory"
blobtypes "github.com/celestiaorg/celestia-app/x/blob/types"
"github.com/stretchr/testify/assert"
Expand Down Expand Up @@ -106,34 +107,29 @@ func Test_estimateSquareSize_MultiBlob(t *testing.T) {
}
}

func Test_estimateTxShares(t *testing.T) {
func Test_estimatePFBTxSharesUsed(t *testing.T) {
type test struct {
name string
squareSize uint64
normalTxs int
pfbCount, pfbSize int
}
tests := []test{
{"empty block", appconsts.DefaultMinSquareSize, 0, 0, 0},
{"one normal tx", appconsts.DefaultMinSquareSize, 1, 0, 0},
{"one small pfb small block", 4, 0, 1, 100},
{"one large pfb large block", appconsts.DefaultMaxSquareSize, 0, 1, 1000000},
{"one hundred large pfb large block", appconsts.DefaultMaxSquareSize, 0, 100, 100000},
{"one hundred large pfb medium block", appconsts.DefaultMaxSquareSize / 2, 100, 100, 100000},
{"mixed transactions large block", appconsts.DefaultMaxSquareSize, 100, 100, 100000},
{"mixed transactions large block 2", appconsts.DefaultMaxSquareSize, 1000, 1000, 10000},
{"mostly transactions large block", appconsts.DefaultMaxSquareSize, 10000, 1000, 100},
{"only small pfb large block", appconsts.DefaultMaxSquareSize, 0, 10000, 1},
{"only small pfb medium block", appconsts.DefaultMaxSquareSize / 2, 0, 10000, 1},
{"empty block", appconsts.DefaultMinSquareSize, 0, 0},
{"one small pfb small block", 4, 1, 100},
{"one large pfb large block", appconsts.DefaultMaxSquareSize, 1, 100_000},
{"one hundred large pfb large block", appconsts.DefaultMaxSquareSize, 100, 100_000},
{"one hundred large pfb medium block", appconsts.DefaultMaxSquareSize / 2, 100, 100_000},
{"ten thousand small pfb large block", appconsts.DefaultMaxSquareSize, 10_000, 1},
{"ten thousand small pfb medium block", appconsts.DefaultMaxSquareSize / 2, 10_000, 1},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ptxs := generateMixedParsedTxs(tt.normalTxs, tt.pfbCount, tt.pfbSize)
res := estimateTxShares(tt.squareSize, ptxs)
ptxs := generateParsedTxsWithNIDs(t, namespace.RandomBlobNamespaces(tt.pfbCount), blobfactory.Repeat([]int{tt.pfbSize}, tt.pfbCount))
got := estimatePFBTxSharesUsed(tt.squareSize, ptxs)

// check that our estimate is always larger or equal to the number
// of compact shares actually used
// of pfbTxShares actually used
txs := make([]coretypes.Tx, len(ptxs))
for i, ptx := range ptxs {
if len(ptx.normalTx) != 0 {
Expand All @@ -147,12 +143,31 @@ func Test_estimateTxShares(t *testing.T) {
require.NoError(t, err)
txs[i] = wPFBTx
}
shares := shares.SplitTxs(txs)
assert.LessOrEqual(t, len(shares), res)
_, pfbTxShares := shares.SplitTxs(txs)
assert.LessOrEqual(t, len(pfbTxShares), got)
})
}
}

// Test_estimateTxSharesUsed verifies that estimateTxSharesUsed returns the
// expected number of compact shares for batches of normal (non-blob)
// transactions.
func Test_estimateTxSharesUsed(t *testing.T) {
	type testCase struct {
		name string
		ptxs []parsedTx
		want int
	}
	testCases := []testCase{
		{"empty", []parsedTx{}, 0},
		{"one tx", generateNormalParsedTxs(1), 1},             // 1 tx is approximately 312 bytes which fits in 1 share
		{"two txs", generateNormalParsedTxs(2), 2},            // 2 txs is approximately 624 bytes which fits in 2 shares
		{"ten txs", generateNormalParsedTxs(10), 7},           // 10 txs is approximately 3120 bytes which fits in 7 shares
		{"one hundred txs", generateNormalParsedTxs(100), 63}, // 100 txs is approximately 31200 bytes which fits in 63 shares
	}
	for _, tc := range testCases {
		// Run each case as a named subtest so failures identify the case;
		// previously the name field was declared but never used.
		t.Run(tc.name, func(t *testing.T) {
			got := estimateTxSharesUsed(tc.ptxs)
			assert.Equal(t, tc.want, got)
		})
	}
}

// The point of this test is to fail if anything to do with the serialization
// of index wrappers change, as changes could lead to tricky bugs.
func Test_expected_maxIndexWrapperOverhead(t *testing.T) {
Expand Down
8 changes: 8 additions & 0 deletions app/parse_txs.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,14 @@ type parsedTx struct {
shareIndexes []uint32
}

// isNormalTx reports whether p is an ordinary transaction, i.e. its normalTx
// field is populated.
func (p parsedTx) isNormalTx() bool {
	return len(p.normalTx) != 0
}

// isBlobTx reports whether p is a blob transaction. A parsedTx is treated as
// a blob transaction exactly when it is not a normal transaction.
func (p parsedTx) isBlobTx() bool {
	return !p.isNormalTx()
}

// parseTxs decodes raw tendermint txs along with checking for and processing
// blob transactions.
func parseTxs(txcfg client.TxConfig, rawTxs [][]byte) []parsedTx {
Expand Down
18 changes: 18 additions & 0 deletions app/process_proposal.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,15 @@ package app

import (
"bytes"
"fmt"
"sort"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/da"
"github.com/celestiaorg/celestia-app/pkg/inclusion"
"github.com/celestiaorg/celestia-app/pkg/shares"
blobtypes "github.com/celestiaorg/celestia-app/x/blob/types"
"github.com/celestiaorg/nmt/namespace"
"github.com/celestiaorg/rsmt2d"
sdk "github.com/cosmos/cosmos-sdk/types"
abci "github.com/tendermint/tendermint/abci/types"
Expand Down Expand Up @@ -42,6 +44,15 @@ func (app *App) ProcessProposal(req abci.RequestProcessProposal) abci.ResponsePr
}
}

for _, blob := range data.Blobs {
if !isValidBlobNamespace(blob.NamespaceID) {
logInvalidPropBlock(app.Logger(), req.Header, fmt.Sprintf("invalid blob namespace %v", blob.NamespaceID))
return abci.ResponseProcessProposal{
Result: abci.ResponseProcessProposal_REJECT,
}
}
}

dataSquare, err := shares.Split(data, true)
if err != nil {
logInvalidPropBlockError(app.Logger(), req.Header, "failure to compute shares from block data:", err)
Expand Down Expand Up @@ -146,6 +157,13 @@ func hasPFB(msgs []sdk.Msg) (*blobtypes.MsgPayForBlob, bool) {
return nil, false
}

// isValidBlobNamespace reports whether ns is a valid namespace for a blob:
// it must not be a reserved namespace, the parity shares namespace, or the
// tail padding namespace.
//
// Note: the parameter was renamed from `namespace` to `ns` because the
// original name shadowed the imported `namespace` package.
func isValidBlobNamespace(ns namespace.ID) bool {
	// Reserved namespaces are those lexicographically <= MaxReservedNamespace.
	isReserved := bytes.Compare(ns, appconsts.MaxReservedNamespace) <= 0
	isParity := bytes.Equal(ns, appconsts.ParitySharesNamespaceID)
	isTailPadding := bytes.Equal(ns, appconsts.TailPaddingNamespaceID)
	return !isReserved && !isParity && !isTailPadding
}

func logInvalidPropBlock(l log.Logger, h tmproto.Header, reason string) {
l.Error(
rejectedPropBlockLog,
Expand Down
17 changes: 1 addition & 16 deletions app/test/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -345,29 +345,14 @@ func (s *IntegrationTestSuite) TestShareInclusionProof() {
}

for _, hash := range hashes {
txResp, err := queryTx(val.ClientCtx, hash, true)
txResp, err := queryTx(val.ClientCtx, hash, false)
require.NoError(err)
require.Equal(abci.CodeTypeOK, txResp.TxResult.Code)

// verify that the transaction inclusion proof is valid
require.True(txResp.Proof.VerifyProof())

// get the transaction shares
node, err := val.ClientCtx.GetNode()
require.NoError(err)
blockRes, err := node.Block(context.Background(), &txResp.Height)
require.NoError(err)
beginTxShare, endTxShare, err := prove.TxSharePosition(blockRes.Block.Txs, uint64(txResp.Index))
require.NoError(err)

txProof, err := node.ProveShares(
context.Background(),
uint64(txResp.Height),
beginTxShare,
endTxShare,
)
require.NoError(err)
require.NoError(txProof.Validate(blockRes.Block.DataHash))

// get the blob shares
beginBlobShare, endBlobShare, err := prove.BlobShareRange(blockRes.Block.Txs[txResp.Index])
Expand Down
6 changes: 6 additions & 0 deletions app/testutil_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,3 +38,9 @@ func generateParsedTxsWithNIDs(t *testing.T, nids [][]byte, blobSizes [][]int) [
)
return parseTxs(encCfg.TxConfig, txs)
}

// generateNormalParsedTxs returns count parsed transactions, each of which is
// a normal (i.e. non-blob) raw send transaction.
func generateNormalParsedTxs(count int) []parsedTx {
	cfg := encoding.MakeConfig(ModuleEncodingRegisters...)
	rawTxs := blobfactory.GenerateManyRawSendTxs(cfg.TxConfig, count)
	return parseTxs(cfg.TxConfig, coretypes.Txs(rawTxs).ToSliceOfBytes())
}
10 changes: 7 additions & 3 deletions pkg/appconsts/appconsts.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,9 +94,13 @@ var (
// EvidenceNamespaceID is the namespace reserved for evidence.
EvidenceNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 3}

// TailTransactionPaddingNamespaceId is the namespace used for padding after
// all transactions (ordinary and PFBs) but before blobs.
TailTransactionPaddingNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 255}
// PayForBlobNamespaceID is the namespace reserved for PayForBlob transactions.
PayForBlobNamespaceID = namespace.ID{0, 0, 0, 0, 0, 0, 0, 4}

// ReservedNamespacePadding is the namespace used for padding after all
// reserved namespaces. In practice this padding is after transactions
// (ordinary and PFBs) but before blobs.
ReservedNamespacePadding = namespace.ID{0, 0, 0, 0, 0, 0, 0, 255}

// MaxReservedNamespace is the lexicographically largest namespace that is
// reserved for protocol use.
Expand Down
6 changes: 5 additions & 1 deletion pkg/prove/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,8 @@ func genOrigRowShares(data types.Data, startRow, endRow uint64) []shares.Share {
wantLen := (endRow + 1) * data.SquareSize
startPos := startRow * data.SquareSize

rawShares := shares.SplitTxs(data.Txs)
rawTxShares, pfbTxShares := shares.SplitTxs(data.Txs)
rawShares := append(rawTxShares, pfbTxShares...)
// return if we have enough shares
if uint64(len(rawShares)) >= wantLen {
return rawShares[startPos:wantLen]
Expand All @@ -221,6 +222,9 @@ func genOrigRowShares(data types.Data, startRow, endRow uint64) []shares.Share {
panic(err)
}

// TODO: does this need to account for padding between compact shares
// and the first blob?
// https://github.com/celestiaorg/celestia-app/issues/1226
rawShares = append(rawShares, blobShares...)

// return if we have enough shares
Expand Down
4 changes: 2 additions & 2 deletions pkg/prove/proof_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -268,11 +268,11 @@ func TestTxSharePosition(t *testing.T) {
positions[i] = startEndPoints{start: start, end: end}
}

splitShares := shares.SplitTxs(tt.txs)
txShares, _ := shares.SplitTxs(tt.txs)

for i, pos := range positions {
rawTx := []byte(tt.txs[i])
rawTxDataForRange, err := stripCompactShares(splitShares[pos.start : pos.end+1])
rawTxDataForRange, err := stripCompactShares(txShares[pos.start : pos.end+1])
assert.NoError(t, err)
assert.Contains(
t,
Expand Down
6 changes: 3 additions & 3 deletions pkg/shares/compact_shares_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func Test_processCompactShares(t *testing.T) {
t.Run(fmt.Sprintf("%s idendically sized", tc.name), func(t *testing.T) {
txs := testfactory.GenerateRandomTxs(tc.txCount, tc.txSize)

shares := SplitTxs(txs)
shares, _ := SplitTxs(txs)
rawShares := ToBytes(shares)

parsedTxs, err := parseCompactShares(rawShares, appconsts.SupportedShareVersions)
Expand All @@ -95,7 +95,7 @@ func Test_processCompactShares(t *testing.T) {
t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) {
txs := testfactory.GenerateRandomlySizedTxs(tc.txCount, tc.txSize)

shares := SplitTxs(txs)
shares, _ := SplitTxs(txs)
rawShares := ToBytes(shares)

parsedTxs, err := parseCompactShares(rawShares, appconsts.SupportedShareVersions)
Expand Down Expand Up @@ -158,7 +158,7 @@ func Test_parseCompactSharesErrors(t *testing.T) {
}

txs := testfactory.GenerateRandomTxs(2, appconsts.ContinuationCompactShareContentSize*4)
shares := SplitTxs(txs)
shares, _ := SplitTxs(txs)
rawShares := ToBytes(shares)

unsupportedShareVersion := 5
Expand Down
2 changes: 1 addition & 1 deletion pkg/shares/share_merging_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func TestParseShares(t *testing.T) {
blobOneNamespace := namespace.ID{1, 1, 1, 1, 1, 1, 1, 1}
blobTwoNamespace := namespace.ID{2, 2, 2, 2, 2, 2, 2, 2}

transactionShares := SplitTxs(generateRandomTxs(2, 1000))
transactionShares, _ := SplitTxs(generateRandomTxs(2, 1000))
transactionShareStart := transactionShares[0]
transactionShareContinuation := transactionShares[1]

Expand Down
Loading

0 comments on commit b6e2651

Please sign in to comment.