Skip to content

Commit

Permalink
feat!: use BlobTx instead of MsgWirePayForBlob (#1089)
Browse files Browse the repository at this point in the history
## Overview

reference PR for everything that needs to be changed to remove the wire
PFB. This PR **should not** be merged. While there will be at least one
rather large PR, many of these changes can be broken up into chunks.

This PR is heavily breaking and is currently blocked by
celestiaorg/celestia-core#893

closes #1098
is the implementation for #1005 

## Checklist

- [x] New and updated code has appropriate documentation
- [x] New and updated code has new and/or updated testing
- [x] Required CI checks are passing
- [x] Visual proof for any user facing features like CLI or
documentation updates
- [x] Linked issues closed with keywords

Co-authored-by: Rootul P <rootulp@gmail.com>
Co-authored-by: Callum Waters <cmwaters19@gmail.com>
  • Loading branch information
3 people committed Dec 13, 2022
1 parent 00e31f8 commit 12e77d0
Show file tree
Hide file tree
Showing 50 changed files with 1,915 additions and 2,537 deletions.
2 changes: 1 addition & 1 deletion app/app.go
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ func New(
cdc := encodingConfig.Amino
interfaceRegistry := encodingConfig.InterfaceRegistry

bApp := baseapp.NewBaseApp(Name, logger, db, encoding.MalleatedTxDecoder(encodingConfig.TxConfig.TxDecoder()), baseAppOptions...)
bApp := baseapp.NewBaseApp(Name, logger, db, encoding.WrappedTxDecoder(encodingConfig.TxConfig.TxDecoder()), baseAppOptions...)
bApp.SetCommitMultiStoreTracer(traceStore)
bApp.SetVersion(version.Version)
bApp.SetInterfaceRegistry(interfaceRegistry)
Expand Down
43 changes: 43 additions & 0 deletions app/check_tx.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
package app

import (
"fmt"

"github.com/celestiaorg/celestia-app/x/blob/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
abci "github.com/tendermint/tendermint/abci/types"
coretypes "github.com/tendermint/tendermint/types"
)

// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. This
// method wraps the default Baseapp's method so that we can handle the parsing
// and checking of blob containing transactions
// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. This
// method wraps the default Baseapp's method so that we can handle the parsing
// and checking of blob containing transactions
func (app *App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	// transactions that do not carry blobs are handled by the default baseapp
	// logic without any extra processing
	blobTx, isBlob := coretypes.UnmarshalBlobTx(req.Tx)
	if !isBlob {
		return app.BaseApp.CheckTx(req)
	}

	switch req.Type {
	case abci.CheckTxType_New:
		// new transactions must be checked in their entirety
		processed, err := types.ProcessBlobTx(app.txConfig, blobTx)
		if err != nil {
			return sdkerrors.ResponseCheckTxWithEvents(err, 0, 0, []abci.Event{}, false)
		}
		req.Tx = processed.Tx
	case abci.CheckTxType_Recheck:
		// only replace the current transaction with the unwrapped one, as we
		// have already performed the necessary check for new transactions
		req.Tx = blobTx.Tx
	default:
		panic(fmt.Sprintf("unknown RequestCheckTx type: %s", req.Type))
	}

	return app.BaseApp.CheckTx(req)
}
4 changes: 2 additions & 2 deletions app/encoding/malleated_tx_decoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,9 @@ import (
coretypes "github.com/tendermint/tendermint/types"
)

func MalleatedTxDecoder(dec sdk.TxDecoder) sdk.TxDecoder {
func WrappedTxDecoder(dec sdk.TxDecoder) sdk.TxDecoder {
return func(txBytes []byte) (sdk.Tx, error) {
if malleatedTx, has := coretypes.UnwrapMalleatedTx(txBytes); has {
if malleatedTx, has := coretypes.UnmarshalIndexWrapper(txBytes); has {
return dec(malleatedTx.Tx)
}
return dec(txBytes)
Expand Down
234 changes: 60 additions & 174 deletions app/estimate_square_size.go
Original file line number Diff line number Diff line change
@@ -1,204 +1,90 @@
package app

import (
"bytes"
"math"
"sort"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/cosmos/cosmos-sdk/client"
coretypes "github.com/tendermint/tendermint/types"
)

// prune removes txs until the set of txs will fit in the square of size
// squareSize. It assumes that the currentShareCount is accurate. This function
// is far from optimal because accurately knowing how many shares any given
// set of transactions and its blob takes up in a data square that is following the
// non-interactive default rules requires recalculating the entire square.
// TODO: include the padding used by each blob when counting removed shares
func prune(txConf client.TxConfig, txs []*parsedTx, currentShareCount, squareSize int) parsedTxs {
maxShares := squareSize * squareSize
if maxShares >= currentShareCount {
return txs
}
goal := currentShareCount - maxShares

removedContiguousShares := 0
contigBytesCursor := 0
removedBlobShares := 0
removedTxs := 0

// adjustContigCursor checks if enough contiguous bytes have been removed
// inorder to tally total contiguous shares removed
adjustContigCursor := func(l int) {
contigBytesCursor += l + shares.DelimLen(uint64(l))
if contigBytesCursor >= appconsts.ContinuationCompactShareContentSize {
removedContiguousShares += (contigBytesCursor / appconsts.ContinuationCompactShareContentSize)
contigBytesCursor = contigBytesCursor % appconsts.ContinuationCompactShareContentSize
}
}

for i := len(txs) - 1; (removedContiguousShares + removedBlobShares) < goal; i-- {
// this normally doesn't happen, but since we don't calculate the number
// of padded shares also being removed, its possible to reach this value
// should there be many small blobs, and we don't want to panic.
if i < 0 {
break
}
removedTxs++
if txs[i].msg == nil {
adjustContigCursor(len(txs[i].rawTx))
// estimateSquareSize uses the provided block data to over estimate the square
// size and the starting share index of non-reserved namespaces. The estimates
// returned are liberal in the sense that we assume close to worst case and
// round up.
//
// NOTE: The estimation process does not have to be perfect. We can overestimate
// because the cost of padding is limited.
func estimateSquareSize(txs []parsedTx) (squareSize uint64, nonreserveStart int) {
txSharesUsed := estimateCompactShares(appconsts.DefaultMaxSquareSize, txs)
blobSharesUsed := 0

for _, ptx := range txs {
if len(ptx.normalTx) != 0 {
continue
}

removedBlobShares += shares.BlobSharesUsed(len(txs[i].msg.GetBlob()))
// we ignore the error here, as if there is an error malleating the tx,
// then we need to remove it anyway and it will not end up contributing
// bytes to the square anyway.
_ = txs[i].malleate(txConf)
adjustContigCursor(len(txs[i].malleatedTx) + appconsts.MalleatedTxBytes)
}

return txs[:len(txs)-(removedTxs)]
}

// calculateCompactShareCount calculates the exact number of compact shares used.
func calculateCompactShareCount(txs []*parsedTx, squareSize int) int {
txSplitter := shares.NewCompactShareSplitter(appconsts.TxNamespaceID, appconsts.ShareVersionZero)
var err error
blobSharesCursor := len(txs)
for _, tx := range txs {
rawTx := tx.rawTx
if tx.malleatedTx != nil {
rawTx, err = coretypes.WrapMalleatedTx(tx.originalHash(), uint32(blobSharesCursor), tx.malleatedTx)
if err != nil {
panic(err)
}
used, _ := shares.BlobSharesUsedNonInteractiveDefaults(blobSharesCursor, squareSize, tx.msg.Size())
blobSharesCursor += used
}
txSplitter.WriteTx(rawTx)
blobSharesUsed += shares.BlobSharesUsed(ptx.blobTx.DataUsed())
}
return txSplitter.Count()
}

// estimateSquareSize uses the provided block data to estimate the square size
// assuming that all malleated txs follow the non interactive default rules.
// Returns the estimated square size and the number of shares used.
func estimateSquareSize(txs []*parsedTx) (uint64, int) {
// get the raw count of shares taken by each type of block data
txShares, msgLens := rawShareCount(txs)
msgShares := 0
for _, msgLen := range msgLens {
msgShares += msgLen
// assume that we have to add a lot of padding by simply doubling the number
// of shares used
//
// TODO: use a more precise estimation that doesn't over
// estimate as much
totalSharesUsed := uint64(txSharesUsed + blobSharesUsed)
totalSharesUsed *= 2
minSize := uint64(math.Sqrt(float64(totalSharesUsed)))
squareSize = shares.RoundUpPowerOfTwo(minSize)
if squareSize >= appconsts.DefaultMaxSquareSize {
squareSize = appconsts.DefaultMaxSquareSize
}

// calculate the smallest possible square size that could contain all the
// shares
squareSize := shares.RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(txShares + msgShares)))))

// the starting square size should at least be the minimum
if squareSize < appconsts.DefaultMinSquareSize {
if squareSize <= appconsts.DefaultMinSquareSize {
squareSize = appconsts.DefaultMinSquareSize
}

var fits bool
for {
// assume that all the msgs in the square use the non-interactive
// default rules and see if we can fit them in the smallest starting
// square size. We start the cursor (share index) at the beginning of
// the blob shares (txShares), because shares that do not
// follow the non-interactive defaults are simple to estimate.
fits, msgShares = shares.FitsInSquare(txShares, squareSize, msgLens...)
switch {
// stop estimating if we know we can reach the max square size
case squareSize >= appconsts.DefaultMaxSquareSize:
return appconsts.DefaultMaxSquareSize, txShares + msgShares
// return if we've found a square size that fits all of the txs
case fits:
return uint64(squareSize), txShares + msgShares
// try the next largest square size if we can't fit all the txs
case !fits:
// double the square size
squareSize = shares.RoundUpPowerOfTwo(squareSize + 1)
}
}
return squareSize, txSharesUsed
}

// rawShareCount calculates the number of shares taken by all of the included
// txs and each blob. blobLens is a slice of the number of shares used by each
// blob without accounting for the non-interactive default rules.
func rawShareCount(txs []*parsedTx) (txShares int, blobLens []int) {
// blobSummary is used to keep track of the size and the namespace so that we
// can sort the blobs by namespace before returning.
type blobSummary struct {
// size is the number of shares used by this blob
size int
namespace []byte
}

var blobSummaries []blobSummary //nolint:prealloc

// we use bytes instead of shares for tx as they are encoded contiguously in
// the square, unlike blobs where each of which is assigned their own set of
// shares
txBytes := 0
for _, pTx := range txs {
// if there is no wire message in this tx, then we can simply add the
// bytes and move on.
if pTx.msg == nil {
txBytes += len(pTx.rawTx)
// estimateCompactShares estimates the number of shares used by compact shares
func estimateCompactShares(squareSize uint64, ptxs []parsedTx) int {
maxWTxOverhead := maxWrappedTxOverhead(squareSize)
txbytes := 0
for _, pTx := range ptxs {
if len(pTx.normalTx) != 0 {
txLen := len(pTx.normalTx)
txLen += shares.DelimLen(uint64(txLen))
txbytes += txLen
continue
}

// if there is a malleated tx, then we want to also account for the
// txs that get included on-chain. The formula used here over
// compensates for the actual size of the blob, and in some cases can
// result in some wasted square space or picking a square size that is
// too large. TODO: improve by making a more accurate estimation formula
txBytes += overEstimateMalleatedTxSize(len(pTx.rawTx), len(pTx.msg.Blob))

blobSummaries = append(blobSummaries, blobSummary{shares.BlobSharesUsed(int(pTx.msg.BlobSize)), pTx.msg.NamespaceId})
txLen := len(pTx.blobTx.Tx) + maxWTxOverhead
txLen += shares.DelimLen(uint64(txLen))
txbytes += txLen
}

txShares = txBytes / appconsts.ContinuationCompactShareContentSize
if txBytes > 0 {
txShares++ // add one to round up
}
// todo: stop rounding up. Here we're rounding up because the calculation for
// tx bytes isn't perfect. This catches those edge cases where we
// estimate the exact number of shares in the square, when in reality we're
// one byte over the number of shares in the square size. This will also cause
// blocks that are one square size too big instead of being perfectly snug.
// The estimation must be perfect or greater than what the square actually
// ends up being.
if txShares > 0 {
txShares++
sharesUsed := 1
if txbytes <= appconsts.FirstCompactShareContentSize {
return sharesUsed
}

// sort the blobSummaries by namespace to order them properly. This is okay to do here
// as we aren't sorting the actual txs, just their summaries for more
// accurate estimations
sort.Slice(blobSummaries, func(i, j int) bool {
return bytes.Compare(blobSummaries[i].namespace, blobSummaries[j].namespace) < 0
})
// account for the first share
txbytes -= appconsts.FirstCompactShareContentSize
sharesUsed += (txbytes / appconsts.ContinuationCompactShareContentSize) + 1 // add 1 to round up and another to account for the first share

// isolate the sizes as we no longer need the namespaces
blobShares := make([]int, len(blobSummaries))
for i, summary := range blobSummaries {
blobShares[i] = summary.size
}
return txShares, blobShares
return sharesUsed
}

// overEstimateMalleatedTxSize estimates the size of a malleated tx. The formula it uses will always over estimate.
func overEstimateMalleatedTxSize(txLen, blobSize int) int {
// the malleated tx uses the original txLen to account for metadata from
// the original tx, but removes the blob
malleatedTxLen := txLen - blobSize
// we need to ensure that the returned number is at least larger than or
// equal to the actual number, which is difficult to calculate without
// actually malleating the tx
return appconsts.MalleatedTxBytes + appconsts.MalleatedTxEstimateBuffer + malleatedTxLen
// maxWrappedTxOverhead calculates the maximum amount of overhead introduced by
// wrapping a transaction with a shares index
//
// TODO: make more efficient by only generating these numbers once or something
// similar. This function alone can take up to 5ms.
func maxWrappedTxOverhead(squareSize uint64) int {
	// use the largest tx that could fit in the square so the encoded length
	// prefixes in the wrapper are at their widest
	maxTxLen := squareSize * squareSize * appconsts.ContinuationCompactShareContentSize
	wrapped, err := coretypes.MarshalIndexWrapper(
		uint32(squareSize*squareSize),
		make([]byte, maxTxLen),
	)
	if err != nil {
		panic(err)
	}
	// the overhead is whatever the wrapper added beyond the tx bytes
	return len(wrapped) - int(maxTxLen)
}
Loading

0 comments on commit 12e77d0

Please sign in to comment.