Skip to content

Commit

Permalink
reverting the stats computation under flag
Browse files Browse the repository at this point in the history
  • Loading branch information
Thejas-bhat authored and abhinavdangeti committed Jul 22, 2022
1 parent 587d1e5 commit f603c7a
Show file tree
Hide file tree
Showing 7 changed files with 17 additions and 43 deletions.
4 changes: 1 addition & 3 deletions contentcoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,9 +110,7 @@ func (c *chunkedContentCoder) Close() error {
}

func (c *chunkedContentCoder) incrementBytesWritten(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&c.bytesWritten, val)
-	}
+	atomic.AddUint64(&c.bytesWritten, val)
}

func (c *chunkedContentCoder) getBytesWritten() uint64 {
Expand Down
4 changes: 1 addition & 3 deletions docvalues.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,9 +147,7 @@ func (di *docValueReader) ResetBytesRead(val uint64) {
}

func (di *docValueReader) incrementBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&di.bytesRead, val)
-	}
+	atomic.AddUint64(&di.bytesRead, val)
}

func (di *docValueReader) BytesWritten() uint64 {
Expand Down
12 changes: 6 additions & 6 deletions intDecoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,9 +59,9 @@ func newChunkedIntDecoder(buf []byte, offset uint64, rv *chunkedIntDecoder) *chu
rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64])
n += uint64(read)
}
-	if CollectDiskStats {
-		atomic.AddUint64(&rv.bytesRead, n)
-	}
+
+	atomic.AddUint64(&rv.bytesRead, n)
+
rv.dataStartOffset = offset + n
return rv
}
Expand Down Expand Up @@ -91,9 +91,9 @@ func (d *chunkedIntDecoder) loadChunk(chunk int) error {
start += s
end += e
d.curChunkBytes = d.data[start:end]
-	if CollectDiskStats {
-		atomic.AddUint64(&d.bytesRead, uint64(len(d.curChunkBytes)))
-	}
+
+	atomic.AddUint64(&d.bytesRead, uint64(len(d.curChunkBytes)))
+
if d.r == nil {
d.r = newMemUvarintReader(d.curChunkBytes)
} else {
Expand Down
4 changes: 1 addition & 3 deletions intcoder.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,7 @@ func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) {
}

func (c *chunkedIntCoder) incrementBytesWritten(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&c.bytesWritten, val)
-	}
+	atomic.AddUint64(&c.bytesWritten, val)
}

func (c *chunkedIntCoder) getBytesWritten() uint64 {
Expand Down
12 changes: 2 additions & 10 deletions new.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,6 @@ var NewSegmentBufferNumResultsBump int = 100
var NewSegmentBufferNumResultsFactor float64 = 1.0
var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0

-// This flag controls the disk stats collection from the segment files
-// during indexing and querying
-var CollectDiskStats bool

// ValidateDocFields can be set by applications to perform additional checks
// on fields in a document being added to a new segment, by default it does
// nothing.
Expand Down Expand Up @@ -498,9 +494,7 @@ func (s *interim) getBytesWritten() uint64 {
}

func (s *interim) incrementBytesWritten(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&s.bytesWritten, val)
-	}
+	atomic.AddUint64(&s.bytesWritten, val)
}

func (s *interim) writeStoredFields() (
Expand Down Expand Up @@ -617,9 +611,7 @@ func (s *interim) writeStoredFields() (
}

func (s *interim) setBytesWritten(val uint64) {
-	if CollectDiskStats {
-		atomic.StoreUint64(&s.bytesWritten, val)
-	}
+	atomic.StoreUint64(&s.bytesWritten, val)
}

func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) {
Expand Down
8 changes: 2 additions & 6 deletions posting.go
Original file line number Diff line number Diff line change
Expand Up @@ -263,9 +263,7 @@ func (p *PostingsList) BytesRead() uint64 {
}

func (p *PostingsList) incrementBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&p.bytesRead, val)
-	}
+	atomic.AddUint64(&p.bytesRead, val)
}

func (p *PostingsList) BytesWritten() uint64 {
Expand Down Expand Up @@ -378,9 +376,7 @@ func (i *PostingsIterator) BytesRead() uint64 {
}

func (i *PostingsIterator) incrementBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&i.bytesRead, val)
-	}
+	atomic.AddUint64(&i.bytesRead, val)
}

func (i *PostingsIterator) BytesWritten() uint64 {
Expand Down
16 changes: 4 additions & 12 deletions segment.go
Original file line number Diff line number Diff line change
Expand Up @@ -228,9 +228,7 @@ func (s *Segment) loadConfig() error {
// read from the on-disk segment as part of the current
// query.
func (s *Segment) ResetBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.StoreUint64(&s.SegmentBase.bytesRead, val)
-	}
+	atomic.StoreUint64(&s.SegmentBase.bytesRead, val)
}

func (s *Segment) BytesRead() uint64 {
Expand All @@ -243,19 +241,15 @@ func (s *Segment) BytesWritten() uint64 {
}

func (s *Segment) incrementBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&s.bytesRead, val)
-	}
+	atomic.AddUint64(&s.bytesRead, val)
}

func (s *SegmentBase) BytesWritten() uint64 {
return atomic.LoadUint64(&s.bytesWritten)
}

func (s *SegmentBase) setBytesWritten(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&s.bytesWritten, val)
-	}
+	atomic.AddUint64(&s.bytesWritten, val)
}

func (s *SegmentBase) BytesRead() uint64 {
Expand All @@ -265,9 +259,7 @@ func (s *SegmentBase) BytesRead() uint64 {
func (s *SegmentBase) ResetBytesRead(val uint64) {}

func (s *SegmentBase) incrementBytesRead(val uint64) {
-	if CollectDiskStats {
-		atomic.AddUint64(&s.bytesRead, val)
-	}
+	atomic.AddUint64(&s.bytesRead, val)
}

func (s *SegmentBase) loadFields() error {
Expand Down

0 comments on commit f603c7a

Please sign in to comment.