Enable chbenchmark in most CI pipelines (#519)
SQL Server support will happen in #517.

- [x] oracle chbenchmark
- ~~cockroachdb chbenchmark~~ skipping for now - see #525
bpkroth committed May 31, 2024
1 parent dbf3e89 commit fe3dbeb
Showing 7 changed files with 449 additions and 17 deletions.
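Each per-DBMS job below follows the same pattern: chbenchmark runs its analytical queries against the TPC-C tables, so the workflow first loads tpcc data with that profile's tpcc config and only then creates, loads, and executes chbenchmark with its own config. A condensed sketch of the added step, using the sqlite profile paths from the workflow below (`benchmark` stands in for the CI matrix value):

```bash
# Sketch of the pattern added to each CI job (sqlite profile shown).
# chbenchmark reuses the TPC-C schema and data, so tpcc is loaded first
# (execute=false); chbenchmark then creates its own tables and runs its queries.
benchmark=chbenchmark
if [ "${benchmark}" == chbenchmark ]; then
    java -jar benchbase.jar -b tpcc -c config/sqlite/sample_tpcc_nosync_config.xml \
        --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b "${benchmark}" -c "config/sqlite/sample_${benchmark}_config.xml" \
    --create=true --load=true --execute=true --json-histograms results/histograms.json
```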
34 changes: 29 additions & 5 deletions .github/workflows/maven.yml
@@ -115,7 +115,7 @@ jobs:
fail-fast: false
matrix:
# BROKEN: tpch
benchmark: [ 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
benchmark: [ 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
steps:
- name: Download artifact
uses: actions/download-artifact@v4
@@ -149,6 +149,10 @@ jobs:
echo "The ${{matrix.benchmark}} benchmark is not supported for sqlite."
exit 0
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
# Disable synchronous mode for sqlite tpcc data loading to save some time.
java -jar benchbase.jar -b tpcc -c config/sqlite/sample_tpcc_nosync_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/sqlite/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -179,7 +183,7 @@ jobs:
fail-fast: false
matrix:
# FIXME: Add tpch back in (#333).
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
mariadb: # https://hub.docker.com/_/mariadb
image: mariadb:latest
@@ -233,6 +237,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh mariadb) &
java -jar benchbase.jar -b tpcc -c config/mariadb/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/mariadb/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/mariadb/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -258,7 +265,7 @@ jobs:
strategy:
fail-fast: false
matrix:
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
mysql: # https://hub.docker.com/_/mysql
image: mysql:latest
@@ -311,6 +318,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh mysql) &
java -jar benchbase.jar -b tpcc -c config/mysql/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/mysql/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/mysql/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -336,7 +346,7 @@ jobs:
strategy:
fail-fast: false
matrix:
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
oracle:
image: gvenzl/oracle-xe:21.3.0-slim-faststart
@@ -393,6 +403,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh oracle) &
java -jar benchbase.jar -b tpcc -c config/oracle/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/oracle/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/oracle/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -424,7 +437,7 @@ jobs:
strategy:
fail-fast: false
matrix:
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
steps:
# Note: we download just the docker-compose scripts/configs rather than the
# whole source code repo for better testing.
@@ -479,6 +492,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh postgres) &
java -jar benchbase.jar -b tpcc -c config/postgres/sample_tpcc_config.xml -im 1000 -mt advanced --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/postgres/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/postgres/sample_${{matrix.benchmark}}_config.xml -im 1000 -mt advanced --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -515,12 +531,14 @@ jobs:
fail-fast: false
matrix:
# TODO: Add tpcc-with-reconnects benchmark support
# TODO: Add chbenchmark benchmark support
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
cockroach: # https://hub.docker.com/repository/docker/timveil/cockroachdb-single-node
image: timveil/cockroachdb-single-node:latest
env:
DATABASE_NAME: benchbase
# TODO: Expand for additional config adjustments (See Also: #405, #519, #525)
MEMORY_SIZE: .75
ports:
- 26257:26257
@@ -557,6 +575,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh cockroachdb) &
java -jar benchbase.jar -b tpcc -c config/cockroachdb/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/cockroachdb/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/cockroachdb/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -655,6 +676,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh sqlserver) &
java -jar benchbase.jar -b tpcc -c config/sqlserver/sample_tpcc_config.xml -im 1000 -mt advanced --execute=true --json-histograms results/histograms.json
else
if [ ${{matrix.benchmark}} == chbenchmark ]; then
java -jar benchbase.jar -b tpcc -c config/sqlserver/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/sqlserver/sample_${{matrix.benchmark}}_config.xml -im 1000 -mt advanced --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
161 changes: 161 additions & 0 deletions config/sqlite/sample_chbenchmark_config.xml
@@ -0,0 +1,161 @@
<?xml version="1.0"?>
<parameters>

<!-- Connection details -->
<type>SQLITE</type>
<driver>org.sqlite.JDBC</driver>
<url>jdbc:sqlite:tpcc.db</url>
<isolation>TRANSACTION_SERIALIZABLE</isolation>
<batchsize>128</batchsize>

<!-- Scale factor is the number of warehouses in TPCC -->
<scalefactor>1</scalefactor>

<!-- SQLITE only supports one writer thread -->
<loaderThreads>1</loaderThreads>

<!-- The workload -->
<!-- Number of terminals per workload -->
<terminals>1</terminals>

<!-- Extra Features (Commented Out) -->
<!-- Can be workload-specific -->
<!-- <terminals bench="tpcc">2</terminals> -->

<!-- Workload-specific options are marked with @bench=[workload_name] -->
<!-- Workload-specific number of terminals -->
<!-- <terminals bench="chbenchmark">2</terminals> -->

<works>

<!-- A Basic WorkPhase for Mixed Workloads -->
<work>
<time>60</time>

<!-- Note: The rate can be set to UNLIMITED or DISABLED -->
<rate>200</rate>

<!-- Transaction weights must be specified for each workload; otherwise the number of fields won't match -->
<weights bench="tpcc">45,43,4,4,4</weights>
<weights bench="chbenchmark">3, 2, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5</weights>
</work>

<!-- Extra features showcase -->
<!-- <work> -->
<!-- <time>60</time> -->

<!-- <rate>200</rate> -->
<!-- <rate bench="chbenchmark">disabled</rate> -->

<!-- NOTE: TPCC workers won't be distributed evenly between warehouses if not all workers are active -->
<!-- <active_terminals>1</active_terminals> -->
<!-- <active_terminals bench="chbenchmark">1</active_terminals> -->

<!-- Specifies transaction weight for each workload. -->
<!-- <weights bench="tpcc">45,43,4,4,4</weights> -->
<!-- <weights bench="chbenchmark">3, 2, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5</weights> -->
<!-- </work> -->

<!--
<work>
<time>60</time>
<rate>100</rate>
<rate bench="chbenchmark">unlimited</rate>
<weights bench="tpcc">45,43,4,4,4</weights>
<weights bench="chbenchmark">3, 2, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5</weights>
</work>
-->
</works>


<!-- CH specific -->
<transactiontypes bench="chbenchmark">
<transactiontype>
<name>Q1</name>
</transactiontype>
<transactiontype>
<name>Q2</name>
</transactiontype>
<transactiontype>
<name>Q3</name>
</transactiontype>
<transactiontype>
<name>Q4</name>
</transactiontype>
<transactiontype>
<name>Q5</name>
</transactiontype>
<transactiontype>
<name>Q6</name>
</transactiontype>
<transactiontype>
<name>Q7</name>
</transactiontype>
<transactiontype>
<name>Q8</name>
</transactiontype>
<transactiontype>
<name>Q9</name>
</transactiontype>
<transactiontype>
<name>Q10</name>
</transactiontype>
<transactiontype>
<name>Q11</name>
</transactiontype>
<transactiontype>
<name>Q12</name>
</transactiontype>
<transactiontype>
<name>Q13</name>
</transactiontype>
<transactiontype>
<name>Q14</name>
</transactiontype>
<transactiontype>
<name>Q15</name>
</transactiontype>
<transactiontype>
<name>Q16</name>
</transactiontype>
<transactiontype>
<name>Q17</name>
</transactiontype>
<transactiontype>
<name>Q18</name>
</transactiontype>
<transactiontype>
<name>Q19</name>
</transactiontype>
<transactiontype>
<name>Q20</name>
</transactiontype>
<transactiontype>
<name>Q21</name>
</transactiontype>
<transactiontype>
<name>Q22</name>
</transactiontype>
</transactiontypes>

<!-- TPCC specific -->
<transactiontypes bench="tpcc">
<transactiontype>
<name>NewOrder</name>
</transactiontype>
<transactiontype>
<name>Payment</name>
</transactiontype>
<transactiontype>
<name>OrderStatus</name>
</transactiontype>
<transactiontype>
<name>Delivery</name>
</transactiontype>
<transactiontype>
<name>StockLevel</name>
</transactiontype>
</transactiontypes>
</parameters>
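The `bench="tpcc"` and `bench="chbenchmark"` attributes above only take effect when both workloads are part of the same run. The CI jobs in this commit load tpcc and run chbenchmark as two separate invocations, but the per-bench weights suggest a combined mixed-workload run roughly like the sketch below (the comma-separated `-b tpcc,chbenchmark` form is an assumption, not something exercised by this commit):

```bash
# Hypothetical mixed TPC-C + CH-benCHmark run against the sqlite profile:
# create and load both schemas into tpcc.db, then execute transactional and
# analytical queries together using the per-bench weights from this config.
java -jar benchbase.jar -b tpcc,chbenchmark -c config/sqlite/sample_chbenchmark_config.xml \
    --create=true --load=true --execute=true
```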
4 changes: 2 additions & 2 deletions config/sqlite/sample_epinions_config.xml
@@ -4,13 +4,13 @@
<!-- Connection details -->
<type>SQLITE</type>
<driver>org.sqlite.JDBC</driver>
<url>jdbc:sqlite:resourcestresser.db</url>
<url>jdbc:sqlite:epinions.db</url>
<isolation>TRANSACTION_SERIALIZABLE</isolation>
<batchsize>128</batchsize>

<!-- In Epinions, the scalefactor scales the number of users by 2000 -->
<scalefactor>0.1</scalefactor>

<!-- SQLITE only supports one writer thread -->
<loaderThreads>1</loaderThreads>

23 changes: 16 additions & 7 deletions docker/build-run-benchmark-with-docker.sh
@@ -33,7 +33,7 @@ if [ "$BENCHBASE_PROFILE" == 'sqlite' ]; then
fi
EXTRA_DOCKER_ARGS="-v $SRC_DIR/$benchmark.db:/benchbase/profiles/sqlite/$benchmark.db"

if [ "$benchmark" == 'templated' ]; then
if echo "$benchmark" | egrep -qx '(templated|chbenchmark)'; then
# See notes below:
EXTRA_DOCKER_ARGS+=" -v $SRC_DIR/$benchmark.db:/benchbase/profiles/sqlite/tpcc.db"
fi
@@ -49,7 +49,7 @@ if [ "${SKIP_LOAD_DB:-false}" != 'true' ]; then
# For templated benchmarks, we need to preload some data for the test since by
# design, templated benchmarks do not support the 'load' operation
# In this case, we load the tpcc data.
if [ "$benchmark" == 'templated' ]; then
if echo "$benchmark" | egrep -qx '(templated|chbenchmark)'; then
load_benchmark='tpcc'

echo "INFO: Loading tpcc data for templated benchmark"
@@ -59,15 +59,24 @@
else
config="config/sample_tpcc_config.xml"
fi
else

BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
./docker/benchbase/run-full-image.sh \
--config "$config" --bench "$load_benchmark" \
--create=true --load=true --execute=false
fi

# For chbenchmark, we also load its data in addition to tpcc.
if ! echo "$benchmark" | egrep -qx '(templated)'; then
echo "INFO: Loading $benchmark data"
load_benchmark="$benchmark"
config="config/sample_${benchmark}_config.xml"

BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
./docker/benchbase/run-full-image.sh \
--config "$config" --bench "$load_benchmark" \
--create=true --load=true --execute=false
fi
BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
./docker/benchbase/run-full-image.sh \
--config "$config" --bench "$load_benchmark" \
--create=true --load=true --execute=false
else
echo "INFO: Skipping load of $benchmark data"
fi
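Because the rendered diff above interleaves old and new lines, here is a simplified sketch of the load flow the new version of the script implements (docker wrapper arguments and the profile-specific config selection are omitted; see the hunk above for the exact commands):

```bash
# 1) templated and chbenchmark do not load the TPC-C data they query, so the
#    script first preloads it via the tpcc config;
# 2) every benchmark except templated then creates/loads its own tables
#    (for chbenchmark that is the region/nation/supplier tables added below).
if echo "$benchmark" | egrep -qx '(templated|chbenchmark)'; then
    ./docker/benchbase/run-full-image.sh --config "config/sample_tpcc_config.xml" \
        --bench tpcc --create=true --load=true --execute=false
fi
if ! echo "$benchmark" | egrep -qx '(templated)'; then
    ./docker/benchbase/run-full-image.sh --config "config/sample_${benchmark}_config.xml" \
        --bench "$benchmark" --create=true --load=true --execute=false
fi
```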
31 changes: 31 additions & 0 deletions src/main/resources/benchmarks/chbenchmark/ddl-sqlite.sql
@@ -0,0 +1,31 @@
DROP TABLE IF EXISTS supplier;
DROP TABLE IF EXISTS nation;
DROP TABLE IF EXISTS region;

create table region
(
r_regionkey int not null,
r_name char(55) not null,
r_comment char(152) not null,
PRIMARY KEY (r_regionkey)
);

create table nation
(
n_nationkey int not null,
n_name char(25) not null,
n_regionkey int not null references region(r_regionkey) ON DELETE CASCADE,
n_comment char(152) not null,
PRIMARY KEY ( n_nationkey )
);

create table supplier (
su_suppkey int not null,
su_name char(25) not null,
su_address varchar(40) not null,
su_nationkey int not null references nation(n_nationkey) ON DELETE CASCADE,
su_phone char(15) not null,
su_acctbal numeric(12,2) not null,
su_comment char(101) not null,
PRIMARY KEY ( su_suppkey )
);
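A quick way to check that this DDL took effect after a chbenchmark --create/--load run against the sqlite profile (assuming the sqlite3 CLI is available and the database file is tpcc.db, as in the sample config above):

```bash
# The three CH-only tables should now exist alongside the TPC-C schema, and
# supplier should contain the rows inserted by the chbenchmark loader.
sqlite3 tpcc.db '.tables' | tr -s ' ' '\n' | grep -Ex 'region|nation|supplier'
sqlite3 tpcc.db 'SELECT COUNT(*) FROM supplier;'
```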