From 1c4c3106fd9a04f502ac85814bf91e177c1fc7b7 Mon Sep 17 00:00:00 2001 From: tiboratAS Date: Tue, 14 Mar 2017 06:49:54 -0700 Subject: [PATCH 01/16] [aerospike] Change the write policy to REPLACE_ONLY (#937) The original Aerospike interface layer was created when the REPLACE_ONLY option was not available. This provides a policy more in line with the policies that other databases have implemented their interface layers. --- aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java index 5aa80e46cf..e651a7e61e 100644 --- a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java +++ b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java @@ -57,7 +57,7 @@ public class AerospikeClient extends com.yahoo.ycsb.DB { @Override public void init() throws DBException { insertPolicy.recordExistsAction = RecordExistsAction.CREATE_ONLY; - updatePolicy.recordExistsAction = RecordExistsAction.UPDATE_ONLY; + updatePolicy.recordExistsAction = RecordExistsAction.REPLACE_ONLY; Properties props = getProperties(); From 78c3cfaedb51af9809cacde2f1c62eeb045acb91 Mon Sep 17 00:00:00 2001 From: siamaktz Date: Fri, 17 Mar 2017 13:43:27 -0700 Subject: [PATCH 02/16] [cloudspanner] Add binding for Google's Cloud Spanner. 
(#939) --- bin/bindings.properties | 1 + bin/ycsb | 5 +- cloudspanner/README.md | 111 +++++ cloudspanner/conf/cloudspanner.properties | 26 ++ cloudspanner/pom.xml | 53 +++ .../db/cloudspanner/CloudSpannerClient.java | 397 ++++++++++++++++++ .../ycsb/db/cloudspanner/package-info.java | 22 + distribution/pom.xml | 5 + pom.xml | 2 + 9 files changed, 620 insertions(+), 2 deletions(-) create mode 100644 cloudspanner/README.md create mode 100644 cloudspanner/conf/cloudspanner.properties create mode 100644 cloudspanner/pom.xml create mode 100644 cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java create mode 100644 cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/package-info.java diff --git a/bin/bindings.properties b/bin/bindings.properties index 24ffa09f64..231a3a240c 100644 --- a/bin/bindings.properties +++ b/bin/bindings.properties @@ -34,6 +34,7 @@ azuretablestorage:com.yahoo.ycsb.db.azuretablestorage.AzureClient basic:com.yahoo.ycsb.BasicDB cassandra-cql:com.yahoo.ycsb.db.CassandraCQLClient cassandra2-cql:com.yahoo.ycsb.db.CassandraCQLClient +cloudspanner:com.yahoo.ycsb.db.cloudspanner.CloudSpannerClient couchbase:com.yahoo.ycsb.db.CouchbaseClient couchbase2:com.yahoo.ycsb.db.couchbase2.Couchbase2Client azuredocumentdb:com.yahoo.ycsb.db.azuredocumentdb.AzureDocumentDBClient diff --git a/bin/ycsb b/bin/ycsb index e41786b4fc..d454c6cb0c 100755 --- a/bin/ycsb +++ b/bin/ycsb @@ -54,14 +54,15 @@ DATABASES = { "accumulo" : "com.yahoo.ycsb.db.accumulo.AccumuloClient", "aerospike" : "com.yahoo.ycsb.db.AerospikeClient", "arangodb" : "com.yahoo.ycsb.db.ArangoDBClient", - "arangodb3" : "com.yahoo.ycsb.db.arangodb.ArangoDB3Client", + "arangodb3" : "com.yahoo.ycsb.db.arangodb.ArangoDB3Client", "asynchbase" : "com.yahoo.ycsb.db.AsyncHBaseClient", "basic" : "com.yahoo.ycsb.BasicDB", "cassandra-cql": "com.yahoo.ycsb.db.CassandraCQLClient", "cassandra2-cql": "com.yahoo.ycsb.db.CassandraCQLClient", + "cloudspanner" : 
"com.yahoo.ycsb.db.cloudspanner.CloudSpannerClient", "couchbase" : "com.yahoo.ycsb.db.CouchbaseClient", "couchbase2" : "com.yahoo.ycsb.db.couchbase2.Couchbase2Client", - "azuredocumentdb" : "com.yahoo.ycsb.db.azuredocumentdb.AzureDocumentDBClient", + "azuredocumentdb" : "com.yahoo.ycsb.db.azuredocumentdb.AzureDocumentDBClient", "dynamodb" : "com.yahoo.ycsb.db.DynamoDBClient", "elasticsearch": "com.yahoo.ycsb.db.ElasticsearchClient", "geode" : "com.yahoo.ycsb.db.GeodeClient", diff --git a/cloudspanner/README.md b/cloudspanner/README.md new file mode 100644 index 0000000000..8b60e38c0a --- /dev/null +++ b/cloudspanner/README.md @@ -0,0 +1,111 @@ + + +# Cloud Spanner Driver for YCSB + +This driver provides a YCSB workload binding for Google's Cloud Spanner database, the first relational database service that is both strongly consistent and horizontally scalable. This binding is implemented using the official Java client library for Cloud Spanner which uses GRPC for making calls. + +For best results, we strongly recommend running the benchmark from a Google Compute Engine (GCE) VM. + +## Running a Workload + +We recommend reading the [general guidelines](https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload) in the YCSB documentation, and following the Cloud Spanner specific steps below. + +### 1. Set up Cloud Spanner with the Expected Schema + +Follow the [Quickstart instructions](https://cloud.google.com/spanner/docs/quickstart-console) in the Cloud Spanner documentation to set up a Cloud Spanner instance, and create a database with the following schema: + +``` +CREATE TABLE usertable ( + id STRING(MAX), + field0 STRING(MAX), + field1 STRING(MAX), + field2 STRING(MAX), + field3 STRING(MAX), + field4 STRING(MAX), + field5 STRING(MAX), + field6 STRING(MAX), + field7 STRING(MAX), + field8 STRING(MAX), + field9 STRING(MAX), +) PRIMARY KEY(id); +``` +Make note of your project ID, instance ID, and database name. + +### 2. 
Set Up Your Environment and Auth + +Follow the [set up instructions](https://cloud.google.com/spanner/docs/getting-started/set-up) in the Cloud Spanner documentation to set up your environment and authentication. When not running on a GCE VM, make sure you run `gcloud auth application-default login`. + +### 3. Edit Properties + +In your YCSB root directory, edit `cloudspanner/conf/cloudspanner.properties` and specify your project ID, instance ID, and database name. + +### 4. Run the YCSB Shell + +Start the YCSB shell connected to Cloud Spanner using the following command: + +``` +./bin/ycsb shell cloudspanner -P cloudspanner/conf/cloudspanner.properties +``` + +You can use the `insert`, `read`, `update`, `scan`, and `delete` commands in the shell to experiment with your database and make sure the connection works. For example, try the following: + +``` +insert name field0=adam +read name field0 +delete name +``` + +### 5. Load the Data + +You can load, say, 10 GB of data into your YCSB database using the following command: + +``` +./bin/ycsb load cloudspanner -P cloudspanner/conf/cloudspanner.properties -P workloads/workloada -p recordcount=10000000 -p cloudspanner.batchinserts=1000 -threads 10 -s +``` + +We recommend batching insertions so as to reach ~1 MB of data per commit request; this is controlled via the `cloudspanner.batchinserts` parameter which we recommend setting to `1000` during data load. + +If you wish to load a large database, you can run YCSB on multiple client VMs in parallel and use the `insertstart` and `insertcount` parameters to distribute the load as described [here](https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload-in-Parallel). 
In this case, we recommend the following: + +* Use ordered inserts via specifying the YCSB parameter `insertorder=ordered`; +* Use zero-padding so that ordered inserts are actually lexicographically ordered; the option `zeropadding = 12` is set in the default `cloudspanner.properties` file; +* Split the key range evenly between client VMs; +* Use few threads on each client VM, so that each individual commit request contains keys which are (close to) consecutive, and would thus likely address a single split; this also helps avoid overloading the servers. + +The idea is that we have a number of 'write heads' which are all writing to different parts of the database (and thus talking to different servers), but each individual head is writing its own data (more or less) in order. See the [best practices page](https://cloud.google.com/spanner/docs/best-practices#loading_data) for further details. + +### 6. Run a Workload + +After data load, you can run a workload, say, workload B, using the following command: + +``` +./bin/ycsb run cloudspanner -P cloudspanner/conf/cloudspanner.properties -P workloads/workloadb -p recordcount=10000000 -p operationcount=1000000 -threads 10 -s +``` + +Make sure that you use the same `insertorder` (i.e. `ordered` or `hashed`) and `zeropadding` as specified during the data load. Further details about running workloads are given in the [YCSB wiki pages](https://github.com/brianfrankcooper/YCSB/wiki/Running-a-Workload). + +## Configuration Options + +In addition to the standard YCSB parameters, the following Cloud Spanner specific options can be configured using the `-p` parameter or in `cloudspanner/conf/cloudspanner.properties`. + +* `cloudspanner.database`: (Required) The name of the database created in the instance, e.g. `ycsb-database`. +* `cloudspanner.instance`: (Required) The ID of the Cloud Spanner instance, e.g. `ycsb-instance`. +* `cloudspanner.project`: The ID of the project containing the Cloud Spanner instance, e.g. 
`myproject`. This is not strictly required and can often be automatically inferred from the environment. +* `cloudspanner.readmode`: Allows choosing between the `read` and `query` interface of Cloud Spanner. The default is `query`. +* `cloudspanner.batchinserts`: The number of inserts to batch into a single commit request. The default value is 1 which means no batching is done. Recommended value during data load is 1000. +* `cloudspanner.boundedstaleness`: Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). For performance gains, this should be set to 10 seconds. diff --git a/cloudspanner/conf/cloudspanner.properties b/cloudspanner/conf/cloudspanner.properties new file mode 100644 index 0000000000..c296525876 --- /dev/null +++ b/cloudspanner/conf/cloudspanner.properties @@ -0,0 +1,26 @@ +# Copyright (c) 2017 YCSB contributors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. See accompanying +# LICENSE file. + +# Core YCSB properties. 
+table = usertable +zeropadding = 12 + +# Cloud Spanner properties +cloudspanner.instance = ycsb-instance +cloudspanner.database = ycsb-database + +cloudspanner.readmode = query +cloudspanner.boundedstaleness = 0 +cloudspanner.batchinserts = 1 diff --git a/cloudspanner/pom.xml b/cloudspanner/pom.xml new file mode 100644 index 0000000000..4aba685ed5 --- /dev/null +++ b/cloudspanner/pom.xml @@ -0,0 +1,53 @@ + + + + + 4.0.0 + + com.yahoo.ycsb + binding-parent + 0.13.0-SNAPSHOT + ../binding-parent/ + + + cloudspanner-binding + Cloud Spanner DB Binding + jar + + + + com.google.cloud + google-cloud-spanner + ${cloudspanner.version} + + + com.google.guava + guava-jdk5 + + + + + + com.yahoo.ycsb + core + ${project.version} + provided + + + + diff --git a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java new file mode 100644 index 0000000000..78611c3412 --- /dev/null +++ b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java @@ -0,0 +1,397 @@ +/** + * Copyright (c) 2017 YCSB contributors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. 
+ */ +package com.yahoo.ycsb.db.cloudspanner; + +import com.google.common.base.Joiner; +import com.google.cloud.spanner.DatabaseId; +import com.google.cloud.spanner.DatabaseClient; +import com.google.cloud.spanner.Key; +import com.google.cloud.spanner.KeySet; +import com.google.cloud.spanner.KeyRange; +import com.google.cloud.spanner.Mutation; +import com.google.cloud.spanner.Options; +import com.google.cloud.spanner.ResultSet; +import com.google.cloud.spanner.SessionPoolOptions; +import com.google.cloud.spanner.Spanner; +import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.Statement; +import com.google.cloud.spanner.Struct; +import com.google.cloud.spanner.StructReader; +import com.google.cloud.spanner.TimestampBound; +import com.yahoo.ycsb.ByteIterator; +import com.yahoo.ycsb.Client; +import com.yahoo.ycsb.DB; +import com.yahoo.ycsb.DBException; +import com.yahoo.ycsb.Status; +import com.yahoo.ycsb.StringByteIterator; +import com.yahoo.ycsb.workloads.CoreWorkload; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.Vector; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.concurrent.TimeUnit; + +/** + * YCSB Client for Google's Cloud Spanner. + */ +public class CloudSpannerClient extends DB { + + /** + * The names of properties which can be specified in the config files and flags. + */ + public static final class CloudSpannerProperties { + private CloudSpannerProperties() {} + + /** + * The Cloud Spanner database name to use when running the YCSB benchmark, e.g. 'ycsb-database'. + */ + static final String DATABASE = "cloudspanner.database"; + /** + * The Cloud Spanner instance ID to use when running the YCSB benchmark, e.g. 'ycsb-instance'. + */ + static final String INSTANCE = "cloudspanner.instance"; + /** + * Choose between 'read' and 'query'. 
Affects both read() and scan() operations. + */ + static final String READ_MODE = "cloudspanner.readmode"; + /** + * The number of inserts to batch during the bulk loading phase. The default value is 1, which means no batching + * is done. Recommended value during data load is 1000. + */ + static final String BATCH_INSERTS = "cloudspanner.batchinserts"; + /** + * Number of seconds we allow reads to be stale for. Set to 0 for strong reads (default). + * For performance gains, this should be set to 10 seconds. + */ + static final String BOUNDED_STALENESS = "cloudspanner.boundedstaleness"; + + // The properties below usually do not need to be set explicitly. + + /** + * The Cloud Spanner project ID to use when running the YCSB benchmark, e.g. 'myproject'. This is not strictly + * necessary and can often be inferred from the environment. + */ + static final String PROJECT = "cloudspanner.project"; + /** + * The Cloud Spanner host name to use in the YCSB run. + */ + static final String HOST = "cloudspanner.host"; + /** + * Number of Cloud Spanner client channels to use. It's recommended to leave this to be the default value. + */ + static final String NUM_CHANNELS = "cloudspanner.channels"; + } + + private static int fieldCount; + + private static boolean queriesForReads; + + private static int batchInserts; + + private static TimestampBound timestampBound; + + private static String standardQuery; + + private static String standardScan; + + private static final ArrayList STANDARD_FIELDS = new ArrayList<>(); + + private static final String PRIMARY_KEY_COLUMN = "id"; + + private static final Logger LOGGER = Logger.getLogger(CloudSpannerClient.class.getName()); + + // Static lock for the class. + private static final Object CLASS_LOCK = new Object(); + + // Single Spanner client per process. + private static Spanner spanner = null; + + // Single database client per process. 
+ private static DatabaseClient dbClient = null; + + // Buffered mutations on a per object/thread basis for batch inserts. + // Note that we have a separate CloudSpannerClient object per thread. + private final ArrayList bufferedMutations = new ArrayList<>(); + + private static void constructStandardQueriesAndFields(Properties properties) { + String table = properties.getProperty(CoreWorkload.TABLENAME_PROPERTY, CoreWorkload.TABLENAME_PROPERTY_DEFAULT); + standardQuery = new StringBuilder() + .append("SELECT * FROM ").append(table).append(" WHERE id=@key").toString(); + standardScan = new StringBuilder() + .append("SELECT * FROM ").append(table).append(" WHERE id>=@startKey LIMIT @count").toString(); + for (int i = 0; i < fieldCount; i++) { + STANDARD_FIELDS.add("field" + i); + } + } + + private static Spanner getSpanner(Properties properties, String host, String project) { + if (spanner != null) { + return spanner; + } + String numChannels = properties.getProperty(CloudSpannerProperties.NUM_CHANNELS); + int numThreads = Integer.parseInt(properties.getProperty(Client.THREAD_COUNT_PROPERTY, "1")); + SpannerOptions.Builder optionsBuilder = SpannerOptions.newBuilder() + .setSessionPoolOption(SessionPoolOptions.newBuilder() + .setMinSessions(numThreads) + // Since we have no read-write transactions, we can set the write session fraction to 0. 
+ .setWriteSessionsFraction(0) + .build()); + if (host != null) { + optionsBuilder.setHost(host); + } + if (project != null) { + optionsBuilder.setProjectId(project); + } + if (numChannels != null) { + optionsBuilder.setNumChannels(Integer.parseInt(numChannels)); + } + spanner = optionsBuilder.build().getService(); + Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { + @Override + public void run() { + spanner.closeAsync(); + } + }); + return spanner; + } + + @Override + public void init() throws DBException { + synchronized (CLASS_LOCK) { + if (dbClient != null) { + return; + } + Properties properties = getProperties(); + String host = properties.getProperty(CloudSpannerProperties.HOST); + String project = properties.getProperty(CloudSpannerProperties.PROJECT); + String instance = properties.getProperty(CloudSpannerProperties.INSTANCE, "ycsb-instance"); + String database = properties.getProperty(CloudSpannerProperties.DATABASE, "ycsb-database"); + + fieldCount = Integer.parseInt(properties.getProperty( + CoreWorkload.FIELD_COUNT_PROPERTY, CoreWorkload.FIELD_COUNT_PROPERTY_DEFAULT)); + queriesForReads = properties.getProperty(CloudSpannerProperties.READ_MODE, "query").equals("query"); + batchInserts = Integer.parseInt(properties.getProperty(CloudSpannerProperties.BATCH_INSERTS, "1")); + constructStandardQueriesAndFields(properties); + + int boundedStalenessSeconds = Integer.parseInt(properties.getProperty( + CloudSpannerProperties.BOUNDED_STALENESS, "0")); + timestampBound = (boundedStalenessSeconds <= 0) ? 
+ TimestampBound.strong() : TimestampBound.ofMaxStaleness(boundedStalenessSeconds, TimeUnit.SECONDS); + + try { + spanner = getSpanner(properties, host, project); + if (project == null) { + project = spanner.getOptions().getProjectId(); + } + dbClient = spanner.getDatabaseClient(DatabaseId.of(project, instance, database)); + } catch (Exception e) { + LOGGER.log(Level.SEVERE, "init()", e); + throw new DBException(e); + } + + LOGGER.log(Level.INFO, new StringBuilder() + .append("\nHost: ").append(spanner.getOptions().getHost()) + .append("\nProject: ").append(project) + .append("\nInstance: ").append(instance) + .append("\nDatabase: ").append(database) + .append("\nUsing queries for reads: ").append(queriesForReads) + .append("\nBatching inserts: ").append(batchInserts) + .append("\nBounded staleness seconds: ").append(boundedStalenessSeconds) + .toString()); + } + } + + private Status readUsingQuery( + String table, String key, Set fields, HashMap result) { + Statement query; + Iterable columns = fields == null ? STANDARD_FIELDS : fields; + if (fields == null || fields.size() == fieldCount) { + query = Statement.newBuilder(standardQuery).bind("key").to(key).build(); + } else { + Joiner joiner = Joiner.on(','); + query = Statement.newBuilder("SELECT ") + .append(joiner.join(fields)) + .append(" FROM ") + .append(table) + .append(" WHERE id=@key") + .bind("key").to(key) + .build(); + } + try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { + resultSet.next(); + decodeStruct(columns, resultSet, result); + if (resultSet.next()) { + throw new Exception("Expected exactly one row for each read."); + } + + return Status.OK; + } catch (Exception e) { + LOGGER.log(Level.INFO, "readUsingQuery()", e); + return Status.ERROR; + } + } + + @Override + public Status read( + String table, String key, Set fields, HashMap result) { + if (queriesForReads) { + return readUsingQuery(table, key, fields, result); + } + Iterable columns = fields == null ? 
STANDARD_FIELDS : fields; + try { + Struct row = dbClient.singleUse(timestampBound).readRow(table, Key.of(key), columns); + decodeStruct(columns, row, result); + return Status.OK; + } catch (Exception e) { + LOGGER.log(Level.INFO, "read()", e); + return Status.ERROR; + } + } + + private Status scanUsingQuery( + String table, String startKey, int recordCount, Set fields, + Vector> result) { + Iterable columns = fields == null ? STANDARD_FIELDS : fields; + Statement query; + if (fields == null || fields.size() == fieldCount) { + query = Statement.newBuilder(standardScan).bind("startKey").to(startKey).bind("count").to(recordCount).build(); + } else { + Joiner joiner = Joiner.on(','); + query = Statement.newBuilder("SELECT ") + .append(joiner.join(fields)) + .append(" FROM ") + .append(table) + .append(" WHERE id>=@startKey LIMIT @count") + .bind("startKey").to(startKey) + .bind("count").to(recordCount) + .build(); + } + try (ResultSet resultSet = dbClient.singleUse(timestampBound).executeQuery(query)) { + while (resultSet.next()) { + HashMap row = new HashMap<>(); + decodeStruct(columns, resultSet, row); + result.add(row); + } + return Status.OK; + } catch (Exception e) { + LOGGER.log(Level.INFO, "scanUsingQuery()", e); + return Status.ERROR; + } + } + + @Override + public Status scan( + String table, String startKey, int recordCount, Set fields, + Vector> result) { + if (queriesForReads) { + return scanUsingQuery(table, startKey, recordCount, fields, result); + } + Iterable columns = fields == null ? 
STANDARD_FIELDS : fields; + KeySet keySet = + KeySet.newBuilder().addRange(KeyRange.closedClosed(Key.of(startKey), Key.of())).build(); + try (ResultSet resultSet = dbClient.singleUse(timestampBound) + .read(table, keySet, columns, Options.limit(recordCount))) { + while (resultSet.next()) { + HashMap row = new HashMap<>(); + decodeStruct(columns, resultSet, row); + result.add(row); + } + return Status.OK; + } catch (Exception e) { + LOGGER.log(Level.INFO, "scan()", e); + return Status.ERROR; + } + } + + @Override + public Status update(String table, String key, HashMap values) { + Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); + m.set(PRIMARY_KEY_COLUMN).to(key); + for (Map.Entry e : values.entrySet()) { + m.set(e.getKey()).to(e.getValue().toString()); + } + try { + dbClient.writeAtLeastOnce(Arrays.asList(m.build())); + } catch (Exception e) { + LOGGER.log(Level.INFO, "update()", e); + return Status.ERROR; + } + return Status.OK; + } + + @Override + public Status insert(String table, String key, HashMap values) { + if (bufferedMutations.size() < batchInserts) { + Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); + m.set(PRIMARY_KEY_COLUMN).to(key); + for (Map.Entry e : values.entrySet()) { + m.set(e.getKey()).to(e.getValue().toString()); + } + bufferedMutations.add(m.build()); + } else { + LOGGER.log(Level.INFO, "Limit of cached mutations reached. The given mutation with key " + key + + " is ignored. 
Is this a retry?"); + } + if (bufferedMutations.size() < batchInserts) { + return Status.BATCHED_OK; + } + try { + dbClient.writeAtLeastOnce(bufferedMutations); + bufferedMutations.clear(); + } catch (Exception e) { + LOGGER.log(Level.INFO, "insert()", e); + return Status.ERROR; + } + return Status.OK; + } + + @Override + public void cleanup() { + try { + if (bufferedMutations.size() > 0) { + dbClient.writeAtLeastOnce(bufferedMutations); + bufferedMutations.clear(); + } + } catch (Exception e) { + LOGGER.log(Level.INFO, "cleanup()", e); + } + } + + @Override + public Status delete(String table, String key) { + try { + dbClient.writeAtLeastOnce(Arrays.asList(Mutation.delete(table, Key.of(key)))); + } catch (Exception e) { + LOGGER.log(Level.INFO, "delete()", e); + return Status.ERROR; + } + return Status.OK; + } + + private static void decodeStruct( + Iterable columns, StructReader structReader, HashMap result) { + for (String col : columns) { + result.put(col, new StringByteIterator(structReader.getString(col))); + } + } +} diff --git a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/package-info.java b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/package-info.java new file mode 100644 index 0000000000..fb31ae7f39 --- /dev/null +++ b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/package-info.java @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2017 YCSB contributors. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +/** + * The YCSB binding for Google's + * Cloud Spanner. + */ +package com.yahoo.ycsb.db.cloudspanner; diff --git a/distribution/pom.xml b/distribution/pom.xml index c2071adfd9..506f1170f8 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -69,6 +69,11 @@ LICENSE file. cassandra-binding ${project.version} + + com.yahoo.ycsb + cloudspanner-binding + ${project.version} + com.yahoo.ycsb couchbase-binding diff --git a/pom.xml b/pom.xml index 7693d22469..a8c38210c5 100644 --- a/pom.xml +++ b/pom.xml @@ -100,6 +100,7 @@ LICENSE file. 2.7.3 4.1.7 4.0.0 + 0.9.3-beta @@ -114,6 +115,7 @@ LICENSE file. asynchbase azuretablestorage cassandra + cloudspanner couchbase couchbase2 distribution From ddde8e3c7abf47d560759cec56c85493e4e8fae2 Mon Sep 17 00:00:00 2001 From: Josh Elser Date: Thu, 6 Apr 2017 17:46:20 -0400 Subject: [PATCH 03/16] [accumulo] A general "refresh" to the Accumulo binding (#947) * Expand on the README, covering table creation and best-practices for table config * Avoid unnecessary Text object creations (in loops and instead of byte[] usage) * Use a ConcurrentHashMap to better match the DB API * Fix error messages and always call printStackTrace() on exceptions * Use BATCHED_OK instead of OK in insert() (more correct) --- accumulo/README.md | 37 ++- .../ycsb/db/accumulo/AccumuloClient.java | 293 ++++++++++-------- 2 files changed, 194 insertions(+), 136 deletions(-) diff --git a/accumulo/README.md b/accumulo/README.md index fd9b4e8d7a..38e444cb7c 100644 --- a/accumulo/README.md +++ b/accumulo/README.md @@ -36,7 +36,42 @@ Git clone YCSB and compile: cd YCSB mvn -pl com.yahoo.ycsb:aerospike-binding -am clean package -### 3. Load Data and Run Tests +### 3. Create the Accumulo table + +By default, YCSB uses a table with the name "usertable". Users must create this table before loading +data into Accumulo. 
For maximum Accumulo performance, the Accumulo table must be pre-split. A simple +Ruby script, based on the HBase README, can generate adequate split-points. 10's of Tablets per +TabletServer is a good starting point. Unless otherwise specified, the following commands should run +on any version of Accumulo. + + $ echo 'num_splits = 20; puts (1..num_splits).map {|i| "user#{1000+i*(9999-1000)/num_splits}"}' | ruby > /tmp/splits.txt + $ accumulo shell -u -p -e "createtable usertable" + $ accumulo shell -u -p -e "addsplits -t usertable -sf /tmp/splits.txt" + $ accumulo shell -u -p -e "config -t usertable -s table.cache.block.enable=true" + +Additionally, there are some other configuration properties which can increase performance. These +can be set on the Accumulo table via the shell after it is created. Setting the table durability +to `flush` relaxes the constraints on data durability during hard power-outages (avoids calls +to fsync). Accumulo defaults table compression to `gzip` which is not particularly fast; `snappy` +is a faster and similarly-efficient option. The mutation queue property controls how many writes +that Accumulo will buffer in memory before performing a flush; this property should be set relative +to the amount of JVM heap the TabletServers are given. + +Please note that the `table.durability` and `tserver.total.mutation.queue.max` properties only +exist for >=Accumulo-1.7. There are no concise replacements for these properties in earlier versions. + + accumulo> config -s table.durability=flush + accumulo> config -s tserver.total.mutation.queue.max=256M + accumulo> config -t usertable -s table.file.compress.type=snappy + +On repeated data loads, the following commands may be helpful to re-set the state of the table quickly. + + accumulo> createtable tmp --copy-splits usertable --copy-config usertable + accumulo> deletetable --force usertable + accumulo> renametable tmp usertable + accumulo> compact --wait -t accumulo.metadata + +### 4. 
Load Data and Run Tests Load the data: diff --git a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java index 96b869e2b7..41d6f7f6fa 100644 --- a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java +++ b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java @@ -18,17 +18,25 @@ package com.yahoo.ycsb.db.accumulo; -import com.yahoo.ycsb.ByteArrayByteIterator; -import com.yahoo.ycsb.ByteIterator; -import com.yahoo.ycsb.DB; -import com.yahoo.ycsb.DBException; -import com.yahoo.ycsb.Status; +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.SortedMap; +import java.util.Vector; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import org.apache.accumulo.core.client.AccumuloException; import org.apache.accumulo.core.client.AccumuloSecurityException; import org.apache.accumulo.core.client.BatchWriter; import org.apache.accumulo.core.client.BatchWriterConfig; import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.IteratorSetting; import org.apache.accumulo.core.client.MutationsRejectedException; import org.apache.accumulo.core.client.Scanner; import org.apache.accumulo.core.client.TableNotFoundException; @@ -39,16 +47,16 @@ import org.apache.accumulo.core.data.Mutation; import org.apache.accumulo.core.data.Range; import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.iterators.user.WholeRowIterator; import org.apache.accumulo.core.security.Authorizations; import org.apache.accumulo.core.util.CleanUp; import org.apache.hadoop.io.Text; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.Vector; -import 
java.util.concurrent.TimeUnit; +import com.yahoo.ycsb.ByteArrayByteIterator; +import com.yahoo.ycsb.ByteIterator; +import com.yahoo.ycsb.DB; +import com.yahoo.ycsb.DBException; +import com.yahoo.ycsb.Status; /** * Accumulo binding for YCSB. @@ -57,14 +65,11 @@ public class AccumuloClient extends DB { private ZooKeeperInstance inst; private Connector connector; - private String table = ""; - private BatchWriter bw = null; private Text colFam = new Text(""); - private Scanner singleScanner = null; // A scanner for reads/deletes. - private Scanner scanScanner = null; // A scanner for use by scan() + private byte[] colFamBytes = new byte[0]; + private final ConcurrentHashMap writers = new ConcurrentHashMap<>(); static { - Runtime.getRuntime().addShutdownHook(new Thread() { @Override public void run() { @@ -76,6 +81,7 @@ public void run() { @Override public void init() throws DBException { colFam = new Text(getProperties().getProperty("accumulo.columnFamily")); + colFamBytes = colFam.toString().getBytes(UTF_8); inst = new ZooKeeperInstance( getProperties().getProperty("accumulo.instanceName"), @@ -85,9 +91,7 @@ public void init() throws DBException { AuthenticationToken token = new PasswordToken(getProperties().getProperty("accumulo.password")); connector = inst.getConnector(principal, token); - } catch (AccumuloException e) { - throw new DBException(e); - } catch (AccumuloSecurityException e) { + } catch (AccumuloException | AccumuloSecurityException e) { throw new DBException(e); } @@ -100,45 +104,56 @@ public void init() throws DBException { @Override public void cleanup() throws DBException { try { - if (bw != null) { - bw.close(); + Iterator iterator = writers.values().iterator(); + while (iterator.hasNext()) { + BatchWriter writer = iterator.next(); + writer.close(); + iterator.remove(); } } catch (MutationsRejectedException e) { throw new DBException(e); } } - /** - * Commonly repeated functionality: Before doing any operation, make sure - * we're working on the 
correct table. If not, open the correct one. - * - * @param t - * The table to open. - */ - public void checkTable(String t) throws TableNotFoundException { - if (!table.equals(t)) { - getTable(t); - } - } - /** * Called when the user specifies a table that isn't the same as the existing * table. Connect to it and if necessary, close our current connection. * - * @param t + * @param table * The table to open. */ - public void getTable(String t) throws TableNotFoundException { - if (bw != null) { // Close the existing writer if necessary. - try { - bw.close(); - } catch (MutationsRejectedException e) { - // Couldn't spit out the mutations we wanted. - // Ignore this for now. - System.err.println("MutationsRejectedException: " + e.getMessage()); + public BatchWriter getWriter(String table) throws TableNotFoundException { + // tl;dr We're paying a cost for the ConcurrentHashMap here to deal with the DB api. + // We know that YCSB is really only ever going to send us data for one table, so using + // a concurrent data structure is overkill (especially in such a hot code path). + // However, the impact seems to be relatively negligible in trivial local tests and it's + // "more correct" WRT to the API. + BatchWriter writer = writers.get(table); + if (null == writer) { + BatchWriter newWriter = createBatchWriter(table); + BatchWriter oldWriter = writers.putIfAbsent(table, newWriter); + // Someone beat us to creating a BatchWriter for this table, use their BatchWriters + if (null != oldWriter) { + try { + // Make sure to clean up our new batchwriter! + newWriter.close(); + } catch (MutationsRejectedException e) { + throw new RuntimeException(e); + } + writer = oldWriter; + } else { + writer = newWriter; } } + return writer; + } + /** + * Creates a BatchWriter with the expected configuration. 
+ * + * @param table The table to write to + */ + private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() @@ -146,16 +161,15 @@ public void getTable(String t) throws TableNotFoundException { TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); - bwc.setMaxWriteThreads(Integer.parseInt( - getProperties().getProperty("accumulo.batchWriterThreads", "1"))); - - bw = connector.createBatchWriter(t, bwc); - - // Create our scanners - singleScanner = connector.createScanner(t, Authorizations.EMPTY); - scanScanner = connector.createScanner(t, Authorizations.EMPTY); - - table = t; // Store the name of the table we have open. + final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); + // Try to saturate the client machine. + int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); + if (null != numThreadsValue) { + numThreads = Integer.parseInt(numThreadsValue); + } + System.err.println("Using " + numThreads + " threads to write data"); + bwc.setMaxWriteThreads(numThreads); + return connector.createBatchWriter(table, bwc); } /** @@ -165,120 +179,120 @@ public void getTable(String t) throws TableNotFoundException { * @param fields the set of columns to scan * @return an Accumulo {@link Scanner} bound to the given row and columns */ - private Scanner getRow(Text row, Set fields) { - singleScanner.clearColumns(); - singleScanner.setRange(new Range(row)); + private Scanner getRow(String table, Text row, Set fields) throws TableNotFoundException { + Scanner scanner = connector.createScanner(table, Authorizations.EMPTY); + scanner.setRange(new Range(row)); if (fields != null) { for (String field : fields) { - singleScanner.fetchColumn(colFam, new Text(field)); + scanner.fetchColumn(colFam, new Text(field)); } } - return 
singleScanner; + return scanner; } @Override - public Status read(String t, String key, Set fields, + public Status read(String table, String key, Set fields, HashMap result) { + Scanner scanner = null; try { - checkTable(t); - } catch (TableNotFoundException e) { - System.err.println("Error trying to connect to Accumulo table." + e); - return Status.ERROR; - } - - try { + scanner = getRow(table, new Text(key), null); // Pick out the results we care about. - for (Entry entry : getRow(new Text(key), null)) { + final Text cq = new Text(); + for (Entry entry : scanner) { + entry.getKey().getColumnQualifier(cq); Value v = entry.getValue(); byte[] buf = v.get(); - result.put(entry.getKey().getColumnQualifier().toString(), + result.put(cq.toString(), new ByteArrayByteIterator(buf)); } } catch (Exception e) { - System.err.println("Error trying to reading Accumulo table" + key + e); + System.err.println("Error trying to reading Accumulo table " + table + " " + key); + e.printStackTrace(); return Status.ERROR; + } finally { + if (null != scanner) { + scanner.close(); + } } return Status.OK; } @Override - public Status scan(String t, String startkey, int recordcount, + public Status scan(String table, String startkey, int recordcount, Set fields, Vector> result) { - try { - checkTable(t); - } catch (TableNotFoundException e) { - System.err.println("Error trying to connect to Accumulo table." + e); - return Status.ERROR; - } - - // There doesn't appear to be a way to create a range for a given - // LENGTH. Just start and end keys. So we'll do this the hard way for - // now: // Just make the end 'infinity' and only read as much as we need. - scanScanner.clearColumns(); - scanScanner.setRange(new Range(new Text(startkey), null)); - - // Batch size is how many key/values to try to get per call. Here, I'm - // guessing that the number of keys in a row is equal to the number of - // fields we're interested in. 
- - // We try to fetch one more so as to tell when we've run out of fields. - - // If no fields are provided, we assume one column/row. - if (fields != null) { - // And add each of them as fields we want. - for (String field : fields) { - scanScanner.fetchColumn(colFam, new Text(field)); + Scanner scanner = null; + try { + scanner = connector.createScanner(table, Authorizations.EMPTY); + scanner.setRange(new Range(new Text(startkey), null)); + + // Have Accumulo send us complete rows, serialized in a single Key-Value pair + IteratorSetting cfg = new IteratorSetting(100, WholeRowIterator.class); + scanner.addScanIterator(cfg); + + // If no fields are provided, we assume one column/row. + if (fields != null) { + // And add each of them as fields we want. + for (String field : fields) { + scanner.fetchColumn(colFam, new Text(field)); + } } - } - String rowKey = ""; - HashMap currentHM = null; - int count = 0; - - // Begin the iteration. - for (Entry entry : scanScanner) { - // Check for a new row. - if (!rowKey.equals(entry.getKey().getRow().toString())) { + int count = 0; + for (Entry entry : scanner) { + // Deserialize the row + SortedMap row = WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()); + HashMap rowData; + if (null != fields) { + rowData = new HashMap<>(fields.size()); + } else { + rowData = new HashMap<>(); + } + result.add(rowData); + // Parse the data in the row, avoid unnecessary Text object creation + final Text cq = new Text(); + for (Entry rowEntry : row.entrySet()) { + rowEntry.getKey().getColumnQualifier(cq); + rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get())); + } if (count++ == recordcount) { // Done reading the last row. break; } - rowKey = entry.getKey().getRow().toString(); - if (fields != null) { - // Initial Capacity for all keys. - currentHM = new HashMap(fields.size()); - } else { - // An empty result map. 
- currentHM = new HashMap(); - } - result.add(currentHM); } - // Now add the key to the hashmap. - Value v = entry.getValue(); - byte[] buf = v.get(); - currentHM.put(entry.getKey().getColumnQualifier().toString(), - new ByteArrayByteIterator(buf)); + } catch (TableNotFoundException e) { + System.err.println("Error trying to connect to Accumulo table."); + e.printStackTrace(); + return Status.ERROR; + } catch (IOException e) { + System.err.println("Error deserializing data from Accumulo."); + e.printStackTrace(); + return Status.ERROR; + } finally { + if (null != scanner) { + scanner.close(); + } } return Status.OK; } @Override - public Status update(String t, String key, + public Status update(String table, String key, HashMap values) { + BatchWriter bw = null; try { - checkTable(t); + bw = getWriter(table); } catch (TableNotFoundException e) { - System.err.println("Error trying to connect to Accumulo table." + e); + System.err.println("Error opening batch writer to Accumulo table " + table); + e.printStackTrace(); return Status.ERROR; } - Mutation mutInsert = new Mutation(new Text(key)); + Mutation mutInsert = new Mutation(key.getBytes(UTF_8)); for (Map.Entry entry : values.entrySet()) { - mutInsert.put(colFam, new Text(entry.getKey()), - System.currentTimeMillis(), new Value(entry.getValue().toArray())); + mutInsert.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()); } try { @@ -289,7 +303,7 @@ public Status update(String t, String key, return Status.ERROR; } - return Status.OK; + return Status.BATCHED_OK; } @Override @@ -299,17 +313,19 @@ public Status insert(String t, String key, } @Override - public Status delete(String t, String key) { + public Status delete(String table, String key) { + BatchWriter bw; try { - checkTable(t); + bw = getWriter(table); } catch (TableNotFoundException e) { - System.err.println("Error trying to connect to Accumulo table." 
+ e); + System.err.println("Error trying to connect to Accumulo table."); + e.printStackTrace(); return Status.ERROR; } try { - deleteRow(new Text(key)); - } catch (MutationsRejectedException e) { + deleteRow(table, new Text(key), bw); + } catch (TableNotFoundException | MutationsRejectedException e) { System.err.println("Error performing delete."); e.printStackTrace(); return Status.ERROR; @@ -323,24 +339,31 @@ public Status delete(String t, String key) { } // These functions are adapted from RowOperations.java: - private void deleteRow(Text row) throws MutationsRejectedException { - deleteRow(getRow(row, null)); + private void deleteRow(String table, Text row, BatchWriter bw) throws MutationsRejectedException, + TableNotFoundException { + // TODO Use a batchDeleter instead + deleteRow(getRow(table, row, null), bw); } /** * Deletes a row, given a Scanner of JUST that row. */ - private void deleteRow(Scanner scanner) throws MutationsRejectedException { + private void deleteRow(Scanner scanner, BatchWriter bw) throws MutationsRejectedException { Mutation deleter = null; // iterate through the keys + final Text row = new Text(); + final Text cf = new Text(); + final Text cq = new Text(); for (Entry entry : scanner) { // create a mutation for the row if (deleter == null) { - deleter = new Mutation(entry.getKey().getRow()); + entry.getKey().getRow(row); + deleter = new Mutation(row); } + entry.getKey().getColumnFamily(cf); + entry.getKey().getColumnQualifier(cq); // the remove function adds the key with the delete flag set to true - deleter.putDelete(entry.getKey().getColumnFamily(), - entry.getKey().getColumnQualifier()); + deleter.putDelete(cf, cq); } bw.addMutation(deleter); From b0ff7270fc29418cb8d47760466fb03ea28d3db1 Mon Sep 17 00:00:00 2001 From: sashas83 Date: Fri, 12 May 2017 17:06:44 +0300 Subject: [PATCH 04/16] [memcached] support binary protocol (#965) Adding support for memcached binary protocol as described in 
https://github.com/memcached/memcached/blob/master/doc/protocol.txt. Protocol can be set via memcached.protocol property of YCSB memcached workload. if specified protocol must be "binary" or "text". If unspecified text version is used. --- memcached/README.md | 4 ++++ .../src/main/java/com/yahoo/ycsb/db/MemcachedClient.java | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/memcached/README.md b/memcached/README.md index 2126b2da09..c46a24fdb0 100644 --- a/memcached/README.md +++ b/memcached/README.md @@ -91,6 +91,10 @@ A sample configuration is provided in What to do with failures; this is one of `net.spy.memcached.FailureMode` enum values, which are currently: `Redistribute`, `Retry`, or `Cancel`. +- `memcached.protocol` + Set to 'binary' to use memcached binary protocol. Set to 'text' or omit this field + to use memcached text protocol + You can set properties on the command line via `-p`, e.g.: ./bin/ycsb load memcached -s -P workloads/workloada \ diff --git a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java index 9ce0b93c92..85f1f5dc6d 100644 --- a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java +++ b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java @@ -98,6 +98,10 @@ public class MemcachedClient extends DB { public static final FailureMode FAILURE_MODE_PROPERTY_DEFAULT = FailureMode.Redistribute; + public static final String PROTOCOL_PROPERTY = "memcached.protocol"; + public static final ConnectionFactoryBuilder.Protocol DEFAULT_PROTOCOL = + ConnectionFactoryBuilder.Protocol.TEXT; + /** * The MemcachedClient implementation that will be used to communicate * with the memcached server. 
@@ -142,6 +146,11 @@ protected net.spy.memcached.MemcachedClient createMemcachedClient() connectionFactoryBuilder.setOpTimeout(Integer.parseInt( getProperties().getProperty(OP_TIMEOUT_PROPERTY, DEFAULT_OP_TIMEOUT))); + String protocolString = getProperties().getProperty(PROTOCOL_PROPERTY); + connectionFactoryBuilder.setProtocol( + protocolString == null ? DEFAULT_PROTOCOL + : ConnectionFactoryBuilder.Protocol.valueOf(protocolString.toUpperCase())); + String failureString = getProperties().getProperty(FAILURE_MODE_PROPERTY); connectionFactoryBuilder.setFailureMode( failureString == null ? FAILURE_MODE_PROPERTY_DEFAULT From 57c22606aa000a4136dce868068b1252f3ccf625 Mon Sep 17 00:00:00 2001 From: Matt Emmerton Date: Fri, 12 May 2017 18:51:55 -0400 Subject: [PATCH 05/16] [distro] Refresh Apache licence text (#969) Closes #967 --- LICENSE.txt | 363 +++++++++++++++++++++++++++++----------------------- 1 file changed, 201 insertions(+), 162 deletions(-) diff --git a/LICENSE.txt b/LICENSE.txt index c32809f361..d645695673 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,163 +1,202 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, -and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the -copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other -entities that control, are controlled by, or are under common control -with that entity. For the purposes of this definition, "control" means -(i) the power, direct or indirect, to cause the direction or -management of such entity, whether by contract or otherwise, or (ii) -ownership of fifty percent (50%) or more of the outstanding shares, or -(iii) beneficial ownership of such entity. 
- -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, -including but not limited to software source code, documentation -source, and configuration files. - -"Object" form shall mean any form resulting from mechanical -transformation or translation of a Source form, including but not -limited to compiled object code, generated documentation, and -conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object -form, made available under the License, as indicated by a copyright -notice that is included in or attached to the work (an example is -provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object -form, that is based on (or derived from) the Work and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. For the -purposes of this License, Derivative Works shall not include works -that remain separable from, or merely link (or bind by name) to the -interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the -original version of the Work and any modifications or additions to -that Work or Derivative Works thereof, that is intentionally submitted -to Licensor for inclusion in the Work by the copyright owner or by an -individual or Legal Entity authorized to submit on behalf of the -copyright owner. 
For the purposes of this definition, "submitted" -means any form of electronic, verbal, or written communication sent to -the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control -systems, and issue tracking systems that are managed by, or on behalf -of, the Licensor for the purpose of discussing and improving the Work, -but excluding communication that is conspicuously marked or otherwise -designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity -on behalf of whom a Contribution has been received by Licensor and -subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have - made, use, offer to sell, sell, import, and otherwise transfer the - Work, where such license applies only to those patent claims - licensable by such Contributor that are necessarily infringed by - their Contribution(s) alone or by combination of their - Contribution(s) with the Work to which such Contribution(s) was - submitted. 
If You institute patent litigation against any entity - (including a cross-claim or counterclaim in a lawsuit) alleging - that the Work or a Contribution incorporated within the Work - constitutes direct or contributory patent infringement, then any - patent licenses granted to You under this License for that Work - shall terminate as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the Work - or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You meet - the following conditions: - -(a) You must give any other recipients of the Work or Derivative Works -a copy of this License; and - -(b) You must cause any modified files to carry prominent notices -stating that You changed the files; and - -(c) You must retain, in the Source form of any Derivative Works that -You distribute, all copyright, patent, trademark, and attribution -notices from the Source form of the Work, excluding those notices that -do not pertain to any part of the Derivative Works; and - -(d) If the Work includes a "NOTICE" text file as part of its -distribution, then any Derivative Works that You distribute must -include a readable copy of the attribution notices contained within -such NOTICE file, excluding those notices that do not pertain to any -part of the Derivative Works, in at least one of the following places: -within a NOTICE text file distributed as part of the Derivative Works; -within the Source form or documentation, if provided along with the -Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The -contents of the NOTICE file are for informational purposes only and do -not modify the License. 
You may add Your own attribution notices -within Derivative Works that You distribute, alongside or as an -addendum to the NOTICE text from the Work, provided that such -additional attribution notices cannot be construed as modifying the -License. - -You may add Your own copyright statement to Your modifications and may -provide additional or different license terms and conditions for use, -reproduction, or distribution of Your modifications, or for any such -Derivative Works as a whole, provided Your use, reproduction, and -distribution of the Work otherwise complies with the conditions stated -in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or - conditions. Notwithstanding the above, nothing herein shall - supersede or modify the terms of any separate license agreement you - may have executed with Licensor regarding such Contributions. - -6. Trademarks. This License does nr work. - -To apply the Apache License to your work, attach the following -boilerplate notice, with the fields enclosed by brackets "[]" replaced -with your own identifying information. (Don't include the brackets!) -The text should be enclosed in the appropriate comment syntax for the -file format. We also recommend that a file or class name and -description of purpose be included on the same "printed page" as the -copyright notice for easier identification within third-party -archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied. - -See the License for the specific language governing permissions and -limitations under the License. - + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. From 6c20ca60daa75fc6840e09eebf7ea8e6687e8d1d Mon Sep 17 00:00:00 2001 From: Sean Busbey Date: Fri, 19 May 2017 16:50:05 -0700 Subject: [PATCH 06/16] [hbase12] Add HBase 1.2+ specific client that relies on the shaded client artifact provided by those versions. (#970) --- bin/bindings.properties | 1 + bin/ycsb | 1 + distribution/pom.xml | 5 + hbase12/README.md | 27 +++ hbase12/pom.xml | 85 +++++++ .../yahoo/ycsb/db/hbase12/HBaseClient12.java | 28 +++ .../yahoo/ycsb/db/hbase12/package-info.java | 23 ++ .../ycsb/db/hbase12/HBaseClient12Test.java | 213 ++++++++++++++++++ hbase12/src/test/resources/hbase-site.xml | 34 +++ hbase12/src/test/resources/log4j.properties | 28 +++ pom.xml | 2 + 11 files changed, 447 insertions(+) create mode 100644 hbase12/README.md create mode 100644 hbase12/pom.xml create mode 100644 hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/HBaseClient12.java create mode 100644 hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/package-info.java create mode 100644 hbase12/src/test/java/com/yahoo/ycsb/db/hbase12/HBaseClient12Test.java create mode 100644 hbase12/src/test/resources/hbase-site.xml create mode 100644 hbase12/src/test/resources/log4j.properties diff --git a/bin/bindings.properties b/bin/bindings.properties index 231a3a240c..a2aeb9a64b 100644 --- a/bin/bindings.properties +++ b/bin/bindings.properties @@ -46,6 +46,7 @@ 
googledatastore:com.yahoo.ycsb.db.GoogleDatastoreClient hbase094:com.yahoo.ycsb.db.HBaseClient hbase098:com.yahoo.ycsb.db.HBaseClient hbase10:com.yahoo.ycsb.db.HBaseClient10 +hbase12:com.yahoo.ycsb.db.hbase12.HBaseClient12 hypertable:com.yahoo.ycsb.db.HypertableClient infinispan-cs:com.yahoo.ycsb.db.InfinispanRemoteClient infinispan:com.yahoo.ycsb.db.InfinispanClient diff --git a/bin/ycsb b/bin/ycsb index d454c6cb0c..7fb7518024 100755 --- a/bin/ycsb +++ b/bin/ycsb @@ -71,6 +71,7 @@ DATABASES = { "hbase094" : "com.yahoo.ycsb.db.HBaseClient", "hbase098" : "com.yahoo.ycsb.db.HBaseClient", "hbase10" : "com.yahoo.ycsb.db.HBaseClient10", + "hbase12" : "com.yahoo.ycsb.db.hbase12.HBaseClient12", "hypertable" : "com.yahoo.ycsb.db.HypertableClient", "infinispan-cs": "com.yahoo.ycsb.db.InfinispanRemoteClient", "infinispan" : "com.yahoo.ycsb.db.InfinispanClient", diff --git a/distribution/pom.xml b/distribution/pom.xml index 506f1170f8..d3de7a92eb 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -129,6 +129,11 @@ LICENSE file. hbase10-binding ${project.version} + + com.yahoo.ycsb + hbase12-binding + ${project.version} + com.yahoo.ycsb hypertable-binding diff --git a/hbase12/README.md b/hbase12/README.md new file mode 100644 index 0000000000..ea658e7c05 --- /dev/null +++ b/hbase12/README.md @@ -0,0 +1,27 @@ + + +# HBase (1.2+) Driver for YCSB +This driver is a binding for the YCSB facilities to operate against a HBase 1.2+ Server cluster, using a shaded client that tries to avoid leaking third party libraries. + +See `hbase098/README.md` for a quickstart to setup HBase for load testing and common configuration details. + +## Configuration Options +In addition to those options available for the `hbase098` binding, the following options are available for the `hbase12` binding: + +* `durability`: Whether or not writes should be appended to the WAL. Bypassing the WAL can improve throughput but data cannot be recovered in the event of a crash. The default is true. 
+ diff --git a/hbase12/pom.xml b/hbase12/pom.xml new file mode 100644 index 0000000000..0754b28e19 --- /dev/null +++ b/hbase12/pom.xml @@ -0,0 +1,85 @@ + + + + + 4.0.0 + + com.yahoo.ycsb + binding-parent + 0.13.0-SNAPSHOT + ../binding-parent/ + + + hbase12-binding + HBase 1.2 DB Binding + + + + true + + true + + + + com.yahoo.ycsb + hbase10-binding + ${project.version} + + + + org.apache.hbase + hbase-client + + + + + com.yahoo.ycsb + core + ${project.version} + provided + + + org.apache.hbase + hbase-shaded-client + ${hbase12.version} + + + junit + junit + 4.12 + test + + + + diff --git a/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/HBaseClient12.java b/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/HBaseClient12.java new file mode 100644 index 0000000000..a59a0543cc --- /dev/null +++ b/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/HBaseClient12.java @@ -0,0 +1,28 @@ +/** + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.yahoo.ycsb.db.hbase12; + +/** + * HBase 1.2 client for YCSB framework. + * + * A modified version of HBaseClient (which targets HBase v1.2) utilizing the + * shaded client. + * + * It should run equivalent to following the hbase098 binding README. 
+ * + */ +public class HBaseClient12 extends com.yahoo.ycsb.db.HBaseClient10 { +} diff --git a/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/package-info.java b/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/package-info.java new file mode 100644 index 0000000000..de54dbd799 --- /dev/null +++ b/hbase12/src/main/java/com/yahoo/ycsb/db/hbase12/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2014, Yahoo!, Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +/** + * The YCSB binding for HBase + * using the HBase 1.2+ shaded API. + */ +package com.yahoo.ycsb.db.hbase12; + diff --git a/hbase12/src/test/java/com/yahoo/ycsb/db/hbase12/HBaseClient12Test.java b/hbase12/src/test/java/com/yahoo/ycsb/db/hbase12/HBaseClient12Test.java new file mode 100644 index 0000000000..954153ae89 --- /dev/null +++ b/hbase12/src/test/java/com/yahoo/ycsb/db/hbase12/HBaseClient12Test.java @@ -0,0 +1,213 @@ +/** + * Licensed under the Apache License, Version 2.0 (the "License"); you + * may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
See the License for the specific language governing + * permissions and limitations under the License. See accompanying + * LICENSE file. + */ + +package com.yahoo.ycsb.db.hbase12; + +import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY; +import static com.yahoo.ycsb.workloads.CoreWorkload.TABLENAME_PROPERTY_DEFAULT; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +import com.yahoo.ycsb.ByteIterator; +import com.yahoo.ycsb.Status; +import com.yahoo.ycsb.StringByteIterator; +import com.yahoo.ycsb.measurements.Measurements; +import com.yahoo.ycsb.workloads.CoreWorkload; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Properties; +import java.util.Vector; + +/** + * Integration tests for the YCSB HBase client 1.2, using an HBase minicluster. + */ +public class HBaseClient12Test { + + private final static String COLUMN_FAMILY = "cf"; + + private static HBaseTestingUtility testingUtil; + private HBaseClient12 client; + private Table table = null; + private String tableName; + + private static boolean isWindows() { + final String os = System.getProperty("os.name"); + return os.startsWith("Windows"); + } + + /** + * Creates a mini-cluster for use in these tests. 
+ * + * This is a heavy-weight operation, so invoked only once for the test class. + */ + @BeforeClass + public static void setUpClass() throws Exception { + // Minicluster setup fails on Windows with an UnsatisfiedLinkError. + // Skip if windows. + assumeTrue(!isWindows()); + testingUtil = HBaseTestingUtility.createLocalHTU(); + testingUtil.startMiniCluster(); + } + + /** + * Tears down mini-cluster. + */ + @AfterClass + public static void tearDownClass() throws Exception { + if (testingUtil != null) { + testingUtil.shutdownMiniCluster(); + } + } + + /** + * Sets up the mini-cluster for testing. + * + * We re-create the table for each test. + */ + @Before + public void setUp() throws Exception { + client = new HBaseClient12(); + client.setConfiguration(new Configuration(testingUtil.getConfiguration())); + + Properties p = new Properties(); + p.setProperty("columnfamily", COLUMN_FAMILY); + + Measurements.setProperties(p); + final CoreWorkload workload = new CoreWorkload(); + workload.init(p); + + tableName = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); + table = testingUtil.createTable(TableName.valueOf(tableName), Bytes.toBytes(COLUMN_FAMILY)); + + client.setProperties(p); + client.init(); + } + + @After + public void tearDown() throws Exception { + table.close(); + testingUtil.deleteTable(tableName); + } + + @Test + public void testRead() throws Exception { + final String rowKey = "row1"; + final Put p = new Put(Bytes.toBytes(rowKey)); + p.addColumn(Bytes.toBytes(COLUMN_FAMILY), + Bytes.toBytes("column1"), Bytes.toBytes("value1")); + p.addColumn(Bytes.toBytes(COLUMN_FAMILY), + Bytes.toBytes("column2"), Bytes.toBytes("value2")); + table.put(p); + + final HashMap result = new HashMap(); + final Status status = client.read(tableName, rowKey, null, result); + assertEquals(Status.OK, status); + assertEquals(2, result.size()); + assertEquals("value1", result.get("column1").toString()); + assertEquals("value2", result.get("column2").toString()); + } + 
+ @Test + public void testReadMissingRow() throws Exception { + final HashMap result = new HashMap(); + final Status status = client.read(tableName, "Missing row", null, result); + assertEquals(Status.NOT_FOUND, status); + assertEquals(0, result.size()); + } + + @Test + public void testScan() throws Exception { + // Fill with data + final String colStr = "row_number"; + final byte[] col = Bytes.toBytes(colStr); + final int n = 10; + final List puts = new ArrayList(n); + for(int i = 0; i < n; i++) { + final byte[] key = Bytes.toBytes(String.format("%05d", i)); + final byte[] value = java.nio.ByteBuffer.allocate(4).putInt(i).array(); + final Put p = new Put(key); + p.addColumn(Bytes.toBytes(COLUMN_FAMILY), col, value); + puts.add(p); + } + table.put(puts); + + // Test + final Vector> result = + new Vector>(); + + // Scan 5 records, skipping the first + client.scan(tableName, "00001", 5, null, result); + + assertEquals(5, result.size()); + for(int i = 0; i < 5; i++) { + final HashMap row = result.get(i); + assertEquals(1, row.size()); + assertTrue(row.containsKey(colStr)); + final byte[] bytes = row.get(colStr).toArray(); + final ByteBuffer buf = ByteBuffer.wrap(bytes); + final int rowNum = buf.getInt(); + assertEquals(i + 1, rowNum); + } + } + + @Test + public void testUpdate() throws Exception{ + final String key = "key"; + final HashMap input = new HashMap(); + input.put("column1", "value1"); + input.put("column2", "value2"); + final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); + assertEquals(Status.OK, status); + + // Verify result + final Get get = new Get(Bytes.toBytes(key)); + final Result result = this.table.get(get); + assertFalse(result.isEmpty()); + assertEquals(2, result.size()); + for(final java.util.Map.Entry entry : input.entrySet()) { + assertEquals(entry.getValue(), + new String(result.getValue(Bytes.toBytes(COLUMN_FAMILY), + Bytes.toBytes(entry.getKey())))); + } + } + + @Test + @Ignore("Not yet 
implemented") + public void testDelete() { + fail("Not yet implemented"); + } +} + diff --git a/hbase12/src/test/resources/hbase-site.xml b/hbase12/src/test/resources/hbase-site.xml new file mode 100644 index 0000000000..a8b29e451f --- /dev/null +++ b/hbase12/src/test/resources/hbase-site.xml @@ -0,0 +1,34 @@ + + + + + + hbase.master.info.port + -1 + The port for the hbase master web UI + Set to -1 if you do not want the info server to run. + + + + hbase.regionserver.info.port + -1 + The port for the hbase regionserver web UI + Set to -1 if you do not want the info server to run. + + + diff --git a/hbase12/src/test/resources/log4j.properties b/hbase12/src/test/resources/log4j.properties new file mode 100644 index 0000000000..a9df32e044 --- /dev/null +++ b/hbase12/src/test/resources/log4j.properties @@ -0,0 +1,28 @@ +# +# Copyright (c) 2015 YCSB contributors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. See accompanying +# LICENSE file. +# + +# Root logger option +log4j.rootLogger=WARN, stderr + +log4j.appender.stderr=org.apache.log4j.ConsoleAppender +log4j.appender.stderr.target=System.err +log4j.appender.stderr.layout=org.apache.log4j.PatternLayout +log4j.appender.stderr.layout.conversionPattern=%d{yyyy/MM/dd HH:mm:ss} %-5p %c %x - %m%n + +# Suppress messages from ZKTableStateManager: Creates a large number of table +# state change messages. 
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKTableStateManager=ERROR diff --git a/pom.xml b/pom.xml index a8c38210c5..567376f72c 100644 --- a/pom.xml +++ b/pom.xml @@ -72,6 +72,7 @@ LICENSE file. 0.94.27 0.98.14-hadoop2 1.0.2 + 1.2.5 1.6.0 3.0.0 1.0.0-incubating.M3 @@ -128,6 +129,7 @@ LICENSE file. hbase094 hbase098 hbase10 + hbase12 hypertable infinispan jdbc From a56a00eea2cce6d70755b3ef34f06ee95a1b2087 Mon Sep 17 00:00:00 2001 From: Chris Larsen Date: Wed, 2 Aug 2017 23:19:33 -0700 Subject: [PATCH 07/16] [core] Add a reset() method to the ByteIterator abstract and implementations for each of the children. This lets us re-use byte iterators if we need to access the values again (when applicable). --- .../com/yahoo/ycsb/ByteArrayByteIterator.java | 8 ++++++++ .../java/com/yahoo/ycsb/ByteIterator.java | 9 +++++++++ .../yahoo/ycsb/InputStreamByteIterator.java | 19 +++++++++++++++++++ .../com/yahoo/ycsb/RandomByteIterator.java | 6 ++++++ .../com/yahoo/ycsb/StringByteIterator.java | 5 +++++ 5 files changed, 47 insertions(+) diff --git a/core/src/main/java/com/yahoo/ycsb/ByteArrayByteIterator.java b/core/src/main/java/com/yahoo/ycsb/ByteArrayByteIterator.java index 3f07ad57b6..8f8762b4f2 100644 --- a/core/src/main/java/com/yahoo/ycsb/ByteArrayByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/ByteArrayByteIterator.java @@ -20,6 +20,7 @@ * A ByteIterator that iterates through a byte array. 
*/ public class ByteArrayByteIterator extends ByteIterator { + private final int originalOffset; private byte[] str; private int off; private final int len; @@ -28,12 +29,14 @@ public ByteArrayByteIterator(byte[] s) { this.str = s; this.off = 0; this.len = s.length; + originalOffset = 0; } public ByteArrayByteIterator(byte[] s, int off, int len) { this.str = s; this.off = off; this.len = off + len; + originalOffset = off; } @Override @@ -53,4 +56,9 @@ public long bytesLeft() { return len - off; } + @Override + public void reset() { + off = originalOffset; + } + } diff --git a/core/src/main/java/com/yahoo/ycsb/ByteIterator.java b/core/src/main/java/com/yahoo/ycsb/ByteIterator.java index 5e2adf5c6e..9be84d5a05 100644 --- a/core/src/main/java/com/yahoo/ycsb/ByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/ByteIterator.java @@ -73,6 +73,15 @@ public void remove() { throw new UnsupportedOperationException(); } + /** Resets the iterator so that it can be consumed again. Not all + * implementations support this call. + * @throws UnsupportedOperationException if the implementation hasn't implemented + * the method. + */ + public void reset() { + throw new UnsupportedOperationException(); + } + /** Consumes remaining contents of this object, and returns them as a string. 
*/ public String toString() { Charset cset = Charset.forName("UTF-8"); diff --git a/core/src/main/java/com/yahoo/ycsb/InputStreamByteIterator.java b/core/src/main/java/com/yahoo/ycsb/InputStreamByteIterator.java index c90ae878a8..02ca38006c 100644 --- a/core/src/main/java/com/yahoo/ycsb/InputStreamByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/InputStreamByteIterator.java @@ -16,6 +16,7 @@ */ package com.yahoo.ycsb; +import java.io.IOException; import java.io.InputStream; /** @@ -25,11 +26,16 @@ public class InputStreamByteIterator extends ByteIterator { private long len; private InputStream ins; private long off; + private final boolean resetable; public InputStreamByteIterator(InputStream ins, long len) { this.len = len; this.ins = ins; off = 0; + resetable = ins.markSupported(); + if (resetable) { + ins.mark((int) len); + } } @Override @@ -57,4 +63,17 @@ public long bytesLeft() { return len - off; } + @Override + public void reset() { + if (resetable) { + try { + ins.reset(); + ins.mark((int) len); + } catch (IOException e) { + throw new IllegalStateException("Failed to reset the input stream", e); + } + } + throw new UnsupportedOperationException(); + } + } diff --git a/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java b/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java index daacc6202e..681224e8fb 100644 --- a/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/RandomByteIterator.java @@ -93,4 +93,10 @@ public int nextBuf(byte[] buffer, int bufOffset) { public long bytesLeft() { return len - off - bufOff; } + + @Override + public void reset() { + off = 0; + } + } diff --git a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java index e60f93860f..13a79600e0 100644 --- a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java +++ b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java @@ -96,6 +96,11 @@ public long 
bytesLeft() { return str.length() - off; } + @Override + public void reset() { + off = 0; + } + /** * Specialization of general purpose toString() to avoid unnecessary * copies. From 9135cc16c6cee647b4b859be1e8367b0faae0ae7 Mon Sep 17 00:00:00 2001 From: Sami Z Date: Tue, 25 Jul 2017 16:37:23 -0700 Subject: [PATCH 08/16] [core] Update to use newer version of Google Cloud Spanner client and associated required change Signed-off-by: Chris Larsen --- .../java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java | 2 +- pom.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java index 78611c3412..1822c55617 100644 --- a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java +++ b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java @@ -169,7 +169,7 @@ private static Spanner getSpanner(Properties properties, String host, String pro Runtime.getRuntime().addShutdownHook(new Thread("spannerShutdown") { @Override public void run() { - spanner.closeAsync(); + spanner.close(); } }); return spanner; diff --git a/pom.xml b/pom.xml index 567376f72c..7ba8bed422 100644 --- a/pom.xml +++ b/pom.xml @@ -101,7 +101,7 @@ LICENSE file. 2.7.3 4.1.7 4.0.0 - 0.9.3-beta + 0.20.3-beta From 73802a5b3da896e9f4c5338e5ccbfb95f50a6382 Mon Sep 17 00:00:00 2001 From: Anthony Baker Date: Fri, 21 Jul 2017 15:01:15 -0700 Subject: [PATCH 09/16] [geode] Update to apache-geode 1.2.0 release Also fix a read serialization issue with client mode driver. 
Signed-off-by: Chris Larsen --- geode/README.md | 2 +- .../java/com/yahoo/ycsb/db/GeodeClient.java | 23 ++++++++++--------- pom.xml | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/geode/README.md b/geode/README.md index 59690bfe49..2b93af8e73 100644 --- a/geode/README.md +++ b/geode/README.md @@ -40,9 +40,9 @@ Start a locator and two servers: ``` gfsh> start locator --name=locator1 +gfsh> configure pdx --read-serialized=true gfsh> start server --name=server1 --server-port=40404 gfsh> start server --name=server2 --server-port=40405 -gfsh> configure pdx --read-serialized=true ``` Create the "usertable" region required by YCSB driver: diff --git a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java index 603b7b7348..8aa2af7624 100644 --- a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java +++ b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java @@ -17,16 +17,16 @@ package com.yahoo.ycsb.db; -import com.gemstone.gemfire.cache.*; -import com.gemstone.gemfire.cache.client.ClientCache; -import com.gemstone.gemfire.cache.client.ClientCacheFactory; -import com.gemstone.gemfire.cache.client.ClientRegionFactory; -import com.gemstone.gemfire.cache.client.ClientRegionShortcut; -import com.gemstone.gemfire.internal.admin.remote.DistributionLocatorId; -import com.gemstone.gemfire.internal.cache.GemFireCacheImpl; -import com.gemstone.gemfire.pdx.JSONFormatter; -import com.gemstone.gemfire.pdx.PdxInstance; -import com.gemstone.gemfire.pdx.PdxInstanceFactory; +import org.apache.geode.cache.*; +import org.apache.geode.cache.client.ClientCache; +import org.apache.geode.cache.client.ClientCacheFactory; +import org.apache.geode.cache.client.ClientRegionFactory; +import org.apache.geode.cache.client.ClientRegionShortcut; +import org.apache.geode.internal.admin.remote.DistributionLocatorId; +import org.apache.geode.internal.cache.GemFireCacheImpl; +import org.apache.geode.pdx.JSONFormatter; +import 
org.apache.geode.pdx.PdxInstance; +import org.apache.geode.pdx.PdxInstanceFactory; import com.yahoo.ycsb.*; import java.util.*; @@ -125,6 +125,7 @@ public void init() throws DBException { locator = new DistributionLocatorId(locatorStr); } ClientCacheFactory ccf = new ClientCacheFactory(); + ccf.setPdxReadSerialized(true); if (serverPort != 0) { ccf.addPoolServer(serverHost, serverPort); } else if (locator != null) { @@ -207,4 +208,4 @@ private Region getRegion(String table) { } return r; } -} \ No newline at end of file +} diff --git a/pom.xml b/pom.xml index 7ba8bed422..96ea2cebcf 100644 --- a/pom.xml +++ b/pom.xml @@ -75,7 +75,7 @@ LICENSE file. 1.2.5 1.6.0 3.0.0 - 1.0.0-incubating.M3 + 1.2.0 1.8.1 0.2.3 7.2.2.Final From eaff913d28ab736d517fb4df2488ecb563fd2fc2 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Tue, 6 Jun 2017 09:47:40 -0400 Subject: [PATCH 10/16] Upgrading googlebigtable to the latest version. The API used by googlebigtable has had quite a bit of churn. This is the minimal set of changes required for the upgrade. Signed-off-by: Chris Larsen --- googlebigtable/README.md | 23 ++-- googlebigtable/pom.xml | 8 +- .../yahoo/ycsb/db/GoogleBigtableClient.java | 100 ++++++++++-------- pom.xml | 2 +- 4 files changed, 72 insertions(+), 61 deletions(-) diff --git a/googlebigtable/README.md b/googlebigtable/README.md index 3938b525ea..81b6cf484a 100644 --- a/googlebigtable/README.md +++ b/googlebigtable/README.md @@ -21,9 +21,9 @@ This driver provides a YCSB workload binding for Google's hosted Bigtable, the i ## Quickstart -### 1. Setup a Bigtable Cluster +### 1. Setup a Bigtable Instance -Login to the Google Cloud Console and follow the [Creating Cluster](https://cloud.google.com/bigtable/docs/creating-cluster) steps. Make a note of your cluster name, zone and project ID. +Login to the Google Cloud Console and follow the [Creating Instance](https://cloud.google.com/bigtable/docs/creating-instance) steps. Make a note of your instance ID and project ID. 
### 2. Launch the Bigtable Shell @@ -40,29 +40,25 @@ hbase(main):002:0> create 'usertable', 'cf', {SPLITS => (1..n_splits).map {|i| " Make a note of the column family, in this example it's `cf``. -### 4. Fetch the Proper ALPN Boot Jar - -The Bigtable protocol uses HTTP/2 which requires an ALPN protocol negotiation implementation. On JVM instantiation the implementation must be loaded before attempting to connect to the cluster. If you're using Java 7 or 8, use this [Jetty Version Table](http://www.eclipse.org/jetty/documentation/current/alpn-chapter.html#alpn-versions) to determine the version appropriate for your JVM. (ALPN is included in JDK 9+). Download the proper jar from [Maven](http://search.maven.org/#search%7Cgav%7C1%7Cg%3A%22org.mortbay.jetty.alpn%22%20AND%20a%3A%22alpn-boot%22) somewhere on your system. - -### 5. Download JSON Credentials +### 4. Download JSON Credentials Follow these instructions for [Generating a JSON key](https://cloud.google.com/bigtable/docs/installing-hbase-shell#service-account) and save it to your host. -### 6. Load a Workload +### 5. Load a Workload -Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family, cluster properties and the ALPN jar to load. +Switch to the root of the YCSB repo and choose the workload you want to run and `load` it first. With the CLI you must provide the column family and instance properties to load. 
``` -bin/ycsb load googlebigtable -p columnfamily=cf -p google.bigtable.project.id= -p google.bigtable.cluster.name= -p google.bigtable.zone.name= -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile= -jvm-args='-Xbootclasspath/p:' -P workloads/workloada +bin/ycsb load googlebigtable -p columnfamily=cf -p google.bigtable.project.id= -p google.bigtable.instance.id= -p google.bigtable.auth.json.keyfile= -P workloads/workloada ``` -Make sure to replace the variables in the angle brackets above with the proper value from your cluster. Additional configuration parameters are available below. +Make sure to replace the variables in the angle brackets above with the proper value from your instance. Additional configuration parameters are available below. The `load` step only executes inserts into the datastore. After loading data, run the same workload to mix reads with writes. ``` -bin/ycsb run googlebigtable -p columnfamily=cf -p google.bigtable.project.id= -p google.bigtable.cluster.name= -p google.bigtable.zone.name= -p google.bigtable.auth.service.account.enable=true -p google.bigtable.auth.json.keyfile= -jvm-args='-Xbootclasspath/p:' -P workloads/workloada +bin/ycsb run googlebigtable -p columnfamily=cf -p google.bigtable.project.id= -p google.bigtable.instance.id= -p google.bigtable.auth.json.keyfile= -P workloads/workloada ``` @@ -72,8 +68,7 @@ The following options can be configured using CLI (using the `-p` parameter) or * `columnfamily`: (Required) The Bigtable column family to target. * `google.bigtable.project.id`: (Required) The ID of a Bigtable project. -* `google.bigtable.cluster.name`: (Required) The name of a Bigtable cluster. -* `google.bigtable.zone.name`: (Required) Zone where the Bigtable cluster is running. +* `google.bigtable.instance.id`: (Required) The name of a Bigtable instance. * `google.bigtable.auth.service.account.enable`: Whether or not to authenticate with a service account. The default is true. 
* `google.bigtable.auth.json.keyfile`: (Required) A service account key for authentication. * `debug`: If true, prints debug information to standard out. The default is false. diff --git a/googlebigtable/pom.xml b/googlebigtable/pom.xml index 61eb48c46a..7353e53a79 100644 --- a/googlebigtable/pom.xml +++ b/googlebigtable/pom.xml @@ -36,6 +36,12 @@ LICENSE file. ${googlebigtable.version} + + io.netty + netty-tcnative-boringssl-static + 1.1.33.Fork26 + + com.yahoo.ycsb core @@ -44,4 +50,4 @@ LICENSE file. - \ No newline at end of file + diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java index d0d21dda4b..c035032cfb 100644 --- a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java +++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java @@ -34,23 +34,24 @@ import java.util.concurrent.ExecutionException; import com.google.bigtable.repackaged.com.google.protobuf.ByteString; -import com.google.bigtable.repackaged.com.google.protobuf.ServiceException; -import com.google.bigtable.v1.Column; -import com.google.bigtable.v1.Family; -import com.google.bigtable.v1.MutateRowRequest; -import com.google.bigtable.v1.Mutation; -import com.google.bigtable.v1.ReadRowsRequest; -import com.google.bigtable.v1.Row; -import com.google.bigtable.v1.RowFilter; -import com.google.bigtable.v1.RowRange; -import com.google.bigtable.v1.Mutation.DeleteFromRow; -import com.google.bigtable.v1.Mutation.SetCell; -import com.google.bigtable.v1.RowFilter.Chain.Builder; +import com.google.bigtable.v2.Column; +import com.google.bigtable.v2.Family; +import com.google.bigtable.v2.MutateRowRequest; +import com.google.bigtable.v2.Mutation; +import com.google.bigtable.v2.ReadRowsRequest; +import com.google.bigtable.v2.Row; +import com.google.bigtable.v2.RowFilter; +import com.google.bigtable.v2.RowRange; +import com.google.bigtable.v2.RowSet; +import 
com.google.bigtable.v2.Mutation.DeleteFromRow; +import com.google.bigtable.v2.Mutation.SetCell; +import com.google.bigtable.v2.RowFilter.Chain.Builder; import com.google.cloud.bigtable.config.BigtableOptions; import com.google.cloud.bigtable.grpc.BigtableDataClient; import com.google.cloud.bigtable.grpc.BigtableSession; +import com.google.cloud.bigtable.grpc.BigtableTableName; import com.google.cloud.bigtable.grpc.async.AsyncExecutor; -import com.google.cloud.bigtable.grpc.async.HeapSizeManager; +import com.google.cloud.bigtable.grpc.async.BulkMutation; import com.google.cloud.bigtable.hbase.BigtableOptionsFactory; import com.google.cloud.bigtable.util.ByteStringer; import com.yahoo.ycsb.ByteArrayByteIterator; @@ -89,7 +90,6 @@ public class GoogleBigtableClient extends com.yahoo.ycsb.DB { /** Thread loacal Bigtable native API objects. */ private BigtableDataClient client; - private HeapSizeManager heapSizeManager; private AsyncExecutor asyncExecutor; /** The column family use for the workload. */ @@ -105,13 +105,21 @@ public class GoogleBigtableClient extends com.yahoo.ycsb.DB { */ private boolean clientSideBuffering = false; + private BulkMutation bulkMutation; + @Override public void init() throws DBException { Properties props = getProperties(); // Defaults the user can override if needed - CONFIG.set("google.bigtable.auth.service.account.enable", "true"); - + if (getProperties().containsKey(ASYNC_MUTATOR_MAX_MEMORY)) { + CONFIG.set(BigtableOptionsFactory.BIGTABLE_BUFFERED_MUTATOR_MAX_MEMORY_KEY, + getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY)); + } + if (getProperties().containsKey(ASYNC_MAX_INFLIGHT_RPCS)) { + CONFIG.set(BigtableOptionsFactory.BIGTABLE_BULK_MAX_ROW_KEY_COUNT, + getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS)); + } // make it easy on ourselves by copying all CLI properties into the config object. 
final Iterator> it = props.entrySet().iterator(); while (it.hasNext()) { @@ -143,14 +151,7 @@ public void init() throws DBException { } if (clientSideBuffering) { - heapSizeManager = new HeapSizeManager( - Long.parseLong( - getProperties().getProperty(ASYNC_MUTATOR_MAX_MEMORY, - Long.toString(AsyncExecutor.ASYNC_MUTATOR_MAX_MEMORY_DEFAULT))), - Integer.parseInt( - getProperties().getProperty(ASYNC_MAX_INFLIGHT_RPCS, - Integer.toString(AsyncExecutor.MAX_INFLIGHT_RPCS_DEFAULT)))); - asyncExecutor = new AsyncExecutor(client, heapSizeManager); + asyncExecutor = session.createAsyncExecutor(); } } @@ -169,6 +170,13 @@ public void init() throws DBException { @Override public void cleanup() throws DBException { + if (bulkMutation != null) { + try { + bulkMutation.flush(); + } catch(RuntimeException e){ + throw new DBException(e); + } + } if (asyncExecutor != null) { try { asyncExecutor.flush(); @@ -226,7 +234,8 @@ public Status read(String table, String key, Set fields, final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder() .setTableNameBytes(ByteStringer.wrap(lastTableBytes)) .setFilter(filter) - .setRowKey(ByteStringer.wrap(key.getBytes())); + .setRows(RowSet.newBuilder() + .addRowKeys(ByteStringer.wrap(key.getBytes()))); List rows; try { @@ -292,13 +301,17 @@ public Status scan(String table, String startkey, int recordcount, } final RowRange range = RowRange.newBuilder() - .setStartKey(ByteStringer.wrap(startkey.getBytes())) + .setStartKeyClosed(ByteStringer.wrap(startkey.getBytes())) .build(); - + + final RowSet rowSet = RowSet.newBuilder() + .addRowRanges(range) + .build(); + final ReadRowsRequest.Builder rrr = ReadRowsRequest.newBuilder() .setTableNameBytes(ByteStringer.wrap(lastTableBytes)) .setFilter(filter) - .setRowRange(range); + .setRows(rowSet); List rows; try { @@ -372,19 +385,14 @@ public Status update(String table, String key, try { if (clientSideBuffering) { - asyncExecutor.mutateRowAsync(rowMutation.build()); + 
bulkMutation.add(rowMutation.build()); } else { client.mutateRow(rowMutation.build()); } return Status.OK; - } catch (ServiceException e) { + } catch (RuntimeException e) { System.err.println("Failed to insert key: " + key + " " + e.getMessage()); return Status.ERROR; - } catch (InterruptedException e) { - System.err.println("Interrupted while inserting key: " + key + " " - + e.getMessage()); - Thread.currentThread().interrupt(); - return Status.ERROR; // never get here, but lets make the compiler happy } } @@ -410,19 +418,14 @@ public Status delete(String table, String key) { try { if (clientSideBuffering) { - asyncExecutor.mutateRowAsync(rowMutation.build()); + bulkMutation.add(rowMutation.build()); } else { client.mutateRow(rowMutation.build()); } return Status.OK; - } catch (ServiceException e) { + } catch (RuntimeException e) { System.err.println("Failed to delete key: " + key + " " + e.getMessage()); return Status.ERROR; - } catch (InterruptedException e) { - System.err.println("Interrupted while delete key: " + key + " " - + e.getMessage()); - Thread.currentThread().interrupt(); - return Status.ERROR; // never get here, but lets make the compiler happy } } @@ -434,11 +437,18 @@ public Status delete(String table, String key) { private void setTable(final String table) { if (!lastTable.equals(table)) { lastTable = table; - lastTableBytes = options - .getClusterName() - .toTableName(table) + BigtableTableName tableName = options + .getInstanceName() + .toTableName(table); + lastTableBytes = tableName .toString() .getBytes(); + synchronized(this) { + if (bulkMutation != null) { + bulkMutation.flush(); + } + bulkMutation = session.createBulkMutation(tableName, asyncExecutor); + } } } diff --git a/pom.xml b/pom.xml index 96ea2cebcf..9910871efb 100644 --- a/pom.xml +++ b/pom.xml @@ -77,7 +77,7 @@ LICENSE file. 
3.0.0 1.2.0 1.8.1 - 0.2.3 + 0.9.7 7.2.2.Final 1.1.0 2.1.1 From a12928ec2ef4e234718dc45236d1b28e06adf975 Mon Sep 17 00:00:00 2001 From: Chris Larsen Date: Wed, 2 Aug 2017 23:43:20 -0700 Subject: [PATCH 11/16] [core] Fix an issue where the threadid and threadCount were not passed to the workload client threads. Had to use setters to get around the checkstyle complaint of having too many parameters. --- core/src/main/java/com/yahoo/ycsb/Client.java | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/com/yahoo/ycsb/Client.java b/core/src/main/java/com/yahoo/ycsb/Client.java index 4fc427b07e..94d14ce29b 100644 --- a/core/src/main/java/com/yahoo/ycsb/Client.java +++ b/core/src/main/java/com/yahoo/ycsb/Client.java @@ -405,6 +405,14 @@ public ClientThread(DB db, boolean dotransactions, Workload workload, Properties this.completeLatch = completeLatch; } + public void setThreadId(final int threadId) { + threadid = threadId; + } + + public void setThreadCount(final int threadCount) { + threadcount = threadCount; + } + public int getOpsDone() { return opsdone; } @@ -877,7 +885,8 @@ private static List initDb(String dbname, Properties props, int th ClientThread t = new ClientThread(db, dotransactions, workload, props, threadopcount, targetperthreadperms, completeLatch); - + t.setThreadId(threadid); + t.setThreadCount(threadcount); clients.add(t); } From e4aecc1b476dcfeeb9f9d5874f0c3595f88e09a3 Mon Sep 17 00:00:00 2001 From: Chris Larsen Date: Wed, 2 Aug 2017 23:45:03 -0700 Subject: [PATCH 12/16] [core] Add a Fisher-Yates array shuffle to the Utils class. 
--- core/src/main/java/com/yahoo/ycsb/Utils.java | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/core/src/main/java/com/yahoo/ycsb/Utils.java b/core/src/main/java/com/yahoo/ycsb/Utils.java index 9a95cbd6ae..ade6a1cee3 100644 --- a/core/src/main/java/com/yahoo/ycsb/Utils.java +++ b/core/src/main/java/com/yahoo/ycsb/Utils.java @@ -226,4 +226,19 @@ public static Map getGCStatst() { } return map; } + + /** + * Simple Fisher-Yates array shuffle to randomize discrete sets. + * @param array The array to randomly shuffle. + * @return The shuffled array. + */ + public static T [] shuffleArray(final T[] array) { + for (int i = array.length -1; i > 0; i--) { + final int idx = RAND.nextInt(i + 1); + final T temp = array[idx]; + array[idx] = array[i]; + array[i] = temp; + } + return array; + } } From d04d9325ec89125708743c07049cade5d2bc7677 Mon Sep 17 00:00:00 2001 From: Chris Larsen Date: Thu, 3 Aug 2017 08:20:03 -0700 Subject: [PATCH 13/16] [core] Add an operation enum to the Workload class. This can eventually be used to replace the strings. --- core/src/main/java/com/yahoo/ycsb/Workload.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/src/main/java/com/yahoo/ycsb/Workload.java b/core/src/main/java/com/yahoo/ycsb/Workload.java index 81294fb0bb..11aae6723c 100644 --- a/core/src/main/java/com/yahoo/ycsb/Workload.java +++ b/core/src/main/java/com/yahoo/ycsb/Workload.java @@ -43,6 +43,15 @@ public abstract class Workload { private volatile AtomicBoolean stopRequested = new AtomicBoolean(false); + /** Operations available for a database. */ + public enum Operation { + READ, + UPDATE, + INSERT, + SCAN, + DELETE + } + /** * Initialize the scenario. Create any generators and other shared objects here. * Called once, in the main client thread, before any operations are started. 
From 2c66bc652b3e768975a5e8bf08855c40fcf9b2f9 Mon Sep 17 00:00:00 2001 From: Jiongxin Liu Date: Mon, 26 Jun 2017 17:02:47 -0400 Subject: [PATCH 14/16] [core] Export totalHistogram for HdrHistogram measurement Signed-off-by: Chris Larsen --- .../measurements/OneMeasurementHdrHistogram.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHdrHistogram.java b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHdrHistogram.java index aff6acf0d7..28c284dc33 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHdrHistogram.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHdrHistogram.java @@ -19,6 +19,7 @@ import com.yahoo.ycsb.measurements.exporter.MeasurementsExporter; import org.HdrHistogram.Histogram; +import org.HdrHistogram.HistogramIterationValue; import org.HdrHistogram.HistogramLogWriter; import org.HdrHistogram.Recorder; @@ -112,6 +113,18 @@ public void exportMeasurements(MeasurementsExporter exporter) throws IOException } exportStatusCounts(exporter); + + // also export totalHistogram + for (HistogramIterationValue v : totalHistogram.recordedValues()) { + int value; + if (v.getValueIteratedTo() > (long)Integer.MAX_VALUE) { + value = Integer.MAX_VALUE; + } else { + value = (int)v.getValueIteratedTo(); + } + + exporter.write(getName(), Integer.toString(value), (double)v.getCountAtValueIteratedTo()); + } } /** From 59bc986a088a508a07144b425a9389ff6fc85843 Mon Sep 17 00:00:00 2001 From: Mairbek Khadikov Date: Mon, 23 Jan 2017 18:30:47 -0800 Subject: [PATCH 15/16] [core] Use longs instead of ints to support larger key spaces. Changed int to long in Measurements code to support large scale workloads. 
(manolama - fixed checkstyle errors) Signed-off-by: Chris Larsen --- .../AcknowledgedCounterGenerator.java | 16 +++---- .../ycsb/generator/CounterGenerator.java | 14 +++---- .../generator/HotspotIntegerGenerator.java | 28 ++++++------- .../ycsb/generator/SequentialGenerator.java | 19 +++++---- .../ycsb/generator/UniformGenerator.java | 8 ++-- ...nerator.java => UniformLongGenerator.java} | 17 ++++---- .../measurements/OneMeasurementHistogram.java | 12 +++--- .../OneMeasurementTimeSeries.java | 6 +-- .../JSONArrayMeasurementsExporter.java | 8 ++++ .../exporter/JSONMeasurementsExporter.java | 8 ++++ .../exporter/MeasurementsExporter.java | 10 +++++ .../exporter/TextMeasurementsExporter.java | 5 +++ .../yahoo/ycsb/workloads/CoreWorkload.java | 42 +++++++++---------- .../yahoo/ycsb/workloads/RestWorkload.java | 5 ++- .../AcknowledgedCounterGeneratorTest.java | 10 ++--- 15 files changed, 121 insertions(+), 87 deletions(-) rename core/src/main/java/com/yahoo/ycsb/generator/{UniformIntegerGenerator.java => UniformLongGenerator.java} (64%) diff --git a/core/src/main/java/com/yahoo/ycsb/generator/AcknowledgedCounterGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/AcknowledgedCounterGenerator.java index fb3cd4d0ff..3bbce0f8e6 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/AcknowledgedCounterGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/AcknowledgedCounterGenerator.java @@ -31,12 +31,12 @@ public class AcknowledgedCounterGenerator extends CounterGenerator { private final ReentrantLock lock; private final boolean[] window; - private volatile int limit; + private volatile long limit; /** * Create a counter that starts at countstart. 
*/ - public AcknowledgedCounterGenerator(int countstart) { + public AcknowledgedCounterGenerator(long countstart) { super(countstart); lock = new ReentrantLock(); window = new boolean[WINDOW_SIZE]; @@ -48,15 +48,15 @@ public AcknowledgedCounterGenerator(int countstart) { * (as opposed to the highest generated counter value). */ @Override - public Integer lastValue() { + public Long lastValue() { return limit; } /** * Make a generated counter value available via lastInt(). */ - public void acknowledge(int value) { - final int currentSlot = (value & WINDOW_MASK); + public void acknowledge(long value) { + final int currentSlot = (int)(value & WINDOW_MASK); if (window[currentSlot]) { throw new RuntimeException("Too many unacknowledged insertion keys."); } @@ -68,10 +68,10 @@ public void acknowledge(int value) { // over to the "limit" variable try { // Only loop through the entire window at most once. - int beforeFirstSlot = (limit & WINDOW_MASK); - int index; + long beforeFirstSlot = (limit & WINDOW_MASK); + long index; for (index = limit + 1; index != beforeFirstSlot; ++index) { - int slot = (index & WINDOW_MASK); + int slot = (int)(index & WINDOW_MASK); if (!window[slot]) { break; } diff --git a/core/src/main/java/com/yahoo/ycsb/generator/CounterGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/CounterGenerator.java index 5666390f13..416502b52a 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/CounterGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/CounterGenerator.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. + * Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -17,29 +17,29 @@ package com.yahoo.ycsb.generator; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; /** * Generates a sequence of integers. * (0, 1, ...) */ public class CounterGenerator extends NumberGenerator { - private final AtomicInteger counter; + private final AtomicLong counter; /** * Create a counter that starts at countstart. */ - public CounterGenerator(int countstart) { - counter = new AtomicInteger(countstart); + public CounterGenerator(long countstart) { + counter=new AtomicLong(countstart); } @Override - public Integer nextValue() { + public Long nextValue() { return counter.getAndIncrement(); } @Override - public Integer lastValue() { + public Long lastValue() { return counter.get() - 1; } diff --git a/core/src/main/java/com/yahoo/ycsb/generator/HotspotIntegerGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/HotspotIntegerGenerator.java index 98c8f55c68..677ebd2f2b 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/HotspotIntegerGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/HotspotIntegerGenerator.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. + * Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -31,10 +31,10 @@ */ public class HotspotIntegerGenerator extends NumberGenerator { - private final int lowerBound; - private final int upperBound; - private final int hotInterval; - private final int coldInterval; + private final long lowerBound; + private final long upperBound; + private final long hotInterval; + private final long coldInterval; private final double hotsetFraction; private final double hotOpnFraction; @@ -46,7 +46,7 @@ public class HotspotIntegerGenerator extends NumberGenerator { * @param hotsetFraction percentage of data item * @param hotOpnFraction percentage of operations accessing the hot set. */ - public HotspotIntegerGenerator(int lowerBound, int upperBound, + public HotspotIntegerGenerator(long lowerBound, long upperBound, double hotsetFraction, double hotOpnFraction) { if (hotsetFraction < 0.0 || hotsetFraction > 1.0) { System.err.println("Hotset fraction out of range. Setting to 0.0"); @@ -59,29 +59,29 @@ public HotspotIntegerGenerator(int lowerBound, int upperBound, if (lowerBound > upperBound) { System.err.println("Upper bound of Hotspot generator smaller than the lower bound. " + "Swapping the values."); - int temp = lowerBound; + long temp = lowerBound; lowerBound = upperBound; upperBound = temp; } this.lowerBound = lowerBound; this.upperBound = upperBound; this.hotsetFraction = hotsetFraction; - int interval = upperBound - lowerBound + 1; + long interval = upperBound - lowerBound + 1; this.hotInterval = (int) (interval * hotsetFraction); this.coldInterval = interval - hotInterval; this.hotOpnFraction = hotOpnFraction; } @Override - public Integer nextValue() { - int value = 0; + public Long nextValue() { + long value = 0; Random random = Utils.random(); if (random.nextDouble() < hotOpnFraction) { // Choose a value from the hot set. 
- value = lowerBound + random.nextInt(hotInterval); + value = lowerBound + Math.abs(Utils.random().nextLong()) % hotInterval; } else { // Choose a value from the cold set. - value = lowerBound + hotInterval + random.nextInt(coldInterval); + value = lowerBound + hotInterval + Math.abs(Utils.random().nextLong()) % coldInterval; } setLastValue(value); return value; @@ -90,14 +90,14 @@ public Integer nextValue() { /** * @return the lowerBound */ - public int getLowerBound() { + public long getLowerBound() { return lowerBound; } /** * @return the upperBound */ - public int getUpperBound() { + public long getUpperBound() { return upperBound; } diff --git a/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java index 350c222a40..878fb8e137 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/SequentialGenerator.java @@ -17,38 +17,39 @@ package com.yahoo.ycsb.generator; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; /** * Generates a sequence of integers 0, 1, ... */ public class SequentialGenerator extends NumberGenerator { - protected final AtomicInteger counter; - protected int interval, countstart; + private final AtomicLong counter; + private long interval; + private long countstart; /** * Create a counter that starts at countstart. */ - public SequentialGenerator(int countstart, int countend) { - counter = new AtomicInteger(); + public SequentialGenerator(long countstart, long countend) { + counter = new AtomicLong(); setLastValue(counter.get()); this.countstart = countstart; interval = countend - countstart + 1; } /** - * If the generator returns numeric (integer) values, return the next value as an int. + * If the generator returns numeric (long) values, return the next value as an long. 
* Default is to return -1, which is appropriate for generators that do not return numeric values. */ - public int nextInt() { - int ret = countstart + counter.getAndIncrement() % interval; + public long nextLong() { + long ret = countstart + counter.getAndIncrement() % interval; setLastValue(ret); return ret; } @Override public Number nextValue() { - int ret = countstart + counter.getAndIncrement() % interval; + long ret = countstart + counter.getAndIncrement() % interval; setLastValue(ret); return ret; } diff --git a/core/src/main/java/com/yahoo/ycsb/generator/UniformGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/UniformGenerator.java index 7943d110fc..aeaf35cb75 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/UniformGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/UniformGenerator.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. + * Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -27,7 +27,7 @@ public class UniformGenerator extends Generator { private final List values; private String laststring; - private final UniformIntegerGenerator gen; + private final UniformLongGenerator gen; /** * Creates a generator that will return strings from the specified set uniformly randomly. @@ -35,7 +35,7 @@ public class UniformGenerator extends Generator { public UniformGenerator(Collection values) { this.values = new ArrayList<>(values); laststring = null; - gen = new UniformIntegerGenerator(0, values.size() - 1); + gen = new UniformLongGenerator(0, values.size() - 1); } /** @@ -43,7 +43,7 @@ public UniformGenerator(Collection values) { */ @Override public String nextValue() { - laststring = values.get(gen.nextValue()); + laststring = values.get(gen.nextValue().intValue()); return laststring; } diff --git a/core/src/main/java/com/yahoo/ycsb/generator/UniformIntegerGenerator.java b/core/src/main/java/com/yahoo/ycsb/generator/UniformLongGenerator.java similarity index 64% rename from core/src/main/java/com/yahoo/ycsb/generator/UniformIntegerGenerator.java rename to core/src/main/java/com/yahoo/ycsb/generator/UniformLongGenerator.java index 5be015c3bf..2d1994f957 100644 --- a/core/src/main/java/com/yahoo/ycsb/generator/UniformIntegerGenerator.java +++ b/core/src/main/java/com/yahoo/ycsb/generator/UniformLongGenerator.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010-2016 Yahoo! Inc., 2017 YCSB contributors. All rights reserved. + * Copyright (c) 2010 Yahoo! Inc. Copyright (c) 2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -20,27 +20,28 @@ import com.yahoo.ycsb.Utils; /** - * Generates integers randomly uniform from an interval. + * Generates longs randomly uniform from an interval. */ -public class UniformIntegerGenerator extends NumberGenerator { - private final int lb, ub, interval; +public class UniformLongGenerator extends NumberGenerator { + private final long lb, ub, interval; /** - * Creates a generator that will return integers uniformly randomly from the interval [lb,ub] inclusive. + * Creates a generator that will return longs uniformly randomly from the + * interval [lb,ub] inclusive (that is, lb and ub are possible values) * (lb and ub are possible values). * * @param lb the lower bound (inclusive) of generated values * @param ub the upper bound (inclusive) of generated values */ - public UniformIntegerGenerator(int lb, int ub) { + public UniformLongGenerator(long lb, long ub) { this.lb = lb; this.ub = ub; interval = this.ub - this.lb + 1; } @Override - public Integer nextValue() { - int ret = Utils.random().nextInt(interval) + lb; + public Long nextValue() { + long ret = Math.abs(Utils.random().nextLong()) % interval + lb; setLastValue(ret); return ret; diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHistogram.java b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHistogram.java index 9b85518261..de550d12b2 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHistogram.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementHistogram.java @@ -39,17 +39,17 @@ public class OneMeasurementHistogram extends OneMeasurement { /** * Groups operations in discrete blocks of 1ms width. */ - private final int[] histogram; + private long[] histogram; /** * Counts all operations outside the histogram's range. 
*/ - private int histogramoverflow; + private long histogramoverflow; /** * The total number of reported operations. */ - private int operations; + private long operations; /** * The sum of each latency measurement over all operations. @@ -65,7 +65,7 @@ public class OneMeasurementHistogram extends OneMeasurement { private double totalsquaredlatency; //keep a windowed version of these stats for printing status - private int windowoperations; + private long windowoperations; private long windowtotallatency; private int min; @@ -74,7 +74,7 @@ public class OneMeasurementHistogram extends OneMeasurement { public OneMeasurementHistogram(String name, Properties props) { super(name); buckets = Integer.parseInt(props.getProperty(BUCKETS, BUCKETS_DEFAULT)); - histogram = new int[buckets]; + histogram = new long[buckets]; histogramoverflow = 0; operations = 0; totallatency = 0; @@ -120,7 +120,7 @@ public void exportMeasurements(MeasurementsExporter exporter) throws IOException exporter.write(getName(), "MinLatency(us)", min); exporter.write(getName(), "MaxLatency(us)", max); - int opcounter = 0; + long opcounter=0; boolean done95th = false; for (int i = 0; i < buckets; i++) { opcounter += histogram[i]; diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementTimeSeries.java b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementTimeSeries.java index 678cfbe490..e6abc685da 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementTimeSeries.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/OneMeasurementTimeSeries.java @@ -54,9 +54,9 @@ public class OneMeasurementTimeSeries extends OneMeasurement { private long start = -1; private long currentunit = -1; - private int count = 0; - private int sum = 0; - private int operations = 0; + private long count = 0; + private long sum = 0; + private long operations = 0; private long totallatency = 0; //keep a windowed version of these stats for printing status diff --git 
a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java index e45b50ef6e..ef29e18992 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONArrayMeasurementsExporter.java @@ -47,6 +47,14 @@ public void write(String metric, String measurement, int i) throws IOException { g.writeEndObject(); } + public void write(String metric, String measurement, long i) throws IOException { + g.writeStartObject(); + g.writeStringField("metric", metric); + g.writeStringField("measurement", measurement); + g.writeNumberField("value", i); + g.writeEndObject(); + } + public void write(String metric, String measurement, double d) throws IOException { g.writeStartObject(); g.writeStringField("metric", metric); diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONMeasurementsExporter.java b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONMeasurementsExporter.java index eb3e214754..addcb649c4 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONMeasurementsExporter.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/JSONMeasurementsExporter.java @@ -48,6 +48,14 @@ public void write(String metric, String measurement, int i) throws IOException { g.writeEndObject(); } + public void write(String metric, String measurement, long i) throws IOException { + g.writeStartObject(); + g.writeStringField("metric", metric); + g.writeStringField("measurement", measurement); + g.writeNumberField("value", i); + g.writeEndObject(); + } + public void write(String metric, String measurement, double d) throws IOException { g.writeStartObject(); g.writeStringField("metric", metric); diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/MeasurementsExporter.java 
b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/MeasurementsExporter.java index e42ea2c539..fb7da1e898 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/MeasurementsExporter.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/MeasurementsExporter.java @@ -39,6 +39,16 @@ public interface MeasurementsExporter extends Closeable { * * @param metric Metric name, for example "READ LATENCY". * @param measurement Measurement name, for example "Average latency". + * @param i Measurement to write. + * @throws IOException if writing failed + */ + void write(String metric, String measurement, long i) throws IOException; + + /** + * Write a measurement to the exported format. + * + * @param metric Metric name, for example "READ LATENCY". + * @param measurement Measurement name, for example "Average latency". * @param d Measurement to write. * @throws IOException if writing failed */ diff --git a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/TextMeasurementsExporter.java b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/TextMeasurementsExporter.java index b17a0f12ac..75b24c395d 100644 --- a/core/src/main/java/com/yahoo/ycsb/measurements/exporter/TextMeasurementsExporter.java +++ b/core/src/main/java/com/yahoo/ycsb/measurements/exporter/TextMeasurementsExporter.java @@ -36,6 +36,11 @@ public void write(String metric, String measurement, int i) throws IOException { bw.newLine(); } + public void write(String metric, String measurement, long i) throws IOException { + bw.write("[" + metric + "], " + measurement + ", " + i); + bw.newLine(); + } + public void write(String metric, String measurement, double d) throws IOException { bw.write("[" + metric + "], " + measurement + ", " + d); bw.newLine(); diff --git a/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java b/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java index 3e4adfaa5f..6e9f79de96 100644 --- 
a/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java +++ b/core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2016 YCSB contributors. All rights reserved. + * Copyright (c) 2010 Yahoo! Inc., Copyright (c) 2016-2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -19,6 +19,7 @@ import com.yahoo.ycsb.*; import com.yahoo.ycsb.generator.*; +import com.yahoo.ycsb.generator.UniformLongGenerator; import com.yahoo.ycsb.measurements.Measurements; import java.io.IOException; @@ -82,9 +83,7 @@ public class CoreWorkload extends Workload { * Default number of fields in a record. */ public static final String FIELD_COUNT_PROPERTY_DEFAULT = "10"; - - protected int fieldcount; - + private List fieldnames; /** @@ -315,7 +314,8 @@ public class CoreWorkload extends Workload { protected AcknowledgedCounterGenerator transactioninsertkeysequence; protected NumberGenerator scanlength; protected boolean orderedinserts; - protected int recordcount; + protected long fieldcount; + protected long recordcount; protected int zeropadding; protected int insertionRetryLimit; protected int insertionRetryInterval; @@ -333,7 +333,7 @@ protected static NumberGenerator getFieldLengthGenerator(Properties p) throws Wo if (fieldlengthdistribution.compareTo("constant") == 0) { fieldlengthgenerator = new ConstantIntegerGenerator(fieldlength); } else if (fieldlengthdistribution.compareTo("uniform") == 0) { - fieldlengthgenerator = new UniformIntegerGenerator(1, fieldlength); + fieldlengthgenerator = new UniformLongGenerator(1, fieldlength); } else if (fieldlengthdistribution.compareTo("zipfian") == 0) { fieldlengthgenerator = new ZipfianGenerator(1, fieldlength); } else if (fieldlengthdistribution.compareTo("histogram") == 0) { @@ -359,7 +359,7 @@ public void init(Properties p) throws WorkloadException { table = p.getProperty(TABLENAME_PROPERTY, TABLENAME_PROPERTY_DEFAULT); fieldcount = - Integer.parseInt(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT)); + Long.parseLong(p.getProperty(FIELD_COUNT_PROPERTY, FIELD_COUNT_PROPERTY_DEFAULT)); fieldnames = new ArrayList<>(); for (int i = 0; i < fieldcount; i++) { 
fieldnames.add("field" + i); @@ -367,7 +367,7 @@ public void init(Properties p) throws WorkloadException { fieldlengthgenerator = CoreWorkload.getFieldLengthGenerator(p); recordcount = - Integer.parseInt(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT)); + Long.parseLong(p.getProperty(Client.RECORD_COUNT_PROPERTY, Client.DEFAULT_RECORD_COUNT)); if (recordcount == 0) { recordcount = Integer.MAX_VALUE; } @@ -378,9 +378,9 @@ public void init(Properties p) throws WorkloadException { String scanlengthdistrib = p.getProperty(SCAN_LENGTH_DISTRIBUTION_PROPERTY, SCAN_LENGTH_DISTRIBUTION_PROPERTY_DEFAULT); - int insertstart = - Integer.parseInt(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT)); - int insertcount = + long insertstart = + Long.parseLong(p.getProperty(INSERT_START_PROPERTY, INSERT_START_PROPERTY_DEFAULT)); + long insertcount= Integer.parseInt(p.getProperty(INSERT_COUNT_PROPERTY, String.valueOf(recordcount - insertstart))); // Confirm valid values for insertstart and insertcount in relation to recordcount if (recordcount < (insertstart + insertcount)) { @@ -426,7 +426,7 @@ public void init(Properties p) throws WorkloadException { transactioninsertkeysequence = new AcknowledgedCounterGenerator(recordcount); if (requestdistrib.compareTo("uniform") == 0) { - keychooser = new UniformIntegerGenerator(insertstart, insertstart + insertcount - 1); + keychooser = new UniformLongGenerator(insertstart, insertstart + insertcount - 1); } else if (requestdistrib.compareTo("sequential") == 0) { keychooser = new SequentialGenerator(insertstart, insertstart + insertcount - 1); } else if (requestdistrib.compareTo("zipfian") == 0) { @@ -458,10 +458,10 @@ public void init(Properties p) throws WorkloadException { throw new WorkloadException("Unknown request distribution \"" + requestdistrib + "\""); } - fieldchooser = new UniformIntegerGenerator(0, fieldcount - 1); + fieldchooser = new UniformLongGenerator(0, fieldcount - 1); if 
(scanlengthdistrib.compareTo("uniform") == 0) { - scanlength = new UniformIntegerGenerator(1, maxscanlength); + scanlength = new UniformLongGenerator(1, maxscanlength); } else if (scanlengthdistrib.compareTo("zipfian") == 0) { scanlength = new ZipfianGenerator(1, maxscanlength); } else { @@ -646,8 +646,8 @@ protected void verifyRow(String key, HashMap cells) { measurements.reportStatus("VERIFY", verifyStatus); } - protected int nextKeynum() { - int keynum; + long nextKeynum() { + long keynum; if (keychooser instanceof ExponentialGenerator) { do { keynum = transactioninsertkeysequence.lastValue() - keychooser.nextValue().intValue(); @@ -662,7 +662,7 @@ protected int nextKeynum() { public void doTransactionRead(DB db) { // choose a random key - int keynum = nextKeynum(); + long keynum = nextKeynum(); String keyname = buildKeyName(keynum); @@ -689,7 +689,7 @@ public void doTransactionRead(DB db) { public void doTransactionReadModifyWrite(DB db) { // choose a random key - int keynum = nextKeynum(); + long keynum = nextKeynum(); String keyname = buildKeyName(keynum); @@ -736,7 +736,7 @@ public void doTransactionReadModifyWrite(DB db) { public void doTransactionScan(DB db) { // choose a random key - int keynum = nextKeynum(); + long keynum = nextKeynum(); String startkeyname = buildKeyName(keynum); @@ -758,7 +758,7 @@ public void doTransactionScan(DB db) { public void doTransactionUpdate(DB db) { // choose a random key - int keynum = nextKeynum(); + long keynum = nextKeynum(); String keyname = buildKeyName(keynum); @@ -777,7 +777,7 @@ public void doTransactionUpdate(DB db) { public void doTransactionInsert(DB db) { // choose the next key - int keynum = transactioninsertkeysequence.nextValue(); + long keynum = transactioninsertkeysequence.nextValue(); try { String dbkey = buildKeyName(keynum); diff --git a/core/src/main/java/com/yahoo/ycsb/workloads/RestWorkload.java b/core/src/main/java/com/yahoo/ycsb/workloads/RestWorkload.java index 6eb0a96eda..e215ef1650 100644 --- 
a/core/src/main/java/com/yahoo/ycsb/workloads/RestWorkload.java +++ b/core/src/main/java/com/yahoo/ycsb/workloads/RestWorkload.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2016 YCSB contributors. All rights reserved. + * Copyright (c) 2016-2017 YCSB contributors. All rights reserved. *

* Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. You @@ -30,6 +30,7 @@ import java.util.Map; import java.util.Properties; +import com.yahoo.ycsb.generator.UniformLongGenerator; /** * Typical RESTFul services benchmarking scenario. Represents a set of client * calling REST operations like HTTP DELETE, GET, POST, PUT on a web service. @@ -171,7 +172,7 @@ private static NumberGenerator getKeyChooser(String requestDistrib, int recordCo keychooser = new ExponentialGenerator(percentile, recordCount * frac); break; case "uniform": - keychooser = new UniformIntegerGenerator(0, recordCount - 1); + keychooser = new UniformLongGenerator(0, recordCount - 1); break; case "zipfian": keychooser = new ZipfianGenerator(recordCount, zipfContant); diff --git a/core/src/test/java/com/yahoo/ycsb/generator/AcknowledgedCounterGeneratorTest.java b/core/src/test/java/com/yahoo/ycsb/generator/AcknowledgedCounterGeneratorTest.java index f4aa88b4f0..8e7752757b 100644 --- a/core/src/test/java/com/yahoo/ycsb/generator/AcknowledgedCounterGeneratorTest.java +++ b/core/src/test/java/com/yahoo/ycsb/generator/AcknowledgedCounterGeneratorTest.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2015 YCSB contributors. All rights reserved. + * Copyright (c) 2015-2017 YCSB contributors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you * may not use this file except in compliance with the License. 
You @@ -38,19 +38,19 @@ public void testIncrementPastIntegerMaxValue() { new AcknowledgedCounterGenerator(Integer.MAX_VALUE - 1000); Random rand = new Random(System.currentTimeMillis()); - BlockingQueue pending = new ArrayBlockingQueue(1000); + BlockingQueue pending = new ArrayBlockingQueue(1000); for (long i = 0; i < toTry; ++i) { - int value = generator.nextValue(); + long value = generator.nextValue(); while (!pending.offer(value)) { - Integer first = pending.poll(); + Long first = pending.poll(); // Don't always advance by one. if (rand.nextBoolean()) { generator.acknowledge(first); } else { - Integer second = pending.poll(); + Long second = pending.poll(); pending.add(first); generator.acknowledge(second); } From cf5d2ca5f554fe721e7924a92ca5e5a6e0cdf8ce Mon Sep 17 00:00:00 2001 From: Kirill Vlasov Date: Thu, 24 Dec 2015 09:49:10 +0500 Subject: [PATCH 16/16] [core] Fixing squid:S1319 - Declarations should use Java collection interfaces such as "List" rather than specific implementation classes such as "LinkedList". 
(manolama - updated bindings added since the PR) Signed-off-by: Chris Larsen --- .../ycsb/db/accumulo/AccumuloClient.java | 6 +-- .../com/yahoo/ycsb/db/AerospikeClient.java | 8 +-- .../com/yahoo/ycsb/db/ArangoDBClient.java | 10 ++-- .../ycsb/db/arangodb/ArangoDB3Client.java | 8 +-- .../com/yahoo/ycsb/db/AsyncHBaseClient.java | 7 +-- .../AzureDocumentDBClient.java | 7 +-- .../db/azuretablestorage/AzureClient.java | 15 +++--- .../com/yahoo/ycsb/db/CassandraCQLClient.java | 6 +-- .../yahoo/ycsb/db/CassandraCQLClientTest.java | 2 +- .../db/cloudspanner/CloudSpannerClient.java | 10 ++-- .../src/main/java/com/yahoo/ycsb/BasicDB.java | 7 +-- .../main/java/com/yahoo/ycsb/CommandLine.java | 2 +- core/src/main/java/com/yahoo/ycsb/DB.java | 7 +-- .../main/java/com/yahoo/ycsb/DBWrapper.java | 9 ++-- .../java/com/yahoo/ycsb/GoodBadUglyDB.java | 7 +-- .../com/yahoo/ycsb/StringByteIterator.java | 4 +- .../com/yahoo/ycsb/db/CouchbaseClient.java | 14 ++--- .../ycsb/db/couchbase2/Couchbase2Client.java | 52 +++++++++---------- .../com/yahoo/ycsb/db/DynamoDBClient.java | 9 ++-- .../yahoo/ycsb/db/ElasticsearchClient.java | 7 +-- .../java/com/yahoo/ycsb/db/GeodeClient.java | 6 +-- .../yahoo/ycsb/db/GoogleBigtableClient.java | 7 +-- .../yahoo/ycsb/db/GoogleDatastoreClient.java | 8 +-- .../java/com/yahoo/ycsb/db/HBaseClient.java | 7 +-- .../java/com/yahoo/ycsb/db/HBaseClient10.java | 6 +-- .../com/yahoo/ycsb/db/HBaseClient10Test.java | 5 +- .../com/yahoo/ycsb/db/HypertableClient.java | 6 +-- .../com/yahoo/ycsb/db/InfinispanClient.java | 9 ++-- .../yahoo/ycsb/db/InfinispanRemoteClient.java | 6 +-- .../java/com/yahoo/ycsb/db/JdbcDBClient.java | 10 ++-- .../com/yahoo/ycsb/db/JdbcDBClientTest.java | 10 ++-- .../com/yahoo/ycsb/db/KuduYCSBClient.java | 11 ++-- .../com/yahoo/ycsb/db/MapKeeperClient.java | 6 +-- .../com/yahoo/ycsb/db/MemcachedClient.java | 8 +-- .../com/yahoo/ycsb/db/AsyncMongoDbClient.java | 8 +-- .../java/com/yahoo/ycsb/db/MongoDbClient.java | 6 +-- 
.../yahoo/ycsb/db/AbstractDBTestCases.java | 3 +- .../java/com/yahoo/ycsb/db/NoSqlDbClient.java | 9 ++-- .../com/yahoo/ycsb/db/OrientDBClient.java | 12 +++-- .../java/com/yahoo/ycsb/db/RadosClient.java | 7 +-- .../java/com/yahoo/ycsb/db/RedisClient.java | 7 +-- .../ycsb/webservice/rest/RestClient.java | 9 ++-- .../com/yahoo/ycsb/db/riak/RiakKVClient.java | 11 ++-- .../main/java/com/yahoo/ycsb/db/S3Client.java | 12 ++--- .../com/yahoo/ycsb/db/solr/SolrClient.java | 6 +-- .../com/yahoo/ycsb/db/solr6/SolrClient.java | 6 +-- .../com/yahoo/ycsb/db/TarantoolClient.java | 8 +-- .../com/yahoo/ycsb/db/VoldemortClient.java | 6 +-- 48 files changed, 213 insertions(+), 199 deletions(-) diff --git a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java index 41d6f7f6fa..28dbbd13e2 100644 --- a/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java +++ b/accumulo/src/main/java/com/yahoo/ycsb/db/accumulo/AccumuloClient.java @@ -192,7 +192,7 @@ private Scanner getRow(String table, Text row, Set fields) throws TableN @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Scanner scanner = null; try { @@ -280,7 +280,7 @@ public Status scan(String table, String startkey, int recordcount, @Override public Status update(String table, String key, - HashMap values) { + Map values) { BatchWriter bw = null; try { bw = getWriter(table); @@ -308,7 +308,7 @@ public Status update(String table, String key, @Override public Status insert(String t, String key, - HashMap values) { + Map values) { return update(t, key, values); } diff --git a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java index e651a7e61e..bd3b6cf02a 100644 --- a/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java +++ b/aerospike/src/main/java/com/yahoo/ycsb/db/AerospikeClient.java @@ -98,7 
+98,7 @@ public void cleanup() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Record record; @@ -134,7 +134,7 @@ public Status scan(String table, String start, int count, Set fields, } private Status write(String table, String key, WritePolicy writePolicy, - HashMap values) { + Map values) { Bin[] bins = new Bin[values.size()]; int index = 0; @@ -156,13 +156,13 @@ private Status write(String table, String key, WritePolicy writePolicy, @Override public Status update(String table, String key, - HashMap values) { + Map values) { return write(table, key, updatePolicy, values); } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return write(table, key, insertPolicy, values); } diff --git a/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java b/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java index 1a9d185f53..838f944a1a 100644 --- a/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java +++ b/arangodb/src/main/java/com/yahoo/ycsb/db/ArangoDBClient.java @@ -187,7 +187,7 @@ public void cleanup() throws DBException { * {@link DB} class's description for a discussion of error codes. 
*/ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry entry : values.entrySet()) { @@ -225,7 +225,7 @@ public Status insert(String table, String key, HashMap val */ @SuppressWarnings("unchecked") @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { DocumentEntity targetDoc = arangoDriver.getDocument(table, key, BaseDocument.class); BaseDocument aDocument = targetDoc.getEntity(); @@ -261,7 +261,7 @@ public Status read(String table, String key, Set fields, HashMap values) { + public Status update(String table, String key, Map values) { try { if (!transactionUpdate) { @@ -455,8 +455,8 @@ private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } - private String mapToJson(HashMap values) { - HashMap intervalRst = new HashMap(); + private String mapToJson(Map values) { + Map intervalRst = new HashMap(); for (Map.Entry entry : values.entrySet()) { intervalRst.put(entry.getKey(), byteIteratorToString(entry.getValue())); } diff --git a/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java b/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java index 4ab5b9fa65..1a02624d1d 100644 --- a/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java +++ b/arangodb3/src/main/java/com/yahoo/ycsb/db/arangodb/ArangoDB3Client.java @@ -175,7 +175,7 @@ public void cleanup() throws DBException { * {@link DB} class's description for a discussion of error codes. 
*/ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { BaseDocument toInsert = new BaseDocument(key); for (Map.Entry entry : values.entrySet()) { @@ -205,7 +205,7 @@ public Status insert(String table, String key, HashMap val * @return Zero on success, a non-zero error code on error or "not found". */ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { VPackSlice document = arangoDB.db(databaseName).collection(table).getDocument(key, VPackSlice.class, null); if (!this.fillMap(result, document, fields)) { @@ -233,7 +233,7 @@ public Status read(String table, String key, Set fields, HashMap values) { + public Status update(String table, String key, Map values) { try { if (!transactionUpdate) { BaseDocument updateDoc = new BaseDocument(); @@ -414,7 +414,7 @@ private ByteIterator stringToByteIterator(String content) { return new StringByteIterator(content); } - private String mapToJson(HashMap values) { + private String mapToJson(Map values) { VPackBuilder builder = new VPackBuilder().add(ValueType.OBJECT); for (Map.Entry entry : values.entrySet()) { builder.add(entry.getKey(), byteIteratorToString(entry.getValue())); diff --git a/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java index eecbee36f9..ac98c7d6d7 100644 --- a/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java +++ b/asynchbase/src/main/java/com/yahoo/ycsb/db/AsyncHBaseClient.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; +import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; @@ -196,7 +197,7 @@ public void cleanup() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap 
result) { + Map result) { setTable(table); final GetRequest get = new GetRequest( @@ -299,7 +300,7 @@ public Status scan(String table, String startkey, int recordcount, @Override public Status update(String table, String key, - HashMap values) { + Map values) { setTable(table); if (debug) { @@ -347,7 +348,7 @@ public Status update(String table, String key, @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } diff --git a/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java b/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java index ee9e964780..48b6690b44 100644 --- a/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java +++ b/azuredocumentdb/src/main/java/com/yahoo/ycsb/db/azuredocumentdb/AzureDocumentDBClient.java @@ -23,6 +23,7 @@ import com.microsoft.azure.documentdb.FeedOptions; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Vector; @@ -74,7 +75,7 @@ public void init() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Document record = getDocumentById(table, key); if (record != null) { @@ -95,7 +96,7 @@ public Status read(String table, String key, Set fields, @Override public Status update(String table, String key, - HashMap values) { + Map values) { Document record = getDocumentById(table, key); if (record == null) { @@ -120,7 +121,7 @@ public Status update(String table, String key, @Override public Status insert(String table, String key, - HashMap values) { + Map values) { Document record = new Document(); record.set("id", key); diff --git a/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java b/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java index 
87b2a208fb..8247750c53 100644 --- a/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java +++ b/azuretablestorage/src/main/java/com/yahoo/ycsb/db/azuretablestorage/AzureClient.java @@ -35,6 +35,7 @@ import java.util.Date; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; @@ -105,7 +106,7 @@ public void cleanup() { @Override public Status read(String table, String key, Set fields, - final HashMap result) { + Map result) { if (fields != null) { return readSubset(key, fields, result); } else { @@ -145,12 +146,12 @@ public Status scan(String table, String startkey, int recordcount, } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { return insertOrUpdate(key, values); } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (batchSize == 1) { return insertOrUpdate(key, values); } else { @@ -187,7 +188,7 @@ private String getStorageConnectionString(String protocol, String account, Strin /* * Read subset of properties instead of full fields with projection. 
*/ - public Status readSubset(String key, Set fields, HashMap result) { + public Status readSubset(String key, Set fields, Map result) { String whereStr = String.format("RowKey eq '%s'", key); TableQuery projectionQuery = TableQuery.from( @@ -220,7 +221,7 @@ public HashMap resolve(String partitionkey, String rowKey, } } - private Status readEntity(String key, HashMap result) { + private Status readEntity(String key, Map result) { try { // firstly, retrieve the entity to be deleted TableOperation retrieveOp = @@ -238,7 +239,7 @@ private Status readEntity(String key, HashMap result) { } } - private Status insertBatch(String key, HashMap values) { + private Status insertBatch(String key, Map values) { HashMap properties = new HashMap(); for (Entry entry : values.entrySet()) { String fieldName = entry.getKey(); @@ -259,7 +260,7 @@ private Status insertBatch(String key, HashMap values) { return Status.OK; } - private Status insertOrUpdate(String key, HashMap values) { + private Status insertOrUpdate(String key, Map values) { HashMap properties = new HashMap(); for (Entry entry : values.entrySet()) { String fieldName = entry.getKey(); diff --git a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java index ee2dd8f541..aefd7798a2 100644 --- a/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java +++ b/cassandra/src/main/java/com/yahoo/ycsb/db/CassandraCQLClient.java @@ -239,7 +239,7 @@ public void cleanup() throws DBException { */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Statement stmt; Select.Builder selectBuilder; @@ -402,7 +402,7 @@ public Status scan(String table, String startkey, int recordcount, */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { // Insert and updates provide the same functionality return insert(table, key, values); } @@ -422,7 +422,7 @@ public 
Status update(String table, String key, */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { try { Insert insertStmt = QueryBuilder.insertInto(table); diff --git a/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java b/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java index 82c2298376..9c13666675 100644 --- a/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java +++ b/cassandra/src/test/java/com/yahoo/ycsb/db/CassandraCQLClientTest.java @@ -157,7 +157,7 @@ public void testReadSingleColumn() throws Exception { @Test public void testUpdate() throws Exception { final String key = "key"; - final HashMap input = new HashMap(); + final Map input = new HashMap(); input.put("field0", "value1"); input.put("field1", "value2"); diff --git a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java index 1822c55617..e909e81311 100644 --- a/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java +++ b/cloudspanner/src/main/java/com/yahoo/ycsb/db/cloudspanner/CloudSpannerClient.java @@ -222,7 +222,7 @@ public void init() throws DBException { } private Status readUsingQuery( - String table, String key, Set fields, HashMap result) { + String table, String key, Set fields, Map result) { Statement query; Iterable columns = fields == null ? 
STANDARD_FIELDS : fields; if (fields == null || fields.size() == fieldCount) { @@ -253,7 +253,7 @@ private Status readUsingQuery( @Override public Status read( - String table, String key, Set fields, HashMap result) { + String table, String key, Set fields, Map result) { if (queriesForReads) { return readUsingQuery(table, key, fields, result); } @@ -324,7 +324,7 @@ public Status scan( } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); for (Map.Entry e : values.entrySet()) { @@ -340,7 +340,7 @@ public Status update(String table, String key, HashMap val } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (bufferedMutations.size() < batchInserts) { Mutation.WriteBuilder m = Mutation.newInsertOrUpdateBuilder(table); m.set(PRIMARY_KEY_COLUMN).to(key); @@ -389,7 +389,7 @@ public Status delete(String table, String key) { } private static void decodeStruct( - Iterable columns, StructReader structReader, HashMap result) { + Iterable columns, StructReader structReader, Map result) { for (String col : columns) { result.put(col, new StringByteIterator(structReader.getString(col))); } diff --git a/core/src/main/java/com/yahoo/ycsb/BasicDB.java b/core/src/main/java/com/yahoo/ycsb/BasicDB.java index dab23a013a..dfb88a0c52 100644 --- a/core/src/main/java/com/yahoo/ycsb/BasicDB.java +++ b/core/src/main/java/com/yahoo/ycsb/BasicDB.java @@ -18,6 +18,7 @@ package com.yahoo.ycsb; import java.util.*; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.LockSupport; @@ -107,7 +108,7 @@ protected static StringBuilder getStringBuilder() { * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - 
public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { delay(); if (verbose) { @@ -170,7 +171,7 @@ public Status scan(String table, String startkey, int recordcount, Set f * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { delay(); if (verbose) { @@ -197,7 +198,7 @@ public Status update(String table, String key, HashMap val * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { delay(); if (verbose) { diff --git a/core/src/main/java/com/yahoo/ycsb/CommandLine.java b/core/src/main/java/com/yahoo/ycsb/CommandLine.java index 9e95bd5a4d..ff781f4192 100644 --- a/core/src/main/java/com/yahoo/ycsb/CommandLine.java +++ b/core/src/main/java/com/yahoo/ycsb/CommandLine.java @@ -303,7 +303,7 @@ int record = 0; } else { System.out.println("--------------------------------"); } - for (HashMap result : results) { + for (Map result : results) { System.out.println("Record " + (record++)); for (Map.Entry ent : result.entrySet()) { System.out.println(ent.getKey() + "=" + ent.getValue()); diff --git a/core/src/main/java/com/yahoo/ycsb/DB.java b/core/src/main/java/com/yahoo/ycsb/DB.java index 2002474b5d..b93fd483a6 100644 --- a/core/src/main/java/com/yahoo/ycsb/DB.java +++ b/core/src/main/java/com/yahoo/ycsb/DB.java @@ -18,6 +18,7 @@ package com.yahoo.ycsb; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; @@ -85,7 +86,7 @@ public void cleanup() throws DBException { * @param result A HashMap of field/value 
pairs for the result * @return The result of the operation. */ - public abstract Status read(String table, String key, Set fields, HashMap result); + public abstract Status read(String table, String key, Set fields, Map result); /** * Perform a range scan for a set of records in the database. Each field/value pair from the result will be stored @@ -110,7 +111,7 @@ public abstract Status scan(String table, String startkey, int recordcount, Set< * @param values A HashMap of field/value pairs to update in the record * @return The result of the operation. */ - public abstract Status update(String table, String key, HashMap values); + public abstract Status update(String table, String key, Map values); /** * Insert a record in the database. Any field/value pairs in the specified values HashMap will be written into the @@ -121,7 +122,7 @@ public abstract Status scan(String table, String startkey, int recordcount, Set< * @param values A HashMap of field/value pairs to insert in the record * @return The result of the operation. */ - public abstract Status insert(String table, String key, HashMap values); + public abstract Status insert(String table, String key, Map values); /** * Delete a record from the database. 
diff --git a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java index 880cccc160..a15664e8cc 100644 --- a/core/src/main/java/com/yahoo/ycsb/DBWrapper.java +++ b/core/src/main/java/com/yahoo/ycsb/DBWrapper.java @@ -17,6 +17,7 @@ package com.yahoo.ycsb; +import java.util.Map; import com.yahoo.ycsb.measurements.Measurements; import org.apache.htrace.core.TraceScope; import org.apache.htrace.core.Tracer; @@ -33,7 +34,7 @@ public class DBWrapper extends DB { private final Tracer tracer; private boolean reportLatencyForEachError = false; - private HashSet latencyTrackedErrors = new HashSet(); + private Set latencyTrackedErrors = new HashSet(); private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY = "reportlatencyforeacherror"; private static final String REPORT_LATENCY_FOR_EACH_ERROR_PROPERTY_DEFAULT = "false"; @@ -127,7 +128,7 @@ public void cleanup() throws DBException { * @return The result of the operation. */ public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try (final TraceScope span = tracer.newScope(scopeStringRead)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); @@ -190,7 +191,7 @@ private void measure(String op, Status result, long intendedStartTimeNanos, * @return The result of the operation. */ public Status update(String table, String key, - HashMap values) { + Map values) { try (final TraceScope span = tracer.newScope(scopeStringUpdate)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); @@ -213,7 +214,7 @@ public Status update(String table, String key, * @return The result of the operation. 
*/ public Status insert(String table, String key, - HashMap values) { + Map values) { try (final TraceScope span = tracer.newScope(scopeStringInsert)) { long ist = measurements.getIntendedtartTimeNs(); long st = System.nanoTime(); diff --git a/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java b/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java index db4ba3fdb1..1cbf3a5d0f 100644 --- a/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java +++ b/core/src/main/java/com/yahoo/ycsb/GoodBadUglyDB.java @@ -18,6 +18,7 @@ package com.yahoo.ycsb; import java.util.HashMap; +import java.util.Map; import java.util.Random; import java.util.Set; import java.util.Vector; @@ -93,7 +94,7 @@ public void init() { * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { delay(); return Status.OK; } @@ -125,7 +126,7 @@ public Status scan(String table, String startkey, int recordcount, Set f * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { delay(); return Status.OK; @@ -140,7 +141,7 @@ public Status update(String table, String key, HashMap val * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { delay(); return Status.OK; } diff --git a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java index 13a79600e0..8223549188 100644 --- a/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java +++ 
b/core/src/main/java/com/yahoo/ycsb/StringByteIterator.java @@ -51,7 +51,7 @@ public static void putAllAsStrings(Map out, Map getByteIteratorMap(Map m) { + public static Map getByteIteratorMap(Map m) { HashMap ret = new HashMap(); @@ -65,7 +65,7 @@ public static HashMap getByteIteratorMap(Map getStringMap(Map m) { + public static Map getStringMap(Map m) { HashMap ret = new HashMap(); for (Map.Entry entry : m.entrySet()) { diff --git a/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java b/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java index 4a829202c1..7a0f1be208 100644 --- a/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java +++ b/couchbase/src/main/java/com/yahoo/ycsb/db/CouchbaseClient.java @@ -180,7 +180,7 @@ public void cleanup() { @Override public Status read(final String table, final String key, final Set fields, - final HashMap result) { + final Map result) { String formattedKey = formatKey(table, key); try { @@ -225,7 +225,7 @@ public Status scan(final String table, final String startkey, final int recordco } @Override - public Status update(final String table, final String key, final HashMap values) { + public Status update(final String table, final String key, final Map values) { String formattedKey = formatKey(table, key); try { @@ -240,7 +240,7 @@ public Status update(final String table, final String key, final HashMap values) { + public Status insert(final String table, final String key, final Map values) { String formattedKey = formatKey(table, key); try { @@ -301,7 +301,7 @@ private Status checkFutureStatus(final OperationFuture future) { * @param fields the fields to check. * @param dest the result passed back to the ycsb core. 
*/ - private void decode(final Object source, final Set fields, final HashMap dest) { + private void decode(final Object source, final Set fields, final Map dest) { if (useJson) { try { JsonNode json = JSON_MAPPER.readTree((String) source); @@ -321,7 +321,7 @@ private void decode(final Object source, final Set fields, final HashMap throw new RuntimeException("Could not decode JSON"); } } else { - HashMap converted = (HashMap) source; + Map converted = (HashMap) source; for (Map.Entry entry : converted.entrySet()) { dest.put(entry.getKey(), new StringByteIterator(entry.getValue())); } @@ -334,8 +334,8 @@ private void decode(final Object source, final Set fields, final HashMap * @param source the source value. * @return the storable object. */ - private Object encode(final HashMap source) { - HashMap stringMap = StringByteIterator.getStringMap(source); + private Object encode(final Map source) { + Map stringMap = StringByteIterator.getStringMap(source); if (!useJson) { return stringMap; } diff --git a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java index 6697a0c357..251aaf114f 100644 --- a/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java +++ b/couchbase2/src/main/java/com/yahoo/ycsb/db/couchbase2/Couchbase2Client.java @@ -241,7 +241,7 @@ private void logParams() { @Override public Status read(final String table, final String key, Set fields, - final HashMap result) { + final Map result) { try { String docId = formatId(table, key); if (kv) { @@ -256,14 +256,14 @@ public Status read(final String table, final String key, Set fields, } /** - * Performs the {@link #read(String, String, Set, HashMap)} operation via Key/Value ("get"). + * Performs the {@link #read(String, String, Set, Map)} operation via Key/Value ("get"). 
* * @param docId the document ID * @param fields the fields to be loaded * @param result the result map where the doc needs to be converted into * @return The result of the operation. */ - private Status readKv(final String docId, final Set fields, final HashMap result) + private Status readKv(final String docId, final Set fields, final Map result) throws Exception { RawJsonDocument loaded = bucket.get(docId, RawJsonDocument.class); if (loaded == null) { @@ -274,7 +274,7 @@ private Status readKv(final String docId, final Set fields, final HashMa } /** - * Performs the {@link #read(String, String, Set, HashMap)} operation via N1QL ("SELECT"). + * Performs the {@link #read(String, String, Set, Map)} operation via N1QL ("SELECT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * @@ -283,7 +283,7 @@ private Status readKv(final String docId, final Set fields, final HashMa * @param result the result map where the doc needs to be converted into * @return The result of the operation. 
*/ - private Status readN1ql(final String docId, Set fields, final HashMap result) + private Status readN1ql(final String docId, Set fields, final Map result) throws Exception { String readQuery = "SELECT " + joinFields(fields) + " FROM `" + bucketName + "` USE KEYS [$1]"; N1qlQueryResult queryResult = bucket.query(N1qlQuery.parameterized( @@ -319,7 +319,7 @@ private Status readN1ql(final String docId, Set fields, final HashMap values) { + public Status update(final String table, final String key, final Map values) { if (upsert) { return upsert(table, key, values); } @@ -338,13 +338,13 @@ public Status update(final String table, final String key, final HashMap values) { + private Status updateKv(final String docId, final Map values) { waitForMutationResponse(bucket.async().replace( RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, @@ -354,7 +354,7 @@ private Status updateKv(final String docId, final HashMap } /** - * Performs the {@link #update(String, String, HashMap)} operation via N1QL ("UPDATE"). + * Performs the {@link #update(String, String, Map)} operation via N1QL ("UPDATE"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * @@ -362,7 +362,7 @@ private Status updateKv(final String docId, final HashMap * @param values the values to update the document with. * @return The result of the operation. 
*/ - private Status updateN1ql(final String docId, final HashMap values) + private Status updateN1ql(final String docId, final Map values) throws Exception { String fields = encodeN1qlFields(values); String updateQuery = "UPDATE `" + bucketName + "` USE KEYS [$1] SET " + fields; @@ -381,7 +381,7 @@ private Status updateN1ql(final String docId, final HashMap values) { + public Status insert(final String table, final String key, final Map values) { if (upsert) { return upsert(table, key, values); } @@ -400,7 +400,7 @@ public Status insert(final String table, final String key, final HashMap values) { + private Status insertKv(final String docId, final Map values) { int tries = 60; // roughly 60 seconds with the 1 second sleep, not 100% accurate. for(int i = 0; i < tries; i++) { @@ -435,7 +435,7 @@ private Status insertKv(final String docId, final HashMap } /** - * Performs the {@link #insert(String, String, HashMap)} operation via N1QL ("INSERT"). + * Performs the {@link #insert(String, String, Map)} operation via N1QL ("INSERT"). * * If this option should be used, the "-p couchbase.kv=false" property must be set. * @@ -443,7 +443,7 @@ private Status insertKv(final String docId, final HashMap * @param values the values to update the document with. * @return The result of the operation. 
*/ - private Status insertN1ql(final String docId, final HashMap values) + private Status insertN1ql(final String docId, final Map values) throws Exception { String insertQuery = "INSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)"; @@ -470,7 +470,7 @@ private Status insertN1ql(final String docId, final HashMap values) { + private Status upsert(final String table, final String key, final Map values) { try { String docId = formatId(table, key); if (kv) { @@ -485,7 +485,7 @@ private Status upsert(final String table, final String key, final HashMap values) { + private Status upsertKv(final String docId, final Map values) { waitForMutationResponse(bucket.async().upsert( RawJsonDocument.create(docId, documentExpiry, encode(values)), persistTo, @@ -503,7 +503,7 @@ private Status upsertKv(final String docId, final HashMap } /** - * Performs the {@link #upsert(String, String, HashMap)} operation via N1QL ("UPSERT"). + * Performs the {@link #upsert(String, String, Map)} operation via N1QL ("UPSERT"). * * If this option should be used, the "-p couchbase.upsert=true -p couchbase.kv=false" properties must be set. * @@ -511,7 +511,7 @@ private Status upsertKv(final String docId, final HashMap * @param values the values to update the document with. * @return The result of the operation. */ - private Status upsertN1ql(final String docId, final HashMap values) + private Status upsertN1ql(final String docId, final Map values) throws Exception { String upsertQuery = "UPSERT INTO `" + bucketName + "`(KEY,VALUE) VALUES ($1,$2)"; @@ -734,12 +734,12 @@ public void onNext(Document document) { } /** - * Helper method to turn the values into a String, used with {@link #upsertN1ql(String, HashMap)}. + * Helper method to turn the values into a String, used with {@link #upsertN1ql(String, Map)}. * * @param values the values to encode. * @return the encoded string. 
*/ - private static String encodeN1qlFields(final HashMap values) { + private static String encodeN1qlFields(final Map values) { if (values.isEmpty()) { return ""; } @@ -760,7 +760,7 @@ private static String encodeN1qlFields(final HashMap value * @param values the values to transform. * @return the created json object. */ - private static JsonObject valuesToJsonObject(final HashMap values) { + private static JsonObject valuesToJsonObject(final Map values) { JsonObject result = JsonObject.create(); for (Map.Entry entry : values.entrySet()) { result.put(entry.getKey(), entry.getValue().toString()); @@ -853,7 +853,7 @@ private static PersistTo parsePersistTo(final String property) throws DBExceptio * @param dest the result passed back to YCSB. */ private void decode(final String source, final Set fields, - final HashMap dest) { + final Map dest) { try { JsonNode json = JacksonTransformers.MAPPER.readTree(source); boolean checkFields = fields != null && !fields.isEmpty(); @@ -879,8 +879,8 @@ private void decode(final String source, final Set fields, * @param source the source value. * @return the encoded string. 
*/ - private String encode(final HashMap source) { - HashMap stringMap = StringByteIterator.getStringMap(source); + private String encode(final Map source) { + Map stringMap = StringByteIterator.getStringMap(source); ObjectNode node = JacksonTransformers.MAPPER.createObjectNode(); for (Map.Entry pair : stringMap.entrySet()) { node.put(pair.getKey(), pair.getValue()); diff --git a/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java b/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java index b1f471f1d5..d861fd70e5 100644 --- a/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java +++ b/dynamodb/src/main/java/com/yahoo/ycsb/db/DynamoDBClient.java @@ -137,7 +137,7 @@ public void init() throws DBException { } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("readkey: " + key + " from table: " + table); } @@ -228,7 +228,7 @@ public Status scan(String table, String startkey, int recordcount, } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("updatekey: " + key + " from table: " + table); } @@ -254,7 +254,7 @@ public Status update(String table, String key, HashMap val } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("insertkey: " + primaryKeyName + "-" + key + " from table: " + table); } @@ -302,8 +302,7 @@ public Status delete(String table, String key) { return Status.OK; } - private static Map createAttributes(HashMap values) { - //leave space for the PrimaryKey + private static Map createAttributes(Map values) { Map attributes = new HashMap<>(values.size() + 1); for (Entry val : values.entrySet()) { 
attributes.put(val.getKey(), new AttributeValue(val.getValue().toString())); diff --git a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java index 76ddee1d2e..12c3febad2 100644 --- a/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java +++ b/elasticsearch/src/main/java/com/yahoo/ycsb/db/ElasticsearchClient.java @@ -46,6 +46,7 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; @@ -195,7 +196,7 @@ public void cleanup() throws DBException { * description for a discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { final XContentBuilder doc = jsonBuilder().startObject(); @@ -254,7 +255,7 @@ public Status delete(String table, String key) { * @return Zero on success, a non-zero error code on error or "not found". 
*/ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet(); @@ -295,7 +296,7 @@ public Status read(String table, String key, Set fields, HashMap values) { + public Status update(String table, String key, Map values) { try { final GetResponse response = client.prepareGet(indexKey, table, key).execute().actionGet(); diff --git a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java index 8aa2af7624..aab0c99c58 100644 --- a/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java +++ b/geode/src/main/java/com/yahoo/ycsb/db/GeodeClient.java @@ -136,7 +136,7 @@ public void init() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { Region r = getRegion(table); PdxInstance val = r.get(key); if (val != null) { @@ -162,13 +162,13 @@ public Status scan(String table, String startkey, int recordcount, } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { getRegion(table).put(key, convertToBytearrayMap(values)); return Status.OK; } diff --git a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java index c035032cfb..8e0c7b0053 100644 --- a/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java +++ b/googlebigtable/src/main/java/com/yahoo/ycsb/db/GoogleBigtableClient.java @@ -20,6 +20,7 @@ import java.nio.charset.Charset; import java.util.Arrays; 
import java.util.HashMap; +import java.util.Map; import java.util.Iterator; import java.util.List; import java.util.Map.Entry; @@ -198,7 +199,7 @@ public void cleanup() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (debug) { System.out.println("Doing read from Bigtable columnfamily " + new String(columnFamilyBytes)); @@ -360,7 +361,7 @@ public Status scan(String table, String startkey, int recordcount, @Override public Status update(String table, String key, - HashMap values) { + Map values) { if (debug) { System.out.println("Setting up put for key: " + key); } @@ -398,7 +399,7 @@ public Status update(String table, String key, @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } diff --git a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java index 7eb35b1e15..62db58333e 100644 --- a/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java +++ b/googledatastore/src/main/java/com/yahoo/ycsb/db/GoogleDatastoreClient.java @@ -181,7 +181,7 @@ public void init() throws DBException { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { LookupRequest.Builder lookupRequest = LookupRequest.newBuilder(); lookupRequest.addKeys(buildPrimaryKey(table, key)); lookupRequest.getReadOptionsBuilder().setReadConsistency( @@ -241,14 +241,14 @@ public Status scan(String table, String startkey, int recordcount, @Override public Status update(String table, String key, - HashMap values) { + Map values) { return doSingleItemMutation(table, key, values, MutationType.UPDATE); } @Override public Status insert(String table, String key, - HashMap values) { + Map values) { // Use Upsert to allow overwrite of existing key instead of failing the // load (or run) just because 
the DB already has the key. // This is the same behavior as what other DB does here (such as @@ -275,7 +275,7 @@ private Key.Builder buildPrimaryKey(String table, String key) { } private Status doSingleItemMutation(String table, String key, - @Nullable HashMap values, + @Nullable Map values, MutationType mutationType) { // First build the key. Key.Builder datastoreKey = buildPrimaryKey(table, key); diff --git a/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java b/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java index 0d62baad61..90cbb6a38b 100644 --- a/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java +++ b/hbase098/src/main/java/com/yahoo/ycsb/db/HBaseClient.java @@ -168,7 +168,7 @@ private void getHTable(String table) throws IOException { * @param result A HashMap of field/value pairs for the result * @return Zero on success, a non-zero error code on error */ - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { //if this is a "new" tableName, init HTable object. Else, use existing one if (!this.tableName.equals(table)) { hTable = null; @@ -307,7 +307,7 @@ public Status scan(String table, String startkey, int recordcount, Set f * @param values A HashMap of field/value pairs to update in the record * @return Zero on success, a non-zero error code on error */ - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { //if this is a "new" tableName, init HTable object. 
Else, use existing one if (!this.tableName.equals(table)) { hTable = null; @@ -358,7 +358,7 @@ public Status update(String table, String key, HashMap val * @param values A HashMap of field/value pairs to insert in the record * @return Zero on success, a non-zero error code on error */ - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { return update(table, key, values); } @@ -439,6 +439,7 @@ public void run() { long st = System.currentTimeMillis(); Status result; Vector> scanResults = new Vector<>(); + Set scanFields = new HashSet(); result = cli.scan("table1", "user2", 20, null, scanResults); long en = System.currentTimeMillis(); diff --git a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java index 2ef4defacc..96d8cf0624 100644 --- a/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java +++ b/hbase10/src/main/java/com/yahoo/ycsb/db/HBaseClient10.java @@ -250,7 +250,7 @@ public void getHTable(String table) throws IOException { * @return Zero on success, a non-zero error code on error */ public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { // if this is a "new" table, init HTable object. Else, use existing one if (!tableName.equals(table)) { currentTable = null; @@ -418,7 +418,7 @@ public Status scan(String table, String startkey, int recordcount, */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { // if this is a "new" table, init HTable object. 
Else, use existing one if (!tableName.equals(table)) { currentTable = null; @@ -480,7 +480,7 @@ public Status update(String table, String key, */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { return update(table, key, values); } diff --git a/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java b/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java index f77595ba83..040c417b86 100644 --- a/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java +++ b/hbase10/src/test/java/com/yahoo/ycsb/db/HBaseClient10Test.java @@ -47,6 +47,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; +import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Vector; @@ -173,7 +174,7 @@ public void testScan() throws Exception { assertEquals(5, result.size()); for(int i = 0; i < 5; i++) { - final HashMap row = result.get(i); + final Map row = result.get(i); assertEquals(1, row.size()); assertTrue(row.containsKey(colStr)); final byte[] bytes = row.get(colStr).toArray(); @@ -186,7 +187,7 @@ public void testScan() throws Exception { @Test public void testUpdate() throws Exception{ final String key = "key"; - final HashMap input = new HashMap(); + final Map input = new HashMap(); input.put("column1", "value1"); input.put("column2", "value2"); final Status status = client.insert(tableName, key, StringByteIterator.getByteIteratorMap(input)); diff --git a/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java b/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java index 22b6a2c855..4107ec41b6 100644 --- a/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java +++ b/hypertable/src/main/java/com/yahoo/ycsb/db/HypertableClient.java @@ -117,7 +117,7 @@ public void cleanup() throws DBException { */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { // SELECT _column_family:field[i] 
// FROM table WHERE ROW=key MAX_VERSIONS 1; @@ -252,7 +252,7 @@ public Status scan(String table, String startkey, int recordcount, */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { return insert(table, key, values); } @@ -271,7 +271,7 @@ public Status update(String table, String key, */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { // INSERT INTO table VALUES // (key, _column_family:entry,getKey(), entry.getValue()), (...); diff --git a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java index 7fa75fd13f..e6373089ac 100644 --- a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java +++ b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanClient.java @@ -65,8 +65,7 @@ public void cleanup() { infinispanManager = null; } - public Status read(String table, String key, Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { Map row; if (clustered) { @@ -98,8 +97,7 @@ public Status scan(String table, String startkey, int recordcount, return Status.OK; } - public Status update(String table, String key, - HashMap values) { + public Status update(String table, String key, Map values) { try { if (clustered) { AtomicMap row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key); @@ -122,8 +120,7 @@ public Status update(String table, String key, } } - public Status insert(String table, String key, - HashMap values) { + public Status insert(String table, String key, Map values) { try { if (clustered) { AtomicMap row = AtomicMapLookup.getAtomicMap(infinispanManager.getCache(table), key); diff --git a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java index 26ce835942..d2a535d1a2 100644 --- 
a/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java +++ b/infinispan/src/main/java/com/yahoo/ycsb/db/InfinispanRemoteClient.java @@ -51,7 +51,7 @@ public void cleanup() { } @Override - public Status insert(String table, String recordKey, HashMap values) { + public Status insert(String table, String recordKey, Map values) { String compositKey = createKey(table, recordKey); Map stringValues = new HashMap<>(); StringByteIterator.putAllAsStrings(stringValues, values); @@ -65,7 +65,7 @@ public Status insert(String table, String recordKey, HashMap fields, HashMap result) { + public Status read(String table, String recordKey, Set fields, Map result) { String compositKey = createKey(table, recordKey); try { Map values = cache().get(compositKey); @@ -100,7 +100,7 @@ public Status scan(String table, String startkey, int recordcount, Set f } @Override - public Status update(String table, String recordKey, HashMap values) { + public Status update(String table, String recordKey, Map values) { String compositKey = createKey(table, recordKey); try { Map stringValues = new HashMap<>(); diff --git a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java index aeaf431fdc..294fa096c4 100644 --- a/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java +++ b/jdbc/src/main/java/com/yahoo/ycsb/db/JdbcDBClient.java @@ -82,7 +82,7 @@ public class JdbcDBClient extends DB { /** The field name prefix in the table. 
*/ public static final String COLUMN_PREFIX = "FIELD"; - private ArrayList conns; + private List conns; private boolean initialized = false; private Properties props; private int jdbcFetchSize; @@ -312,7 +312,7 @@ private PreparedStatement createAndCacheScanStatement(StatementType scanType, St } @Override - public Status read(String tableName, String key, Set fields, HashMap result) { + public Status read(String tableName, String key, Set fields, Map result) { try { StatementType type = new StatementType(StatementType.Type.READ, tableName, 1, "", getShardIndexByKey(key)); PreparedStatement readStatement = cachedStatements.get(type); @@ -370,7 +370,7 @@ public Status scan(String tableName, String startKey, int recordcount, Set values) { + public Status update(String tableName, String key, Map values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); @@ -397,7 +397,7 @@ public Status update(String tableName, String key, HashMap } @Override - public Status insert(String tableName, String key, HashMap values) { + public Status insert(String tableName, String key, Map values) { try { int numFields = values.size(); OrderedFieldInfo fieldInfo = getFieldInfo(values); @@ -483,7 +483,7 @@ public Status delete(String tableName, String key) { } } - private OrderedFieldInfo getFieldInfo(HashMap values) { + private OrderedFieldInfo getFieldInfo(Map values) { String fieldKeys = ""; List fieldValues = new ArrayList<>(); int count = 0; diff --git a/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java b/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java index d298f9e45f..212ebc0e26 100644 --- a/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java +++ b/jdbc/src/test/java/com/yahoo/ycsb/db/JdbcDBClientTest.java @@ -26,7 +26,9 @@ import java.sql.*; import java.util.HashMap; +import java.util.Map; import java.util.HashSet; +import java.util.Set; import java.util.Properties; import java.util.Vector; @@ -246,7 +248,7 @@ public 
void updateTest() { public void readTest() { String insertKey = "user0"; HashMap insertMap = insertRow(insertKey); - HashSet readFields = new HashSet(); + Set readFields = new HashSet(); HashMap readResultMap = new HashMap(); // Test reading a single field @@ -300,12 +302,12 @@ public void deleteTest() { @Test public void scanTest() throws SQLException { - HashMap> keyMap = new HashMap>(); + Map> keyMap = new HashMap>(); for (int i = 0; i < 5; i++) { String insertKey = KEY_PREFIX + i; keyMap.put(insertKey, insertRow(insertKey)); } - HashSet fieldSet = new HashSet(); + Set fieldSet = new HashSet(); fieldSet.add("FIELD0"); fieldSet.add("FIELD1"); int startIndex = 1; @@ -318,7 +320,7 @@ public void scanTest() throws SQLException { assertEquals("Assert the correct number of results rows were returned", resultRows, resultVector.size()); // Check each vector row to make sure we have the correct fields int testIndex = startIndex; - for (HashMap result: resultVector) { + for (Map result: resultVector) { assertEquals("Assert that this row has the correct number of fields", fieldSet.size(), result.size()); for (String field: fieldSet) { assertEquals("Assert this field is correct in this row", keyMap.get(KEY_PREFIX + testIndex).get(field).toString(), result.get(field).toString()); diff --git a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java index b7ae0e2b0e..ebd6a8225f 100644 --- a/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java +++ b/kudu/src/main/java/com/yahoo/ycsb/db/KuduYCSBClient.java @@ -32,6 +32,7 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.Map; import java.util.List; import java.util.Properties; import java.util.Set; @@ -188,10 +189,8 @@ public void cleanup() throws DBException { } @Override - public Status read(String table, - String key, - Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, + Map result) { Vector> 
results = new Vector<>(); final Status status = scan(table, key, 1, fields, results); if (!status.equals(Status.OK)) { @@ -272,7 +271,7 @@ private void addAllRowsToResult(RowResultIterator it, } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Update update = this.kuduTable.newUpdate(); PartialRow row = update.getRow(); row.addString(KEY, key); @@ -288,7 +287,7 @@ public Status update(String table, String key, HashMap val } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { Insert insert = this.kuduTable.newInsert(); PartialRow row = insert.getRow(); row.addString(KEY, key); diff --git a/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java b/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java index 012a96cbf1..0d50c41d6f 100644 --- a/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java +++ b/mapkeeper/src/main/java/com/yahoo/ycsb/db/MapKeeperClient.java @@ -129,7 +129,7 @@ String strResponse(BinaryResponse buf) { @Override public int read(String table, String key, Set fields, - HashMap result) { + Map result) { try { ByteBuffer buf = bufStr(key); @@ -177,7 +177,7 @@ public int scan(String table, String startkey, int recordcount, @Override public int update(String table, String key, - HashMap values) { + Map values) { try { if(!writeallfields) { HashMap oldval = new HashMap(); @@ -197,7 +197,7 @@ public int update(String table, String key, @Override public int insert(String table, String key, - HashMap values) { + Map values) { try { int ret = ycsbThriftRet(c.insert(table, bufStr(key), encode(values)), ResponseCode.Success, ResponseCode.RecordExists); return ret; diff --git a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java index 85f1f5dc6d..b8f69384ed 100644 --- 
a/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java +++ b/memcached/src/main/java/com/yahoo/ycsb/db/MemcachedClient.java @@ -180,7 +180,7 @@ protected net.spy.memcached.MemcachedClient createMemcachedClient() @Override public Status read( String table, String key, Set fields, - HashMap result) { + Map result) { key = createQualifiedKey(table, key); try { GetFuture future = memcachedClient().asyncGet(key); @@ -204,7 +204,7 @@ public Status scan( @Override public Status update( - String table, String key, HashMap values) { + String table, String key, Map values) { key = createQualifiedKey(table, key); try { OperationFuture future = @@ -218,7 +218,7 @@ public Status update( @Override public Status insert( - String table, String key, HashMap values) { + String table, String key, Map values) { key = createQualifiedKey(table, key); try { OperationFuture future = @@ -290,7 +290,7 @@ protected static void fromJson( protected static String toJson(Map values) throws IOException { ObjectNode node = MAPPER.createObjectNode(); - HashMap stringMap = StringByteIterator.getStringMap(values); + Map stringMap = StringByteIterator.getStringMap(values); for (Map.Entry pair : stringMap.entrySet()) { node.put(pair.getKey(), pair.getValue()); } diff --git a/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java b/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java index a977fbe456..3bbb5fbde8 100644 --- a/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java +++ b/mongodb/src/main/java/com/yahoo/ycsb/db/AsyncMongoDbClient.java @@ -251,7 +251,7 @@ public final void init() throws DBException { */ @Override public final Status insert(final String table, final String key, - final HashMap values) { + final Map values) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder toInsert = @@ -329,7 +329,7 @@ public final Status insert(final String table, final String key, */ @Override public final Status read(final 
String table, final String key, - final Set fields, final HashMap result) { + final Set fields, final Map result) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder query = @@ -450,7 +450,7 @@ public final Status scan(final String table, final String startkey, */ @Override public final Status update(final String table, final String key, - final HashMap values) { + final Map values) { try { final MongoCollection collection = database.getCollection(table); final DocumentBuilder query = BuilderFactory.start().add("_id", key); @@ -477,7 +477,7 @@ public final Status update(final String table, final String key, * @param queryResult * The document to fill from. */ - protected final void fillMap(final HashMap result, + protected final void fillMap(final Map result, final Document queryResult) { for (final Element be : queryResult) { if (be.getType() == ElementType.BINARY) { diff --git a/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java b/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java index 2b7cb114fe..5704b413dc 100644 --- a/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java +++ b/mongodb/src/main/java/com/yahoo/ycsb/db/MongoDbClient.java @@ -251,7 +251,7 @@ public void init() throws DBException { */ @Override public Status insert(String table, String key, - HashMap values) { + Map values) { try { MongoCollection collection = database.getCollection(table); Document toInsert = new Document("_id", key); @@ -315,7 +315,7 @@ public Status insert(String table, String key, */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { MongoCollection collection = database.getCollection(table); Document query = new Document("_id", key); @@ -428,7 +428,7 @@ public Status scan(String table, String startkey, int recordcount, */ @Override public Status update(String table, String key, - HashMap values) { + Map values) { try { MongoCollection collection = 
database.getCollection(table); diff --git a/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java b/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java index 9a0b095f3d..90d343ebf0 100644 --- a/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java +++ b/mongodb/src/test/java/com/yahoo/ycsb/db/AbstractDBTestCases.java @@ -37,6 +37,7 @@ import java.net.Socket; import java.util.Collections; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; @@ -281,7 +282,7 @@ public void testScan() { assertThat("Read did not return success (0).", result, is(Status.OK)); assertThat(results.size(), is(5)); for (int i = 0; i < 5; ++i) { - HashMap read = results.get(i); + Map read = results.get(i); for (String key : keys) { ByteIterator iter = read.get(key); diff --git a/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java b/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java index 8282062053..e4faad6558 100644 --- a/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java +++ b/nosqldb/src/main/java/com/yahoo/ycsb/db/NoSqlDbClient.java @@ -180,8 +180,7 @@ private static String getFieldFromKey(Key key) { } @Override - public Status read(String table, String key, Set fields, - HashMap result) { + public Status read(String table, String key, Set fields, Map result) { Key kvKey = createKey(table, key); SortedMap kvResult; try { @@ -212,8 +211,7 @@ public Status scan(String table, String startkey, int recordcount, } @Override - public Status update(String table, String key, - HashMap values) { + public Status update(String table, String key, Map values) { for (Map.Entry entry : values.entrySet()) { Key kvKey = createKey(table, key, entry.getKey()); Value kvValue = Value.createValue(entry.getValue().toArray()); @@ -229,8 +227,7 @@ public Status update(String table, String key, } @Override - public Status insert(String table, String key, - HashMap values) { + public 
Status insert(String table, String key, Map values) { return update(table, key, values); } diff --git a/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java b/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java index 5c54d0c8ac..caa5f7dc43 100644 --- a/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java +++ b/orientdb/src/main/java/com/yahoo/ycsb/db/OrientDBClient.java @@ -31,7 +31,11 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.util.*; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.Vector; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -192,7 +196,7 @@ public void cleanup() throws DBException { } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODocument document = new ODocument(CLASS); @@ -228,7 +232,7 @@ public Status delete(String table, String key) { } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); final ODocument document = dictionary.get(key); @@ -251,7 +255,7 @@ public Status read(String table, String key, Set fields, HashMap values) { + public Status update(String table, String key, Map values) { while (true) { try (ODatabaseDocumentTx db = databasePool.acquire()) { final ODictionary dictionary = db.getMetadata().getIndexManager().getDictionary(); diff --git a/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java b/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java index 4e296abc9a..8f599fd43e 100644 --- a/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java +++ 
b/rados/src/main/java/com/yahoo/ycsb/db/RadosClient.java @@ -32,6 +32,7 @@ import java.io.File; import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; @@ -102,7 +103,7 @@ public void cleanup() throws DBException { } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { byte[] buffer; try { @@ -137,7 +138,7 @@ public Status read(String table, String key, Set fields, HashMap values) { + public Status insert(String table, String key, Map values) { JSONObject json = new JSONObject(); for (final Entry e : values.entrySet()) { json.put(e.getKey(), e.getValue().toString()); @@ -162,7 +163,7 @@ public Status delete(String table, String key) { } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { Status rtn = delete(table, key); if (rtn.equals(Status.OK)) { return insert(table, key, values); diff --git a/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java b/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java index fbcfcb0a31..a571a2c0de 100644 --- a/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java +++ b/redis/src/main/java/com/yahoo/ycsb/db/RedisClient.java @@ -34,6 +34,7 @@ import redis.clients.jedis.Protocol; import java.util.HashMap; +import java.util.Map; import java.util.Iterator; import java.util.List; import java.util.Properties; @@ -94,7 +95,7 @@ private double hash(String key) { @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (fields == null) { StringByteIterator.putAllAsByteIterators(result, jedis.hgetAll(key)); } else { @@ -116,7 +117,7 @@ public Status read(String table, String key, Set fields, @Override public Status insert(String table, String key, - HashMap values) { + Map values) { if (jedis.hmset(key, 
StringByteIterator.getStringMap(values)) .equals("OK")) { jedis.zadd(INDEX_KEY, hash(key), key); @@ -133,7 +134,7 @@ public Status delete(String table, String key) { @Override public Status update(String table, String key, - HashMap values) { + Map values) { return jedis.hmset(key, StringByteIterator.getStringMap(values)) .equals("OK") ? Status.OK : Status.ERROR; } diff --git a/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java b/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java index 2fd14673c3..3daa7fe472 100644 --- a/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java +++ b/rest/src/main/java/com/yahoo/ycsb/webservice/rest/RestClient.java @@ -23,6 +23,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.util.HashMap; +import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.Vector; @@ -101,7 +102,7 @@ private void setupClient() { } @Override - public Status read(String table, String endpoint, Set fields, HashMap result) { + public Status read(String table, String endpoint, Set fields, Map result) { int responseCode; try { responseCode = httpGet(urlPrefix + endpoint, result); @@ -116,7 +117,7 @@ public Status read(String table, String endpoint, Set fields, HashMap values) { + public Status insert(String table, String endpoint, Map values) { int responseCode; try { responseCode = httpExecute(new HttpPost(urlPrefix + endpoint), values.get("data").toString()); @@ -146,7 +147,7 @@ public Status delete(String table, String endpoint) { } @Override - public Status update(String table, String endpoint, HashMap values) { + public Status update(String table, String endpoint, Map values) { int responseCode; try { responseCode = httpExecute(new HttpPut(urlPrefix + endpoint), values.get("data").toString()); @@ -199,7 +200,7 @@ private int handleExceptions(Exception e, String url, String method) { } // Connection is automatically released back in case of an exception. 
- private int httpGet(String endpoint, HashMap result) throws IOException { + private int httpGet(String endpoint, Map result) throws IOException { requestTimedout.setIsSatisfied(false); Thread timer = new Thread(new Timer(execTimeout, requestTimedout)); timer.start(); diff --git a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java index 42c3e90e40..d08f4dd532 100644 --- a/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java +++ b/riak/src/main/java/com/yahoo/ycsb/db/riak/RiakKVClient.java @@ -228,7 +228,7 @@ public void init() throws DBException { * @return Zero on success, a non-zero error code on error */ @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { Location location = new Location(new Namespace(bucketType, table), key); FetchValue fv = new FetchValue.Builder(location).withOption(FetchValue.Option.R, rvalue).build(); FetchValue.Response response; @@ -258,8 +258,9 @@ public Status read(String table, String key, Set fields, HashMap partialResult = new HashMap<>(); + createResultHashMap(fields, response, partialResult); + result.putAll(partialResult); return Status.OK; } @@ -403,7 +404,7 @@ private FetchValue.Response fetch(FetchValue fv) throws TimeoutException { * @return Zero on success, a non-zero error code on error */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { Location location = new Location(new Namespace(bucketType, table), key); RiakObject object = new RiakObject(); @@ -492,7 +493,7 @@ public RiakObject apply(RiakObject original) { * @return Zero on success, a non-zero error code on error */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { // If eventual consistency model is in use, 
then an update operation is pratically equivalent to an insert one. if (!strongConsistency) { return insert(table, key, values); diff --git a/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java b/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java index 8ef3f5be38..71b7f7cd53 100644 --- a/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java +++ b/s3/src/main/java/com/yahoo/ycsb/db/S3Client.java @@ -258,7 +258,7 @@ public void init() throws DBException { */ @Override public Status insert(String bucket, String key, - HashMap values) { + Map values) { return writeToStorage(bucket, key, values, true, sse, ssecKey); } /** @@ -278,7 +278,7 @@ public Status insert(String bucket, String key, */ @Override public Status read(String bucket, String key, Set fields, - HashMap result) { + Map result) { return readFromStorage(bucket, key, result, ssecKey); } /** @@ -296,7 +296,7 @@ public Status read(String bucket, String key, Set fields, */ @Override public Status update(String bucket, String key, - HashMap values) { + Map values) { return writeToStorage(bucket, key, values, false, sse, ssecKey); } /** @@ -336,8 +336,8 @@ public Status scan(String bucket, String startkey, int recordcount, * */ protected Status writeToStorage(String bucket, String key, - HashMap values, Boolean updateMarker, - String sseLocal, SSECustomerKey ssecLocal) { + Map values, Boolean updateMarker, + String sseLocal, SSECustomerKey ssecLocal) { int totalSize = 0; int fieldCount = values.size(); //number of fields to concatenate // getting the first field in the values @@ -422,7 +422,7 @@ protected Status writeToStorage(String bucket, String key, * */ protected Status readFromStorage(String bucket, String key, - HashMap result, SSECustomerKey ssecLocal) { + Map result, SSECustomerKey ssecLocal) { try { Map.Entry objectAndMetadata = getS3ObjectAndMetadata(bucket, key, ssecLocal); InputStream objectData = objectAndMetadata.getKey().getObjectContent(); //consuming the stream diff --git 
a/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java b/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java index 4232221bb6..9b13136357 100644 --- a/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java +++ b/solr/src/main/java/com/yahoo/ycsb/db/solr/SolrClient.java @@ -116,7 +116,7 @@ public void cleanup() throws DBException { * discussion of error codes. */ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { SolrInputDocument doc = new SolrInputDocument(); @@ -182,7 +182,7 @@ public Status delete(String table, String key) { */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Boolean returnFields = false; String[] fieldList = null; @@ -225,7 +225,7 @@ public Status read(String table, String key, Set fields, * discussion of error codes. */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { SolrInputDocument updatedDoc = new SolrInputDocument(); updatedDoc.addField("id", key); diff --git a/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java b/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java index 50346b7841..3affce3710 100644 --- a/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java +++ b/solr6/src/main/java/com/yahoo/ycsb/db/solr6/SolrClient.java @@ -115,7 +115,7 @@ public void cleanup() throws DBException { * discussion of error codes. 
*/ @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { try { SolrInputDocument doc = new SolrInputDocument(); @@ -181,7 +181,7 @@ public Status delete(String table, String key) { */ @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { try { Boolean returnFields = false; String[] fieldList = null; @@ -224,7 +224,7 @@ public Status read(String table, String key, Set fields, * discussion of error codes. */ @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { try { SolrInputDocument updatedDoc = new SolrInputDocument(); updatedDoc.addField("id", key); diff --git a/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java b/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java index e86120e1cf..8d16b0a824 100644 --- a/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java +++ b/tarantool/src/main/java/com/yahoo/ycsb/db/TarantoolClient.java @@ -60,7 +60,7 @@ public void cleanup() throws DBException { } @Override - public Status insert(String table, String key, HashMap values) { + public Status insert(String table, String key, Map values) { return replace(key, values, "Can't insert element"); } @@ -78,7 +78,7 @@ private HashMap tupleConvertFilter(List input, Set } @Override - public Status read(String table, String key, Set fields, HashMap result) { + public Status read(String table, String key, Set fields, Map result) { try { List response = this.connection.select(this.spaceNo, 0, Arrays.asList(key), 0, 1, 0); result = tupleConvertFilter(response, fields); @@ -127,11 +127,11 @@ public Status delete(String table, String key) { } @Override - public Status update(String table, String key, HashMap values) { + public Status update(String table, String key, Map values) { return replace(key, values, "Can't replace element"); } - 
private Status replace(String key, HashMap values, String exceptionDescription) { + private Status replace(String key, Map values, String exceptionDescription) { int j = 0; String[] tuple = new String[1 + 2 * values.size()]; tuple[0] = key; diff --git a/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java b/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java index a4132b2f40..4bc716cb55 100644 --- a/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java +++ b/voldemort/src/main/java/com/yahoo/ycsb/db/VoldemortClient.java @@ -85,7 +85,7 @@ public Status delete(String table, String key) { @Override public Status insert(String table, String key, - HashMap values) { + Map values) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } @@ -96,7 +96,7 @@ public Status insert(String table, String key, @Override public Status read(String table, String key, Set fields, - HashMap result) { + Map result) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; } @@ -130,7 +130,7 @@ public Status scan(String table, String startkey, int recordcount, @Override public Status update(String table, String key, - HashMap values) { + Map values) { if (checkStore(table) == Status.ERROR) { return Status.ERROR; }