From a92281c37b6b79dd0d9d18ef77ca9eceece6f31b Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Mon, 10 Jul 2023 10:17:32 +0530 Subject: [PATCH 01/46] Updating issue template for adding issue SLO (#1216) * adding bug SLO * adding bug SLO * Fixing comment --- .github/ISSUE_TEMPLATE/gcsfuse-bug-report.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/gcsfuse-bug-report.md b/.github/ISSUE_TEMPLATE/gcsfuse-bug-report.md index be723d9b20..f83861f6b4 100644 --- a/.github/ISSUE_TEMPLATE/gcsfuse-bug-report.md +++ b/.github/ISSUE_TEMPLATE/gcsfuse-bug-report.md @@ -26,3 +26,6 @@ Steps to reproduce the behavior: **Additional context** Add any other context about the problem here. + +**SLO:** +24 hrs to respond and 7 days to close the issue. From 4f04191f7d56e597761afc7d4a7c58f62bfc04da Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Tue, 11 Jul 2023 22:27:52 +0530 Subject: [PATCH 02/46] Integration tests --key-file flag and GOOGLE_APPLICATION_CREDENTIALS env with admin permission tests (#1167) * updating go version * empty commit * local commit * local changes * local changes * local changes * adding key file tests * testing * testing * testing * testing * local changes * local changes * local changes * local changes * testing * testing * testing * testing * testing * adding test for admin creds * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * testing * formating * testing defer statement * testing defer statement for deleting credentials * adding comment * testing with error * testing with error * testing with error * removing testing statement * adding testbucket and mntdir in commnd * adding comment * updating bucket name * updating bucket name * removing unnecessary changes * removing unnecessary changes * removing unnecessary changes * formatting * conflict * adding error 
handling * testing * small fix * removing creds tests from implicit and explicit dir tests * testing * testing * testing * testing * removing testing statement * adding creds tests in operations back * Testing * Testing * Testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * create service account key testing * adding remaining changes * adding remaining changes * adding remaining changes * testing service account * testing service account * testing service account * adding comments * removing unnecessary changes * formatting * testing * testing * testing * testing * removing without key file tests * small fix * formalizing for reuse * small fix * removing unnecessary changes * formatting * updating comment * updating comment * updating comment * fixing comments * adding comment * testing * testing * adding condintion for service account already exsit * adding condintion for service account already exsit * testing time * running tests only for operations --- .../operations/operations_test.go | 6 + .../util/creds_tests/creds.go | 104 ++++++++++++++++++ .../creds_tests/testdata/create_key_file.sh | 17 +++ .../testdata/create_service_account.sh | 22 ++++ .../testdata/provide_permission.sh | 20 ++++ ...on_and_delete_service_account_and_creds.sh | 20 ++++ 6 files changed, 189 insertions(+) create mode 100644 tools/integration_tests/util/creds_tests/creds.go create mode 100644 tools/integration_tests/util/creds_tests/testdata/create_key_file.sh create mode 100644 
tools/integration_tests/util/creds_tests/testdata/create_service_account.sh create mode 100644 tools/integration_tests/util/creds_tests/testdata/provide_permission.sh create mode 100644 tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index 8acf14058b..01200a4a44 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -20,6 +20,7 @@ import ( "os" "testing" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/creds_tests" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/only_dir_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" @@ -107,5 +108,10 @@ func TestMain(m *testing.M) { successCode = only_dir_mounting.RunTests(flags, m) } + if successCode == 0 { + // Test for admin permission on test bucket. + successCode = creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectAdmin", m) + } + os.Exit(successCode) } diff --git a/tools/integration_tests/util/creds_tests/creds.go b/tools/integration_tests/util/creds_tests/creds.go new file mode 100644 index 0000000000..f1106f7e44 --- /dev/null +++ b/tools/integration_tests/util/creds_tests/creds.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Run tests for --key-file flag and GOOGLE_APPLICATION_CREDENTIALS env variable + +package creds_tests + +import ( + "fmt" + "log" + "os" + "path" + "testing" + + "cloud.google.com/go/compute/metadata" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const NameOfServiceAccount = "creds-test-gcsfuse" + +func setPermission(permission string, serviceAccount string) { + // Provide permission to the bucket. + setup.RunScriptForTestData("../util/creds_tests/testdata/provide_permission.sh", setup.TestBucket(), serviceAccount, permission) +} + +func RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(testFlagSet [][]string, permission string, m *testing.M) (successCode int) { + // Fetching project-id to get service account id. + id, err := metadata.ProjectID() + if err != nil { + log.Printf("Error in fetching project id: %v", err) + } + + // Service account id format is name@project-id.iam.gserviceaccount.com + serviceAccount := NameOfServiceAccount + "@" + id + ".iam.gserviceaccount.com" + + // Create service account + setup.RunScriptForTestData("../util/creds_tests/testdata/create_service_account.sh", NameOfServiceAccount, serviceAccount) + + key_file_path := path.Join(os.Getenv("HOME"), "creds.json") + + // Create credentials + setup.RunScriptForTestData("../util/creds_tests/testdata/create_key_file.sh", key_file_path, serviceAccount) + + // Provide permission to service account for testing. 
+ setPermission(permission, serviceAccount) + + // Revoke the permission and delete creds and service account after testing. + defer setup.RunScriptForTestData("../util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh", serviceAccount, key_file_path) + + // Without –key-file flag and GOOGLE_APPLICATION_CREDENTIALS + // This case will not get covered as gcsfuse internally authenticates from a metadata server on GCE VM. + // https://github.com/golang/oauth2/blob/master/google/default.go#L160 + + // Testing with GOOGLE_APPLICATION_CREDENTIALS env variable + err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", key_file_path) + if err != nil { + setup.LogAndExit(fmt.Sprintf("Error in setting environment variable: %v", err)) + } + + successCode = static_mounting.RunTests(testFlagSet, m) + + if successCode != 0 { + return + } + + // Testing with --key-file and GOOGLE_APPLICATION_CREDENTIALS env variable set + keyFileFlag := "--key-file=" + key_file_path + + for i := 0; i < len(testFlagSet); i++ { + testFlagSet[i] = append(testFlagSet[i], keyFileFlag) + } + + successCode = static_mounting.RunTests(testFlagSet, m) + + if successCode != 0 { + return + } + + err = os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS") + if err != nil { + setup.LogAndExit(fmt.Sprintf("Error in unsetting environment variable: %v", err)) + } + + // Testing with --key-file flag only + successCode = static_mounting.RunTests(testFlagSet, m) + + if successCode != 0 { + return + } + + return successCode +} diff --git a/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh b/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh new file mode 100644 index 0000000000..a7888bce19 --- /dev/null +++ b/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh @@ -0,0 +1,17 @@ +# Copyright 2023 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KEY_FILE_PATH=$1 +SERVICE_ACCOUNT=$2 +gcloud iam service-accounts keys create $KEY_FILE_PATH --iam-account=$SERVICE_ACCOUNT diff --git a/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh b/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh new file mode 100644 index 0000000000..5b59979f2b --- /dev/null +++ b/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh @@ -0,0 +1,22 @@ +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SERVICE_ACCOUNT=$1 +SERVICE_ACCOUNT_ID=$2 +# Delete service account if already exist. +gcloud iam service-accounts delete $SERVICE_ACCOUNT_ID +if [ $? -eq 1 ]; then + echo "Service account does not exist." 
+fi +gcloud iam service-accounts create $SERVICE_ACCOUNT --description="$SERVICE_ACCOUNT" --display-name="$SERVICE_ACCOUNT" diff --git a/tools/integration_tests/util/creds_tests/testdata/provide_permission.sh b/tools/integration_tests/util/creds_tests/testdata/provide_permission.sh new file mode 100644 index 0000000000..18d0e564c6 --- /dev/null +++ b/tools/integration_tests/util/creds_tests/testdata/provide_permission.sh @@ -0,0 +1,20 @@ +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Provide permission to the bucket. +TEST_BUCKET=$1 +SERVICE_ACCOUNT=$2 +PERMISSION=$3 + +gsutil iam ch serviceAccount:$SERVICE_ACCOUNT:$PERMISSION gs://$TEST_BUCKET diff --git a/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh b/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh new file mode 100644 index 0000000000..d43378cd29 --- /dev/null +++ b/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh @@ -0,0 +1,20 @@ +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Delete service account after testing +SERVICE_ACCOUNT=$1 +KEY_FILE=$2 +gcloud auth revoke $SERVICE_ACCOUNT +gcloud iam service-accounts delete $SERVICE_ACCOUNT +rm $KEY_FILE From 8cf5254b763e71ee96e9dc1eca867dc377a982f7 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Wed, 12 Jul 2023 14:08:24 +0530 Subject: [PATCH 03/46] Changing the number of epoch based on previous observation. (#1222) --- perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh | 2 +- .../scripts/ml_tests/tf/resnet/setup_scripts/resnet_runner.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh index e9fc617d95..1af6bd1f55 100644 --- a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh @@ -74,7 +74,7 @@ gsutil cp start_time.txt $ARTIFACTS_BUCKET_PATH/ --norm_last_layer False \ --use_fp16 False \ --clip_grad 0 \ - --epochs 100 \ + --epochs 80 \ --global_crops_scale 0.25 1.0 \ --local_crops_number 10 \ --local_crops_scale 0.05 0.25 \ diff --git a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/resnet_runner.py b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/resnet_runner.py index 447c1d4300..6d7ae33f87 100644 --- a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/resnet_runner.py +++ b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/resnet_runner.py @@ -111,5 +111,5 @@ params=exp_config, model_dir=model_dir, run_post_eval=True, - epochs=1000, 
+ epochs=675, clear_kernel_cache=True) From e2244bff8c338f493a380eeceb5ea5e6609bfb6f Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Wed, 12 Jul 2023 17:18:36 +0530 Subject: [PATCH 04/46] Adding rpm digest while creating rpm package (#1215) --- tools/package_gcsfuse_docker/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/package_gcsfuse_docker/Dockerfile b/tools/package_gcsfuse_docker/Dockerfile index 87463b2cef..2eef93278a 100644 --- a/tools/package_gcsfuse_docker/Dockerfile +++ b/tools/package_gcsfuse_docker/Dockerfile @@ -66,6 +66,7 @@ RUN fpm \ -C ${GCSFUSE_BIN} \ -v ${GCSFUSE_VERSION} \ -d fuse \ + --rpm-digest sha256 \ --vendor "" \ --url "https://$GCSFUSE_REPO" \ --description "A user-space file system for Google Cloud Storage." From 3399736d3e2773a6b597977b4b15f3044dd635be Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 12 Jul 2023 18:01:48 +0530 Subject: [PATCH 05/46] removing defer as not working properly in for loop (#1223) --- tools/integration_tests/util/operations/dir_operations.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/integration_tests/util/operations/dir_operations.go b/tools/integration_tests/util/operations/dir_operations.go index 7443e97738..23f3928cc3 100644 --- a/tools/integration_tests/util/operations/dir_operations.go +++ b/tools/integration_tests/util/operations/dir_operations.go @@ -76,7 +76,7 @@ func CreateDirectoryWithNFiles(numberOfFiles int, dirPath string, prefix string, } // Closing file at the end. 
- defer CloseFile(file) + CloseFile(file) } } From 90b976727c1afa6f008852e82367a50ee732c9d3 Mon Sep 17 00:00:00 2001 From: Ashmeen Kaur <57195160+ashmeenkaur@users.noreply.github.com> Date: Tue, 18 Jul 2023 10:44:37 +0530 Subject: [PATCH 06/46] Added CD pipeline scripts (#1224) * Added scripts * review comments * move cd_scripts to tools/ --- tools/cd_scripts/e2e_test.sh | 121 +++++++++++++++++++++++++++++++ tools/cd_scripts/install_test.sh | 107 +++++++++++++++++++++++++++ 2 files changed, 228 insertions(+) create mode 100644 tools/cd_scripts/e2e_test.sh create mode 100644 tools/cd_scripts/install_test.sh diff --git a/tools/cd_scripts/e2e_test.sh b/tools/cd_scripts/e2e_test.sh new file mode 100644 index 0000000000..ebec1237af --- /dev/null +++ b/tools/cd_scripts/e2e_test.sh @@ -0,0 +1,121 @@ +#! /bin/bash +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Print commands and their arguments as they are executed. +set -x +# Exit immediately if a command exits with a non-zero status. +set -e + +#details.txt file contains the release version and commit hash of the current release. +gsutil cp gs://gcsfuse-release-packages/version-detail/details.txt . 
+# Writing VM instance name to details.txt (Format: release-test-) +curl http://metadata.google.internal/computeMetadata/v1/instance/name -H "Metadata-Flavor: Google" >> details.txt + +# Based on the os type(from vm instance name) in detail.txt, run the following commands to add starterscriptuser +if grep -q ubuntu details.txt || grep -q debian details.txt; +then +# For ubuntu and debian os + sudo adduser --ingroup google-sudoers --disabled-password --home=/home/starterscriptuser --gecos "" starterscriptuser +else +# For rhel and centos + sudo adduser -g google-sudoers --home-dir=/home/starterscriptuser starterscriptuser +fi + +# Run the following as starterscriptuser +sudo -u starterscriptuser bash -c ' +# Exit immediately if a command exits with a non-zero status. +set -e +# Print commands and their arguments as they are executed. +set -x + +#Copy details.txt to starterscriptuser home directory and create logs.txt +cd ~/ +cp /details.txt . +touch logs.txt + +echo User: $USER &>> ~/logs.txt +echo Current Working Directory: $(pwd) &>> ~/logs.txt + +# Based on the os type in detail.txt, run the following commands for setup +if grep -q ubuntu details.txt || grep -q debian details.txt; +then +# For Debian and Ubuntu os + sudo apt update + + #Install fuse + sudo apt install -y fuse + + # download and install gcsfuse deb package + gsutil cp gs://gcsfuse-release-packages/v$(sed -n 1p details.txt)/gcsfuse_$(sed -n 1p details.txt)_amd64.deb . + sudo dpkg -i gcsfuse_$(sed -n 1p details.txt)_amd64.deb |& tee -a ~/logs.txt + + # install wget + sudo apt install -y wget + + #install git + sudo apt install -y git + + #install build-essentials + sudo apt install -y build-essential +else +# For rhel and centos + sudo yum makecache + sudo yum check-update + + #Install fuse + sudo yum -y install fuse + + #download and install gcsfuse rpm package + gsutil cp gs://gcsfuse-release-packages/v$(sed -n 1p details.txt)/gcsfuse-$(sed -n 1p details.txt)-1.x86_64.rpm . 
+ sudo yum -y localinstall gcsfuse-$(sed -n 1p details.txt)-1.x86_64.rpm + + #install wget + sudo yum -y install wget + + #install git + sudo yum -y install git + + #install Development tools + sudo yum -y install gcc gcc-c++ make +fi + +# install go +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.4.linux-amd64.tar.gz +sudo tar -C /usr/local -xzf go_tar.tar.gz +export PATH=${PATH}:/usr/local/go/bin + +#Write gcsfuse and go version to log file +gcsfuse --version |& tee -a ~/logs.txt +go version |& tee -a ~/logs.txt + +# Clone and checkout gcsfuse repo +export PATH=${PATH}:/usr/local/go/bin +git clone https://github.com/googlecloudplatform/gcsfuse |& tee -a ~/logs.txt +cd gcsfuse +git checkout $(sed -n 2p ~/details.txt) |& tee -a ~/logs.txt + +#run tests with testbucket flag +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=$(sed -n 3p ~/details.txt) --testInstalledPackage --timeout=60m &>> ~/logs.txt + +if grep -q FAIL ~/logs.txt; +then + echo "Test failures detected" &>> ~/logs.txt +else + touch success.txt + gsutil cp success.txt gs://gcsfuse-release-packages/v$(sed -n 1p ~/details.txt)/$(sed -n 3p ~/details.txt)/ +fi + +gsutil cp ~/logs.txt gs://gcsfuse-release-packages/v$(sed -n 1p ~/details.txt)/$(sed -n 3p ~/details.txt)/ +' diff --git a/tools/cd_scripts/install_test.sh b/tools/cd_scripts/install_test.sh new file mode 100644 index 0000000000..d56a2fd6c6 --- /dev/null +++ b/tools/cd_scripts/install_test.sh @@ -0,0 +1,107 @@ +#! /bin/bash +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Print commands and their arguments as they are executed. +set -x + +#details.txt file contains the release version and commit hash of the current release. +gsutil cp gs://gcsfuse-release-packages/version-detail/details.txt . +# Writing VM instance name to details.txt (Format: release-test-) +curl http://metadata.google.internal/computeMetadata/v1/instance/name -H "Metadata-Flavor: Google" >> details.txt +touch ~/logs.txt + +# Based on the os type(from vm instance name) in detail.txt, run the following commands to install apt-transport-artifact-registry +if grep -q ubuntu details.txt || grep -q debian details.txt; +then +# For ubuntu and debian os + curl https://us-central1-apt.pkg.dev/doc/repo-signing-key.gpg | sudo apt-key add - && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - + echo 'deb http://packages.cloud.google.com/apt apt-transport-artifact-registry-stable main' | sudo tee -a /etc/apt/sources.list.d/artifact-registry.list + sudo apt update + sudo apt install apt-transport-artifact-registry + echo 'deb ar+https://us-apt.pkg.dev/projects/gcs-fuse-prod $(lsb_release -cs) main' | sudo tee -a /etc/apt/sources.list.d/artifact-registry.list + sudo apt update + + # Install released gcsfuse version + sudo apt install -y gcsfuse=$(sed -n 1p details.txt) -t gcsfuse-$(lsb_release -cs) |& tee -a ~/logs.txt +else +# For rhel and centos + sudo yum makecache + sudo yum -y install yum-plugin-artifact-registry +sudo tee -a /etc/yum.repos.d/artifact-registry.repo << EOF +[gcsfuse-el7-x86-64] +name=gcsfuse-el7-x86-64 
+baseurl=https://asia-yum.pkg.dev/projects/gcs-fuse-prod/gcsfuse-el7-x86-64 +enabled=1 +repo_gpgcheck=0 +gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg + https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOF + sudo yum makecache + sudo yum -y --enablerepo=gcsfuse-el7-x86-64 install gcsfuse-$(sed -n 1p details.txt)-1 |& tee -a ~/logs.txt +fi + +# Verify gcsfuse version (successful installation) +gcsfuse --version |& tee version.txt +installed_version=$(echo $(sed -n 1p version.txt) | cut -d' ' -f3) +if grep -q $installed_version details.txt; then + echo "GCSFuse latest version installed correctly." &>> ~/logs.txt +else + echo "Failure detected in latest gcsfuse version installation." &>> ~/logs.txt +fi + +# Uninstall gcsfuse latest version and install old version +if grep -q ubuntu details.txt || grep -q debian details.txt; +then + sudo apt remove -y gcsfuse + sudo apt install -y gcsfuse=0.42.5 -t gcsfuse-$(lsb_release -cs) |& tee -a ~/logs.txt +else + sudo yum -y remove gcsfuse + sudo yum -y install gcsfuse-0.42.5-1 |& tee -a ~/logs.txt +fi + +# verify old version installation +gcsfuse --version |& tee version.txt +installed_version=$(echo $(sed -n 1p version.txt) | cut -d' ' -f3) +if [ $installed_version == "0.42.5" ]; then + echo "GCSFuse old version (0.42.5) installed successfully" &>> ~/logs.txt +else + echo "Failure detected in GCSFuse old version installation." &>> ~/logs.txt +fi + +# Upgrade gcsfuse to latest version +if grep -q ubuntu details.txt || grep -q debian details.txt; +then + sudo apt install --only-upgrade gcsfuse |& tee -a ~/logs.txt +else + sudo yum -y upgrade gcsfuse |& tee -a ~/logs.txt +fi + +gcsfuse --version |& tee version.txt +installed_version=$(echo $(sed -n 1p version.txt) | cut -d' ' -f3) +if grep -q $installed_version details.txt; then + echo "GCSFuse successfully upgraded to latest version $installed_version." &>> ~/logs.txt +else + echo "Failure detected in upgrading to latest gcsfuse version." 
&>> ~/logs.txt +fi + +if grep -q Failure ~/logs.txt; then + echo "Test failed" &>> ~/logs.txt ; +else + touch success.txt + gsutil cp success.txt gs://gcsfuse-release-packages/v$(sed -n 1p details.txt)/installation-test/$(sed -n 3p details.txt)/ ; +fi + +gsutil cp ~/logs.txt gs://gcsfuse-release-packages/v$(sed -n 1p details.txt)/installation-test/$(sed -n 3p details.txt)/ From f396dc53db99ee5d36a69804a583a067b5bc1ec6 Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Tue, 18 Jul 2023 10:46:16 +0530 Subject: [PATCH 07/46] Integration tests for key-file flag with viewer permission tests (#1228) * testing with viewer permission * testing on operations * testing on operations * final testing * testing * testing --- tools/integration_tests/readonly/readonly_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/integration_tests/readonly/readonly_test.go b/tools/integration_tests/readonly/readonly_test.go index 75df9fce8e..a9636152ea 100644 --- a/tools/integration_tests/readonly/readonly_test.go +++ b/tools/integration_tests/readonly/readonly_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/creds_tests" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) @@ -78,6 +79,11 @@ func TestMain(m *testing.M) { setup.SetUpTestDirForTestBucketFlag() successCode := static_mounting.RunTests(flags, m) + if successCode == 0 { + // Test for viewer permission on test bucket. + successCode = creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectViewer", m) + } + // Delete objects from bucket after testing. 
setup.RunScriptForTestData("testdata/delete_objects.sh", setup.TestBucket()) From 438869ae6b4030d42823429fc2bbcc4c3e6bdb35 Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Tue, 18 Jul 2023 12:13:50 +0530 Subject: [PATCH 08/46] Get dir attribute integration tests (#1220) * adding test for dir attribute * renaming * fixing bytes comparison * adding more tests * adding sub dir attribute tests * fix failure * adding comment * adding const variable * removing unnecessary file * fixing comment * fix lint --- .../file_and_dir_attributes_test.go | 87 +++++++++++++++++++ .../operations/file_attributes_test.go | 51 ----------- 2 files changed, 87 insertions(+), 51 deletions(-) create mode 100644 tools/integration_tests/operations/file_and_dir_attributes_test.go delete mode 100644 tools/integration_tests/operations/file_attributes_test.go diff --git a/tools/integration_tests/operations/file_and_dir_attributes_test.go b/tools/integration_tests/operations/file_and_dir_attributes_test.go new file mode 100644 index 0000000000..66159a40d3 --- /dev/null +++ b/tools/integration_tests/operations/file_and_dir_attributes_test.go @@ -0,0 +1,87 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for file and directory attributes. 
+package operations_test + +import ( + "os" + "path" + "testing" + "time" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const DirAttrTest = "dirAttrTest" +const PrefixFileInDirAttrTest = "fileInDirAttrTest" +const NumberOfFilesInDirAttrTest = 2 +const BytesWrittenInFile = 14 + +func checkIfObjectAttrIsCorrect(objName string, preCreateTime time.Time, postCreateTime time.Time, byteSize int64, t *testing.T) { + oStat, err := os.Stat(objName) + + if err != nil { + t.Errorf("os.Stat error: %s, %v", objName, err) + } + statObjName := path.Join(setup.MntDir(), oStat.Name()) + if objName != statObjName { + t.Errorf("File name not matched in os.Stat, found: %s, expected: %s", statObjName, objName) + } + if (preCreateTime.After(oStat.ModTime())) || (postCreateTime.Before(oStat.ModTime())) { + t.Errorf("File modification time not in the expected time-range") + } + + if oStat.Size() != byteSize { + t.Errorf("File size is not %v bytes, found size: %d bytes", BytesWrittenInFile, oStat.Size()) + } +} + +func TestFileAttributes(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + preCreateTime := time.Now() + fileName := setup.CreateTempFile() + postCreateTime := time.Now() + + // The file size in createTempFile() is BytesWrittenInFile bytes + // https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/tools/integration_tests/util/setup/setup.go#L124 + checkIfObjectAttrIsCorrect(fileName, preCreateTime, postCreateTime, BytesWrittenInFile, t) +} + +func TestEmptyDirAttributes(t *testing.T) { + // Clean the mountedDirectory before running test. 
+ setup.CleanMntDir() + + preCreateTime := time.Now() + dirName := path.Join(setup.MntDir(), DirAttrTest) + operations.CreateDirectoryWithNFiles(0, dirName, "", t) + postCreateTime := time.Now() + + checkIfObjectAttrIsCorrect(dirName, preCreateTime, postCreateTime, 0, t) +} + +func TestNonEmptyDirAttributes(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + preCreateTime := time.Now() + dirName := path.Join(setup.MntDir(), DirAttrTest) + operations.CreateDirectoryWithNFiles(NumberOfFilesInDirAttrTest, dirName, PrefixFileInDirAttrTest, t) + postCreateTime := time.Now() + + checkIfObjectAttrIsCorrect(dirName, preCreateTime, postCreateTime, 0, t) +} diff --git a/tools/integration_tests/operations/file_attributes_test.go b/tools/integration_tests/operations/file_attributes_test.go deleted file mode 100644 index 8e591a3439..0000000000 --- a/tools/integration_tests/operations/file_attributes_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2023 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Provides integration tests for file attributes. -package operations_test - -import ( - "os" - "path" - "testing" - "time" - - "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" -) - -func TestFileAttributes(t *testing.T) { - // Clean the mountedDirectory before running test. 
- setup.CleanMntDir() - - preCreateTime := time.Now() - fileName := setup.CreateTempFile() - postCreateTime := time.Now() - - fStat, err := os.Stat(fileName) - - if err != nil { - t.Errorf("os.Stat error: %s, %v", fileName, err) - } - statFileName := path.Join(setup.MntDir(), fStat.Name()) - if fileName != statFileName { - t.Errorf("File name not matched in os.Stat, found: %s, expected: %s", statFileName, fileName) - } - if (preCreateTime.After(fStat.ModTime())) || (postCreateTime.Before(fStat.ModTime())) { - t.Errorf("File modification time not in the expected time-range") - } - // The file size in createTempFile() is 14 bytes - if fStat.Size() != 14 { - t.Errorf("File size is not 14 bytes, found size: %d bytes", fStat.Size()) - } -} From 640bed7d7094a2ca5c0f685f77bba77d8a70cc4c Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Thu, 20 Jul 2023 15:00:08 +0530 Subject: [PATCH 09/46] Persistent mounting integration tests (#1218) * adding test for persistent mounting * adding test for persistent mounting * adding test for persistent mounting * adding test for persistent mounting * Testing * Testing * Testing * Testing * Testing * Testing * Testing * Testing * Testing * testing * formating argument * adding -o flag * adding mounting test in every package * small fix * removing --o in * removing --o in * adding commands to run tests for mountedDirectory flag * small fix * fixing commands * adding comment * small fix * small fix * adding comment * adding comments and small fixed * fixing lint * fixing merge conflict * smallfi x * testing * testing * small fix * removing unnecessary commands * removing extra line * removing extra line --- .../operations/operations_test.go | 5 + .../readonly/readonly_test.go | 6 + .../rename_dir_limit/rename_dir_limit_test.go | 5 + .../run_tests_mounted_directory.sh | 121 ++++++++++++++++++ .../util/mounting/mounting.go | 4 +- .../only_dir_mounting/only_dir_mounting.go | 2 +- 
.../perisistent_mounting.go | 90 +++++++++++++ .../static_mounting/static_mounting.go | 2 +- .../implicit_and_explicit_dir_setup.go | 5 + tools/integration_tests/util/setup/setup.go | 15 ++- 10 files changed, 247 insertions(+), 8 deletions(-) create mode 100644 tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index 01200a4a44..6d37b19c13 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -22,6 +22,7 @@ import ( "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/creds_tests" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/only_dir_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/persistent_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) @@ -108,6 +109,10 @@ func TestMain(m *testing.M) { successCode = only_dir_mounting.RunTests(flags, m) } + if successCode == 0 { + successCode = persistent_mounting.RunTests(flags, m) + } + if successCode == 0 { // Test for admin permission on test bucket. 
successCode = creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectAdmin", m) diff --git a/tools/integration_tests/readonly/readonly_test.go b/tools/integration_tests/readonly/readonly_test.go index a9636152ea..0a9003781a 100644 --- a/tools/integration_tests/readonly/readonly_test.go +++ b/tools/integration_tests/readonly/readonly_test.go @@ -22,6 +22,7 @@ import ( "testing" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/creds_tests" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/persistent_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) @@ -79,6 +80,11 @@ func TestMain(m *testing.M) { setup.SetUpTestDirForTestBucketFlag() successCode := static_mounting.RunTests(flags, m) + if successCode == 0 { + // Test for viewer permission on test bucket. + successCode = persistent_mounting.RunTests(flags, m) + } + if successCode == 0 { // Test for viewer permission on test bucket. 
successCode = creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectViewer", m) diff --git a/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go b/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go index c910de2612..28c5b19399 100644 --- a/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go +++ b/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/only_dir_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/persistent_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) @@ -59,5 +60,9 @@ func TestMain(m *testing.M) { successCode = only_dir_mounting.RunTests(flags, m) } + if successCode == 0 { + successCode = persistent_mounting.RunTests(flags, m) + } + os.Exit(successCode) } diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index 770df4d35c..d58e636140 100644 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -27,107 +27,228 @@ gcsfuse --enable-storage-client-library=true --implicit-dirs=true $TEST_BUCKET_N GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=true,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # Run integration tests for operations with --only-dir mounting. gcsfuse --only-dir testDir --enable-storage-client-library=true --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=true,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # Run integration tests for readonly directory with static mounting gcsfuse --o=ro --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o ro,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + gcsfuse --file-mode=544 --dir-mode=544 --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o file_mode=544,dir_mode=544,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + # Run integration tests for readonly with --only-dir mounting. gcsfuse --only-dir testDir --o=ro --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o ro,only_dir=testDir,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --file-mode=544 --dir-mode=544 --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,file_mode=544,dir_mode=544,implicit_dirs=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + # Run integration tests for rename_dir_limit directory with static mounting gcsfuse --rename-dir-limit=3 --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o rename_dir_limit=3,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --rename-dir-limit=3 $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o rename_dir_limit=3 +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # Run integration tests for rename_dir_limit with --only-dir mounting. 
gcsfuse --only-dir testDir --rename-dir-limit=3 --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,rename_dir_limit=3,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --rename-dir-limit=3 $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,rename_dir_limit=3,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # Run integration tests for implicit_dir directory with static mounting gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + gcsfuse --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + + # Run integration tests for implicit_dir with --only-dir mounting. gcsfuse --only-dir testDir --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false,implicit_dirs +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + # Run integration tests for explicit_dir directory with static mounting gcsfuse --enable-storage-client-library=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + gcsfuse --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR + # Run integration tests for explicit_dir with --only-dir mounting. gcsfuse --only-dir testDir --enable-storage-client-library=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + gcsfuse --only-dir testDir --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with persistent mounting +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +sudo umount $MOUNT_DIR + # Run integration tests for list_large_dir directory with static mounting gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/list_large_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME diff --git a/tools/integration_tests/util/mounting/mounting.go b/tools/integration_tests/util/mounting/mounting.go index 131e2b3d6f..68db9791a0 100644 --- a/tools/integration_tests/util/mounting/mounting.go +++ b/tools/integration_tests/util/mounting/mounting.go @@ -24,9 +24,9 @@ import ( "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) -func MountGcsfuse(flags []string) error { +func MountGcsfuse(binaryFile string, flags []string) error { mountCmd := exec.Command( - setup.BinFile(), + binaryFile, flags..., ) diff --git a/tools/integration_tests/util/mounting/only_dir_mounting/only_dir_mounting.go b/tools/integration_tests/util/mounting/only_dir_mounting/only_dir_mounting.go index 0b89b74bc8..8f389ea6ff 100644 --- a/tools/integration_tests/util/mounting/only_dir_mounting/only_dir_mounting.go +++ b/tools/integration_tests/util/mounting/only_dir_mounting/only_dir_mounting.go @@ -41,7 +41,7 @@ func mountGcsfuseWithOnlyDir(flags []string, dir string) (err error) { flags = append(flags, defaultArg[i]) } - err = mounting.MountGcsfuse(flags) + err = mounting.MountGcsfuse(setup.BinFile(), flags) return err } diff --git a/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go 
b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go new file mode 100644 index 0000000000..a664237192 --- /dev/null +++ b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go @@ -0,0 +1,90 @@ +//Copyright 2023 Google Inc. All Rights Reserved. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +package persistent_mounting + +import ( + "fmt" + "log" + "strings" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +// make e.g --enable-storage-client-library in enable_storage_client_library +func makePersistentMountingArgs(flags []string) (args []string, err error) { + var s string + for i := range flags { + // We are already passing flags with -o flag. + s = strings.Replace(flags[i], "--o=", "", -1) + // e.g. Convert --enable-storage-client-library to __enable_storage_client_library + s = strings.Replace(s, "-", "_", -1) + // e.g. 
Convert __enable_storage_client_library to enable_storage_client_library + s = strings.Replace(s, "__", "", -1) + args = append(args, s) + } + return +} + +func mountGcsfuseWithStaticMounting(flags []string) (err error) { + defaultArg := []string{setup.TestBucket(), + setup.MntDir(), + "-o", + "debug_gcs", + "-o", + "debug_fs", + "-o", + "debug_fuse", + "-o", + "log_file=" + setup.LogFile(), + "-o", + "log_format=text", + } + + persistentMountingArgs, err := makePersistentMountingArgs(flags) + if err != nil { + setup.LogAndExit("Error in converting flags for persistent mounting.") + } + + for i := 0; i < len(persistentMountingArgs); i++ { + // e.g. -o flag1, -o flag2, ... + defaultArg = append(defaultArg, "-o", persistentMountingArgs[i]) + } + + err = mounting.MountGcsfuse(setup.SbinFile(), defaultArg) + + return err +} + +func executeTestsForStatingMounting(flags [][]string, m *testing.M) (successCode int) { + var err error + + for i := 0; i < len(flags); i++ { + if err = mountGcsfuseWithStaticMounting(flags[i]); err != nil { + setup.LogAndExit(fmt.Sprintf("mountGcsfuse: %v\n", err)) + } + setup.ExecuteTestForFlagsSet(flags[i], m) + } + return +} + +func RunTests(flags [][]string, m *testing.M) (successCode int) { + successCode = executeTestsForStatingMounting(flags, m) + + log.Printf("Test log: %s\n", setup.LogFile()) + + return successCode +} diff --git a/tools/integration_tests/util/mounting/static_mounting/static_mounting.go b/tools/integration_tests/util/mounting/static_mounting/static_mounting.go index fac0ec074a..090b783025 100644 --- a/tools/integration_tests/util/mounting/static_mounting/static_mounting.go +++ b/tools/integration_tests/util/mounting/static_mounting/static_mounting.go @@ -36,7 +36,7 @@ func mountGcsfuseWithStaticMounting(flags []string) (err error) { flags = append(flags, defaultArg[i]) } - err = mounting.MountGcsfuse(flags) + err = mounting.MountGcsfuse(setup.BinFile(), flags) return err } diff --git 
a/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go b/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go index 77fee597c3..bec0d1d538 100644 --- a/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go +++ b/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go @@ -20,6 +20,7 @@ import ( "path" "testing" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/persistent_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" @@ -58,6 +59,10 @@ func RunTestsForImplicitDirAndExplicitDir(flags [][]string, m *testing.M) { successCode := static_mounting.RunTests(flags, m) + if successCode == 0 { + successCode = persistent_mounting.RunTests(flags, m) + } + os.Exit(successCode) } diff --git a/tools/integration_tests/util/setup/setup.go b/tools/integration_tests/util/setup/setup.go index 2d81f503d7..462a051484 100644 --- a/tools/integration_tests/util/setup/setup.go +++ b/tools/integration_tests/util/setup/setup.go @@ -38,10 +38,11 @@ const BufferSize = 100 const FilePermission_0600 = 0600 var ( - binFile string - logFile string - testDir string - mntDir string + binFile string + logFile string + testDir string + mntDir string + sbinFile string ) // Run the shell script to prepare the testData in the specified bucket. 
@@ -82,6 +83,10 @@ func BinFile() string { return binFile } +func SbinFile() string { + return sbinFile +} + func SetTestDir(testDirValue string) { testDir = testDirValue } @@ -142,10 +147,12 @@ func SetUpTestDir() error { return fmt.Errorf("BuildGcsfuse(%q): %w\n", TestDir(), err) } binFile = path.Join(TestDir(), "bin/gcsfuse") + sbinFile = path.Join(TestDir(), "sbin/mount.gcsfuse") } else { // when testInstalledPackage flag is set, gcsfuse is preinstalled on the // machine. Hence, here we are overwriting binFile to gcsfuse. binFile = "gcsfuse" + sbinFile = "mount.gcsfuse" } logFile = path.Join(TestDir(), "gcsfuse.log") mntDir = path.Join(TestDir(), "mnt") From 6099d7626b836a105b898cf22ee1eb6329fe3caf Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Thu, 20 Jul 2023 19:39:06 +0530 Subject: [PATCH 10/46] Move directory integration tests (#1230) * move dir tests * test move dir tests * test move dir tests * small fixed * fixing typo * fixed comment --- .../rename_dir_limit/move_dir_test.go | 395 ++++++++++++++++++ .../util/operations/dir_operations.go | 10 + 2 files changed, 405 insertions(+) create mode 100644 tools/integration_tests/rename_dir_limit/move_dir_test.go diff --git a/tools/integration_tests/rename_dir_limit/move_dir_test.go b/tools/integration_tests/rename_dir_limit/move_dir_test.go new file mode 100644 index 0000000000..84b878c92f --- /dev/null +++ b/tools/integration_tests/rename_dir_limit/move_dir_test.go @@ -0,0 +1,395 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy +// +//of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for move directory. +package rename_dir_limit_test + +import ( + "log" + "os" + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const SrcMoveDirectory = "srcMoveDir" +const SubSrcMoveDirectory = "subSrcMoveDir" +const SrcMoveFile = "srcMoveFile" +const SrcMoveFileContent = "This is from move file in srcMove directory.\n" +const DestMoveDirectory = "destMoveDir" +const DestNonEmptyMoveDirectory = "destNonEmptyMoveDirectory" +const SubDirInNonEmptyDestMoveDirectory = "subDestMoveDir" +const DestMoveDirectoryNotExist = "notExist" +const NumberOfObjectsInSrcMoveDirectory = 2 +const NumberOfObjectsInNonEmptyDestMoveDirectory = 2 +const DestEmptyMoveDirectory = "destEmptyMoveDirectory" +const EmptySrcDirectoryMoveTest = "emptySrcDirectoryMoveTest" +const NumberOfObjectsInEmptyDestMoveDirectory = 1 + +func checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDirPath string, t *testing.T) { + _, err := os.Stat(srcDirPath) + + if err == nil { + t.Errorf("Directory exist after move operation.") + } +} + +// Create below directory structure. +// srcMoveDir -- Dir +// srcMoveDir/srcMoveFile -- File +// srcMoveDir/subSrcMoveDir -- Dir +func createSrcDirectoryWithObjectsForMoveDirTest(dirPath string, t *testing.T) { + // Clean the mountedDirectory before running test. 
+ setup.CleanMntDir() + + // testBucket/srcMoveDir + err := os.Mkdir(dirPath, setup.FilePermission_0600) + if err != nil { + t.Errorf("Mkdir at %q: %v", dirPath, err) + return + } + + // testBucket/subSrcMoveDir + subDirPath := path.Join(dirPath, SubSrcMoveDirectory) + err = os.Mkdir(subDirPath, setup.FilePermission_0600) + if err != nil { + t.Errorf("Mkdir at %q: %v", subDirPath, err) + return + } + + // testBucket/srcMoveDir/srcMoveFile + filePath := path.Join(dirPath, SrcMoveFile) + + file, err := os.Create(filePath) + if err != nil { + t.Errorf("Error in creating file %v:", err) + } + + // Closing file at the end + defer operations.CloseFile(file) + + err = operations.WriteFile(file.Name(), SrcMoveFileContent) + if err != nil { + t.Errorf("File at %v", err) + } +} + +func checkIfMovedDirectoryHasCorrectData(destDir string, t *testing.T) { + obj, err := os.ReadDir(destDir) + if err != nil { + log.Fatal(err) + } + + // Comparing number of objects in the testBucket - 2 + if len(obj) != NumberOfObjectsInSrcMoveDirectory { + t.Errorf("The number of objects in the current directory doesn't match.") + return + } + + // Comparing first object name and type + // Name - testBucket/destMoveDir/srcMoveFile, Type - file + if obj[0].Name() != SrcMoveFile || obj[0].IsDir() == true { + t.Errorf("Object Listed for bucket directory is incorrect.") + } + + // Comparing second object name and type + // Name - testBucket/destMoveDir/srcMoveDir, Type - dir + if obj[1].Name() != SubSrcMoveDirectory || obj[1].IsDir() != true { + t.Errorf("Object Listed for bucket directory is incorrect.") + } + + destFile := path.Join(destDir, SrcMoveFile) + + content, err := operations.ReadFile(destFile) + if err != nil { + t.Errorf("ReadAll: %v", err) + } + if got, want := string(content), SrcMoveFileContent; got != want { + t.Errorf("File content %q not match %q", got, want) + } +} + +// Move SrcDirectory objects in DestDirectory +// srcMoveDir -- Dir +// srcMoveDir/srcMoveFile -- File +// 
srcMoveDir/subSrcMoveDir -- Dir + +// destMoveDir -- Dir +// destMoveDir/srcMoveFile -- File +// destMoveDir/subSrcMoveDir -- Dir +func TestMoveDirectoryInNonExistingDirectory(t *testing.T) { + srcDir := path.Join(setup.MntDir(), SrcMoveDirectory) + + createSrcDirectoryWithObjectsForMoveDirTest(srcDir, t) + + destDir := path.Join(setup.MntDir(), DestMoveDirectoryNotExist) + + err := operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + checkIfMovedDirectoryHasCorrectData(destDir, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} + +// Move SrcDirectory in DestDirectory +// srcMoveDir -- Dir +// srcMoveDir/srcMoveFile -- File +// srcMoveDir/subSrcMoveDir -- Dir + +// destMoveDir -- Dir +// destMoveDir/srcMoveDir -- Dir +// destMoveDir/srcMoveDir/srcMoveFile -- File +// destMoveDir/srcMoveDir/subSrcMoveDir -- Dir +func TestMoveDirectoryInEmptyDirectory(t *testing.T) { + srcDir := path.Join(setup.MntDir(), SrcMoveDirectory) + + createSrcDirectoryWithObjectsForMoveDirTest(srcDir, t) + + // Create below directory + // destMoveDir -- Dir + destDir := path.Join(setup.MntDir(), DestMoveDirectory) + err := os.Mkdir(destDir, setup.FilePermission_0600) + if err != nil { + t.Errorf("Error in creating directory: %v", err) + } + + err = operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + obj, err := os.ReadDir(destDir) + if err != nil { + log.Fatal(err) + } + + // Check if destMoveDirectory has the correct directory copied. 
+ // destMoveDirectory + // destMoveDirectory/srcMoveDirectory + if len(obj) != 1 || obj[0].Name() != SrcMoveDirectory || obj[0].IsDir() != true { + t.Errorf("Error in moving directory.") + return + } + + destSrc := path.Join(destDir, SrcMoveDirectory) + checkIfMovedDirectoryHasCorrectData(destSrc, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} + +func createDestNonEmptyDirectoryForMoveTest(t *testing.T) { + destDir := path.Join(setup.MntDir(), DestNonEmptyMoveDirectory) + operations.CreateDirectoryWithNFiles(0, destDir, "", t) + + destSubDir := path.Join(destDir, SubDirInNonEmptyDestMoveDirectory) + operations.CreateDirectoryWithNFiles(0, destSubDir, "", t) +} + +func TestMoveDirectoryInNonEmptyDirectory(t *testing.T) { + srcDir := path.Join(setup.MntDir(), SrcMoveDirectory) + + createSrcDirectoryWithObjectsForMoveDirTest(srcDir, t) + + // Create below directory + // destMoveDir -- Dir + destDir := path.Join(setup.MntDir(), DestNonEmptyMoveDirectory) + createDestNonEmptyDirectoryForMoveTest(t) + + err := operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + obj, err := os.ReadDir(destDir) + if err != nil { + log.Fatal(err) + } + + // Check if destMoveDirectory has the correct directory copied. 
+ // destMoveDirectory + // destMoveDirectory/srcMoveDirectory + // destMoveDirectory/subDestMoveDirectory + if len(obj) != NumberOfObjectsInNonEmptyDestMoveDirectory { + t.Errorf("The number of objects in the current directory doesn't match.") + return + } + + // destMoveDirectory/srcMoveDirectory - Dir + if obj[0].Name() != SrcMoveDirectory || obj[0].IsDir() != true { + t.Errorf("Error in moving directory.") + return + } + + // destMoveDirectory/subDirInNonEmptyDestMoveDirectory - Dir + if obj[1].Name() != SubDirInNonEmptyDestMoveDirectory || obj[1].IsDir() != true { + t.Errorf("Existing object affected.") + return + } + + destSrc := path.Join(destDir, SrcMoveDirectory) + checkIfMovedDirectoryHasCorrectData(destSrc, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} + +func checkIfMovedEmptyDirectoryHasNoData(destSrc string, t *testing.T) { + objs, err := os.ReadDir(destSrc) + if err != nil { + log.Fatal(err) + } + + if len(objs) != 0 { + t.Errorf("Directory has incorrect data.") + } +} + +// Move SrcDirectory in DestDirectory +// emptySrcDirectoryMoveTest + +// destNonEmptyMoveDirectory +// destNonEmptyMoveDirectory/subDirInNonEmptyDestMoveDirectory + +// Output +// destNonEmptyMoveDirectory +// destNonEmptyMoveDirectory/subDirInNonEmptyDestMoveDirectory +// destNonEmptyMoveDirectory/emptySrcDirectoryMoveTest +func TestMoveEmptyDirectoryInNonEmptyDirectory(t *testing.T) { + // Clean the mountedDirectory before running test. 
+ setup.CleanMntDir() + + srcDir := path.Join(setup.MntDir(), EmptySrcDirectoryMoveTest) + operations.CreateDirectoryWithNFiles(0, srcDir, "", t) + + // Create below directory + // destNonEmptyMoveDirectory -- Dir + // destNonEmptyMoveDirectory/subDirInNonEmptyDestMoveDirectory -- Dir + destDir := path.Join(setup.MntDir(), DestNonEmptyMoveDirectory) + createDestNonEmptyDirectoryForMoveTest(t) + + err := operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + objs, err := os.ReadDir(destDir) + if err != nil { + log.Fatal(err) + } + + // Check if destMoveDirectory has the correct directory copied. + // destNonEmptyMoveDirectory + // destNonEmptyMoveDirectory/emptyDirectoryMoveTest - Dir + // destNonEmptyMoveDirectory/subDestMoveDirectory - Dir + if len(objs) != NumberOfObjectsInNonEmptyDestMoveDirectory { + t.Errorf("The number of objects in the current directory doesn't match.") + return + } + + // destNonEmptyMoveDirectory/srcMoveDirectory - Dir + if objs[0].Name() != EmptySrcDirectoryMoveTest || objs[0].IsDir() != true { + t.Errorf("Error in moving directory.") + return + } + + // destNonEmptyMoveDirectory/subDirInNonEmptyDestMoveDirectory - Dir + if objs[1].Name() != SubDirInNonEmptyDestMoveDirectory || objs[1].IsDir() != true { + t.Errorf("Existing object affected.") + return + } + + movDirPath := path.Join(destDir, EmptySrcDirectoryMoveTest) + checkIfMovedEmptyDirectoryHasNoData(movDirPath, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} + +// Move SrcDirectory in DestDirectory +// emptySrcDirectoryMoveTest + +// destEmptyMoveDirectory + +// Output +// destEmptyMoveDirectory/emptySrcDirectoryMoveTest +func TestMoveEmptyDirectoryInEmptyDirectory(t *testing.T) { + // Clean the mountedDirectory before running test. 
+ setup.CleanMntDir() + + srcDir := path.Join(setup.MntDir(), EmptySrcDirectoryMoveTest) + operations.CreateDirectoryWithNFiles(0, srcDir, "", t) + + // Create below directory + // destMoveDir -- Dir + destDir := path.Join(setup.MntDir(), DestEmptyMoveDirectory) + operations.CreateDirectoryWithNFiles(0, destDir, "", t) + + err := operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + obj, err := os.ReadDir(destDir) + if err != nil { + log.Fatal(err) + } + + // Check if destMoveDirectory has the correct directory copied. + // destEmptyMoveDirectory + // destEmptyMoveDirectory/emptyDirectoryMoveTest + if len(obj) != NumberOfObjectsInEmptyDestMoveDirectory { + t.Errorf("The number of objects in the current directory doesn't match.") + return + } + + // destEmptyMoveDirectory/srcMoveDirectory - Dir + if obj[0].Name() != EmptySrcDirectoryMoveTest || obj[0].IsDir() != true { + t.Errorf("Error in moving directory.") + return + } + + movDirPath := path.Join(destDir, EmptySrcDirectoryMoveTest) + checkIfMovedEmptyDirectoryHasNoData(movDirPath, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} + +// Move SrcDirectory in DestDirectory +// emptySrcDirectoryMoveTest + +// Output +// destMoveDirectoryNotExist +func TestMoveEmptyDirectoryInNonExistingDirectory(t *testing.T) { + // Clean the mountedDirectory before running test. 
+ setup.CleanMntDir() + + srcDir := path.Join(setup.MntDir(), EmptySrcDirectoryMoveTest) + operations.CreateDirectoryWithNFiles(0, srcDir, "", t) + + // destMoveDirectoryNotExist -- Dir + destDir := path.Join(setup.MntDir(), DestMoveDirectoryNotExist) + + _, err := os.Stat(destDir) + if err == nil { + t.Errorf("destMoveDirectoryNotExist directory exist.") + } + + err = operations.MoveDir(srcDir, destDir) + if err != nil { + t.Errorf("Error in moving directory: %v", err) + } + + checkIfMovedEmptyDirectoryHasNoData(destDir, t) + checkIfSrcDirectoryGetsRemovedAfterMoveOperation(srcDir, t) +} diff --git a/tools/integration_tests/util/operations/dir_operations.go b/tools/integration_tests/util/operations/dir_operations.go index 23f3928cc3..d221fb5241 100644 --- a/tools/integration_tests/util/operations/dir_operations.go +++ b/tools/integration_tests/util/operations/dir_operations.go @@ -38,6 +38,16 @@ func CopyDir(srcDirPath string, destDirPath string) (err error) { return } +func MoveDir(srcDirPath string, destDirPath string) (err error) { + cmd := exec.Command("mv", srcDirPath, destDirPath) + + err = cmd.Run() + if err != nil { + err = fmt.Errorf("Moving dir operation is failed: %v", err) + } + return +} + func RenameDir(dirName string, newDirName string) (err error) { if _, err = os.Stat(newDirName); err == nil { err = fmt.Errorf("Renamed directory %s already present", newDirName) From 1c931028b3c3d6e8686bade31f5b9d28abaa69f4 Mon Sep 17 00:00:00 2001 From: Ashmeen Kaur <57195160+ashmeenkaur@users.noreply.github.com> Date: Fri, 21 Jul 2023 10:26:45 +0530 Subject: [PATCH 11/46] Add copyright file (#1225) * added copyright file added debian directory lintian fixes Updated main dockerfile * review comments * use gcs-fuse-maintainers@google.com email --- DEBIAN/changelog | 5 ++++ DEBIAN/control | 12 +++++++++ DEBIAN/copyright | 23 ++++++++++++++++ DEBIAN/gcsfuse-docs.docs | 3 +++ tools/package_gcsfuse_docker/Dockerfile | 35 +++++++++++++++++-------- 5 files 
changed, 67 insertions(+), 11 deletions(-) create mode 100644 DEBIAN/changelog create mode 100644 DEBIAN/control create mode 100644 DEBIAN/copyright create mode 100644 DEBIAN/gcsfuse-docs.docs diff --git a/DEBIAN/changelog b/DEBIAN/changelog new file mode 100644 index 0000000000..b3baea62a3 --- /dev/null +++ b/DEBIAN/changelog @@ -0,0 +1,5 @@ +gcsfuse (1.0.0) stable; urgency=medium + + * Package created with dpkg-deb --build + + -- GCSFuse Team Thu, 13 Jul 2023 05:37:50 +0000 diff --git a/DEBIAN/control b/DEBIAN/control new file mode 100644 index 0000000000..3ea7eb258a --- /dev/null +++ b/DEBIAN/control @@ -0,0 +1,12 @@ +Version: 1.0.0 +Source: gcsfuse +Maintainer: GCSFuse Team +Homepage: https://github.com/GoogleCloudPlatform/gcsfuse +Package: gcsfuse +Architecture: amd64 +Depends: libc6 (>= 2.3.2), fuse +Description: User-space file system for Google Cloud Storage. + GCSFuse is a FUSE adapter that allows you to mount and access Cloud Storage + buckets as local file systems, so applications can read and write objects in + your bucket using standard file system semantics. Cloud Storage FUSE is an + open source product that's supported by Google. diff --git a/DEBIAN/copyright b/DEBIAN/copyright new file mode 100644 index 0000000000..201944d993 --- /dev/null +++ b/DEBIAN/copyright @@ -0,0 +1,23 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: gcsfuse +Upstream-Contact: gcs-fuse-maintainers@google.com + +Files: * +Copyright: Copyright 2020 Google Inc. +License: Apache-2.0 + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + . + On Debian systems, the complete text of the Apache version 2.0 license + can be found in "/usr/share/common-licenses/Apache-2.0". diff --git a/DEBIAN/gcsfuse-docs.docs b/DEBIAN/gcsfuse-docs.docs new file mode 100644 index 0000000000..a2511a80a8 --- /dev/null +++ b/DEBIAN/gcsfuse-docs.docs @@ -0,0 +1,3 @@ +https://cloud.google.com/storage/docs/gcs-fuse +https://github.com/GoogleCloudPlatform/gcsfuse#readme +https://github.com/GoogleCloudPlatform/gcsfuse/tree/master/docs diff --git a/tools/package_gcsfuse_docker/Dockerfile b/tools/package_gcsfuse_docker/Dockerfile index 2eef93278a..48abaaa077 100644 --- a/tools/package_gcsfuse_docker/Dockerfile +++ b/tools/package_gcsfuse_docker/Dockerfile @@ -31,6 +31,9 @@ ENV GCSFUSE_PATH "$GOPATH/src/$GCSFUSE_REPO" RUN go get -d ${GCSFUSE_REPO} WORKDIR ${GCSFUSE_PATH} +ARG DEBEMAIL="gcs-fuse-maintainers@google.com" +ARG DEBFULLNAME="GCSFuse Team" + # Build Arg for building through a particular branch/commit. 
By default, it uses # the tag corresponding to passed GCSFUSE VERSION ARG BRANCH_NAME="v${GCSFUSE_VERSION}" @@ -39,26 +42,35 @@ RUN git checkout "${BRANCH_NAME}" # Install fpm package using bundle RUN bundle install --gemfile=${GCSFUSE_PATH}/tools/gem_dependency/Gemfile -ARG GCSFUSE_BIN="/gcsfuse" +ARG GCSFUSE_BIN="/gcsfuse_${GCSFUSE_VERSION}_amd64" +ARG GCSFUSE_DOC="${GCSFUSE_BIN}/usr/share/doc/gcsfuse" WORKDIR ${GOPATH} RUN go install ${GCSFUSE_REPO}/tools/build_gcsfuse RUN mkdir -p ${GCSFUSE_BIN} RUN build_gcsfuse ${GCSFUSE_PATH} ${GCSFUSE_BIN} ${GCSFUSE_VERSION} RUN mkdir -p ${GCSFUSE_BIN}/usr && mv ${GCSFUSE_BIN}/bin ${GCSFUSE_BIN}/usr/bin +# Creating structure for debian package as we are using 'dpkg-deb --build' to create debian package +RUN mkdir -p ${GCSFUSE_BIN}/DEBIAN && cp $GOPATH/src/$GCSFUSE_REPO/DEBIAN/* ${GCSFUSE_BIN}/DEBIAN/ +RUN mkdir -p ${GCSFUSE_DOC} +RUN mv ${GCSFUSE_BIN}/DEBIAN/copyright ${GCSFUSE_DOC} && \ + mv ${GCSFUSE_BIN}/DEBIAN/changelog ${GCSFUSE_DOC} && \ + mv ${GCSFUSE_BIN}/DEBIAN/gcsfuse-docs.docs ${GCSFUSE_DOC} +# Update gcsfuse version in changelog and control file +RUN sed -i "1s/.*/gcsfuse (${GCSFUSE_VERSION}) stable; urgency=medium/" ${GCSFUSE_DOC}/changelog && \ + sed -i "1s/.*/Version: ${GCSFUSE_VERSION}/" ${GCSFUSE_BIN}/DEBIAN/control +# Compress changelog as required by lintian +RUN gzip -9 -n ${GCSFUSE_DOC}/changelog +# Strip unneeded from binaries as required by lintian +RUN strip --strip-unneeded ${GCSFUSE_BIN}/usr/bin/gcsfuse && \ + strip --strip-unneeded ${GCSFUSE_BIN}/sbin/mount.gcsfuse + ARG GCSFUSE_PKG="/packages" RUN mkdir -p ${GCSFUSE_PKG} WORKDIR ${GCSFUSE_PKG} -RUN fpm \ - -s dir \ - -t deb \ - -n gcsfuse \ - -C ${GCSFUSE_BIN} \ - -v ${GCSFUSE_VERSION} \ - -d fuse \ - --vendor "" \ - --url "https://$GCSFUSE_REPO" \ - --description "A user-space file system for Google Cloud Storage." +# Build the package +RUN dpkg-deb --build ${GCSFUSE_BIN} +RUN mv ${GCSFUSE_BIN}.deb . 
RUN fpm \ -s dir \ -t rpm \ @@ -67,6 +79,7 @@ RUN fpm \ -v ${GCSFUSE_VERSION} \ -d fuse \ --rpm-digest sha256 \ + --license Apache-2.0 \ --vendor "" \ --url "https://$GCSFUSE_REPO" \ --description "A user-space file system for Google Cloud Storage." From a30308e0c1a5305066e3c5bed1d537175283614e Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Sat, 22 Jul 2023 12:51:20 +0530 Subject: [PATCH 12/46] Kokoro perf test fix - Integration tests (#1239) * testing * testing * testing * adding comment for a reason * creating function to copy dir with root permission * common function * fix * testing * revert changes * revert changes * adding remove package * adding remove package --- .../list_large_dir/list_large_dir_test.go | 2 ++ .../operations/operations_test.go | 2 ++ .../readonly/readonly_test.go | 2 ++ .../rename_dir_limit/rename_dir_limit_test.go | 2 ++ .../util/operations/dir_operations.go | 20 ++++++++++++++++--- .../implicit_and_explicit_dir_setup.go | 2 ++ tools/integration_tests/util/setup/setup.go | 19 ++++++++++++++++++ 7 files changed, 46 insertions(+), 3 deletions(-) diff --git a/tools/integration_tests/list_large_dir/list_large_dir_test.go b/tools/integration_tests/list_large_dir/list_large_dir_test.go index 3a5a24c58a..0189e429ac 100644 --- a/tools/integration_tests/list_large_dir/list_large_dir_test.go +++ b/tools/integration_tests/list_large_dir/list_large_dir_test.go @@ -49,5 +49,7 @@ func TestMain(m *testing.M) { successCode := static_mounting.RunTests(flags, m) + setup.RemoveBinFileCopiedForTesting() + os.Exit(successCode) } diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index 6d37b19c13..f177e37c34 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -118,5 +118,7 @@ func TestMain(m *testing.M) { successCode = 
creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectAdmin", m) } + setup.RemoveBinFileCopiedForTesting() + os.Exit(successCode) } diff --git a/tools/integration_tests/readonly/readonly_test.go b/tools/integration_tests/readonly/readonly_test.go index 0a9003781a..727e93c15e 100644 --- a/tools/integration_tests/readonly/readonly_test.go +++ b/tools/integration_tests/readonly/readonly_test.go @@ -93,5 +93,7 @@ func TestMain(m *testing.M) { // Delete objects from bucket after testing. setup.RunScriptForTestData("testdata/delete_objects.sh", setup.TestBucket()) + setup.RemoveBinFileCopiedForTesting() + os.Exit(successCode) } diff --git a/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go b/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go index 28c5b19399..83a2ad1bb7 100644 --- a/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go +++ b/tools/integration_tests/rename_dir_limit/rename_dir_limit_test.go @@ -64,5 +64,7 @@ func TestMain(m *testing.M) { successCode = persistent_mounting.RunTests(flags, m) } + setup.RemoveBinFileCopiedForTesting() + os.Exit(successCode) } diff --git a/tools/integration_tests/util/operations/dir_operations.go b/tools/integration_tests/util/operations/dir_operations.go index d221fb5241..346a6470f8 100644 --- a/tools/integration_tests/util/operations/dir_operations.go +++ b/tools/integration_tests/util/operations/dir_operations.go @@ -28,9 +28,7 @@ import ( const FilePermission_0600 = 0600 const FilePermission_0777 = 0777 -func CopyDir(srcDirPath string, destDirPath string) (err error) { - cmd := exec.Command("cp", "--recursive", srcDirPath, destDirPath) - +func executeCommandForCopyOperation(cmd *exec.Cmd) (err error) { err = cmd.Run() if err != nil { err = fmt.Errorf("Copying dir operation is failed: %v", err) @@ -38,6 +36,22 @@ func CopyDir(srcDirPath string, destDirPath string) (err error) { return } +func CopyDir(srcDirPath string, destDirPath string) (err error) 
{ + cmd := exec.Command("cp", "--recursive", srcDirPath, destDirPath) + + err = executeCommandForCopyOperation(cmd) + + return +} + +func CopyDirWithRootPermission(srcDirPath string, destDirPath string) (err error) { + cmd := exec.Command("sudo", "cp", "--recursive", srcDirPath, destDirPath) + + err = executeCommandForCopyOperation(cmd) + + return +} + func MoveDir(srcDirPath string, destDirPath string) (err error) { cmd := exec.Command("mv", srcDirPath, destDirPath) diff --git a/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go b/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go index bec0d1d538..34e8664c42 100644 --- a/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go +++ b/tools/integration_tests/util/setup/implicit_and_explicit_dir_setup/implicit_and_explicit_dir_setup.go @@ -63,6 +63,8 @@ func RunTestsForImplicitDirAndExplicitDir(flags [][]string, m *testing.M) { successCode = persistent_mounting.RunTests(flags, m) } + setup.RemoveBinFileCopiedForTesting() + os.Exit(successCode) } diff --git a/tools/integration_tests/util/setup/setup.go b/tools/integration_tests/util/setup/setup.go index 462a051484..0d0310339f 100644 --- a/tools/integration_tests/util/setup/setup.go +++ b/tools/integration_tests/util/setup/setup.go @@ -148,6 +148,14 @@ func SetUpTestDir() error { } binFile = path.Join(TestDir(), "bin/gcsfuse") sbinFile = path.Join(TestDir(), "sbin/mount.gcsfuse") + + // mount.gcsfuse will find gcsfuse executable in mentioned locations. 
+ // https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/tools/mount_gcsfuse/find.go#L59 + // Copying the executable to /usr/local/bin + err := operations.CopyDirWithRootPermission(binFile, "/usr/local/bin") + if err != nil { + log.Printf("Error in copying bin file:%v", err) + } } else { // when testInstalledPackage flag is set, gcsfuse is preinstalled on the // machine. Hence, here we are overwriting binFile to gcsfuse. @@ -164,6 +172,17 @@ func SetUpTestDir() error { return nil } +// Removing bin file after testing. +func RemoveBinFileCopiedForTesting() { + if !TestInstalledPackage() { + cmd := exec.Command("sudo", "rm", "/usr/local/bin/gcsfuse") + err := cmd.Run() + if err != nil { + log.Printf("Error in removing file:%v", err) + } + } +} + func UnMount() error { fusermount, err := exec.LookPath("fusermount") if err != nil { From 18132348ee4cc4eaccd1feda09e4b6e34e761c77 Mon Sep 17 00:00:00 2001 From: Ashmeen Kaur <57195160+ashmeenkaur@users.noreply.github.com> Date: Mon, 24 Jul 2023 09:41:50 +0530 Subject: [PATCH 13/46] minor changes to test script (#1238) --- tools/cd_scripts/e2e_test.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/cd_scripts/e2e_test.sh b/tools/cd_scripts/e2e_test.sh index ebec1237af..c9ff6cb244 100644 --- a/tools/cd_scripts/e2e_test.sh +++ b/tools/cd_scripts/e2e_test.sh @@ -72,7 +72,7 @@ then else # For rhel and centos sudo yum makecache - sudo yum check-update + sudo yum -y update #Install fuse sudo yum -y install fuse @@ -107,9 +107,10 @@ cd gcsfuse git checkout $(sed -n 2p ~/details.txt) |& tee -a ~/logs.txt #run tests with testbucket flag +set +e GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=$(sed -n 3p ~/details.txt) --testInstalledPackage --timeout=60m &>> ~/logs.txt -if grep -q FAIL ~/logs.txt; +if [ $? 
-ne 0 ]; then echo "Test failures detected" &>> ~/logs.txt else From 74aa5f26a2f73c237abd572aea29c8ff408e1350 Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Mon, 24 Jul 2023 11:57:50 +0530 Subject: [PATCH 14/46] Sequentially read large file (#1233) * testing sequential read tests * testing sequential read tests * testing sequential read tests * testing sequential read tests 100Mb * adding it in other separate package as it is taking more time * adding comments and fixing lint * adding comments * adding comments * adding comments * adding commands in script * formating * adding commands in script * fixing comments * small fix * removing unnecessary commands * small fix * adding remove copied bin dir function call * formating * updating comment --- .../read_large_files/read_large_files_test.go | 50 ++++++++++++++ .../read_one_large_file_sequentially_test.go | 66 +++++++++++++++++++ .../write_content_of_fix_size_in_file.sh | 20 ++++++ .../run_tests_mounted_directory.sh | 10 ++- .../util/operations/file_operations.go | 51 ++++++++++++++ 5 files changed, 194 insertions(+), 3 deletions(-) create mode 100644 tools/integration_tests/read_large_files/read_large_files_test.go create mode 100644 tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go create mode 100644 tools/integration_tests/read_large_files/testdata/write_content_of_fix_size_in_file.sh diff --git a/tools/integration_tests/read_large_files/read_large_files_test.go b/tools/integration_tests/read_large_files/read_large_files_test.go new file mode 100644 index 0000000000..73eb532990 --- /dev/null +++ b/tools/integration_tests/read_large_files/read_large_files_test.go @@ -0,0 +1,50 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for read large files sequentially and randomly. +package read_large_files + +import ( + "log" + "os" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +func TestMain(m *testing.M) { + setup.ParseSetUpFlags() + + flags := [][]string{{"--implicit-dirs"}, {"--enable-storage-client-library=false", "--implicit-dirs"}} + + setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() + + if setup.TestBucket() != "" && setup.MountedDirectory() != "" { + log.Print("Both --testbucket and --mountedDirectory can't be specified at the same time.") + os.Exit(1) + } + + // Run tests for mountedDirectory only if --mountedDirectory flag is set. + setup.RunTestsForMountedDirectoryFlag(m) + + // Run tests for testBucket + setup.SetUpTestDirForTestBucketFlag() + + successCode := static_mounting.RunTests(flags, m) + + setup.RemoveBinFileCopiedForTesting() + + os.Exit(successCode) +} diff --git a/tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go b/tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go new file mode 100644 index 0000000000..a75080ab9a --- /dev/null +++ b/tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go @@ -0,0 +1,66 @@ +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package read_large_files + +import ( + "bytes" + "os" + "path" + "strconv" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const FiveHundredMB = 500 * 1024 * 1024 +const FiveHundredMBFile = "fiveHundredMBFile.txt" +const chunkSize = 200 * 1024 * 1024 + +func TestReadLargeFileSequentially(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + // Create file of 500 MB with random data in local disk. + fileInLocalDisk := path.Join(os.Getenv("HOME"), FiveHundredMBFile) + setup.RunScriptForTestData("testdata/write_content_of_fix_size_in_file.sh", fileInLocalDisk, strconv.Itoa(FiveHundredMB)) + + // Copy the file in mounted directory. + file := path.Join(setup.MntDir(), FiveHundredMBFile) + err := operations.CopyFile(fileInLocalDisk, file) + if err != nil { + t.Errorf("Error in copying file:%v", err) + } + + // Sequentially read the data from file. + content, err := operations.ReadFileSequentially(file, chunkSize) + if err != nil { + t.Errorf("Error in reading file: %v", err) + } + + // Read actual content from file located in local disk. + actualContent, err := operations.ReadFile(fileInLocalDisk) + if err != nil { + t.Errorf("Error in reading file: %v", err) + } + + // Compare actual content and expect content. 
FILE_PATH=$1
FILE_SIZE=$2
TEST_BUCKET=$3 # unused; kept for call-site compatibility

# Write FILE_SIZE bytes of random data into FILE_PATH.
# Bug fix: the original `head -c $FILE_SIZE $FILE_PATH` tried to READ the first
# FILE_SIZE bytes *from* $FILE_PATH (which does not exist yet) and never created
# the file. Random data must come from /dev/urandom, redirected into the file.
head -c "$FILE_SIZE" /dev/urandom > "$FILE_PATH"
-gcsfuse --only-dir testDir --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/list_large_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir +# Run integration tests for read_large_files directory with static mounting +gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/read_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + +gcsfuse --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/read_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index be34679ca6..f1c4f63fcb 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -141,3 +141,54 @@ func CloseFile(file *os.File) { log.Printf("error in closing: %v", err) } } + +func RemoveFile(filePath string) { + err := os.Remove(filePath) + if err != nil { + log.Printf("Error in removing file:%v", err) + } +} + +func ReadFileSequentially(filePath string, chunkSize int64) (content []byte, err error) { + chunk := make([]byte, chunkSize) + var offset int64 = 0 + + file, err := os.OpenFile(filePath, os.O_RDONLY|syscall.O_DIRECT, FilePermission_0600) + if err != nil { + log.Printf("Error in opening file:%v", err) + } + + // Closing the file at the end. + defer CloseFile(file) + + for err != io.EOF { + var numberOfBytes int + + // Reading 200 MB chunk sequentially from the file. + numberOfBytes, err = file.ReadAt(chunk, offset) + // If the file reaches the end, write the remaining content in the buffer and return. 
+ if err == io.EOF { + + for i := offset; i < offset+int64(numberOfBytes); i++ { + // Adding remaining bytes. + content = append(content, chunk[i-offset]) + } + err = nil + return + } + if err != nil { + return + } + // Write bytes in the buffer to compare with original content. + content = append(content, chunk...) + + // The number of bytes read is not equal to 200MB. + if int64(numberOfBytes) != chunkSize { + log.Printf("Incorrect number of bytes read from file.") + } + + // The offset will shift to read the next chunk. + offset = offset + chunkSize + } + return +} From eb86cd1e6d40f208f10a959f84b53918f0727d7f Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Tue, 25 Jul 2023 13:58:29 +0530 Subject: [PATCH 15/46] Creds test error fixing Integration tests (#1241) * testing * testing * testing * testing * creating service account if does not exist * removing unnecessary params * formatting * formatting * adding relavant comments * updating path * fixing comment * adding command for removing file * small fix in script to update path of file --- .../util/creds_tests/creds.go | 6 +++--- .../creds_tests/testdata/create_key_file.sh | 2 +- .../testdata/create_service_account.sh | 15 +++++++++------ ...creds.sh => revoke_permission_and_creds.sh} | 18 ++++++++++++++++-- 4 files changed, 29 insertions(+), 12 deletions(-) rename tools/integration_tests/util/creds_tests/testdata/{revoke_permission_and_delete_service_account_and_creds.sh => revoke_permission_and_creds.sh} (58%) diff --git a/tools/integration_tests/util/creds_tests/creds.go b/tools/integration_tests/util/creds_tests/creds.go index f1106f7e44..3d98ea4784 100644 --- a/tools/integration_tests/util/creds_tests/creds.go +++ b/tools/integration_tests/util/creds_tests/creds.go @@ -46,7 +46,7 @@ func RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(testFlagSet [][] serviceAccount := NameOfServiceAccount + "@" + id + ".iam.gserviceaccount.com" // Create 
service account - setup.RunScriptForTestData("../util/creds_tests/testdata/create_service_account.sh", NameOfServiceAccount, serviceAccount) + setup.RunScriptForTestData("../util/creds_tests/testdata/create_service_account.sh", NameOfServiceAccount) key_file_path := path.Join(os.Getenv("HOME"), "creds.json") @@ -56,8 +56,8 @@ func RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(testFlagSet [][] // Provide permission to service account for testing. setPermission(permission, serviceAccount) - // Revoke the permission and delete creds and service account after testing. - defer setup.RunScriptForTestData("../util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh", serviceAccount, key_file_path) + // Revoke the permission and delete creds after testing. + defer setup.RunScriptForTestData("../util/creds_tests/testdata/revoke_permission_and_creds.sh", serviceAccount, key_file_path) // Without –key-file flag and GOOGLE_APPLICATION_CREDENTIALS // This case will not get covered as gcsfuse internally authenticates from a metadata server on GCE VM. 
diff --git a/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh b/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh index a7888bce19..62ac78623d 100644 --- a/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh +++ b/tools/integration_tests/util/creds_tests/testdata/create_key_file.sh @@ -14,4 +14,4 @@ KEY_FILE_PATH=$1 SERVICE_ACCOUNT=$2 -gcloud iam service-accounts keys create $KEY_FILE_PATH --iam-account=$SERVICE_ACCOUNT +gcloud iam service-accounts keys create $KEY_FILE_PATH --iam-account=$SERVICE_ACCOUNT 2>&1 | tee ~/key_id.txt diff --git a/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh b/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh index 5b59979f2b..5b8b961830 100644 --- a/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh +++ b/tools/integration_tests/util/creds_tests/testdata/create_service_account.sh @@ -11,12 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# Create service account if does not exist. SERVICE_ACCOUNT=$1 -SERVICE_ACCOUNT_ID=$2 -# Delete service account if already exist. -gcloud iam service-accounts delete $SERVICE_ACCOUNT_ID -if [ $? -eq 1 ]; then - echo "Service account does not exist." + +gcloud iam service-accounts create $SERVICE_ACCOUNT --description="$SERVICE_ACCOUNT" --display-name="$SERVICE_ACCOUNT" 2>&1 | tee ~/output.txt +if grep "already exists within project" ~/output.txt; then + echo "Service account exist." 
+ rm ~/output.txt +else + rm ~/output.txt + exit 1 fi -gcloud iam service-accounts create $SERVICE_ACCOUNT --description="$SERVICE_ACCOUNT" --display-name="$SERVICE_ACCOUNT" diff --git a/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh b/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_creds.sh similarity index 58% rename from tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh rename to tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_creds.sh index d43378cd29..2f84fa0cef 100644 --- a/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_delete_service_account_and_creds.sh +++ b/tools/integration_tests/util/creds_tests/testdata/revoke_permission_and_creds.sh @@ -12,9 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Delete service account after testing +# Delete key file after testing SERVICE_ACCOUNT=$1 KEY_FILE=$2 + gcloud auth revoke $SERVICE_ACCOUNT -gcloud iam service-accounts delete $SERVICE_ACCOUNT +# Crete key file output +# e.g. Created key [KEY_ID] of type [json] as [key_file_path] for [service_account] +# Capturing third word from the file to get key-id +# e.g. Capture [KEY_ID] +if [ ! -f "~/key_id.txt" ]; then + echo "file does not exist" +fi +KEY_ID=$(cat ~/key_id.txt | cut -d " " -f 3) +# removing braces +# e.g. capture KEY_ID +KEY_ID=${KEY_ID:1:40} + +gcloud iam service-accounts keys delete $KEY_ID --iam-account=$SERVICE_ACCOUNT +rm ~/key_id.txt rm $KEY_FILE From 5e9612fd57a39e37d5bb214870022e87c6e24094 Mon Sep 17 00:00:00 2001 From: Nitin Garg Date: Thu, 22 Jun 2023 18:04:38 +0530 Subject: [PATCH 16/46] Upgrade to go version 1.20.5 (#1196) Brief list of changes * Changes gcsfuse binary to be statically-linked, removes dependency on libc. 
* Prefixed/exported CGO_ENABLED=0 for every go build|run|test for every gcsfuse program * Replaced all golang:1.20.4 based base images to corresponding golang:1.20.5 based images in all docker files and all docker run commands * Updated golang 1.20.4 dependencies to golang 1.20.5 * Updated troubleshooting guide * Replaced os.user.Current() with os.Getuid() and os.Getgid() to remove dependency on glibc * Added unit tests for perms.go * Removed libc dependency from DEBIAN/CONTROL --- .github/workflows/ci.yml | 4 +- DEBIAN/control | 2 +- Dockerfile | 2 +- docs/troubleshooting.md | 2 +- internal/perms/perms.go | 35 ++++++-------- internal/perms/perms_test.go | 48 +++++++++++++++++++ .../scripts/compare_fuse_types_using_fio.py | 2 +- .../continuous_test/gcp_ubuntu/build.sh | 6 +-- .../ml_tests/pytorch/dino/setup_container.sh | 5 +- .../ml_tests/run_image_recognition_models.py | 2 +- perfmetrics/scripts/ml_tests/setup.sh | 2 +- .../resnet/setup_scripts/setup_container.sh | 4 +- .../presubmit_test/pr_perf_test/build.sh | 10 ++-- tools/build_gcsfuse/main.go | 1 + tools/cd_scripts/e2e_test.sh | 2 +- tools/containerize_gcsfuse_docker/Dockerfile | 2 +- .../run_tests_mounted_directory.sh | 1 + tools/package_gcsfuse_docker/Dockerfile | 2 +- 18 files changed, 88 insertions(+), 44 deletions(-) create mode 100644 internal/perms/perms_test.go diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd15dcadea..3b70fc7263 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: run: sudo apt-get update && sudo apt-get install -y fuse3 libfuse-dev - name: Build run: | - go build ./... + CGO_ENABLED=0 go build ./... go install ./tools/build_gcsfuse build_gcsfuse . /tmp ${GITHUB_SHA} - name: Test - run: go test -p 1 -count 1 -v -cover ./... + run: CGO_ENABLED=0 go test -p 1 -count 1 -v -cover ./... 
lint: name: Lint runs-on: ubuntu-20.04 diff --git a/DEBIAN/control b/DEBIAN/control index 3ea7eb258a..2db8d19a36 100644 --- a/DEBIAN/control +++ b/DEBIAN/control @@ -4,7 +4,7 @@ Maintainer: GCSFuse Team Homepage: https://github.com/GoogleCloudPlatform/gcsfuse Package: gcsfuse Architecture: amd64 -Depends: libc6 (>= 2.3.2), fuse +Depends: fuse Description: User-space file system for Google Cloud Storage. GCSFuse is a FUSE adapter that allows you to mount and access Cloud Storage buckets as local file systems, so applications can read and write objects in diff --git a/Dockerfile b/Dockerfile index cb12d822db..62dd0aee4c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ # Mount the gcsfuse to /mnt/gcs: # > docker run --privileged --device /fuse -v /mnt/gcs:/gcs:rw,rshared gcsfuse -FROM golang:1.20.4-alpine as builder +FROM golang:1.20.5-alpine as builder RUN apk add git diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 4015dd11a2..eebc50746a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -6,7 +6,7 @@ This page enumerates some common user facing issues around GCSFuse and also disc | Generic Mounting Issue | Most of the common mount point issues are around permissions on both local mount point and the Cloud Storage bucket. It is highly recommended to retry with --foreground --debug_fuse --debug_fs --debug_gcs --debug_http flags which would provide much more detailed logs to understand the errors better and possibly provide a solution. | | Mount successful but files not visible | Try mounting the gcsfuse with --implicit-dir flag. Read the [semantics](https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/semantics.md) to know the reasoning. | | Mount failed with fusermount3 exit status 1 | It comes when the bucket is already mounted in a folder and we try to mount it again. You need to unmount first and then remount. 
| -| Mount failed with error: Current requires cgo or $USER set in environment | It comes when we try mounting by building the gcsfuse codebase. To fix this, build the gcsfuse package by enabling the CGO_ENABLED flag in the go env and then mount back.
  1. Check the current value using - ```go env``` command.
  2. If it is unset, set this using - ```export CGO_ENABLED=1``` command.
| +| version `GLIBC_x.yz` not found | GCSFuse should not be linking to glibc. Please either `export CGO_ENABLED=0` in your environment or prefix `CGO_ENABLED=0` to any `go build\|run\|test` commands that you're invoking. | | Mount get stuck with error: DefaultTokenSource: google: could not find default credentials | Run ```gcloud auth application-default login``` command to fetch default credentials to the VM. This will fetch the credentials to the following locations:
  1. For linux - $HOME/.config/gcloud/application_default_credentials.json
  2. For windows - %APPDATA%/gcloud/applicateion_default_credentials.json
| | Input/Output Error | It’s a generic error, but the most probable culprit is the bucket not having the right permission for Cloud Storage FUSE to operate on. Ref - [here](https://stackoverflow.com/questions/36382704/gcsfuse-input-output-error) | | Generic NO_PUBKEY Error - while installing Cloud Storage FUSE on ubuntu 22.04 | It happens while running - ```sudo apt-get update``` - working on installing Cloud Storage FUSE. You just have to add the pubkey you get in the error using the below command: ```sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys ``` And then try running ```sudo apt-get update``` | diff --git a/internal/perms/perms.go b/internal/perms/perms.go index 42834a2c9b..c2386139f4 100644 --- a/internal/perms/perms.go +++ b/internal/perms/perms.go @@ -17,35 +17,28 @@ package perms import ( "fmt" - "os/user" - "strconv" + "os" ) // MyUserAndGroup returns the UID and GID of this process. -func MyUserAndGroup() (uid uint32, gid uint32, err error) { - // Ask for the current user. - user, err := user.Current() - if err != nil { - err = fmt.Errorf("Fetching current user: %w", err) - return - } +func MyUserAndGroup() (uid, gid uint32, err error) { + signed_uid := os.Getuid() + signed_gid := os.Getgid() - // Parse UID. - uid64, err := strconv.ParseUint(user.Uid, 10, 32) - if err != nil { - err = fmt.Errorf("Parsing UID (%s): %w", user.Uid, err) - return - } + // Not sure in what scenarios uid/gid could be returned as negative. The only + // documented scenario at pkg.go.dev/os#Getuid is windows OS. + if signed_gid < 0 || signed_uid < 0 { + err = fmt.Errorf("failed to get uid/gid. UID = %d, GID = %d", signed_uid, signed_gid) + + // An untested improvement idea to fallback here is to invoke os.current.User() + // and use its partial output even when os.current.User() returned error, as + // the partial output would still be useful. - // Parse GID. 
- gid64, err := strconv.ParseUint(user.Gid, 10, 32) - if err != nil { - err = fmt.Errorf("Parsing GID (%s): %w", user.Gid, err) return } - uid = uint32(uid64) - gid = uint32(gid64) + uid = uint32(signed_uid) + gid = uint32(signed_gid) return } diff --git a/internal/perms/perms_test.go b/internal/perms/perms_test.go new file mode 100644 index 0000000000..217fbd9cb0 --- /dev/null +++ b/internal/perms/perms_test.go @@ -0,0 +1,48 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// System permissions-related code unit tests. +package perms_test + +import ( + "testing" + + "github.com/googlecloudplatform/gcsfuse/internal/perms" + . 
"github.com/jacobsa/ogletest" +) + +func TestPerms(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type PermsTest struct { +} + +func init() { RegisterTestSuite(&PermsTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *PermsTest) MyUserAndGroupNoError() { + uid, gid, err := perms.MyUserAndGroup() + ExpectEq(err, nil) + + unexpected_id_signed := -1 + unexpected_id := uint32(unexpected_id_signed) + ExpectNe(uid, unexpected_id) + ExpectNe(gid, unexpected_id) +} diff --git a/perfmetrics/scripts/compare_fuse_types_using_fio.py b/perfmetrics/scripts/compare_fuse_types_using_fio.py index 513baa6b31..bb1ad1d7a2 100644 --- a/perfmetrics/scripts/compare_fuse_types_using_fio.py +++ b/perfmetrics/scripts/compare_fuse_types_using_fio.py @@ -61,7 +61,7 @@ def _install_gcsfuse_source(gcs_bucket, gcsfuse_flags) -> None: os.system(f'''git clone {GCSFUSE_REPO} mkdir gcs cd gcsfuse - go run . {gcsfuse_flags} {gcs_bucket} ../gcs + CGO_ENABLED=0 go run . {gcsfuse_flags} {gcs_bucket} ../gcs cd .. 
''') diff --git a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh index 3886b08a20..324c9207a9 100644 --- a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh +++ b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh @@ -6,8 +6,8 @@ echo "Installing git" sudo apt-get install git echo "Installing pip" sudo apt-get install pip -y -echo "Installing go-lang 1.20.4" -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.4.linux-amd64.tar.gz +echo "Installing go-lang 1.20.5" +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin echo "Installing fio" @@ -28,7 +28,7 @@ commitId=$(git log --before='yesterday 23:59:59' --max-count=1 --pretty=%H) git checkout $commitId echo "Executing integration tests" -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test # Checkout back to master branch to use latest CI test scripts in master. git checkout master diff --git a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh index 1af6bd1f55..be82b16994 100644 --- a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh @@ -1,13 +1,14 @@ #!/bin/bash -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.4.linux-amd64.tar.gz +# Install golang +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz rm -rf /usr/local/go && tar -C /usr/local -xzf go_tar.tar.gz export PATH=$PATH:/usr/local/go/bin # Clone and build the gcsfuse master branch. git clone https://github.com/GoogleCloudPlatform/gcsfuse.git cd gcsfuse -go build . 
+CGO_ENABLED=0 go build . cd - # Create a directory for gcsfuse logs diff --git a/perfmetrics/scripts/ml_tests/run_image_recognition_models.py b/perfmetrics/scripts/ml_tests/run_image_recognition_models.py index 9ceaf7e5ca..34675cccc3 100644 --- a/perfmetrics/scripts/ml_tests/run_image_recognition_models.py +++ b/perfmetrics/scripts/ml_tests/run_image_recognition_models.py @@ -105,7 +105,7 @@ def _run_from_source(gcs_bucket, data_directory_name) -> None: os.system(f'''mkdir {data_directory_name} git clone {GITHUB_REPO} cd gcsfuse - go run . --implicit-dirs --stat-cache-capacity 1000000 --max-conns-per-host 100 --stackdriver-export-interval=60s {gcs_bucket} ../{data_directory_name} + CGO_ENABLED=0 go run . --implicit-dirs --stat-cache-capacity 1000000 --max-conns-per-host 100 --stackdriver-export-interval=60s {gcs_bucket} ../{data_directory_name} cd .. ''') diff --git a/perfmetrics/scripts/ml_tests/setup.sh b/perfmetrics/scripts/ml_tests/setup.sh index f0990cdd63..0f0987f6ee 100644 --- a/perfmetrics/scripts/ml_tests/setup.sh +++ b/perfmetrics/scripts/ml_tests/setup.sh @@ -4,7 +4,7 @@ # >> source setup.sh # Go version to be installed. -GO_VERSION=go1.20.4.linux-amd64.tar.gz +GO_VERSION=go1.20.5.linux-amd64.tar.gz # This function will install the given module/dependency if it's not alredy # installed. 
diff --git a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh index 1d6ec891cc..f6299d6b61 100644 --- a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh @@ -5,14 +5,14 @@ # and epochs functionality, and runs the model # Install go lang -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.4.linux-amd64.tar.gz +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin # Clone the repo and build gcsfuse git clone "https://github.com/GoogleCloudPlatform/gcsfuse.git" cd gcsfuse -go build . +CGO_ENABLED=0 go build . cd - # Mount the bucket and run in background so that docker doesn't keep running after resnet_runner.py fails diff --git a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh index 49072ef6ef..9720b1b434 100644 --- a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh +++ b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh @@ -22,8 +22,8 @@ pip install google-cloud pip install google-cloud-vision pip install google-api-python-client pip install prettytable -echo Installing go-lang 1.20.4 -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.4.linux-amd64.tar.gz +echo Installing go-lang 1.20.5 +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin echo Installing fio @@ -38,7 +38,7 @@ GCSFUSE_FLAGS="--implicit-dirs --max-conns-per-host 100" BUCKET_NAME=presubmit-perf-tests MOUNT_POINT=gcs # The VM will itself exit if the gcsfuse mount fails. -go run . $GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT +CGO_ENABLED=0 go run . 
$GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT touch result.txt # Running FIO test chmod +x perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh @@ -53,13 +53,13 @@ echo checkout PR branch git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER # Executing integration tests -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test # Executing perf tests echo Mounting gcs bucket from pr branch mkdir -p gcs # The VM will itself exit if the gcsfuse mount fails. -go run . $GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT +CGO_ENABLED=0 go run . $GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT # Running FIO test chmod +x perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh ./perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh diff --git a/tools/build_gcsfuse/main.go b/tools/build_gcsfuse/main.go index bb52ea1191..b1a4022481 100644 --- a/tools/build_gcsfuse/main.go +++ b/tools/build_gcsfuse/main.go @@ -158,6 +158,7 @@ func buildBinaries(dstDir, srcDir, version string, buildArgs []string) (err erro fmt.Sprintf("GOROOT=%s", runtime.GOROOT()), fmt.Sprintf("GOPATH=%s", gopath), fmt.Sprintf("GOCACHE=%s", gocache), + "CGO_ENABLED=0", } // Build. diff --git a/tools/cd_scripts/e2e_test.sh b/tools/cd_scripts/e2e_test.sh index c9ff6cb244..18184c7dc0 100644 --- a/tools/cd_scripts/e2e_test.sh +++ b/tools/cd_scripts/e2e_test.sh @@ -108,7 +108,7 @@ git checkout $(sed -n 2p ~/details.txt) |& tee -a ~/logs.txt #run tests with testbucket flag set +e -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=$(sed -n 3p ~/details.txt) --testInstalledPackage --timeout=60m &>> ~/logs.txt +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... 
-p 1 --integrationTest -v --testbucket=$(sed -n 3p ~/details.txt) --testInstalledPackage --timeout=60m &>> ~/logs.txt if [ $? -ne 0 ]; then diff --git a/tools/containerize_gcsfuse_docker/Dockerfile b/tools/containerize_gcsfuse_docker/Dockerfile index a36d394c97..b638978d4b 100644 --- a/tools/containerize_gcsfuse_docker/Dockerfile +++ b/tools/containerize_gcsfuse_docker/Dockerfile @@ -34,7 +34,7 @@ ARG OS_VERSION ARG OS_NAME # Image with gcsfuse installed and its package (.deb) -FROM golang:1.20.4 as gcsfuse-package +FROM golang:1.20.5 as gcsfuse-package RUN apt-get update -qq && apt-get install -y ruby ruby-dev rubygems build-essential rpm fuse && gem install --no-document bundler diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index 9e07509562..5cabf79eee 100644 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -21,6 +21,7 @@ TEST_BUCKET_NAME=$1 MOUNT_DIR=$2 +export CGO_ENABLED=0 # Run integration tests for operations directory with static mounting gcsfuse --enable-storage-client-library=true --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR diff --git a/tools/package_gcsfuse_docker/Dockerfile b/tools/package_gcsfuse_docker/Dockerfile index 48abaaa077..623984b49a 100644 --- a/tools/package_gcsfuse_docker/Dockerfile +++ b/tools/package_gcsfuse_docker/Dockerfile @@ -17,7 +17,7 @@ # Copy the gcsfuse packages to the host: # > docker run -it -v /tmp:/output gcsfuse-release cp -r /packages /output -FROM golang:1.20.4 as builder +FROM golang:1.20.5 as builder RUN apt-get update -qq && apt-get install -y ruby ruby-dev rubygems build-essential rpm && gem install --no-document bundler From 2d7fc6e5edd77f00492ab9b764bc86d5bd65ecc9 Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:59:59 +0530 Subject: [PATCH 17/46] Dynamic mounting integration tests 
(#1242) * testing * testing * adding dynamic mounting testing * testing * testing * separating unmounting * small fix * unblocking remaning tests * small fix(updating echo message) * fixing lint tests and adding comments * updating file path * small fix * testing * testing * testing * unblocking code for testing * testing * increasing time as taking more than 10m to run * adding random suffix at the end of bucket name as with fixed bucket it can fail when multiple instance run inetgration tests at a time * assigning it to variable * small fix * small fix * updating const var name * testing * testing * unblocking code * updating comment --- .../continuous_test/gcp_ubuntu/build.sh | 2 +- .../presubmit_test/pr_perf_test/build.sh | 2 +- .../operations/operations_test.go | 5 + .../dynamic_mounting/dynamic_mounting.go | 120 ++++++++++++++++++ .../testdata/create_bucket.sh | 28 ++++ .../testdata/delete_bucket.sh | 18 +++ tools/integration_tests/util/setup/setup.go | 19 +-- 7 files changed, 184 insertions(+), 10 deletions(-) create mode 100644 tools/integration_tests/util/mounting/dynamic_mounting/dynamic_mounting.go create mode 100644 tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh create mode 100644 tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh diff --git a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh index 324c9207a9..cb2d3d38ed 100644 --- a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh +++ b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh @@ -28,7 +28,7 @@ commitId=$(git log --before='yesterday 23:59:59' --max-count=1 --pretty=%H) git checkout $commitId echo "Executing integration tests" -GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... 
-p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 15m # Checkout back to master branch to use latest CI test scripts in master. git checkout master diff --git a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh index 9720b1b434..55e998a7fa 100644 --- a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh +++ b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh @@ -53,7 +53,7 @@ echo checkout PR branch git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER # Executing integration tests -GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 15m # Executing perf tests echo Mounting gcs bucket from pr branch diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index f177e37c34..11df5ee42e 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/creds_tests" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/dynamic_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/only_dir_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/persistent_mounting" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" @@ -113,6 +114,10 @@ func TestMain(m *testing.M) { successCode = persistent_mounting.RunTests(flags, m) } + if successCode == 0 { + successCode = dynamic_mounting.RunTests(flags, m) + } + if successCode == 0 { // Test 
for admin permission on test bucket. successCode = creds_tests.RunTestsForKeyFileAndGoogleApplicationCredentialsEnvVarSet(flags, "objectAdmin", m) diff --git a/tools/integration_tests/util/mounting/dynamic_mounting/dynamic_mounting.go b/tools/integration_tests/util/mounting/dynamic_mounting/dynamic_mounting.go new file mode 100644 index 0000000000..11e8fee3b5 --- /dev/null +++ b/tools/integration_tests/util/mounting/dynamic_mounting/dynamic_mounting.go @@ -0,0 +1,120 @@ +//Copyright 2023 Google Inc. All Rights Reserved. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. 
+ +package dynamic_mounting + +import ( + "fmt" + "log" + "math/rand" + "path" + "testing" + "time" + + "cloud.google.com/go/compute/metadata" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const PrefixBucketForDynamicMountingTest = "gcsfuse-dynamic-mounting-test-" +const Charset = "abcdefghijklmnopqrstuvwxyz0123456789" + +var seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano())) +var testBucketForDynamicMounting = PrefixBucketForDynamicMountingTest + generateRandomString(5) + +func mountGcsfuseWithDynamicMounting(flags []string) (err error) { + defaultArg := []string{"--debug_gcs", + "--debug_fs", + "--debug_fuse", + "--log-file=" + setup.LogFile(), + "--log-format=text", + setup.MntDir()} + + for i := 0; i < len(defaultArg); i++ { + flags = append(flags, defaultArg[i]) + } + + err = mounting.MountGcsfuse(setup.BinFile(), flags) + + return err +} + +func runTestsOnGivenMountedTestBucket(bucketName string, flags [][]string, rootMntDir string, m *testing.M) (successCode int) { + for i := 0; i < len(flags); i++ { + if err := mountGcsfuseWithDynamicMounting(flags[i]); err != nil { + setup.LogAndExit(fmt.Sprintf("mountGcsfuse: %v\n", err)) + } + + // Changing mntDir to path of bucket mounted in mntDir for testing. + mntDirOfTestBucket := path.Join(setup.MntDir(), bucketName) + + setup.SetMntDir(mntDirOfTestBucket) + + // Running tests on flags. + successCode = setup.ExecuteTest(m) + + // Currently mntDir is mntDir/bucketName. + // Unmounting can happen on rootMntDir. Changing mntDir to rootMntDir for unmounting. 
+ setup.SetMntDir(rootMntDir) + setup.UnMountAndThrowErrorInFailure(flags[i], successCode) + } + return +} + +func executeTestsForDynamicMounting(flags [][]string, m *testing.M) (successCode int) { + rootMntDir := setup.MntDir() + + // In dynamic mounting all the buckets mounted in mntDir which user has permission. + // mntDir - bucket1, bucket2, bucket3, ... + // We will test on passed testBucket and one created bucket. + + // Test on testBucket + successCode = runTestsOnGivenMountedTestBucket(setup.TestBucket(), flags, rootMntDir, m) + + // Test on created bucket. + if successCode == 0 { + successCode = runTestsOnGivenMountedTestBucket(testBucketForDynamicMounting, flags, rootMntDir, m) + } + + // Setting back the original mntDir after testing. + setup.SetMntDir(rootMntDir) + return +} + +func generateRandomString(length int) string { + b := make([]byte, length) + for i := range b { + b[i] = Charset[seededRand.Intn(len(Charset))] + } + return string(b) +} + +func RunTests(flags [][]string, m *testing.M) (successCode int) { + project_id, err := metadata.ProjectID() + if err != nil { + log.Printf("Error in fetching project id: %v", err) + } + + // Create bucket with name gcsfuse-dynamic-mounting-test-xxxxx + setup.RunScriptForTestData("../util/mounting/dynamic_mounting/testdata/create_bucket.sh", testBucketForDynamicMounting, project_id) + + successCode = executeTestsForDynamicMounting(flags, m) + + log.Printf("Test log: %s\n", setup.LogFile()) + + // Deleting bucket after testing. 
+ setup.RunScriptForTestData("../util/mounting/dynamic_mounting/testdata/delete_bucket.sh", testBucketForDynamicMounting) + + return successCode +} diff --git a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh new file mode 100644 index 0000000000..eb50ff34f0 --- /dev/null +++ b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh @@ -0,0 +1,28 @@ +# Copyright 2023 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Create bucket for testing. + +BUCKET_NAME=$1 +PROJECT_ID=$2 +gcloud storage buckets create gs://$BUCKET_NAME --project=$PROJECT_ID --location=us-west1 --uniform-bucket-level-access 2> ~/output.txt +if [ $? -eq 1 ]; then + if grep "HTTPError 409" ~/output.txt; then + echo "Bucket already exist." + rm ~/output.txt + else + rm ~/output.txt + exit 1 + fi +fi +rm ~/output.txt diff --git a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh new file mode 100644 index 0000000000..33c3167e7d --- /dev/null +++ b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh @@ -0,0 +1,18 @@ +# Copyright 2023 Google Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Delete bucket after testing. + +BUCKET_NAME=$1 + +gcloud storage rm --recursive gs://$BUCKET_NAME/ diff --git a/tools/integration_tests/util/setup/setup.go b/tools/integration_tests/util/setup/setup.go index 0d0310339f..9463b5617b 100644 --- a/tools/integration_tests/util/setup/setup.go +++ b/tools/integration_tests/util/setup/setup.go @@ -195,18 +195,14 @@ func UnMount() error { return nil } -func executeTest(m *testing.M) (successCode int) { +func ExecuteTest(m *testing.M) (successCode int) { successCode = m.Run() return successCode } -func ExecuteTestForFlagsSet(flags []string, m *testing.M) (successCode int) { - var err error - - successCode = executeTest(m) - - err = UnMount() +func UnMountAndThrowErrorInFailure(flags []string, successCode int) { + err := UnMount() if err != nil { LogAndExit(fmt.Sprintf("Error in unmounting bucket: %v", err)) } @@ -217,6 +213,13 @@ func ExecuteTestForFlagsSet(flags []string, m *testing.M) (successCode int) { log.Print("Test Fails on " + f) return } +} + +func ExecuteTestForFlagsSet(flags []string, m *testing.M) (successCode int) { + successCode = ExecuteTest(m) + + UnMountAndThrowErrorInFailure(flags, successCode) + return } @@ -242,7 +245,7 @@ func RunTestsForMountedDirectoryFlag(m *testing.M) { // Execute tests for the mounted directory. 
if *mountedDirectory != "" { mntDir = *mountedDirectory - successCode := executeTest(m) + successCode := ExecuteTest(m) os.Exit(successCode) } } From 333b6f18b6c3de135bb8b65430a7b7adc9b39890 Mon Sep 17 00:00:00 2001 From: Ashmeen Kaur <57195160+ashmeenkaur@users.noreply.github.com> Date: Wed, 26 Jul 2023 16:40:47 +0530 Subject: [PATCH 18/46] minor change to test script (#1243) --- tools/cd_scripts/install_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/cd_scripts/install_test.sh b/tools/cd_scripts/install_test.sh index d56a2fd6c6..e9b3e6f606 100644 --- a/tools/cd_scripts/install_test.sh +++ b/tools/cd_scripts/install_test.sh @@ -30,7 +30,7 @@ then echo 'deb http://packages.cloud.google.com/apt apt-transport-artifact-registry-stable main' | sudo tee -a /etc/apt/sources.list.d/artifact-registry.list sudo apt update sudo apt install apt-transport-artifact-registry - echo 'deb ar+https://us-apt.pkg.dev/projects/gcs-fuse-prod $(lsb_release -cs) main' | sudo tee -a /etc/apt/sources.list.d/artifact-registry.list + echo "deb ar+https://us-apt.pkg.dev/projects/gcs-fuse-prod gcsfuse-$(lsb_release -cs) main" | sudo tee -a /etc/apt/sources.list.d/artifact-registry.list sudo apt update # Install released gcsfuse version From 56a1232d1e31da2c6d971db5008c43779512b19d Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 26 Jul 2023 18:51:49 +0530 Subject: [PATCH 19/46] Sequential write large file integration tests (#1240) * adding test for writing file of 100MB * adding test for writing file of 100MB * adding commands in script * adding commands in script * small fix * increasing chunk size * small fix * using min function * small fix * adding comment * fixing lint * writing 500MB in more redable way * fixing comments * fixing comments * changing chunksize to 20MB * testing sync * testing * merging * merging * testing sync * small fix --- .../run_tests_mounted_directory.sh | 9 ++++ 
.../util/operations/file_operations.go | 44 ++++++++++++++++ .../write_large_files_test.go | 50 ++++++++++++++++++ .../write_one_large_file_sequentially_test.go | 51 +++++++++++++++++++ 4 files changed, 154 insertions(+) create mode 100644 tools/integration_tests/write_large_files/write_large_files_test.go create mode 100644 tools/integration_tests/write_large_files/write_one_large_file_sequentially_test.go diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index 5cabf79eee..9b5938b25f 100644 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -263,3 +263,12 @@ sudo umount $MOUNT_DIR gcsfuse --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/read_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR + +# Run integration tests for write_large_files directory with static mounting +gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + +gcsfuse --implicit-dirs --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index f1c4f63fcb..8f36290c91 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -16,6 +16,7 @@ package operations import ( + "crypto/rand" "fmt" "io" "log" @@ -192,3 +193,46 @@ func ReadFileSequentially(filePath string, chunkSize int64) (content []byte, err } return } + +func WriteFileSequentially(filePath string, fileSize int64, chunkSize int64) (err error) { + file, err := os.OpenFile(filePath, os.O_RDWR|syscall.O_DIRECT|os.O_CREATE, FilePermission_0600) + if err != nil { + log.Printf("Error in opening file:%v", err) + } + + // Closing file at the end. + defer CloseFile(file) + + var offset int64 = 0 + + for offset < fileSize { + // Get random chunkSize or remaining filesize data into chunk. + if (fileSize - offset) < chunkSize { + chunkSize = (fileSize - offset) + } + chunk := make([]byte, chunkSize) + _, err = rand.Read(chunk) + if err != nil { + log.Fatalf("error while generating random string: %s", err) + } + + var numberOfBytes int + + // Writes random chunkSize or remaining filesize data into file. + numberOfBytes, err = file.Write(chunk) + err = file.Sync() + if err != nil { + log.Printf("Error in syncing file:%v", err) + } + + if err != nil { + return + } + if int64(numberOfBytes) != chunkSize { + log.Fatalf("Incorrect number of bytes written in the file.") + } + + offset = offset + chunkSize + } + return +} diff --git a/tools/integration_tests/write_large_files/write_large_files_test.go b/tools/integration_tests/write_large_files/write_large_files_test.go new file mode 100644 index 0000000000..7002780abe --- /dev/null +++ b/tools/integration_tests/write_large_files/write_large_files_test.go @@ -0,0 +1,50 @@ +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for write large files sequentially and randomly. +package write_large_files + +import ( + "log" + "os" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +func TestMain(m *testing.M) { + setup.ParseSetUpFlags() + + flags := [][]string{{"--implicit-dirs"}, {"--enable-storage-client-library=false", "--implicit-dirs"}} + + setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() + + if setup.TestBucket() != "" && setup.MountedDirectory() != "" { + log.Print("Both --testbucket and --mountedDirectory can't be specified at the same time.") + os.Exit(1) + } + + // Run tests for mountedDirectory only if --mountedDirectory flag is set. 
+ setup.RunTestsForMountedDirectoryFlag(m) + + // Run tests for testBucket + setup.SetUpTestDirForTestBucketFlag() + + successCode := static_mounting.RunTests(flags, m) + + setup.RemoveBinFileCopiedForTesting() + + os.Exit(successCode) +} diff --git a/tools/integration_tests/write_large_files/write_one_large_file_sequentially_test.go b/tools/integration_tests/write_large_files/write_one_large_file_sequentially_test.go new file mode 100644 index 0000000000..805a3aa585 --- /dev/null +++ b/tools/integration_tests/write_large_files/write_one_large_file_sequentially_test.go @@ -0,0 +1,51 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package write_large_files + +import ( + "os" + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const FiveHundredMB = 500 * 1024 * 1024 +const FiveHundredMBFile = "fiveHundredMBFile.txt" +const ChunkSize = 20 * 1024 * 1024 + +func TestWriteLargeFileSequentially(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + filePath := path.Join(setup.MntDir(), FiveHundredMBFile) + + // Sequentially write the data to file.
+ err := operations.WriteFileSequentially(filePath, FiveHundredMB, ChunkSize) + if err != nil { + t.Errorf("Error in writing file: %v", err) + } + + // Check if 500MB data written in the file. + fStat, err := os.Stat(filePath) + if err != nil { + t.Errorf("Error in stating file:%v", err) + } + + if fStat.Size() != FiveHundredMB { + t.Errorf("Expected file size %v found %d", FiveHundredMB, fStat.Size()) + } +} From 0ca0033611f9d82e0fb5e404f7e8063d98be1527 Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Fri, 28 Jul 2023 13:37:01 +0530 Subject: [PATCH 20/46] Kokoro perf test fix - gcloud storage command not working (#1248) --- .../util/mounting/dynamic_mounting/testdata/create_bucket.sh | 2 +- .../util/mounting/dynamic_mounting/testdata/delete_bucket.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh index eb50ff34f0..619657a09e 100644 --- a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh +++ b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/create_bucket.sh @@ -15,7 +15,7 @@ BUCKET_NAME=$1 PROJECT_ID=$2 -gcloud storage buckets create gs://$BUCKET_NAME --project=$PROJECT_ID --location=us-west1 --uniform-bucket-level-access 2> ~/output.txt +gcloud alpha storage buckets create gs://$BUCKET_NAME --project=$PROJECT_ID --location=us-west1 --uniform-bucket-level-access 2> ~/output.txt if [ $? -eq 1 ]; then if grep "HTTPError 409" ~/output.txt; then echo "Bucket already exist."
diff --git a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh index 33c3167e7d..d4eac2498f 100644 --- a/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh +++ b/tools/integration_tests/util/mounting/dynamic_mounting/testdata/delete_bucket.sh @@ -15,4 +15,4 @@ BUCKET_NAME=$1 -gcloud storage rm --recursive gs://$BUCKET_NAME/ +gcloud alpha storage rm --recursive gs://$BUCKET_NAME/ From 2a7c8566b617b90a2cb1fd8e8e86ac4640388557 Mon Sep 17 00:00:00 2001 From: "Minwoo Byeon (Dylan)" Date: Mon, 31 Jul 2023 15:06:18 +0900 Subject: [PATCH 21/46] Update semantics.md -- fix typos (#1251) --- docs/semantics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/semantics.md b/docs/semantics.md index 91b7482ddd..6a47e89f49 100644 --- a/docs/semantics.md +++ b/docs/semantics.md @@ -124,7 +124,7 @@ This is the default behavior, unless a user passes the ```--implicit-dirs``` fla Cloud Storage FUSE supports a flag called ```--implicit-dirs``` that changes the behavior for how pre-existing directory structures, not created by Cloud Storage FUSE, are mounted and visible to Cloud Storage FUSE. When this flag is enabled, name lookup requests from the kernel use the Cloud Storage API's Objects.list operation to search for objects that would implicitly define the existence of a directory with the name in question. -The example above describes how from the local filesystem the user sees only 0.txt, until the user creates A/, A/B/, C/ using mkdir. If instead the ```--implicity-dirs``` flag is passed, you would see the intended directory structure without first having to create the directories A/, A/B/, C/. +The example above describes how from the local filesystem the user sees only 0.txt, until the user creates A/, A/B/, C/ using mkdir. 
If instead the ```--implicit-dirs``` flag is passed, you would see the intended directory structure without first having to create the directories A/, A/B/, C/. However, implicit directories does have drawbacks: - The feature requires an additional request to Cloud Storage for each name lookup, which may have costs in terms of both charges for operations and latency. From 5b35898e3db64932ae570d44e06dae7d08d6a5ba Mon Sep 17 00:00:00 2001 From: Tulsishah <46474643+Tulsishah@users.noreply.github.com> Date: Tue, 1 Aug 2023 12:44:36 +0530 Subject: [PATCH 22/46] Random read integration tests (#1247) * adding tests for random read files * fixing comments * creating common function * small fix * fixing comment * creating common function * small fix * merge * fixing comments and fixing typos in naming * renaming function name * renaming function name --- .../random_read_large_file_test.go | 59 +++++++++++++++++++ .../read_large_files/read_large_files_test.go | 18 ++++++ ...ly_test.go => seq_read_large_file_test.go} | 17 +----- .../write_content_of_fix_size_in_file.sh | 1 - .../readonly/readonly_test.go | 1 - .../perisistent_mounting.go | 8 +-- .../static_mounting/static_mounting.go | 4 +- .../util/operations/file_operations.go | 37 ++++++++++++ 8 files changed, 123 insertions(+), 22 deletions(-) create mode 100644 tools/integration_tests/read_large_files/random_read_large_file_test.go rename tools/integration_tests/read_large_files/{read_one_large_file_sequentially_test.go => seq_read_large_file_test.go} (74%) diff --git a/tools/integration_tests/read_large_files/random_read_large_file_test.go b/tools/integration_tests/read_large_files/random_read_large_file_test.go new file mode 100644 index 0000000000..35317e8e0e --- /dev/null +++ b/tools/integration_tests/read_large_files/random_read_large_file_test.go @@ -0,0 +1,59 @@ +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package read_large_files + +import ( + "bytes" + "math/rand" + "os" + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +func TestReadLargeFileRandomly(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + fileInLocalDisk := path.Join(os.Getenv("HOME"), FiveHundredMBFile) + file := path.Join(setup.MntDir(), FiveHundredMBFile) + // Create and copy the local file in mountedDirectory. + createFileOnDiskAndCopyToMntDir(fileInLocalDisk, file, FiveHundredMB, t) + + for i := 0; i < NumberOfRandomReadCalls; i++ { + offset := rand.Int63n(MaxReadableByteFromFile - MinReadableByteFromFile) + // Randomly read the data from file in mountedDirectory. + content, err := operations.ReadChunkFromFile(file, ChunkSize, offset) + if err != nil { + t.Errorf("Error in reading file: %v", err) + } + + // Read actual content from file located in local disk. + actualContent, err := operations.ReadChunkFromFile(fileInLocalDisk, ChunkSize, offset) + if err != nil { + t.Errorf("Error in reading file: %v", err) + } + + // Compare actual content and expect content. + if bytes.Equal(actualContent, content) == false { + t.Errorf("Error in reading file randomly.") + } + } + + // Removing file after testing. 
+ operations.RemoveFile(fileInLocalDisk) +} diff --git a/tools/integration_tests/read_large_files/read_large_files_test.go b/tools/integration_tests/read_large_files/read_large_files_test.go index 73eb532990..3ebb4d381f 100644 --- a/tools/integration_tests/read_large_files/read_large_files_test.go +++ b/tools/integration_tests/read_large_files/read_large_files_test.go @@ -18,12 +18,22 @@ package read_large_files import ( "log" "os" + "strconv" "testing" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) +const OneMB = 1024 * 1024 +const FiveHundredMB = 500 * OneMB +const FiveHundredMBFile = "fiveHundredMBFile.txt" +const ChunkSize = 200 * OneMB +const NumberOfRandomReadCalls = 200 +const MinReadableByteFromFile = 0 +const MaxReadableByteFromFile = 500 * OneMB + func TestMain(m *testing.M) { setup.ParseSetUpFlags() @@ -48,3 +58,11 @@ func TestMain(m *testing.M) { os.Exit(successCode) } + +func createFileOnDiskAndCopyToMntDir(fileInLocalDisk string, fileInMntDir string, fileSize int, t *testing.T) { + setup.RunScriptForTestData("testdata/write_content_of_fix_size_in_file.sh", fileInLocalDisk, strconv.Itoa(fileSize)) + err := operations.CopyFile(fileInLocalDisk, fileInMntDir) + if err != nil { + t.Errorf("Error in copying file:%v", err) + } +} diff --git a/tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go b/tools/integration_tests/read_large_files/seq_read_large_file_test.go similarity index 74% rename from tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go rename to tools/integration_tests/read_large_files/seq_read_large_file_test.go index a75080ab9a..c1f7fc117a 100644 --- a/tools/integration_tests/read_large_files/read_one_large_file_sequentially_test.go +++ 
b/tools/integration_tests/read_large_files/seq_read_large_file_test.go @@ -18,34 +18,23 @@ import ( "bytes" "os" "path" - "strconv" "testing" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) -const FiveHundredMB = 500 * 1024 * 1024 -const FiveHundredMBFile = "fiveHundredMBFile.txt" -const chunkSize = 200 * 1024 * 1024 - func TestReadLargeFileSequentially(t *testing.T) { // Clean the mountedDirectory before running test. setup.CleanMntDir() - // Create file of 500 MB with random data in local disk. fileInLocalDisk := path.Join(os.Getenv("HOME"), FiveHundredMBFile) - setup.RunScriptForTestData("testdata/write_content_of_fix_size_in_file.sh", fileInLocalDisk, strconv.Itoa(FiveHundredMB)) - - // Copy the file in mounted directory. file := path.Join(setup.MntDir(), FiveHundredMBFile) - err := operations.CopyFile(fileInLocalDisk, file) - if err != nil { - t.Errorf("Error in copying file:%v", err) - } + // Create and copy the local file in mountedDirectory. + createFileOnDiskAndCopyToMntDir(fileInLocalDisk, file, FiveHundredMB, t) // Sequentially read the data from file. - content, err := operations.ReadFileSequentially(file, chunkSize) + content, err := operations.ReadFileSequentially(file, ChunkSize) if err != nil { t.Errorf("Error in reading file: %v", err) } diff --git a/tools/integration_tests/read_large_files/testdata/write_content_of_fix_size_in_file.sh b/tools/integration_tests/read_large_files/testdata/write_content_of_fix_size_in_file.sh index 60f66b60d0..c060f00b28 100644 --- a/tools/integration_tests/read_large_files/testdata/write_content_of_fix_size_in_file.sh +++ b/tools/integration_tests/read_large_files/testdata/write_content_of_fix_size_in_file.sh @@ -14,7 +14,6 @@ FILE_PATH=$1 FILE_SIZE=$2 -TEST_BUCKET=$3 # It will write filesize random data in a file. 
head -c $FILE_SIZE $FILE_PATH diff --git a/tools/integration_tests/readonly/readonly_test.go b/tools/integration_tests/readonly/readonly_test.go index 727e93c15e..49917aa2bf 100644 --- a/tools/integration_tests/readonly/readonly_test.go +++ b/tools/integration_tests/readonly/readonly_test.go @@ -81,7 +81,6 @@ func TestMain(m *testing.M) { successCode := static_mounting.RunTests(flags, m) if successCode == 0 { - // Test for viewer permission on test bucket. successCode = persistent_mounting.RunTests(flags, m) } diff --git a/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go index a664237192..688d85e9bd 100644 --- a/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go +++ b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go @@ -39,7 +39,7 @@ func makePersistentMountingArgs(flags []string) (args []string, err error) { return } -func mountGcsfuseWithStaticMounting(flags []string) (err error) { +func mountGcsfuseWithPersistentMounting(flags []string) (err error) { defaultArg := []string{setup.TestBucket(), setup.MntDir(), "-o", @@ -69,11 +69,11 @@ func mountGcsfuseWithStaticMounting(flags []string) (err error) { return err } -func executeTestsForStatingMounting(flags [][]string, m *testing.M) (successCode int) { +func executeTestsForPersistentMounting(flags [][]string, m *testing.M) (successCode int) { var err error for i := 0; i < len(flags); i++ { - if err = mountGcsfuseWithStaticMounting(flags[i]); err != nil { + if err = mountGcsfuseWithPersistentMounting(flags[i]); err != nil { setup.LogAndExit(fmt.Sprintf("mountGcsfuse: %v\n", err)) } setup.ExecuteTestForFlagsSet(flags[i], m) @@ -82,7 +82,7 @@ func executeTestsForStatingMounting(flags [][]string, m *testing.M) (successCode } func RunTests(flags [][]string, m *testing.M) (successCode int) { - successCode = executeTestsForStatingMounting(flags, 
m) + successCode = executeTestsForPersistentMounting(flags, m) log.Printf("Test log: %s\n", setup.LogFile()) diff --git a/tools/integration_tests/util/mounting/static_mounting/static_mounting.go b/tools/integration_tests/util/mounting/static_mounting/static_mounting.go index 090b783025..7bfaf72f25 100644 --- a/tools/integration_tests/util/mounting/static_mounting/static_mounting.go +++ b/tools/integration_tests/util/mounting/static_mounting/static_mounting.go @@ -41,7 +41,7 @@ func mountGcsfuseWithStaticMounting(flags []string) (err error) { return err } -func executeTestsForStatingMounting(flags [][]string, m *testing.M) (successCode int) { +func executeTestsForStaticMounting(flags [][]string, m *testing.M) (successCode int) { var err error for i := 0; i < len(flags); i++ { @@ -54,7 +54,7 @@ func executeTestsForStatingMounting(flags [][]string, m *testing.M) (successCode } func RunTests(flags [][]string, m *testing.M) (successCode int) { - successCode = executeTestsForStatingMounting(flags, m) + successCode = executeTestsForStaticMounting(flags, m) log.Printf("Test log: %s\n", setup.LogFile()) diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index 8f36290c91..d2b20899cb 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -236,3 +236,40 @@ func WriteFileSequentially(filePath string, fileSize int64, chunkSize int64) (er } return } + +func ReadChunkFromFile(filePath string, chunkSize int64, offset int64) (chunk []byte, err error) { + chunk = make([]byte, chunkSize) + + file, err := os.OpenFile(filePath, os.O_RDONLY, FilePermission_0600) + if err != nil { + log.Printf("Error in opening file:%v", err) + return + } + + f, err := os.Stat(filePath) + if err != nil { + log.Printf("Error in stating file:%v", err) + return + } + + // Closing the file at the end. 
+ defer CloseFile(file) + + var numberOfBytes int + + // Reading chunk size randomly from the file. + numberOfBytes, err = file.ReadAt(chunk, offset) + if err == io.EOF { + err = nil + } + if err != nil { + return + } + + // The number of bytes read is not equal to 200MB. + if int64(numberOfBytes) != chunkSize && int64(numberOfBytes) != f.Size()-offset { + log.Printf("Incorrect number of bytes read from file.") + } + + return +} From e0d7a45e36a6f1717cbcbe63cda3e5d47c718cf7 Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 2 Aug 2023 18:43:11 +0530 Subject: [PATCH 23/46] Read multiple files at a time integration tests (#1249) * adding tests * updating comment * fixing comments and fixing typos in naming * renaming function name * renaming * fix lint test --- .../concurrent_read_files_test.go | 83 +++++++++++++++++++ .../seq_read_large_file_test.go | 2 +- 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 tools/integration_tests/read_large_files/concurrent_read_files_test.go diff --git a/tools/integration_tests/read_large_files/concurrent_read_files_test.go b/tools/integration_tests/read_large_files/concurrent_read_files_test.go new file mode 100644 index 0000000000..1eb1c09bce --- /dev/null +++ b/tools/integration_tests/read_large_files/concurrent_read_files_test.go @@ -0,0 +1,83 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package read_large_files + +import ( + "bytes" + "os" + "path" + "sync" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const FileOne = "fileOne.txt" +const FileTwo = "fileTwo.txt" +const FileThree = "fileThree.txt" +const NumberOfFilesInLocalDiskForConcurrentRead = 3 + +func readFile(fileInLocalDisk string, fileInMntDir string, wg *sync.WaitGroup, t *testing.T) { + // Reduce thread count when it read the file. + defer wg.Done() + + dataInMntDirFile, err := operations.ReadFile(fileInMntDir) + if err != nil { + return + } + + dataInLocalDiskFile, err := operations.ReadFile(fileInLocalDisk) + if err != nil { + return + } + + // Compare actual content and expect content. + if bytes.Equal(dataInLocalDiskFile, dataInMntDirFile) == false { + t.Errorf("Reading incorrect file.") + } +} + +func TestReadFilesConcurrently(t *testing.T) { + // Clean the mountedDirectory before running test. + setup.CleanMntDir() + + filesInLocalDisk := [NumberOfFilesInLocalDiskForConcurrentRead]string{FileOne, FileTwo, FileThree} + var filesPathInLocalDisk []string + var filesPathInMntDir []string + + for i := 0; i < NumberOfFilesInLocalDiskForConcurrentRead; i++ { + fileInLocalDisk := path.Join(os.Getenv("HOME"), filesInLocalDisk[i]) + filesPathInLocalDisk = append(filesPathInLocalDisk, fileInLocalDisk) + + file := path.Join(setup.MntDir(), filesInLocalDisk[i]) + filesPathInMntDir = append(filesPathInMntDir, file) + + createFileOnDiskAndCopyToMntDir(fileInLocalDisk, file, FiveHundredMB, t) + } + + // For waiting on threads. + var wg sync.WaitGroup + + for i := 0; i < NumberOfFilesInLocalDiskForConcurrentRead; i++ { + // Increment the WaitGroup counter. + wg.Add(1) + // Thread to read file. + go readFile(filesPathInLocalDisk[i], filesPathInMntDir[i], &wg, t) + } + + // Wait on threads to end. 
+ wg.Wait() +} diff --git a/tools/integration_tests/read_large_files/seq_read_large_file_test.go b/tools/integration_tests/read_large_files/seq_read_large_file_test.go index c1f7fc117a..df8a122f69 100644 --- a/tools/integration_tests/read_large_files/seq_read_large_file_test.go +++ b/tools/integration_tests/read_large_files/seq_read_large_file_test.go @@ -28,9 +28,9 @@ func TestReadLargeFileSequentially(t *testing.T) { // Clean the mountedDirectory before running test. setup.CleanMntDir() + // Create file of 500 MB with random data in local disk and copy it in mntDir. fileInLocalDisk := path.Join(os.Getenv("HOME"), FiveHundredMBFile) file := path.Join(setup.MntDir(), FiveHundredMBFile) - // Create and copy the local file in mountedDirectory. createFileOnDiskAndCopyToMntDir(fileInLocalDisk, file, FiveHundredMB, t) // Sequentially read the data from file. From 70526574af96a88c33dece73f2448f93cfcde97b Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Mon, 7 Aug 2023 05:44:56 +0000 Subject: [PATCH 24/46] Update gcloud version (#1259) Update jacobsa/gcloud dependency version This is to account for https://github.com/jacobsa/gcloud/pull/34 . 
Also minor gitignore change: Ignore vscode settings folder (.vscode) --- .gitignore | 1 + go.mod | 2 +- go.sum | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f40cd650ac..8656d8cf65 100644 --- a/.gitignore +++ b/.gitignore @@ -39,6 +39,7 @@ _testmain.go # Editors .idea/ +.vscode/ # External folders vendor/ diff --git a/go.mod b/go.mod index 388019c9a2..bbae2f604f 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/googleapis/gax-go/v2 v2.7.0 github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 - github.com/jacobsa/gcloud v0.0.0-20230425120041-5ed2958cdfee + github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11 diff --git a/go.sum b/go.sum index fee96d3e3c..008c5ac876 100644 --- a/go.sum +++ b/go.sum @@ -212,6 +212,8 @@ github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 h1:6z3Yj4PZKk3n18T2s7 github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474/go.mod h1:XUKuYy1M4vamyxQjW8/WZBTxyZ0NnUiq+kkA+WWOfeI= github.com/jacobsa/gcloud v0.0.0-20230425120041-5ed2958cdfee h1:1NvpBXX7CiuoK+SdLNMwelLB+2OkJLxhjllc0WgY8sE= github.com/jacobsa/gcloud v0.0.0-20230425120041-5ed2958cdfee/go.mod h1:CGkT80TfaoPTzQ8My+t2M7PnMDvkwAR36Qm8Mm8HytI= +github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 h1:kD9sX/8uHuPQI6OO/VKz/olbTXNkQ4vveSPNdS9AtHw= +github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984/go.mod h1:CGkT80TfaoPTzQ8My+t2M7PnMDvkwAR36Qm8Mm8HytI= github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA= 
github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd/go.mod h1:TlmyIZDpGmwRoTWiakdr+HA1Tukze6C6XbRVidYq02M= github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff h1:2xRHTvkpJ5zJmglXLRqHiZQNjUoOkhUyhTAhEQvPAWw= From 4efd86ebe31f40b7c50336c4a069b05c37909bf5 Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:53:23 +0530 Subject: [PATCH 25/46] Removing rate limit dependency (#1253) * local changes * local changes * local changes * local changes * removed token bucket and added unit tests * fixing lint * small fix- renaming * small fix- renaming * fix lint * adding licence * testing * back to changes * fixing comments * removing throttle_test * adding throttle test * fixing comment * fixing lint * fixed comment * lint tests * lint tests * fixing comment * empty commit * empty commit --- go.mod | 2 +- go.sum | 4 +- internal/gcsx/bucket_manager.go | 13 +- internal/ratelimit/limiter_capacity.go | 83 +++++ internal/ratelimit/limiter_capacity_test.go | 96 ++++++ internal/ratelimit/throttle.go | 58 ++++ internal/ratelimit/throttle_reader_test.go | 335 ++++++++++++++++++++ internal/ratelimit/throttle_test.go | 209 ++++++++++++ internal/ratelimit/throttled_bucket.go | 202 ++++++++++++ internal/ratelimit/throttled_reader.go | 66 ++++ 10 files changed, 1058 insertions(+), 10 deletions(-) create mode 100644 internal/ratelimit/limiter_capacity.go create mode 100644 internal/ratelimit/limiter_capacity_test.go create mode 100644 internal/ratelimit/throttle.go create mode 100644 internal/ratelimit/throttle_reader_test.go create mode 100644 internal/ratelimit/throttle_test.go create mode 100644 internal/ratelimit/throttled_bucket.go create mode 100644 internal/ratelimit/throttled_reader.go diff --git a/go.mod b/go.mod index bbae2f604f..6835cce813 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( github.com/jacobsa/oglematchers 
v0.0.0-20150720000706-141901ea67cd github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11 - github.com/jacobsa/ratelimit v0.0.0-20150904001804-f5e47030f3b0 github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb github.com/jacobsa/syncutil v0.0.0-20180201203307-228ac8e5a6c3 github.com/jacobsa/timeutil v0.0.0-20170205232429-577e5acbbcf6 @@ -55,6 +54,7 @@ require ( golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.6.0 // indirect golang.org/x/text v0.8.0 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect diff --git a/go.sum b/go.sum index 008c5ac876..b158cb6b2e 100644 --- a/go.sum +++ b/go.sum @@ -220,8 +220,6 @@ github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff h1:2xRHTvkpJ5zJmg github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff/go.mod h1:gJWba/XXGl0UoOmBQKRWCJdHrr3nE0T65t6ioaj3mLI= github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11 h1:BMb8s3ENQLt5ulwVIHVDWFHp8eIXmbfSExkvdn9qMXI= github.com/jacobsa/ogletest v0.0.0-20170503003838-80d50a735a11/go.mod h1:+DBdDyfoO2McrOyDemRBq0q9CMEByef7sYl7JH5Q3BI= -github.com/jacobsa/ratelimit v0.0.0-20150904001804-f5e47030f3b0 h1:6GaIakaFrxn738iBykUc6fyS5sIAKRg/wafwzrzRX30= -github.com/jacobsa/ratelimit v0.0.0-20150904001804-f5e47030f3b0/go.mod h1:5/sdn6lSZE5l3rXMkJGO7Y3MHJImklO43rZx9ouOWYQ= github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb h1:uSWBjJdMf47kQlXMwWEfmc864bA1wAC+Kl3ApryuG9Y= github.com/jacobsa/reqtrace v0.0.0-20150505043853-245c9e0234cb/go.mod h1:ivcmUvxXWjb27NsPEaiYK7AidlZXS7oQ5PowUS9z3I4= github.com/jacobsa/syncutil v0.0.0-20180201203307-228ac8e5a6c3 
h1:+gHfvQxomE6fI4zg7QYyaGDCnuw2wylD4i6yzrQvAmY= @@ -475,6 +473,8 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/internal/gcsx/bucket_manager.go b/internal/gcsx/bucket_manager.go index a8cfa51c6a..d645564fba 100644 --- a/internal/gcsx/bucket_manager.go +++ b/internal/gcsx/bucket_manager.go @@ -20,17 +20,16 @@ import ( "path" "time" - "github.com/googlecloudplatform/gcsfuse/internal/storage" - "github.com/jacobsa/reqtrace" - "golang.org/x/net/context" - "github.com/googlecloudplatform/gcsfuse/internal/canned" "github.com/googlecloudplatform/gcsfuse/internal/logger" "github.com/googlecloudplatform/gcsfuse/internal/monitor" + "github.com/googlecloudplatform/gcsfuse/internal/ratelimit" + "github.com/googlecloudplatform/gcsfuse/internal/storage" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/gcloud/gcs/gcscaching" - "github.com/jacobsa/ratelimit" + "github.com/jacobsa/reqtrace" "github.com/jacobsa/timeutil" + "golang.org/x/net/context" ) type BucketConfig struct { @@ -117,7 +116,7 @@ func setUpRateLimiting( // window of 
the given size. const window = 8 * time.Hour - opCapacity, err := ratelimit.ChooseTokenBucketCapacity( + opCapacity, err := ratelimit.ChooseLimiterCapacity( opRateLimitHz, window) @@ -126,7 +125,7 @@ func setUpRateLimiting( return } - egressCapacity, err := ratelimit.ChooseTokenBucketCapacity( + egressCapacity, err := ratelimit.ChooseLimiterCapacity( egressBandwidthLimit, window) diff --git a/internal/ratelimit/limiter_capacity.go b/internal/ratelimit/limiter_capacity.go new file mode 100644 index 0000000000..a4c095ebb0 --- /dev/null +++ b/internal/ratelimit/limiter_capacity.go @@ -0,0 +1,83 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "fmt" + "math" + "time" +) + +// Choose a limiter capacity that ensures that the action gated by the +// limiter will be limited to within a few percent of `rateHz * window` +// for any window of the given size. +// +// This is not be possible for all rates and windows. In that case, an error +// will be returned. +func ChooseLimiterCapacity( + rateHz float64, + window time.Duration) (capacity uint64, err error) { + // Check that the input is reasonable. 
+ if rateHz <= 0 || math.IsInf(rateHz, 0) { + err = fmt.Errorf("Illegal rate: %f", rateHz) + return + } + + if window <= 0 { + err = fmt.Errorf("Illegal window: %v", window) + return + } + + // We cannot help but allow the rate to exceed the configured maximum by some + // factor in an arbitrary window, no matter how small we scale the max + // accumulated credit -- the bucket may be full at the start of the window, + // be immediately exhausted, then be repeatedly exhausted just before filling + // throughout the window. + // + // For example: let the window W = 10 seconds, and the bandwidth B = 20 MiB/s. + // Set the max accumulated credit C = W*B/2 = 100 MiB. Then this + // sequence of events is allowed: + // + // * T=0: Allow through 100 MiB. + // * T=4.999999: Allow through nearly 100 MiB. + // * T=9.999999: Allow through nearly 100 MiB. + // + // Above we allow through nearly 300 MiB, exceeding the allowed bytes for the + // window by nearly 50%. Note however that this trend cannot continue into + // the next window, so this must be a transient spike. + // + // In general if we set C <= W*B/N, then we're off by no more than a factor + // of (N+1)/N within any window of size W. + // + // Choose a reasonable N. + const N = 50 // At most 2% error + + w := float64(window) / float64(time.Second) + capacityFloat := math.Floor(w * rateHz / N) + if !(capacityFloat >= 1 && capacityFloat < float64(math.MaxUint64)) { + err = fmt.Errorf( + "Can't use a token bucket to limit to %f Hz over a window of %v "+ + "(result is a capacity of %f)", + rateHz, + window, + capacityFloat) + + return + } + + capacity = uint64(capacityFloat) + + return +} diff --git a/internal/ratelimit/limiter_capacity_test.go b/internal/ratelimit/limiter_capacity_test.go new file mode 100644 index 0000000000..d5dc037b58 --- /dev/null +++ b/internal/ratelimit/limiter_capacity_test.go @@ -0,0 +1,96 @@ +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "fmt" + "testing" + "time" + + . "github.com/jacobsa/ogletest" +) + +func TestLimiterCapacity(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type LimiterCapacityTest struct { +} + +func init() { RegisterTestSuite(&LimiterCapacityTest{}) } + +func rateLessThanOrEqualToZero(rate float64) { + _, err := ChooseLimiterCapacity(rate, 30) + + expectedError := fmt.Errorf("Illegal rate: %f", rate) + + AssertEq(expectedError.Error(), err.Error()) +} + +func (t *LimiterCapacityTest) TestRateLessThanZero() { + var negativeRateHz float64 = -1 + + rateLessThanOrEqualToZero(negativeRateHz) +} + +func (t *LimiterCapacityTest) TestRateEqualToZero() { + var zeroRateHz float64 = 0 + + rateLessThanOrEqualToZero(zeroRateHz) +} + +func windowLessThanOrEqualToZero(window time.Duration) { + _, err := ChooseLimiterCapacity(1, window) + + expectedError := fmt.Errorf("Illegal window: %v", window) + + AssertEq(expectedError.Error(), err.Error()) +} + +func (t *LimiterCapacityTest) TestWindowLessThanZero() { + var negativeWindow time.Duration = -1 + + windowLessThanOrEqualToZero(negativeWindow) +} + +func (t *LimiterCapacityTest) TestWindowEqualToZero() { + var zeroWindow time.Duration = 0 + + windowLessThanOrEqualToZero(zeroWindow) +} + +func (t 
*LimiterCapacityTest) TestCapacityEqualToZero() { + var rate = 0.5 + var window time.Duration = 1 + + capacity, err := ChooseLimiterCapacity(rate, window) + + expectedError := fmt.Errorf( + "Can't use a token bucket to limit to %f Hz over a window of %v (result is a capacity of %f)", rate, window, float64(capacity)) + AssertEq(expectedError.Error(), err.Error()) +} + +func (t *LimiterCapacityTest) TestExpectedCapacity() { + var rate float64 = 20 + var window = 10 * time.Second + + capacity, err := ChooseLimiterCapacity(rate, window) + // capacity = floor((20.0 * 10)/50) = floor(4.0) = 4 + + ExpectEq(nil, err) + ExpectEq(4, capacity) +} diff --git a/internal/ratelimit/throttle.go b/internal/ratelimit/throttle.go new file mode 100644 index 0000000000..4b45da0976 --- /dev/null +++ b/internal/ratelimit/throttle.go @@ -0,0 +1,58 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "golang.org/x/net/context" + "golang.org/x/time/rate" +) + +// A simple interface for limiting the rate of some event. Unlike TokenBucket, +// does not allow the user control over what time means. +// +// Safe for concurrent access. +type Throttle interface { + // Return the maximum number of tokens that can be requested in a call to + // Wait. + Capacity() (c uint64) + + // Acquire the given number of tokens from the underlying token bucket, then + // sleep until when it says to wake. 
If the context is cancelled before then, + // return early with an error. + // + // REQUIRES: tokens <= capacity + Wait(ctx context.Context, tokens uint64) (err error) +} + +type limiter struct { + *rate.Limiter +} + +func NewThrottle( + rateHz float64, + capacity uint64) (t Throttle) { + t = &limiter{rate.NewLimiter(rate.Limit(rateHz), int(capacity))} + return +} + +func (l *limiter) Capacity() (c uint64) { + return uint64(l.Burst()) +} + +func (l *limiter) Wait( + ctx context.Context, + tokens uint64) (err error) { + return l.WaitN(ctx, int(tokens)) +} diff --git a/internal/ratelimit/throttle_reader_test.go b/internal/ratelimit/throttle_reader_test.go new file mode 100644 index 0000000000..25e612fefe --- /dev/null +++ b/internal/ratelimit/throttle_reader_test.go @@ -0,0 +1,335 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "errors" + "io" + "testing" + + "golang.org/x/net/context" + + . "github.com/jacobsa/ogletest" +) + +func TestThrottledReader(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +// An io.Reader that defers to a function. 
+type funcReader struct { + f func([]byte) (int, error) +} + +func (fr *funcReader) Read(p []byte) (n int, err error) { + n, err = fr.f(p) + return +} + +// A throttler that defers to a function. +type funcThrottle struct { + f func(context.Context, uint64) error +} + +func (ft *funcThrottle) Capacity() (c uint64) { + return 1024 +} + +func (ft *funcThrottle) Wait( + ctx context.Context, + tokens uint64) (err error) { + err = ft.f(ctx, tokens) + return +} + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type ThrottledReaderTest struct { + ctx context.Context + + wrapped funcReader + throttle funcThrottle + + reader io.Reader +} + +var _ SetUpInterface = &ThrottledReaderTest{} + +func init() { RegisterTestSuite(&ThrottledReaderTest{}) } + +func (t *ThrottledReaderTest) SetUp(ti *TestInfo) { + t.ctx = ti.Ctx + + // Set up the default throttle function. + t.throttle.f = func(ctx context.Context, tokens uint64) (err error) { + return + } + + // Set up the reader. 
+ t.reader = ThrottledReader(t.ctx, &t.wrapped, &t.throttle) +} + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ThrottledReaderTest) CallsThrottle() { + const readSize = 17 + AssertLe(readSize, t.throttle.Capacity()) + + // Throttle + var throttleCalled bool + t.throttle.f = func(ctx context.Context, tokens uint64) (err error) { + AssertFalse(throttleCalled) + throttleCalled = true + + AssertEq(t.ctx.Err(), ctx.Err()) + AssertEq(t.ctx.Done(), ctx.Done()) + AssertEq(readSize, tokens) + + err = errors.New("") + return + } + + // Call + _, err := t.reader.Read(make([]byte, readSize)) + + ExpectEq("", err.Error()) + ExpectTrue(throttleCalled) +} + +func (t *ThrottledReaderTest) ThrottleReturnsError() { + // Throttle + expectedErr := errors.New("taco") + t.throttle.f = func(ctx context.Context, tokens uint64) (err error) { + err = expectedErr + return + } + + // Call + n, err := t.reader.Read(make([]byte, 1)) + + ExpectEq(0, n) + ExpectEq(expectedErr, err) +} + +func (t *ThrottledReaderTest) CallsWrapped() { + buf := make([]byte, 16) + AssertLe(len(buf), t.throttle.Capacity()) + + // Wrapped + var readCalled bool + t.wrapped.f = func(p []byte) (n int, err error) { + AssertFalse(readCalled) + readCalled = true + + AssertEq(&buf[0], &p[0]) + AssertEq(len(buf), len(p)) + + err = errors.New("") + return + } + + // Call + _, err := t.reader.Read(buf) + + ExpectEq("", err.Error()) + ExpectTrue(readCalled) +} + +func (t *ThrottledReaderTest) WrappedReturnsError() { + // Wrapped + expectedErr := errors.New("taco") + t.wrapped.f = func(p []byte) (n int, err error) { + n = 11 + err = expectedErr + return + } + + // Call + n, err := t.reader.Read(make([]byte, 16)) + + ExpectEq(11, n) + ExpectEq(expectedErr, err) +} + +func (t *ThrottledReaderTest) WrappedReturnsEOF() { + // Wrapped + t.wrapped.f = func(p []byte) (n int, err error) { + n = 11 + err = io.EOF + 
return + } + + // Call + n, err := t.reader.Read(make([]byte, 16)) + + ExpectEq(11, n) + ExpectEq(io.EOF, err) +} + +func (t *ThrottledReaderTest) WrappedReturnsFullRead() { + const readSize = 17 + AssertLe(readSize, t.throttle.Capacity()) + + // Wrapped + t.wrapped.f = func(p []byte) (n int, err error) { + n = len(p) + return + } + + // Call + n, err := t.reader.Read(make([]byte, readSize)) + + ExpectEq(nil, err) + ExpectEq(readSize, n) +} + +func (t *ThrottledReaderTest) WrappedReturnsShortRead_CallsAgain() { + buf := make([]byte, 16) + AssertLe(len(buf), t.throttle.Capacity()) + + // Wrapped + var callCount int + t.wrapped.f = func(p []byte) (n int, err error) { + AssertLt(callCount, 2) + switch callCount { + case 0: + callCount++ + n = 2 + + case 1: + callCount++ + AssertEq(&buf[2], &p[0]) + AssertEq(len(buf)-2, len(p)) + err = errors.New("") + } + + return + } + + // Call + _, err := t.reader.Read(buf) + + ExpectEq("", err.Error()) + ExpectEq(2, callCount) +} + +func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondReturnsError() { + // Wrapped + var callCount int + expectedErr := errors.New("taco") + + t.wrapped.f = func(p []byte) (n int, err error) { + AssertLt(callCount, 2) + switch callCount { + case 0: + callCount++ + n = 2 + + case 1: + callCount++ + n = 11 + err = expectedErr + } + + return + } + + // Call + n, err := t.reader.Read(make([]byte, 16)) + + ExpectEq(2+11, n) + ExpectEq(expectedErr, err) +} + +func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondReturnsEOF() { + // Wrapped + var callCount int + t.wrapped.f = func(p []byte) (n int, err error) { + AssertLt(callCount, 2) + switch callCount { + case 0: + callCount++ + n = 2 + + case 1: + callCount++ + n = 11 + err = io.EOF + } + + return + } + + // Call + n, err := t.reader.Read(make([]byte, 16)) + + ExpectEq(2+11, n) + ExpectEq(io.EOF, err) +} + +func (t *ThrottledReaderTest) WrappedReturnsShortRead_SecondSucceedsInFull() { + // Wrapped + var callCount int + t.wrapped.f = func(p 
[]byte) (n int, err error) { + AssertLt(callCount, 2) + switch callCount { + case 0: + callCount++ + n = 2 + + case 1: + callCount++ + n = len(p) + } + + return + } + + // Call + n, err := t.reader.Read(make([]byte, 16)) + + ExpectEq(16, n) + ExpectEq(nil, err) +} + +func (t *ThrottledReaderTest) ReadSizeIsAboveThrottleCapacity() { + buf := make([]byte, 2048) + AssertGt(len(buf), t.throttle.Capacity()) + + // Wrapped + var readCalled bool + t.wrapped.f = func(p []byte) (n int, err error) { + AssertFalse(readCalled) + readCalled = true + + AssertEq(&buf[0], &p[0]) + ExpectEq(t.throttle.Capacity(), len(p)) + + err = errors.New("") + return + } + + // Call + _, err := t.reader.Read(buf) + + ExpectEq("", err.Error()) + ExpectTrue(readCalled) +} diff --git a/internal/ratelimit/throttle_test.go b/internal/ratelimit/throttle_test.go new file mode 100644 index 0000000000..3a6a7ada67 --- /dev/null +++ b/internal/ratelimit/throttle_test.go @@ -0,0 +1,209 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// It is performing integration tests for throttle.go +// Set up several test cases where we have N goroutines simulating the arrival of +// packets at a given rate, asking a limiter when to admit them. +// limiter can accept number of packets equivalent to capacity. After that, +// it will wait until limiter get space to receive the new packet. 
+package ratelimit_test + +import ( + cryptorand "crypto/rand" + "io" + "math/rand" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/googlecloudplatform/gcsfuse/internal/ratelimit" + "golang.org/x/net/context" + + . "github.com/jacobsa/oglematchers" + . "github.com/jacobsa/ogletest" +) + +func TestThrottle(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +func makeSeed() (seed int64) { + var buf [8]byte + _, err := io.ReadFull(cryptorand.Reader, buf[:]) + if err != nil { + panic(err) + } + + seed = (int64(buf[0])>>1)<<56 | + int64(buf[1])<<48 | + int64(buf[2])<<40 | + int64(buf[3])<<32 | + int64(buf[4])<<24 | + int64(buf[5])<<16 | + int64(buf[6])<<8 | + int64(buf[7])<<0 + + return +} + +func processArrivals( + ctx context.Context, + throttle ratelimit.Throttle, + arrivalRateHz float64, + d time.Duration) (processed uint64) { + // Set up an independent source of randomness. + randSrc := rand.New(rand.NewSource(makeSeed())) + + // Tick into a channel at a steady rate, buffering over delays caused by the + // limiter. + arrivalPeriod := time.Duration((1.0 / arrivalRateHz) * float64(time.Second)) + ticks := make(chan struct{}, 3*int(float64(d)/float64(arrivalPeriod))) + + go func() { + ticker := time.NewTicker(arrivalPeriod) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + + case <-ticker.C: + select { + case ticks <- struct{}{}: + default: + panic("Buffer exceeded?") + } + } + } + }() + + // Simulate until we're supposed to stop. + for { + // Accumulate a few packets. + toAccumulate := uint64(randSrc.Int63n(5)) + + var accumulated uint64 + for accumulated < toAccumulate { + select { + case <-ctx.Done(): + return + + case <-ticks: + accumulated++ + } + } + + // Wait. 
+ err := throttle.Wait(ctx, accumulated) + if err != nil { + return + } + + processed += accumulated + } +} + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type ThrottleTest struct { +} + +func init() { RegisterTestSuite(&ThrottleTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ThrottleTest) IntegrationTest() { + runtime.GOMAXPROCS(runtime.NumCPU()) + const perCaseDuration = 1 * time.Second + + // Set up several test cases where we have N goroutines simulating arrival of + // packets at a given rate, asking a limiter when to admit them. + testCases := []struct { + numActors int + arrivalRateHz float64 + limitRateHz float64 + }{ + // Single actor + {1, 150, 200}, + {1, 200, 200}, + {1, 250, 200}, + + // Multiple actors + {4, 150, 200}, + {4, 200, 200}, + {4, 250, 200}, + } + + // Run each test case. + for i, tc := range testCases { + // Create a throttle. + capacity, err := ratelimit.ChooseLimiterCapacity( + tc.limitRateHz, + perCaseDuration) + + AssertEq(nil, err) + + throttle := ratelimit.NewThrottle(tc.limitRateHz, capacity) + + // Start workers. + var wg sync.WaitGroup + var totalProcessed uint64 + + ctx, _ := context.WithDeadline( + context.Background(), + time.Now().Add(perCaseDuration)) + + for i := 0; i < tc.numActors; i++ { + wg.Add(1) + go func() { + defer wg.Done() + processed := processArrivals( + ctx, + throttle, + tc.arrivalRateHz/float64(tc.numActors), + perCaseDuration) + + atomic.AddUint64(&totalProcessed, processed) + }() + } + + // Wait for them all to finish. + wg.Wait() + + // We should have processed about the correct number of arrivals. 
+ smallerRateHz := tc.arrivalRateHz + if smallerRateHz > tc.limitRateHz { + smallerRateHz = tc.limitRateHz + } + + expected := smallerRateHz * (float64(perCaseDuration) / float64(time.Second)) + ExpectThat( + totalProcessed, + AllOf( + GreaterThan(expected*0.90), + LessThan(expected*1.10)), + "Test case %d. expected: %f", + i, + expected) + } +} diff --git a/internal/ratelimit/throttled_bucket.go b/internal/ratelimit/throttled_bucket.go new file mode 100644 index 0000000000..82112ef4b1 --- /dev/null +++ b/internal/ratelimit/throttled_bucket.go @@ -0,0 +1,202 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "io" + + "github.com/jacobsa/gcloud/gcs" + "golang.org/x/net/context" +) + +// Create a bucket that limits the rate at which it calls the wrapped bucket +// using opThrottle, and limits the bandwidth with which it reads from the +// wrapped bucket using egressThrottle. 
+func NewThrottledBucket( + opThrottle Throttle, + egressThrottle Throttle, + wrapped gcs.Bucket) (b gcs.Bucket) { + b = &throttledBucket{ + opThrottle: opThrottle, + egressThrottle: egressThrottle, + wrapped: wrapped, + } + return +} + +//////////////////////////////////////////////////////////////////////// +// throttledBucket +//////////////////////////////////////////////////////////////////////// + +type throttledBucket struct { + opThrottle Throttle + egressThrottle Throttle + wrapped gcs.Bucket +} + +func (b *throttledBucket) Name() string { + return b.wrapped.Name() +} + +func (b *throttledBucket) NewReader( + ctx context.Context, + req *gcs.ReadObjectRequest) (rc io.ReadCloser, err error) { + // Wait for permission to call through. + + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + rc, err = b.wrapped.NewReader(ctx, req) + if err != nil { + return + } + + // Wrap the result in a throttled layer. + rc = &readerCloser{ + Reader: ThrottledReader(ctx, rc, b.egressThrottle), + Closer: rc, + } + + return +} + +func (b *throttledBucket) CreateObject( + ctx context.Context, + req *gcs.CreateObjectRequest) (o *gcs.Object, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + o, err = b.wrapped.CreateObject(ctx, req) + + return +} + +func (b *throttledBucket) CopyObject( + ctx context.Context, + req *gcs.CopyObjectRequest) (o *gcs.Object, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + o, err = b.wrapped.CopyObject(ctx, req) + + return +} + +func (b *throttledBucket) ComposeObjects( + ctx context.Context, + req *gcs.ComposeObjectsRequest) (o *gcs.Object, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. 
+ o, err = b.wrapped.ComposeObjects(ctx, req) + + return +} + +func (b *throttledBucket) StatObject( + ctx context.Context, + req *gcs.StatObjectRequest) (o *gcs.Object, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + o, err = b.wrapped.StatObject(ctx, req) + + return +} + +func (b *throttledBucket) ListObjects( + ctx context.Context, + req *gcs.ListObjectsRequest) (listing *gcs.Listing, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + listing, err = b.wrapped.ListObjects(ctx, req) + + return +} + +func (b *throttledBucket) UpdateObject( + ctx context.Context, + req *gcs.UpdateObjectRequest) (o *gcs.Object, err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + o, err = b.wrapped.UpdateObject(ctx, req) + + return +} + +func (b *throttledBucket) DeleteObject( + ctx context.Context, + req *gcs.DeleteObjectRequest) (err error) { + // Wait for permission to call through. + err = b.opThrottle.Wait(ctx, 1) + if err != nil { + return + } + + // Call through. + err = b.wrapped.DeleteObject(ctx, req) + + return +} + +//////////////////////////////////////////////////////////////////////// +// readerCloser +//////////////////////////////////////////////////////////////////////// + +// An io.ReadCloser that forwards read requests to an io.Reader and close +// requests to an io.Closer. 
+type readerCloser struct { + Reader io.Reader + Closer io.Closer +} + +func (rc *readerCloser) Read(p []byte) (n int, err error) { + n, err = rc.Reader.Read(p) + return +} + +func (rc *readerCloser) Close() (err error) { + err = rc.Closer.Close() + return +} diff --git a/internal/ratelimit/throttled_reader.go b/internal/ratelimit/throttled_reader.go new file mode 100644 index 0000000000..5794a02cbe --- /dev/null +++ b/internal/ratelimit/throttled_reader.go @@ -0,0 +1,66 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ratelimit + +import ( + "io" + + "golang.org/x/net/context" +) + +// Create a reader that limits the bandwidth of reads made from r according to +// the supplied throttler. Reads are assumed to be made under the supplied +// context. +func ThrottledReader( + ctx context.Context, + r io.Reader, + throttle Throttle) io.Reader { + return &throttledReader{ + ctx: ctx, + wrapped: r, + throttle: throttle, + } +} + +type throttledReader struct { + ctx context.Context + wrapped io.Reader + throttle Throttle +} + +func (tr *throttledReader) Read(p []byte) (n int, err error) { + // We can't serve a read larger than the throttle's capacity. + if uint64(len(p)) > tr.throttle.Capacity() { + p = p[:tr.throttle.Capacity()] + } + + // Wait for permission to continue. 
+ err = tr.throttle.Wait(tr.ctx, uint64(len(p))) + if err != nil { + return + } + + // Serve the full amount we acquired from the throttle (unless we hit an + // early error, including EOF). + for len(p) > 0 && err == nil { + var tmp int + tmp, err = tr.wrapped.Read(p) + + n += tmp + p = p[tmp:] + } + + return +} From 10d398b6dcd704ab68f109b26b48e38897e0f7e1 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Thu, 10 Aug 2023 10:53:35 +0530 Subject: [PATCH 26/46] Upgrading go-client module from 1.29.0 to 1.31.0 (#1263) * Upgrading go-client module from 1.29.0 to 1.31.0 * Resolving merge conflict --- go.mod | 42 ++++++++++++++------------ go.sum | 93 ++++++++++++++++++++++++++++++++++------------------------ 2 files changed, 77 insertions(+), 58 deletions(-) diff --git a/go.mod b/go.mod index 6835cce813..0f43d4b38e 100644 --- a/go.mod +++ b/go.mod @@ -3,11 +3,12 @@ module github.com/googlecloudplatform/gcsfuse go 1.20 require ( - cloud.google.com/go/storage v1.29.0 + cloud.google.com/go/compute/metadata v0.2.3 + cloud.google.com/go/storage v1.31.0 contrib.go.opencensus.io/exporter/ocagent v0.7.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.11 github.com/fsouza/fake-gcs-server v1.38.4 - github.com/googleapis/gax-go/v2 v2.7.0 + github.com/googleapis/gax-go/v2 v2.11.0 github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 @@ -21,19 +22,19 @@ require ( github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 github.com/urfave/cli v1.22.5 go.opencensus.io v0.24.0 - golang.org/x/net v0.8.0 - golang.org/x/oauth2 v0.6.0 - google.golang.org/api v0.111.0 + golang.org/x/net v0.10.0 + golang.org/x/oauth2 v0.8.0 + golang.org/x/time v0.3.0 + google.golang.org/api v0.126.0 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.18.0 
// indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.12.0 // indirect - cloud.google.com/go/monitoring v1.12.0 // indirect - cloud.google.com/go/pubsub v1.28.0 // indirect - cloud.google.com/go/trace v1.8.0 // indirect + cloud.google.com/go v0.110.2 // indirect + cloud.google.com/go/compute v1.19.3 // indirect + cloud.google.com/go/iam v1.1.0 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/pubsub v1.30.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect github.com/aws/aws-sdk-go v1.44.217 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect @@ -41,6 +42,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect github.com/gorilla/handlers v1.5.1 // indirect @@ -51,15 +53,17 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/stretchr/testify v1.8.2 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/sync v0.2.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc v1.53.0 // indirect - google.golang.org/protobuf v1.29.1 
// indirect + google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect + google.golang.org/grpc v1.55.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index b158cb6b2e..538c71be83 100644 --- a/go.sum +++ b/go.sum @@ -25,43 +25,42 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.2 h1:sdFPBr6xG9/wkBbfhmUz/JmZC7X6LavQgcrVINrKiVA= +cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.3 h1:DcTwsFgGev/wV5+q8o2fzgcHOaac+DKGC91ZlvpsQds= 
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/kms v1.9.0 h1:b0votJQa/9DSsxgHwN33/tTLA7ZHVzfWhDCrfiXijSo= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= +cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk= +cloud.google.com/go/kms v1.10.1 h1:7hm1bRqGCA1GBRQUrp831TwJ9TWhP+tvLuP497CQS2g= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= -cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.28.0 h1:XzabfdPx/+eNrsVVGLFgeUnQQKPGkMb8klRCeYK52is= -cloud.google.com/go/pubsub v1.28.0/go.mod 
h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0 h1:vCge8m7aUKBJYOgrZp7EsNDf6QMd2CAlXZqWTn3yq6s= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI= +cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= -cloud.google.com/go/trace v1.8.0 h1:GFPLxbp5/FzdgTzor3nlNYNxMd6hLmzkE7sA9F0qQcA= -cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= contrib.go.opencensus.io/exporter/ocagent v0.7.0 h1:BEfdCTXfMV30tLZD8c9n64V/tIZX5+9sXiuFLnrr1k8= contrib.go.opencensus.io/exporter/ocagent v0.7.0/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/stackdriver v0.13.11 h1:YzmWJ2OT2K3ouXyMm5FmFQPoDs5TfLjx6Xn5x5CLN0I= @@ -79,6 +78,7 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= 
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -86,7 +86,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= 
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -100,6 +104,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= @@ -111,7 +116,7 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -183,6 +188,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -192,8 +199,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/gorilla/handlers v1.5.1 
h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= @@ -210,8 +217,6 @@ github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAw github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI= github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 h1:6z3Yj4PZKk3n18T2s7RYCa4uBCOpeLoQfDH/mUZTrVo= github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474/go.mod h1:XUKuYy1M4vamyxQjW8/WZBTxyZ0NnUiq+kkA+WWOfeI= -github.com/jacobsa/gcloud v0.0.0-20230425120041-5ed2958cdfee h1:1NvpBXX7CiuoK+SdLNMwelLB+2OkJLxhjllc0WgY8sE= -github.com/jacobsa/gcloud v0.0.0-20230425120041-5ed2958cdfee/go.mod h1:CGkT80TfaoPTzQ8My+t2M7PnMDvkwAR36Qm8Mm8HytI= github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 h1:kD9sX/8uHuPQI6OO/VKz/olbTXNkQ4vveSPNdS9AtHw= github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984/go.mod h1:CGkT80TfaoPTzQ8My+t2M7PnMDvkwAR36Qm8Mm8HytI= github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA= @@ -292,6 +297,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod 
h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -364,10 +372,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -384,8 +393,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -398,8 +407,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -453,8 +462,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -467,9 +476,10 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -565,8 +575,8 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.111.0 h1:bwKi+z2BsdwYFRKrqwutM+axAlYLz83gt5pDSXCJT+0= -google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -636,8 +646,12 @@ google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= +google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc 
h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= +google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -663,8 +677,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -679,8 +694,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= From c316052d13e7a549e8a7cf471b75a05027d14672 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 16:24:26 +0530 Subject: [PATCH 27/46] Bump requests in /perfmetrics/scripts/load_tests/python (#1268) Bumps [requests](https://github.com/psf/requests) from 2.30.0 to 2.31.0. - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.30.0...v2.31.0) --- updated-dependencies: - dependency-name: requests dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../scripts/load_tests/python/requirements.txt | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/perfmetrics/scripts/load_tests/python/requirements.txt b/perfmetrics/scripts/load_tests/python/requirements.txt index 0b24497df8..53113f4774 100644 --- a/perfmetrics/scripts/load_tests/python/requirements.txt +++ b/perfmetrics/scripts/load_tests/python/requirements.txt @@ -371,9 +371,9 @@ pyasn1-modules==0.3.0 \ --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d # via google-auth -requests==2.30.0 \ - --hash=sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294 \ - --hash=sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # requests-oauthlib # tensorboard @@ -441,7 +441,7 @@ tensorflow==2.12.0 \ --hash=sha256:c5193ddb3bb5120cb445279beb08ed9e74a85a4eeb2485550d6fb707a89d9a88 \ --hash=sha256:c8001210df7202ef6267150865b0b79f834c3ca69ee3132277de8eeb994dffde \ --hash=sha256:e29fcf6cfd069aefb4b44f357cccbb4415a5a3d7b5b516eaf4450062fe40021e - # via -r ./requirements.in + # via -r requirements.in tensorflow-estimator==2.12.0 \ --hash=sha256:59b191bead4883822de3d63ac02ace11a83bfe6c10d64d0c4dfde75a50e60ca1 # via tensorflow @@ -553,5 +553,6 @@ wrapt==1.14.1 \ # via tensorflow # WARNING: The following packages were not pinned, but pip requires them to be -# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag. +# pinned when the requirements file includes hashes and the requirement is not +# satisfied by a package already installed. 
Consider using the --allow-unsafe flag. # setuptools From d0ef40847463477811528d498dd35414e1b2390e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 16:24:39 +0530 Subject: [PATCH 28/46] Bump requests from 2.30.0 to 2.31.0 in /perfmetrics/scripts (#1267) Bumps [requests](https://github.com/psf/requests) from 2.30.0 to 2.31.0. - [Release notes](https://github.com/psf/requests/releases) - [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md) - [Commits](https://github.com/psf/requests/compare/v2.30.0...v2.31.0) --- updated-dependencies: - dependency-name: requests dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- perfmetrics/scripts/requirements.txt | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/perfmetrics/scripts/requirements.txt b/perfmetrics/scripts/requirements.txt index f96eafb59b..9f7107b429 100644 --- a/perfmetrics/scripts/requirements.txt +++ b/perfmetrics/scripts/requirements.txt @@ -93,10 +93,6 @@ dataclasses==0.6 \ --hash=sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f \ --hash=sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84 # via -r requirements.in -exceptiongroup==1.1.1 \ - --hash=sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e \ - --hash=sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785 - # via pytest google-api-core[grpc]==2.11.0 \ --hash=sha256:4b9bb5d5a380a0befa0573b302651b8a9a89262c1730e37bf423cec511804c22 \ --hash=sha256:ce222e27b0de0d7bc63eb043b956996d6dccab14cc3b690aaea91c9cc99dc16e @@ -261,9 +257,9 @@ pytest==7.3.1 \ --hash=sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362 \ --hash=sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3 
# via -r requirements.in -requests==2.30.0 \ - --hash=sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294 \ - --hash=sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # -r requirements.in # google-api-core @@ -281,10 +277,6 @@ testresources==2.0.1 \ --hash=sha256:67a361c3a2412231963b91ab04192209aa91a1aa052f0ab87245dbea889d1282 \ --hash=sha256:ee9d1982154a1e212d4e4bac6b610800bfb558e4fb853572a827bc14a96e4417 # via -r requirements.in -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f - # via pytest typing==3.7.4.3 \ --hash=sha256:1187fb9c82fd670d10aa07bbb6cfcfe4bdda42d6fab8d5134f04e8c4d0b71cc9 \ --hash=sha256:283d868f5071ab9ad873e5e52268d611e851c870a2ba354193026f2dfb29d8b5 From bb315266bb684dfd3ea9bed54cf530b467b1c023 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Aug 2023 16:25:22 +0530 Subject: [PATCH 29/46] Bump certifi in /perfmetrics/scripts/load_tests/python (#1269) Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.5.7 to 2023.7.22. - [Commits](https://github.com/certifi/python-certifi/compare/2023.05.07...2023.07.22) --- updated-dependencies: - dependency-name: certifi dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- perfmetrics/scripts/load_tests/python/requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perfmetrics/scripts/load_tests/python/requirements.txt b/perfmetrics/scripts/load_tests/python/requirements.txt index 53113f4774..e91abc7100 100644 --- a/perfmetrics/scripts/load_tests/python/requirements.txt +++ b/perfmetrics/scripts/load_tests/python/requirements.txt @@ -18,9 +18,9 @@ cachetools==5.3.0 \ --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \ --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4 # via google-auth -certifi==2023.5.7 \ - --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \ - --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests charset-normalizer==3.1.0 \ --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \ From d4e07940eb1d5dac80d12a200bea8b04838ab7f3 Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Fri, 11 Aug 2023 08:29:27 +0530 Subject: [PATCH 30/46] updating fuse library version (#1273) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0f43d4b38e..c3c8581f5e 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/fsouza/fake-gcs-server v1.38.4 github.com/googleapis/gax-go/v2 v2.11.0 github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f - github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 + github.com/jacobsa/fuse v0.0.0-20230810134708-ab21db1af836 
github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd github.com/jacobsa/oglemock v0.0.0-20150831005832-e94d794d06ff diff --git a/go.sum b/go.sum index 538c71be83..2709f09a5f 100644 --- a/go.sum +++ b/go.sum @@ -215,8 +215,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f h1:X+tnaqoCcBgAwSTJtoYW6p0qKiuPyMfofEHEFUf2kdU= github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f/go.mod h1:Ip4fOwzCrnDVuluHBd7FXIMb7SHOKfkt9/UDrYSZvqI= -github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474 h1:6z3Yj4PZKk3n18T2s7RYCa4uBCOpeLoQfDH/mUZTrVo= -github.com/jacobsa/fuse v0.0.0-20230509090321-7263f3a2b474/go.mod h1:XUKuYy1M4vamyxQjW8/WZBTxyZ0NnUiq+kkA+WWOfeI= +github.com/jacobsa/fuse v0.0.0-20230810134708-ab21db1af836 h1:Xhn8huWAi1BVXQlpSEO+ZTWmrkaH+FuCJw0KLQtzwOg= +github.com/jacobsa/fuse v0.0.0-20230810134708-ab21db1af836/go.mod h1:XUKuYy1M4vamyxQjW8/WZBTxyZ0NnUiq+kkA+WWOfeI= github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984 h1:kD9sX/8uHuPQI6OO/VKz/olbTXNkQ4vveSPNdS9AtHw= github.com/jacobsa/gcloud v0.0.0-20230803125757-3196d990d984/go.mod h1:CGkT80TfaoPTzQ8My+t2M7PnMDvkwAR36Qm8Mm8HytI= github.com/jacobsa/oglematchers v0.0.0-20150720000706-141901ea67cd h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA= From 2fa6cf29dca88237efaf1148b7561b5b97842987 Mon Sep 17 00:00:00 2001 From: Ashmeen Kaur <57195160+ashmeenkaur@users.noreply.github.com> Date: Fri, 11 Aug 2023 18:11:44 +0530 Subject: [PATCH 31/46] updated architecture to all (#1270) --- DEBIAN/control | 2 +- tools/package_gcsfuse_docker/Dockerfile | 3 ++- 2 files changed, 3 
insertions(+), 2 deletions(-) diff --git a/DEBIAN/control b/DEBIAN/control index 2db8d19a36..37bfff0b34 100644 --- a/DEBIAN/control +++ b/DEBIAN/control @@ -3,7 +3,7 @@ Source: gcsfuse Maintainer: GCSFuse Team Homepage: https://github.com/GoogleCloudPlatform/gcsfuse Package: gcsfuse -Architecture: amd64 +Architecture: all Depends: fuse Description: User-space file system for Google Cloud Storage. GCSFuse is a FUSE adapter that allows you to mount and access Cloud Storage diff --git a/tools/package_gcsfuse_docker/Dockerfile b/tools/package_gcsfuse_docker/Dockerfile index 623984b49a..535416c056 100644 --- a/tools/package_gcsfuse_docker/Dockerfile +++ b/tools/package_gcsfuse_docker/Dockerfile @@ -42,7 +42,8 @@ RUN git checkout "${BRANCH_NAME}" # Install fpm package using bundle RUN bundle install --gemfile=${GCSFUSE_PATH}/tools/gem_dependency/Gemfile -ARG GCSFUSE_BIN="/gcsfuse_${GCSFUSE_VERSION}_amd64" +ARG ARCHITECTURE="amd64" +ARG GCSFUSE_BIN="/gcsfuse_${GCSFUSE_VERSION}_${ARCHITECTURE}" ARG GCSFUSE_DOC="${GCSFUSE_BIN}/usr/share/doc/gcsfuse" WORKDIR ${GOPATH} RUN go install ${GCSFUSE_REPO}/tools/build_gcsfuse From 8c194937e53b75a3a92111c30001b14a5ebd1b48 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 13 Aug 2023 12:08:14 +0530 Subject: [PATCH 32/46] Bump certifi from 2023.5.7 to 2023.7.22 in /perfmetrics/scripts (#1266) Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.5.7 to 2023.7.22. - [Commits](https://github.com/certifi/python-certifi/compare/2023.05.07...2023.07.22) --- updated-dependencies: - dependency-name: certifi dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- perfmetrics/scripts/requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perfmetrics/scripts/requirements.txt b/perfmetrics/scripts/requirements.txt index 9f7107b429..88106720b0 100644 --- a/perfmetrics/scripts/requirements.txt +++ b/perfmetrics/scripts/requirements.txt @@ -8,9 +8,9 @@ cachetools==5.3.0 \ --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \ --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4 # via google-auth -certifi==2023.5.7 \ - --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \ - --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests charset-normalizer==3.1.0 \ --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \ From 88e4f28a41209e9af02edf389d8f77bdf7d5e6ae Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Mon, 14 Aug 2023 04:55:16 +0000 Subject: [PATCH 33/46] Read gzip content-encoding objects as compressed (#1255) Enable read-compressed for gzip objects Details of changes * Add field contentEncoding in internal.storage.MinObject * Passes ReadCompressed as true for objects with contentEncoding set to "gzip" * Added corresponding unit tests. * Update fsouza/fake-gcs-server to v1.40.3 - to fix the read-behavior of objects with content-encoding gzip in fake-server, used in unit tests. 
--- go.mod | 5 +- go.sum | 10 +- internal/fs/inode/file.go | 18 ++-- internal/fs/inode/file_test.go | 33 ++++++ internal/gcsx/random_reader.go | 1 + internal/storage/bucket_handle.go | 4 + internal/storage/bucket_handle_test.go | 136 ++++++++++++++++--------- internal/storage/fake_storage_util.go | 25 +++++ internal/storage/object.go | 19 ++-- internal/storage/object_test.go | 54 ++++++++++ 10 files changed, 239 insertions(+), 66 deletions(-) create mode 100644 internal/storage/object_test.go diff --git a/go.mod b/go.mod index c3c8581f5e..f66bba7df9 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( cloud.google.com/go/storage v1.31.0 contrib.go.opencensus.io/exporter/ocagent v0.7.0 contrib.go.opencensus.io/exporter/stackdriver v0.13.11 - github.com/fsouza/fake-gcs-server v1.38.4 + github.com/fsouza/fake-gcs-server v1.40.3 github.com/googleapis/gax-go/v2 v2.11.0 github.com/jacobsa/daemonize v0.0.0-20160101105449-e460293e890f github.com/jacobsa/fuse v0.0.0-20230810134708-ab21db1af836 @@ -35,6 +35,7 @@ require ( cloud.google.com/go/monitoring v1.13.0 // indirect cloud.google.com/go/pubsub v1.30.0 // indirect cloud.google.com/go/trace v1.9.0 // indirect + github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect github.com/aws/aws-sdk-go v1.44.217 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect @@ -49,7 +50,7 @@ require ( github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/pkg/xattr v0.4.8 // indirect + github.com/pkg/xattr v0.4.9 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/stretchr/testify v1.8.2 // indirect 
diff --git a/go.sum b/go.sum index 2709f09a5f..da7d4d1364 100644 --- a/go.sum +++ b/go.sum @@ -69,6 +69,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= +github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.44.217 h1:FcWC56MRl+k756aH3qeMQTylSdeJ58WN0iFz3fkyRz0= @@ -109,8 +111,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsouza/fake-gcs-server v1.38.4 h1:FciRmVB7IC+0TnS2n/Sh12Z+oi0whyWVjTc3oNI2ELg= -github.com/fsouza/fake-gcs-server v1.38.4/go.mod h1:41eZwb5PT2Gyr7KvTkFxciD5otwT72X4DWk7TAXdcuU= +github.com/fsouza/fake-gcs-server v1.40.3 h1:JPCaiXsk9XkHzUqyYM/6MDmpqdwN4C3qSA5iOQPBRrw= +github.com/fsouza/fake-gcs-server v1.40.3/go.mod h1:WmAi3nILMdFDGSC2ppegChf7IMJaqOw1VFu3iFjqAq0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -247,8 +249,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/xattr v0.4.8 h1:3QwVADT+4oUm3zg7MXO/2i/lqnKkQ9viNY8pl5egRDE= -github.com/pkg/xattr v0.4.8/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= +github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/internal/fs/inode/file.go b/internal/fs/inode/file.go index 42d28bfaec..4c0c6f291a 100644 --- a/internal/fs/inode/file.go +++ b/internal/fs/inode/file.go @@ -189,8 +189,9 @@ func (f *FileInode) openReader(ctx context.Context) (io.ReadCloser, error) { rc, err := f.bucket.NewReader( ctx, &gcs.ReadObjectRequest{ - Name: f.src.Name, - Generation: f.src.Generation, + Name: f.src.Name, + Generation: f.src.Generation, + ReadCompressed: f.src.HasContentEncodingGzip(), }) if err != nil { err = fmt.Errorf("NewReader: %w", err) @@ -597,11 +598,12 @@ func (f *FileInode) CacheEnsureContent(ctx context.Context) (err error) { func convertObjToMinObject(o *gcs.Object) 
(mo storage.MinObject) { return storage.MinObject{ - Name: o.Name, - Size: o.Size, - Generation: o.Generation, - MetaGeneration: o.MetaGeneration, - Updated: o.Updated, - Metadata: o.Metadata, + Name: o.Name, + Size: o.Size, + Generation: o.Generation, + MetaGeneration: o.MetaGeneration, + Updated: o.Updated, + Metadata: o.Metadata, + ContentEncoding: o.ContentEncoding, } } diff --git a/internal/fs/inode/file_test.go b/internal/fs/inode/file_test.go index 98d1dc927c..df4f71a1f0 100644 --- a/internal/fs/inode/file_test.go +++ b/internal/fs/inode/file_test.go @@ -682,3 +682,36 @@ func (t *FileTest) SetMtime_SourceObjectMetaGenerationChanged() { ExpectEq(newObj.Generation, o.Generation) ExpectEq(newObj.MetaGeneration, o.MetaGeneration) } + +func (t *FileTest) ContentEncodingGzip() { + // Set up an explicit content-encoding on the backing object and re-create the inode. + contentEncoding := "gzip" + t.backingObj.ContentEncoding = contentEncoding + + t.createInode() + + AssertEq(contentEncoding, t.in.Source().ContentEncoding) + AssertTrue(t.in.Source().HasContentEncodingGzip()) +} + +func (t *FileTest) ContentEncodingNone() { + // Set up an explicit content-encoding on the backing object and re-create the inode. + contentEncoding := "" + t.backingObj.ContentEncoding = contentEncoding + + t.createInode() + + AssertEq(contentEncoding, t.in.Source().ContentEncoding) + AssertFalse(t.in.Source().HasContentEncodingGzip()) +} + +func (t *FileTest) ContentEncodingOther() { + // Set up an explicit content-encoding on the backing object and re-create the inode. 
+ contentEncoding := "other" + t.backingObj.ContentEncoding = contentEncoding + + t.createInode() + + AssertEq(contentEncoding, t.in.Source().ContentEncoding) + AssertFalse(t.in.Source().HasContentEncodingGzip()) +} diff --git a/internal/gcsx/random_reader.go b/internal/gcsx/random_reader.go index 7db7b576ca..4d24e4971b 100644 --- a/internal/gcsx/random_reader.go +++ b/internal/gcsx/random_reader.go @@ -372,6 +372,7 @@ func (rr *randomReader) startRead( Start: uint64(start), Limit: uint64(end), }, + ReadCompressed: rr.object.HasContentEncodingGzip(), }) if err != nil { diff --git a/internal/storage/bucket_handle.go b/internal/storage/bucket_handle.go index 311bd68d21..0526441465 100644 --- a/internal/storage/bucket_handle.go +++ b/internal/storage/bucket_handle.go @@ -64,6 +64,10 @@ func (bh *bucketHandle) NewReader( obj = obj.Generation(req.Generation) } + if req.ReadCompressed { + obj = obj.ReadCompressed(true) + } + // NewRangeReader creates a "storage.Reader" object which is also io.ReadCloser since it contains both Read() and Close() methods present in io.ReadCloser interface. 
return obj.NewRangeReader(ctx, start, length) } diff --git a/internal/storage/bucket_handle_test.go b/internal/storage/bucket_handle_test.go index 298ae363df..76ccec6ce6 100644 --- a/internal/storage/bucket_handle_test.go +++ b/internal/storage/bucket_handle_test.go @@ -81,7 +81,7 @@ func (t *BucketHandleTest) TestNewReaderMethodWithCompleteRead() { buf := make([]byte, len(ContentInTestObject)) _, err = rc.Read(buf) AssertEq(nil, err) - ExpectEq(string(buf[:]), ContentInTestObject) + ExpectEq(ContentInTestObject, string(buf[:])) } func (t *BucketHandleTest) TestNewReaderMethodWithRangeRead() { @@ -102,7 +102,7 @@ func (t *BucketHandleTest) TestNewReaderMethodWithRangeRead() { buf := make([]byte, limit-start) _, err = rc.Read(buf) AssertEq(nil, err) - ExpectEq(string(buf[:]), ContentInTestObject[start:limit]) + ExpectEq(ContentInTestObject[start:limit], string(buf[:])) } func (t *BucketHandleTest) TestNewReaderMethodWithNilRange() { @@ -117,7 +117,7 @@ func (t *BucketHandleTest) TestNewReaderMethodWithNilRange() { buf := make([]byte, len(ContentInTestObject)) _, err = rc.Read(buf) AssertEq(nil, err) - ExpectEq(string(buf[:]), ContentInTestObject[:]) + ExpectEq(ContentInTestObject, string(buf[:])) } func (t *BucketHandleTest) TestNewReaderMethodWithInValidObject() { @@ -150,7 +150,7 @@ func (t *BucketHandleTest) TestNewReaderMethodWithValidGeneration() { buf := make([]byte, len(ContentInTestObject)) _, err = rc.Read(buf) AssertEq(nil, err) - ExpectEq(string(buf[:]), ContentInTestObject) + ExpectEq(ContentInTestObject, string(buf[:])) } func (t *BucketHandleTest) TestNewReaderMethodWithInvalidGeneration() { @@ -168,6 +168,44 @@ func (t *BucketHandleTest) TestNewReaderMethodWithInvalidGeneration() { AssertEq(nil, rc) } +func (t *BucketHandleTest) TestNewReaderMethodWithCompressionEnabled() { + rc, err := t.bucketHandle.NewReader(context.Background(), + &gcs.ReadObjectRequest{ + Name: TestGzipObjectName, + Range: &gcs.ByteRange{ + Start: uint64(0), + Limit: 
uint64(len(ContentInTestGzipObjectCompressed)), + }, + ReadCompressed: true, + }) + + AssertEq(nil, err) + defer rc.Close() + buf := make([]byte, len(ContentInTestGzipObjectCompressed)) + _, err = rc.Read(buf) + AssertEq(nil, err) + ExpectEq(ContentInTestGzipObjectCompressed, string(buf)) +} + +func (t *BucketHandleTest) TestNewReaderMethodWithCompressionDisabled() { + rc, err := t.bucketHandle.NewReader(context.Background(), + &gcs.ReadObjectRequest{ + Name: TestGzipObjectName, + Range: &gcs.ByteRange{ + Start: uint64(0), + Limit: uint64(len(ContentInTestGzipObjectCompressed)), + }, + ReadCompressed: false, + }) + + AssertEq(nil, err) + defer rc.Close() + buf := make([]byte, len(ContentInTestGzipObjectDecompressed)) + _, err = rc.Read(buf) + AssertEq(nil, err) + ExpectEq(ContentInTestGzipObjectDecompressed, string(buf)) +} + func (t *BucketHandleTest) TestDeleteObjectMethodWithValidObject() { err := t.bucketHandle.DeleteObject(context.Background(), &gcs.DeleteObjectRequest{ @@ -376,7 +414,7 @@ func (t *BucketHandleTest) TestListObjectMethodWithPrefixObjectExist() { }) AssertEq(nil, err) - AssertEq(3, len(obj.Objects)) + AssertEq(4, len(obj.Objects)) AssertEq(1, len(obj.CollapsedRuns)) AssertEq(TestObjectRootFolderName, obj.Objects[0].Name) AssertEq(TestObjectSubRootFolderName, obj.Objects[1].Name) @@ -413,10 +451,11 @@ func (t *BucketHandleTest) TestListObjectMethodWithIncludeTrailingDelimiterFalse }) AssertEq(nil, err) - AssertEq(2, len(obj.Objects)) + AssertEq(3, len(obj.Objects)) AssertEq(1, len(obj.CollapsedRuns)) AssertEq(TestObjectRootFolderName, obj.Objects[0].Name) AssertEq(TestObjectName, obj.Objects[1].Name) + AssertEq(TestGzipObjectName, obj.Objects[2].Name) AssertEq(TestObjectSubRootFolderName, obj.CollapsedRuns[0]) } @@ -433,62 +472,65 @@ func (t *BucketHandleTest) TestListObjectMethodWithEmptyDelimiter() { }) AssertEq(nil, err) - AssertEq(4, len(obj.Objects)) + AssertEq(5, len(obj.Objects)) AssertEq(TestObjectRootFolderName, obj.Objects[0].Name) 
AssertEq(TestObjectSubRootFolderName, obj.Objects[1].Name) AssertEq(TestSubObjectName, obj.Objects[2].Name) AssertEq(TestObjectName, obj.Objects[3].Name) + AssertEq(TestGzipObjectName, obj.Objects[4].Name) AssertEq(TestObjectGeneration, obj.Objects[0].Generation) AssertEq(nil, obj.CollapsedRuns) } -// We have 4 objects in fakeserver. +// We have 5 objects in fakeserver. func (t *BucketHandleTest) TestListObjectMethodForMaxResult() { - fourObj, err := t.bucketHandle.ListObjects(context.Background(), + fiveObj, err := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "", Delimiter: "", IncludeTrailingDelimiter: false, ContinuationToken: "", - MaxResults: 4, + MaxResults: 5, ProjectionVal: 0, }) - twoObj, err2 := t.bucketHandle.ListObjects(context.Background(), + threeObj, err2 := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "gcsfuse/", Delimiter: "/", IncludeTrailingDelimiter: false, ContinuationToken: "", - MaxResults: 2, + MaxResults: 3, ProjectionVal: 0, }) - // Validate that 4 objects are listed when MaxResults is passed 4. + // Validate that 5 objects are listed when MaxResults is passed 5. AssertEq(nil, err) - AssertEq(4, len(fourObj.Objects)) - AssertEq(TestObjectRootFolderName, fourObj.Objects[0].Name) - AssertEq(TestObjectSubRootFolderName, fourObj.Objects[1].Name) - AssertEq(TestSubObjectName, fourObj.Objects[2].Name) - AssertEq(TestObjectName, fourObj.Objects[3].Name) - AssertEq(nil, fourObj.CollapsedRuns) + AssertEq(5, len(fiveObj.Objects)) + AssertEq(TestObjectRootFolderName, fiveObj.Objects[0].Name) + AssertEq(TestObjectSubRootFolderName, fiveObj.Objects[1].Name) + AssertEq(TestSubObjectName, fiveObj.Objects[2].Name) + AssertEq(TestObjectName, fiveObj.Objects[3].Name) + AssertEq(TestGzipObjectName, fiveObj.Objects[4].Name) + AssertEq(nil, fiveObj.CollapsedRuns) // Note: The behavior is different in real GCS storage JSON API. 
In real API, // only 1 object and 1 collapsedRuns would have been returned if - // IncludeTrailingDelimiter = false and 2 objects and 1 collapsedRuns if + // IncludeTrailingDelimiter = false and 3 objects and 1 collapsedRuns if // IncludeTrailingDelimiter = true. // This is because fake storage doesn't support pagination and hence maxResults // has no affect. AssertEq(nil, err2) - AssertEq(2, len(twoObj.Objects)) - AssertEq(TestObjectRootFolderName, twoObj.Objects[0].Name) - AssertEq(TestObjectName, twoObj.Objects[1].Name) - AssertEq(1, len(twoObj.CollapsedRuns)) + AssertEq(3, len(threeObj.Objects)) + AssertEq(TestObjectRootFolderName, threeObj.Objects[0].Name) + AssertEq(TestObjectName, threeObj.Objects[1].Name) + AssertEq(TestGzipObjectName, threeObj.Objects[2].Name) + AssertEq(1, len(threeObj.CollapsedRuns)) } func (t *BucketHandleTest) TestListObjectMethodWithMissingMaxResult() { - // Validate that ee have 4 objects in fakeserver - fourObjWithMaxResults, err := t.bucketHandle.ListObjects(context.Background(), + // Validate that ee have 5 objects in fakeserver + fiveObjWithMaxResults, err := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "", Delimiter: "", @@ -498,9 +540,9 @@ func (t *BucketHandleTest) TestListObjectMethodWithMissingMaxResult() { ProjectionVal: 0, }) AssertEq(nil, err) - AssertEq(4, len(fourObjWithMaxResults.Objects)) + AssertEq(5, len(fiveObjWithMaxResults.Objects)) - fourObjWithoutMaxResults, err2 := t.bucketHandle.ListObjects(context.Background(), + fiveObjWithoutMaxResults, err2 := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "", Delimiter: "", @@ -509,19 +551,20 @@ func (t *BucketHandleTest) TestListObjectMethodWithMissingMaxResult() { ProjectionVal: 0, }) - // Validate that all objects (4) are listed when MaxResults is not passed. + // Validate that all objects (5) are listed when MaxResults is not passed. 
AssertEq(nil, err2) - AssertEq(4, len(fourObjWithoutMaxResults.Objects)) - AssertEq(TestObjectRootFolderName, fourObjWithoutMaxResults.Objects[0].Name) - AssertEq(TestObjectSubRootFolderName, fourObjWithoutMaxResults.Objects[1].Name) - AssertEq(TestSubObjectName, fourObjWithoutMaxResults.Objects[2].Name) - AssertEq(TestObjectName, fourObjWithoutMaxResults.Objects[3].Name) - AssertEq(nil, fourObjWithoutMaxResults.CollapsedRuns) + AssertEq(5, len(fiveObjWithoutMaxResults.Objects)) + AssertEq(TestObjectRootFolderName, fiveObjWithoutMaxResults.Objects[0].Name) + AssertEq(TestObjectSubRootFolderName, fiveObjWithoutMaxResults.Objects[1].Name) + AssertEq(TestSubObjectName, fiveObjWithoutMaxResults.Objects[2].Name) + AssertEq(TestObjectName, fiveObjWithoutMaxResults.Objects[3].Name) + AssertEq(TestGzipObjectName, fiveObjWithoutMaxResults.Objects[4].Name) + AssertEq(nil, fiveObjWithoutMaxResults.CollapsedRuns) } func (t *BucketHandleTest) TestListObjectMethodWithZeroMaxResult() { - // Validate that ee have 4 objects in fakeserver - fourObj, err := t.bucketHandle.ListObjects(context.Background(), + // Validate that we have 5 objects in fakeserver + fiveObj, err := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "", Delimiter: "", @@ -531,9 +574,9 @@ func (t *BucketHandleTest) TestListObjectMethodWithZeroMaxResult() { ProjectionVal: 0, }) AssertEq(nil, err) - AssertEq(4, len(fourObj.Objects)) + AssertEq(5, len(fiveObj.Objects)) - fourObjWithZeroMaxResults, err2 := t.bucketHandle.ListObjects(context.Background(), + fiveObjWithZeroMaxResults, err2 := t.bucketHandle.ListObjects(context.Background(), &gcs.ListObjectsRequest{ Prefix: "", Delimiter: "", @@ -543,15 +586,16 @@ func (t *BucketHandleTest) TestListObjectMethodWithZeroMaxResult() { ProjectionVal: 0, }) - // Validate that all objects (4) are listed when MaxResults is 0. This has + // Validate that all objects (5) are listed when MaxResults is 0. 
This has // same behavior as not passing MaxResults in request. AssertEq(nil, err2) - AssertEq(4, len(fourObjWithZeroMaxResults.Objects)) - AssertEq(TestObjectRootFolderName, fourObjWithZeroMaxResults.Objects[0].Name) - AssertEq(TestObjectSubRootFolderName, fourObjWithZeroMaxResults.Objects[1].Name) - AssertEq(TestSubObjectName, fourObjWithZeroMaxResults.Objects[2].Name) - AssertEq(TestObjectName, fourObjWithZeroMaxResults.Objects[3].Name) - AssertEq(nil, fourObjWithZeroMaxResults.CollapsedRuns) + AssertEq(5, len(fiveObjWithZeroMaxResults.Objects)) + AssertEq(TestObjectRootFolderName, fiveObjWithZeroMaxResults.Objects[0].Name) + AssertEq(TestObjectSubRootFolderName, fiveObjWithZeroMaxResults.Objects[1].Name) + AssertEq(TestSubObjectName, fiveObjWithZeroMaxResults.Objects[2].Name) + AssertEq(TestObjectName, fiveObjWithZeroMaxResults.Objects[3].Name) + AssertEq(TestGzipObjectName, fiveObjWithZeroMaxResults.Objects[4].Name) + AssertEq(nil, fiveObjWithZeroMaxResults.CollapsedRuns) } // FakeGCSServer is not handling ContentType, ContentEncoding, ContentLanguage, CacheControl in updateflow diff --git a/internal/storage/fake_storage_util.go b/internal/storage/fake_storage_util.go index b40f46fa45..9d6801e7e8 100644 --- a/internal/storage/fake_storage_util.go +++ b/internal/storage/fake_storage_util.go @@ -32,6 +32,19 @@ const TestObjectGeneration int64 = 780 const MetaDataValue string = "metaData" const MetaDataKey string = "key" +// Data specific to content-encoding gzip tests +const TestGzipObjectName string = "gcsfuse/test_gzip.txt" + +// ContentInTestGzipObjectCompressed is a gzip-compressed content for gzip tests. +// It was created by uploading a small file to GCS using `gsutil cp -Z` and then +// downloading it as it is (compressed as present on GCS) using go storage client +// library. To view/change it, open it in a gzip.newReader() ur using a gzip plugin +// in the IDE. 
If you do change it, remember to update ContentInTestGzipObjectDecompressed +// too correspondingly. +const ContentInTestGzipObjectCompressed string = "\x1f\x8b\b\b\x9d\xab\xd5d\x02\xfftmp1bg8d7ug\x00\v\xc9\xc8,\xe6\x02\x00~r\xe2V\x05\x00\x00\x00" +const ContentInTestGzipObjectDecompressed string = "This\n" +const TestGzipObjectGeneration int64 = 781 + type FakeStorage interface { CreateStorageHandle() (sh StorageHandle) @@ -102,6 +115,18 @@ func getTestFakeStorageObject() []fakestorage.Object { } fakeObjects = append(fakeObjects, testSubObject) + testGzipObject := fakestorage.Object{ + ObjectAttrs: fakestorage.ObjectAttrs{ + BucketName: TestBucketName, + Name: TestGzipObjectName, + Generation: TestGzipObjectGeneration, + Metadata: map[string]string{MetaDataKey: MetaDataValue}, + ContentEncoding: ContentEncodingGzip, + }, + Content: []byte(ContentInTestGzipObjectCompressed), + } + fakeObjects = append(fakeObjects, testGzipObject) + return fakeObjects } diff --git a/internal/storage/object.go b/internal/storage/object.go index c9678d699e..fdce8a29e1 100644 --- a/internal/storage/object.go +++ b/internal/storage/object.go @@ -18,6 +18,8 @@ import ( "time" ) +const ContentEncodingGzip = "gzip" + // MinObject is a record representing subset of properties of a particular // generation object in GCS. 
// @@ -25,10 +27,15 @@ import ( // // https://cloud.google.com/storage/docs/json_api/v1/objects#resource type MinObject struct { - Name string - Size uint64 - Generation int64 - MetaGeneration int64 - Updated time.Time - Metadata map[string]string + Name string + Size uint64 + Generation int64 + MetaGeneration int64 + Updated time.Time + Metadata map[string]string + ContentEncoding string +} + +func (mo MinObject) HasContentEncodingGzip() bool { + return mo.ContentEncoding == ContentEncodingGzip } diff --git a/internal/storage/object_test.go b/internal/storage/object_test.go new file mode 100644 index 0000000000..03a1dd4564 --- /dev/null +++ b/internal/storage/object_test.go @@ -0,0 +1,54 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "testing" + + . 
"github.com/jacobsa/ogletest" +) + +func TestObject(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type ObjectTest struct { +} + +func init() { RegisterTestSuite(&ObjectTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ObjectTest) HasContentEncodingGzipPositive() { + mo := MinObject{} + mo.ContentEncoding = "gzip" + + AssertTrue(mo.HasContentEncodingGzip()) +} + +func (t *ObjectTest) HasContentEncodingGzipNegative() { + encodings := []string{"", "GZIP", "xzip", "zip"} + + for _, encoding := range encodings { + mo := MinObject{} + mo.ContentEncoding = encoding + + AssertFalse(mo.HasContentEncodingGzip()) + } +} From 4a0c1ac23c4eb362522e573b7a022f7809e44282 Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Mon, 14 Aug 2023 05:12:41 +0000 Subject: [PATCH 34/46] gzip integration tests (#1256) Add read-flow integration tests for gzip objects This adds read-flow integration tests for the gzip read-compressed changes added in previous PR https://github.com/GoogleCloudPlatform/gcsfuse/pull/1255 . Tests added (2*5 = 10 tests) * Two types of operations each * List, Stat and full-file read * On five types of objects each * Object with text content, uploaded with gzip compression with content-encoding: gzip, and cache-control: no-transform * Object with text content, uploaded with gzip compression with content-encoding: gzip, and cache-control: '' * Object with gzip content, uploaded with content-encoding: '' * Object with gzip content, uploaded with gzip compression (i.e. doubly compressed) with content-encoding: gzip, and cache-control: no-transform * Object with gzip content, uploaded with gzip compression (i.e. 
doubly compressed) with content-encoding: gzip, and cache-control: '' Write-flow tests will be added in another commit shortly after. This commit also adds some helper functions needed for gzip integration tests Functions added * CreateLocalTempFile - creates a temporary local file of given size, gzip/non-gzip * DiffFiles * GetGcsObjectSize * DownloadGcsObject * UploadGcsObject * DeleteGcsObject * ClearCacheControlOnGcsObject --- tools/integration_tests/gzip/gzip_test.go | 171 +++++++++++++++++ .../integration_tests/gzip/helpers/helpers.go | 179 ++++++++++++++++++ .../integration_tests/gzip/read_gzip_test.go | 149 +++++++++++++++ .../run_tests_mounted_directory.sh | 4 + .../util/operations/file_operations.go | 161 ++++++++++++++++ 5 files changed, 664 insertions(+) create mode 100644 tools/integration_tests/gzip/gzip_test.go create mode 100644 tools/integration_tests/gzip/helpers/helpers.go create mode 100644 tools/integration_tests/gzip/read_gzip_test.go mode change 100644 => 100755 tools/integration_tests/run_tests_mounted_directory.sh diff --git a/tools/integration_tests/gzip/gzip_test.go b/tools/integration_tests/gzip/gzip_test.go new file mode 100644 index 0000000000..739b18a5a1 --- /dev/null +++ b/tools/integration_tests/gzip/gzip_test.go @@ -0,0 +1,171 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for gzip objects in gcsfuse mounts. 
+package gzip_test + +import ( + "fmt" + "log" + "os" + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/gzip/helpers" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/mounting/static_mounting" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const ( + SeqReadSizeMb = 1 + TextContentSize = 10 * 1e6 * SeqReadSizeMb + + TextContentWithContentEncodingWithNoTransformFilename = "textContentWithContentEncodingWithNoTransform.txt" + TextContentWithContentEncodingWithoutNoTransformFilename = "textContentWithContentEncodingWithoutNoTransform.txt" + + GzipContentWithoutContentEncodingFilename = "gzipContentWithoutContentEncoding.txt.gz" + + GzipContentWithContentEncodingWithNoTransformFilename = "gzipContentWithContentEncodingWithNoTransform.txt.gz" + GzipContentWithContentEncodingWithoutNoTransformFilename = "gzipContentWithContentEncodingWithoutNoTransform.txt.gz" + + TestBucketPrefixPath = "gzip" +) + +var ( + gcsObjectsToBeDeletedEventually []string +) + +func setup_testdata(m *testing.M) error { + fmds := []struct { + filename string + filesize int + keepCacheControlNoTransform bool // if true, no-transform is reset as '' + enableGzipEncodedContent bool // if true, original file content is gzip-encoded + enableGzipContentEncoding bool // if true, the content is uploaded as gsutil cp -Z i.e. 
with content-encoding: gzip header in GCS + }{ + { + filename: TextContentWithContentEncodingWithNoTransformFilename, + filesize: TextContentSize, + keepCacheControlNoTransform: true, + enableGzipEncodedContent: false, + enableGzipContentEncoding: true, + }, + { + filename: TextContentWithContentEncodingWithoutNoTransformFilename, + filesize: TextContentSize, + keepCacheControlNoTransform: false, + enableGzipEncodedContent: false, + enableGzipContentEncoding: true, + }, + { + filename: GzipContentWithoutContentEncodingFilename, + filesize: TextContentSize, + keepCacheControlNoTransform: true, // it's a don't care in this case + enableGzipEncodedContent: true, + enableGzipContentEncoding: false, + }, { + filename: GzipContentWithContentEncodingWithNoTransformFilename, + filesize: TextContentSize, + keepCacheControlNoTransform: true, + enableGzipEncodedContent: true, + enableGzipContentEncoding: true, + }, { + filename: GzipContentWithContentEncodingWithoutNoTransformFilename, + filesize: TextContentSize, + keepCacheControlNoTransform: false, + enableGzipEncodedContent: true, + enableGzipContentEncoding: true, + }, + } + + for _, fmd := range fmds { + var localFilePath string + localFilePath, err := helpers.CreateLocalTempFile(fmd.filesize, fmd.enableGzipEncodedContent) + if err != nil { + return err + } + + defer os.Remove(localFilePath) + + // upload to the test-bucket for testing + gcsObjectPath := path.Join(setup.TestBucket(), TestBucketPrefixPath, fmd.filename) + + err = operations.UploadGcsObject(localFilePath, gcsObjectPath, fmd.enableGzipContentEncoding) + if err != nil { + return err + } + + gcsObjectsToBeDeletedEventually = append(gcsObjectsToBeDeletedEventually, gcsObjectPath) + + if !fmd.keepCacheControlNoTransform { + err = operations.ClearCacheControlOnGcsObject(gcsObjectPath) + if err != nil { + return err + } + } + } + + return nil +} + +func destroy_testdata(m *testing.M) error { + for _, gcsObjectPath := range gcsObjectsToBeDeletedEventually { + err 
:= operations.DeleteGcsObject(gcsObjectPath) + if err != nil { + return fmt.Errorf("Failed to delete gcs object gs://%s", gcsObjectPath) + } + } + + return nil +} + +func TestMain(m *testing.M) { + setup.ParseSetUpFlags() + + commonFlags := []string{"--sequential-read-size-mb=" + fmt.Sprint(SeqReadSizeMb), "--implicit-dirs"} + flags := [][]string{commonFlags} + + setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() + + if setup.TestBucket() == "" && setup.MountedDirectory() != "" { + log.Print("Please pass the name of bucket mounted at mountedDirectory to --testBucket flag.") + os.Exit(1) + } + + err := setup_testdata(m) + if err != nil { + fmt.Printf("Failed to setup test data: %v", err) + os.Exit(1) + } + + defer func() { + err := destroy_testdata(m) + if err != nil { + fmt.Printf("Failed to destoy gzip test data: %v", err) + } + }() + + // Run tests for mountedDirectory only if --mountedDirectory flag is set. + setup.RunTestsForMountedDirectoryFlag(m) + + // Run tests for testBucket + setup.SetUpTestDirForTestBucketFlag() + + successCode := static_mounting.RunTests(flags, m) + + setup.RemoveBinFileCopiedForTesting() + + os.Exit(successCode) +} diff --git a/tools/integration_tests/gzip/helpers/helpers.go b/tools/integration_tests/gzip/helpers/helpers.go new file mode 100644 index 0000000000..4871768880 --- /dev/null +++ b/tools/integration_tests/gzip/helpers/helpers.go @@ -0,0 +1,179 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package helpers + +import ( + "compress/gzip" + "context" + "fmt" + "io" + "io/fs" + "os" + "path" + "strings" + + "cloud.google.com/go/storage" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +const ( + TempFileStrLine = "This is a test file" + TmpDirectory = "/tmp" +) + +// Creates a temporary file (name-collision-safe) in /tmp with given content size in bytes. +// If gzipCompress is true, output file is a gzip-compressed file. +// contentSize is the size of the uncompressed content. In case gzipCompress is true, the actual output file size will be +// different from contentSize (typically gzip-compressed file size < contentSize). +// Caller is responsible for deleting the created file when done using it. +// Failure cases: +// 1. contentSize <= 0 +// 2. os.CreateTemp() returned error or nil handle +// 3. gzip.NewWriter() returned nil handle +// 4. Failed to write the content to the created temp file +func CreateLocalTempFile(contentSize int, gzipCompress bool) (string, error) { + // fail if contentSize <= 0 + if contentSize <= 0 { + return "", fmt.Errorf("unsupported fileSize: %d", contentSize) + } + + // Create text-content of given size. + // strings.builder is used as opposed to string appends + // as this is much more efficient when multiple concatenations + // are required. 
+ var contentBuilder strings.Builder + const tempStr = TempFileStrLine + "\n" + + for ; contentSize >= len(tempStr); contentSize -= len(tempStr) { + contentBuilder.WriteString(tempStr) + } + + if contentSize > 0 { + contentBuilder.WriteString(tempStr[0:contentSize]) + } + + // reset contentSize + contentSize = contentBuilder.Len() + + // create appropriate name template for temp file + filenameTemplate := "testfile-*.txt" + if gzipCompress { + filenameTemplate += ".gz" + } + + // create a temp file + f, err := os.CreateTemp(TmpDirectory, filenameTemplate) + if err != nil { + return "", err + } else if f == nil { + return "", fmt.Errorf("nil file handle returned from os.CreateTemp") + } + defer operations.CloseFile(f) + filepath := f.Name() + + content := contentBuilder.String() + + if gzipCompress { + w := gzip.NewWriter(f) + if w == nil { + return "", fmt.Errorf("failed to open a gzip writer handle") + } + defer func() { + err := w.Close() + if err != nil { + fmt.Printf("Failed to close file %s: %v", filepath, err) + } + }() + + // write the content created above as gzip + n, err := w.Write([]byte(content)) + if err != nil { + return "", err + } else if n != contentSize { + return "", fmt.Errorf("failed to write to gzip file %s. Content-size: %d bytes, wrote = %d bytes", filepath, contentSize, n) + } + } else { + // write the content created above as text + n, err := f.WriteString(content) + if err != nil { + return "", err + } else if n != contentSize { + return "", fmt.Errorf("failed to write to text file %s. Content-size: %d bytes, wrote = %d bytes", filepath, contentSize, n) + } + } + + return filepath, nil +} + +// Downloads given gzipped GCS object (with path without 'gs://') to local disk. +// Fails if the object doesn't exist or permission to read object is not +// available. +// Uses go storage client library to download object. 
Use of gsutil/gcloud is not +// possible as they both always read back objects with content-encoding: gzip as +// uncompressed/decompressed irrespective of any argument passed. +func DownloadGzipGcsObjectAsCompressed(bucketName, objPathInBucket string) (string, error) { + gcsObjectPath := path.Join(setup.TestBucket(), objPathInBucket) + gcsObjectSize, err := operations.GetGcsObjectSize(gcsObjectPath) + if err != nil { + return "", fmt.Errorf("failed to get size of gcs object %s: %w", gcsObjectPath, err) + } + + tempfile, err := CreateLocalTempFile(1, false) + if err != nil { + return "", fmt.Errorf("failed to create tempfile for downloading gcs object: %w", err) + } + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil || client == nil { + return "", fmt.Errorf("failed to create storage client: %w", err) + } + defer client.Close() + + bktName := setup.TestBucket() + bkt := client.Bucket(bktName) + if bkt == nil { + return "", fmt.Errorf("failed to access bucket %s: %w", bktName, err) + } + + obj := bkt.Object(objPathInBucket) + if obj == nil { + return "", fmt.Errorf("failed to access object %s from bucket %s: %w", objPathInBucket, bktName, err) + } + + obj = obj.ReadCompressed(true) + if obj == nil { + return "", fmt.Errorf("failed to access object %s from bucket %s as compressed: %w", objPathInBucket, bktName, err) + } + + r, err := obj.NewReader(ctx) + if r == nil || err != nil { + return "", fmt.Errorf("failed to read object %s from bucket %s: %w", objPathInBucket, bktName, err) + } + defer r.Close() + + gcsObjectData, err := io.ReadAll(r) + if len(gcsObjectData) < gcsObjectSize || err != nil { + return "", fmt.Errorf("failed to read object %s from bucket %s (expected read-size: %d, actual read-size: %d): %w", objPathInBucket, bktName, gcsObjectSize, len(gcsObjectData), err) + } + + err = os.WriteFile(tempfile, gcsObjectData, fs.FileMode(os.O_CREATE|os.O_WRONLY|os.O_TRUNC)) + if err != nil || client == nil { + return "", 
fmt.Errorf("failed to write to tempfile %s: %w", tempfile, err) + } + + return tempfile, nil +} diff --git a/tools/integration_tests/gzip/read_gzip_test.go b/tools/integration_tests/gzip/read_gzip_test.go new file mode 100644 index 0000000000..1da9b4127c --- /dev/null +++ b/tools/integration_tests/gzip/read_gzip_test.go @@ -0,0 +1,149 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for gzip objects in gcsfuse mounts. +package gzip_test + +import ( + "bytes" + "os" + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/gzip/helpers" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +// Verify that the passed file exists on the GCS test-bucket and in the mounted bucket +// and its size in the mounted directory matches that of the GCS object. Also verify +// that the passed file in the mounted bucket matches the corresponding +// GCS object in content. +// GCS object. 
+func verifyFileSizeAndFullFileRead(t *testing.T, filename string) { + mountedFilePath := path.Join(setup.MntDir(), TestBucketPrefixPath, filename) + gcsObjectPath := path.Join(setup.TestBucket(), TestBucketPrefixPath, filename) + gcsObjectSize, err := operations.GetGcsObjectSize(gcsObjectPath) + if err != nil { + t.Fatalf("Failed to get size of gcs object %s: %v\n", gcsObjectPath, err) + } + + fi, err := operations.StatFile(mountedFilePath) + if err != nil || fi == nil { + t.Fatalf("Failed to get stat info of mounted file %s: %v\n", mountedFilePath, err) + } + + if (*fi).Size() != int64(gcsObjectSize) { + t.Fatalf("Size of file mounted through gcsfuse (%s, %d) doesn't match the size of the file on GCS (%s, %d)", + mountedFilePath, (*fi).Size(), gcsObjectPath, gcsObjectSize) + } + + localCopy, err := helpers.DownloadGzipGcsObjectAsCompressed(setup.TestBucket(), path.Join(TestBucketPrefixPath, filename)) + if err != nil { + t.Fatalf("failed to download gcs object (gs:/%s) to local-disk: %v", gcsObjectPath, err) + } + + defer operations.RemoveFile(localCopy) + + diff, err := operations.DiffFiles(localCopy, mountedFilePath) + if diff != 0 { + t.Fatalf("Tempfile (%s, download of GCS object %s) didn't match the Mounted local file (%s): %v", localCopy, gcsObjectPath, mountedFilePath, err) + } +} + +// Verify that the passed file exists on the GCS test-bucket and in the mounted bucket +// and its ranged read returns the same size as the requested read size. 
+func verifyRangedRead(t *testing.T, filename string) { + mountedFilePath := path.Join(setup.MntDir(), TestBucketPrefixPath, filename) + + gcsObjectPath := path.Join(setup.TestBucket(), TestBucketPrefixPath, filename) + gcsObjectSize, err := operations.GetGcsObjectSize(gcsObjectPath) + if err != nil { + t.Fatalf("Failed to get size of gcs object %s: %v\n", gcsObjectPath, err) + } + + readSize := int64(gcsObjectSize / 10) + readOffset := int64(readSize / 10) + f, err := os.Open(mountedFilePath) + if err != nil || f == nil { + t.Fatalf("Failed to open local mounted file %s: %v", mountedFilePath, err) + } + + localCopy, err := helpers.DownloadGzipGcsObjectAsCompressed(setup.TestBucket(), path.Join(TestBucketPrefixPath, filename)) + if err != nil { + t.Fatalf("failed to download gcs object (gs:/%s) to local-disk: %v", gcsObjectPath, err) + } + + defer operations.RemoveFile(localCopy) + + for _, offsetMultiplier := range []int64{1, 3, 5, 7, 9} { + buf1, err := operations.ReadChunkFromFile(mountedFilePath, (readSize), offsetMultiplier*(readOffset)) + if err != nil { + t.Fatalf("Failed to read mounted file %s: %v", mountedFilePath, err) + } else if buf1 == nil { + t.Fatalf("Failed to read mounted file %s: buffer returned as nul", mountedFilePath) + } + + buf2, err := operations.ReadChunkFromFile(localCopy, (readSize), offsetMultiplier*(readOffset)) + if err != nil { + t.Fatalf("Failed to read local file %s: %v", localCopy, err) + } else if buf2 == nil { + t.Fatalf("Failed to read local file %s: buffer returned as nul", localCopy) + } + + if !bytes.Equal(buf1, buf2) { + t.Fatalf("Read buffer (of size %d from offset %d) of %s doesn't match that of %s", int64(readSize), offsetMultiplier*int64(readOffset), mountedFilePath, localCopy) + } + } +} + +func TestGzipEncodedTextFileWithNoTransformSizeAndFullFileRead(t *testing.T) { + verifyFileSizeAndFullFileRead(t, TextContentWithContentEncodingWithNoTransformFilename) +} + +func TestGzipEncodedTextFileWithNoTransformRangedRead(t 
*testing.T) { + verifyRangedRead(t, TextContentWithContentEncodingWithNoTransformFilename) +} + +func TestGzipEncodedTextFileWithoutNoTransformSizeAndFullFileRead(t *testing.T) { + verifyFileSizeAndFullFileRead(t, TextContentWithContentEncodingWithoutNoTransformFilename) +} + +func TestGzipEncodedTextFileWithoutNoTransformRangedRead(t *testing.T) { + verifyRangedRead(t, TextContentWithContentEncodingWithoutNoTransformFilename) +} + +func TestGzipUnencodedGzipFileSizeAndFullFileRead(t *testing.T) { + verifyFileSizeAndFullFileRead(t, GzipContentWithoutContentEncodingFilename) +} + +func TestGzipUnencodedGzipFileRangedRead(t *testing.T) { + verifyRangedRead(t, GzipContentWithoutContentEncodingFilename) +} + +func TestGzipEncodedGzipFileWithNoTransformSizeAndFullFileRead(t *testing.T) { + verifyFileSizeAndFullFileRead(t, GzipContentWithContentEncodingWithNoTransformFilename) +} + +func TestGzipEncodedGzipFileWithNoTransformRangedRead(t *testing.T) { + verifyRangedRead(t, GzipContentWithContentEncodingWithNoTransformFilename) +} + +func TestGzipEncodedGzipFileWithoutNoTransformSizeAndFullFileRead(t *testing.T) { + verifyFileSizeAndFullFileRead(t, GzipContentWithContentEncodingWithoutNoTransformFilename) +} + +func TestGzipEncodedGzipFileWithoutNoTransformRangedRead(t *testing.T) { + verifyRangedRead(t, GzipContentWithContentEncodingWithoutNoTransformFilename) +} diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh old mode 100644 new mode 100755 index 9b5938b25f..363217dde9 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -272,3 +272,7 @@ sudo umount $MOUNT_DIR gcsfuse --implicit-dirs --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR + +gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/gzip/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME +sudo umount $MOUNT_DIR diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index d2b20899cb..236576f2ed 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -16,12 +16,16 @@ package operations import ( + "bytes" "crypto/rand" "fmt" "io" + "io/fs" "log" "os" "os/exec" + "strconv" + "strings" "syscall" ) @@ -273,3 +277,160 @@ func ReadChunkFromFile(filePath string, chunkSize int64, offset int64) (chunk [] return } + +// Returns the stats of a file. +// Fails if the passed input is a directory. +func StatFile(file string) (*fs.FileInfo, error) { + fstat, err := os.Stat(file) + if err != nil { + return nil, fmt.Errorf("failed to stat input file %s: %v", file, err) + } else if fstat.IsDir() { + return nil, fmt.Errorf("input file %s is a directory", file) + } + + return &fstat, nil +} + +// Finds if two local files have identical content (equivalnt to binary diff). +// Needs (a) both files to exist, (b)read permission on both the files, (c) both +// inputs to be proper files, symlinks/directories not supported. +// Compares file names first. If different, compares sizes next. +// If sizes match, then compares hashes of both the files. +// Not a good idea for very large files as it loads both the files in the memory completely. +// Returns 0 if no error and files match. +// Returns 1 if files don't match and captures reason for mismatch in err. +// Returns 2 if any error. 
+func DiffFiles(filepath1, filepath2 string) (int, error) { + if filepath1 == "" || filepath2 == "" { + return 2, fmt.Errorf("one or both files being diff'ed have empty path") + } else if filepath1 == filepath2 { + return 0, nil + } + + fstat1, err := StatFile(filepath1) + if err != nil { + return 2, err + } + + fstat2, err := StatFile(filepath2) + if err != nil { + return 2, err + } + + file1size := (*fstat1).Size() + file2size := (*fstat2).Size() + if file1size != file2size { + return 1, fmt.Errorf("files don't match in size: %s (%d bytes), %s (%d bytes)", filepath1, file1size, filepath2, file2size) + } + + bytes1, err := ReadFile(filepath1) + if err != nil || bytes1 == nil { + return 2, fmt.Errorf("failed to read file %s", filepath1) + } else if int64(len(bytes1)) != file1size { + return 2, fmt.Errorf("failed to completely read file %s", filepath1) + } + + bytes2, err := ReadFile(filepath2) + if err != nil || bytes2 == nil { + return 2, fmt.Errorf("failed to read file %s", filepath2) + } else if int64(len(bytes2)) != file2size { + return 2, fmt.Errorf("failed to completely read file %s", filepath2) + } + + if !bytes.Equal(bytes1, bytes2) { + return 1, fmt.Errorf("files don't match in content: %s, %s", filepath1, filepath2) + } + + return 0, nil +} + +// Executes any given tool (e.g. gsutil/gcloud) with given args. +func executeToolCommandf(tool string, format string, args ...any) ([]byte, error) { + cmdArgs := tool + " " + fmt.Sprintf(format, args...) + cmd := exec.Command("/bin/bash", "-c", cmdArgs) + + var stdout bytes.Buffer + var stderr bytes.Buffer + + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return stdout.Bytes(), fmt.Errorf("failed command '%s': %v, %s", cmdArgs, err, stderr.String()) + } + + return stdout.Bytes(), nil +} + +// Executes any given gcloud command with given args +func ExecuteGcloudCommandf(format string, args ...any) ([]byte, error) { + return executeToolCommandf("gcloud alpha", format, args...) 
+} + +// Returns size of a give GCS object with path (without 'gs://'). +// Fails if the object doesn't exist or permission to read object's metadata is not +// available. +// Uses 'gcloud storage du -s gs://gcsObjPath'. +func GetGcsObjectSize(gcsObjPath string) (int, error) { + stdout, err := ExecuteGcloudCommandf("storage du -s gs://%s", gcsObjPath) + if err != nil { + return 0, err + } + + // The above gcloud command returns output in the following format: + // + // So, we need to pick out only the first string before ' '. + gcsObjectSize, err := strconv.Atoi(strings.TrimSpace(strings.Split(string(stdout), " ")[0])) + if err != nil { + return gcsObjectSize, err + } + + return gcsObjectSize, nil +} + +// Downloads given GCS object (with path without 'gs://') to localPath. +// Fails if the object doesn't exist or permission to read object is not +// available. +// Uses 'gcloud storage cp gs://gcsObjPath localPath' +func DownloadGcsObject(gcsObjPath, localPath string) error { + _, err := ExecuteGcloudCommandf("storage cp gs://%s %s", gcsObjPath, localPath) + if err != nil { + return err + } + + return nil +} + +// Uploads given local file to GCS object (with path without 'gs://'). +// Fails if the file doesn't exist or permission to write to object/bucket is not +// available. +// Uses 'gcloud storage cp localPath gs://gcsObjPath' +func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error { + var err error + if uploadGzipEncoded { + _, err = ExecuteGcloudCommandf("storage cp -Z %s gs://%s", localPath, gcsObjPath) + } else { + _, err = ExecuteGcloudCommandf("storage cp %s gs://%s", localPath, gcsObjPath) + } + + return err +} + +// Deletes a given GCS object (with path without 'gs://'). +// Fails if the object doesn't exist or permission to delete object is not +// available. 
+// Uses 'gcloud storage rm gs://gcsObjPath' +func DeleteGcsObject(gcsObjPath string) error { + _, err := ExecuteGcloudCommandf("storage rm gs://%s", gcsObjPath) + return err +} + +// Clears cache-control attributes on given GCS object (with path without 'gs://'). +// Fails if the file doesn't exist or permission to modify object's metadata is not +// available. +// Uses 'gcloud storage objects update gs://gs://gcsObjPath --cache-control=' ' ' +func ClearCacheControlOnGcsObject(gcsObjPath string) error { + _, err := ExecuteGcloudCommandf("storage objects update gs://%s --cache-control=''", gcsObjPath) + return err +} From e163c1cd9d7582110b2c61d217e24ee0fa08be08 Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Mon, 14 Aug 2023 06:01:24 +0000 Subject: [PATCH 35/46] Add gzip write-flow tests (#1262) It adds write-flow integration tests for GCSfuse-mounted objects with content-encoding gzip. These tests replace an existing gzip object in a mounted bucket, then verify the size of the re-uploaded GCS object. Tests added (1*5 = 5 tests) * One type of operation * Overwrite * On five types of objects each * Object with text content, uploaded with gzip compression with content-encoding: gzip, and cache-control: no-transform * Object with text content, uploaded with gzip compression with content-encoding: gzip, and cache-control: '' * Object with gzip content, uploaded with content-encoding: '' * Object with gzip content, uploaded with gzip compression (i.e. doubly compressed) with content-encoding: gzip, and cache-control: no-transform * Object with gzip content, uploaded with gzip compression (i.e. doubly compressed) with content-encoding: gzip, and cache-control: '' This also adds a necessary CopyFileAllowOverwrite alternative utility to the existing CopyFile utility(which doesn't allow file overwrite) in tools/integration_tests/util/operations/file_operations.go . 
--- tools/integration_tests/gzip/gzip_test.go | 41 ++++++++ .../integration_tests/gzip/write_gzip_test.go | 97 +++++++++++++++++++ .../run_tests_mounted_directory.sh | 1 + .../util/operations/file_operations.go | 44 ++++++--- 4 files changed, 171 insertions(+), 12 deletions(-) create mode 100644 tools/integration_tests/gzip/write_gzip_test.go diff --git a/tools/integration_tests/gzip/gzip_test.go b/tools/integration_tests/gzip/gzip_test.go index 739b18a5a1..a04eb30f88 100644 --- a/tools/integration_tests/gzip/gzip_test.go +++ b/tools/integration_tests/gzip/gzip_test.go @@ -40,6 +40,14 @@ const ( GzipContentWithContentEncodingWithNoTransformFilename = "gzipContentWithContentEncodingWithNoTransform.txt.gz" GzipContentWithContentEncodingWithoutNoTransformFilename = "gzipContentWithContentEncodingWithoutNoTransform.txt.gz" + TextContentWithContentEncodingWithNoTransformToOverwrite = "TextContentWithContentEncodingWithNoTransformToOverwrite.txt" + TextContentWithContentEncodingWithoutNoTransformToOverwrite = "TextContentWithContentEncodingWithoutNoTransformToOverwrite.txt" + + GzipContentWithoutContentEncodingToOverwrite = "GzipContentWithoutContentEncodingToOverwrite.txt.gz" + + GzipContentWithContentEncodingWithNoTransformToOverwrite = "GzipContentWithContentEncodingWithNoTransformToOverwrite.txt.gz" + GzipContentWithContentEncodingWithoutNoTransformToOverwrite = "GzipContentWithContentEncodingWithoutNoTransformToOverwrite.txt.gz" + TestBucketPrefixPath = "gzip" ) @@ -88,6 +96,39 @@ func setup_testdata(m *testing.M) error { enableGzipEncodedContent: true, enableGzipContentEncoding: true, }, + { + filename: TextContentWithContentEncodingWithNoTransformToOverwrite, + filesize: TextContentSize, + keepCacheControlNoTransform: true, + enableGzipEncodedContent: false, + enableGzipContentEncoding: true, + }, + { + filename: TextContentWithContentEncodingWithoutNoTransformToOverwrite, + filesize: TextContentSize, + keepCacheControlNoTransform: false, + 
enableGzipEncodedContent: false, + enableGzipContentEncoding: true, + }, + { + filename: GzipContentWithoutContentEncodingToOverwrite, + filesize: TextContentSize, + keepCacheControlNoTransform: true, // it's a don't care in this case + enableGzipEncodedContent: true, + enableGzipContentEncoding: false, + }, { + filename: GzipContentWithContentEncodingWithNoTransformToOverwrite, + filesize: TextContentSize, + keepCacheControlNoTransform: true, + enableGzipEncodedContent: true, + enableGzipContentEncoding: true, + }, { + filename: GzipContentWithContentEncodingWithoutNoTransformToOverwrite, + filesize: TextContentSize, + keepCacheControlNoTransform: false, + enableGzipEncodedContent: true, + enableGzipContentEncoding: true, + }, } for _, fmd := range fmds { diff --git a/tools/integration_tests/gzip/write_gzip_test.go b/tools/integration_tests/gzip/write_gzip_test.go new file mode 100644 index 0000000000..d03e74c6f1 --- /dev/null +++ b/tools/integration_tests/gzip/write_gzip_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Provides integration tests for gzip objects in gcsfuse mounts. 
+package gzip_test + +import ( + "path" + "testing" + + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/gzip/helpers" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/operations" + "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" +) + +// Size of the overwritten content created in bytes. +const OverwittenFileSize = 1000 + +// Verify that the passed file exists on the GCS test-bucket and in the mounted bucket +// and its size in the mounted directory matches that of the GCS object. Also verify +// that the passed file in the mounted bucket matches the corresponding +// GCS object in content. +// GCS object. +func verifyFullFileOverwrite(t *testing.T, filename string) { + mountedFilePath := path.Join(setup.MntDir(), TestBucketPrefixPath, filename) + gcsObjectPath := path.Join(setup.TestBucket(), TestBucketPrefixPath, filename) + gcsObjectSize, err := operations.GetGcsObjectSize(gcsObjectPath) + if err != nil { + t.Fatalf("Failed to get size of gcs object %s: %v\n", gcsObjectPath, err) + } + + fi, err := operations.StatFile(mountedFilePath) + if err != nil || fi == nil { + t.Fatalf("Failed to get stat info of mounted file %s: %v\n", mountedFilePath, err) + } + + if (*fi).Size() != int64(gcsObjectSize) { + t.Fatalf("Size of file mounted through gcsfuse (%s, %d) doesn't match the size of the file on GCS (%s, %d)", + mountedFilePath, (*fi).Size(), gcsObjectPath, gcsObjectSize) + } + + // No need to worry about gzipping the overwritten data, because it's + // expensive to invoke a gzip-writer and unnecessary in this case. + // All we are interested in testing is that the content of the overwritten + // gzip file matches in size with that of the source file that was used to + // overwrite it. 
+ tempfile, err := helpers.CreateLocalTempFile(OverwittenFileSize, false) + if err != nil { + t.Fatalf("Failed to create local temp file for overwriting existing gzip object: %v", err) + } + defer operations.RemoveFile(tempfile) + + err = operations.CopyFileAllowOverwrite(tempfile, mountedFilePath) + if err != nil { + t.Fatalf("Failed to copy/overwrite temp file %s to existing gzip object/file %s: %v", tempfile, mountedFilePath, err) + } + + gcsObjectSize, err = operations.GetGcsObjectSize(gcsObjectPath) + if err != nil { + t.Fatalf("Failed to get size of gcs object %s: %v\n", gcsObjectPath, err) + } + + if gcsObjectSize != OverwittenFileSize { + t.Fatalf("Size of overwritten gcs object (%s, %d) doesn't match that of the expected overwrite size (%s, %d)", gcsObjectPath, gcsObjectSize, tempfile, OverwittenFileSize) + } +} + +func TestGzipEncodedTextFileWithNoTransformFullFileOverwrite(t *testing.T) { + verifyFullFileOverwrite(t, TextContentWithContentEncodingWithNoTransformToOverwrite) +} + +func TestGzipEncodedTextFileWithoutNoTransformFullFileOverwrite(t *testing.T) { + verifyFullFileOverwrite(t, TextContentWithContentEncodingWithoutNoTransformToOverwrite) +} + +func TestGzipUnencodedGzipFileFullFileOverwrite(t *testing.T) { + verifyFullFileOverwrite(t, GzipContentWithoutContentEncodingToOverwrite) +} + +func TestGzipEncodedGzipFileWithNoTransformFullFileOverwrite(t *testing.T) { + verifyFullFileOverwrite(t, GzipContentWithContentEncodingWithNoTransformToOverwrite) +} + +func TestGzipEncodedGzipFileWithoutNoTransformFullFileOverwrite(t *testing.T) { + verifyFullFileOverwrite(t, GzipContentWithContentEncodingWithoutNoTransformToOverwrite) +} diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index 363217dde9..c359880100 100755 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -273,6 +273,7 @@ gcsfuse --implicit-dirs 
--enable-storage-client-library=false $TEST_BUCKET_NAME GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run integration tests from gzip package/directory with static mounting gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/gzip/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index 236576f2ed..0944954ccf 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -29,38 +29,55 @@ import ( "syscall" ) -func CopyFile(srcFileName string, newFileName string) (err error) { - if _, err = os.Stat(newFileName); err == nil { - err = fmt.Errorf("Copied file %s already present", newFileName) - return +func copyFile(srcFileName, dstFileName string, allowOverwrite bool) (err error) { + if !allowOverwrite { + if _, err = os.Stat(dstFileName); err == nil { + err = fmt.Errorf("destination file %s already present", dstFileName) + return + } } source, err := os.OpenFile(srcFileName, syscall.O_DIRECT, FilePermission_0600) if err != nil { - err = fmt.Errorf("File %s opening error: %v", srcFileName, err) + err = fmt.Errorf("file %s opening error: %v", srcFileName, err) return } // Closing file at the end. 
defer CloseFile(source) - destination, err := os.OpenFile(newFileName, os.O_WRONLY|os.O_CREATE|syscall.O_DIRECT, FilePermission_0600) + var destination *os.File + if allowOverwrite { + destination, err = os.OpenFile(dstFileName, os.O_WRONLY|os.O_CREATE|syscall.O_DIRECT|os.O_TRUNC, FilePermission_0600) + } else { + destination, err = os.OpenFile(dstFileName, os.O_WRONLY|os.O_CREATE|syscall.O_DIRECT, FilePermission_0600) + } + if err != nil { - err = fmt.Errorf("Copied file creation error: %v", err) + err = fmt.Errorf("copied file creation error: %v", err) return } + // Closing file at the end. defer CloseFile(destination) // File copying with io.Copy() utility. _, err = io.Copy(destination, source) if err != nil { - err = fmt.Errorf("Error in file copying: %v", err) + err = fmt.Errorf("error in file copying: %v", err) return } return } +func CopyFile(srcFileName, newFileName string) (err error) { + return copyFile(srcFileName, newFileName, false) +} + +func CopyFileAllowOverwrite(srcFileName, newFileName string) (err error) { + return copyFile(srcFileName, newFileName, true) +} + func ReadFile(filePath string) (content []byte, err error) { file, err := os.OpenFile(filePath, os.O_RDONLY|syscall.O_DIRECT, FilePermission_0600) if err != nil { @@ -293,10 +310,11 @@ func StatFile(file string) (*fs.FileInfo, error) { // Finds if two local files have identical content (equivalnt to binary diff). // Needs (a) both files to exist, (b)read permission on both the files, (c) both -// inputs to be proper files, symlinks/directories not supported. +// inputs to be proper files, i.e. directories not supported. // Compares file names first. If different, compares sizes next. -// If sizes match, then compares hashes of both the files. -// Not a good idea for very large files as it loads both the files in the memory completely. +// If sizes match, then compares the contents of both the files. 
+// Not a good idea for very large files as it loads both the files' contents in +// the memory completely. // Returns 0 if no error and files match. // Returns 1 if files don't match and captures reason for mismatch in err. // Returns 2 if any error. @@ -363,7 +381,9 @@ func executeToolCommandf(tool string, format string, args ...any) ([]byte, error return stdout.Bytes(), nil } -// Executes any given gcloud command with given args +// Executes any given gcloud command with given args. +// Using `gcloud alpha` instead of `gcloud` as the latter isn't supported +// on some VMs e.e. kokoro VMs and rhel/centos VMs etc. func ExecuteGcloudCommandf(format string, args ...any) ([]byte, error) { return executeToolCommandf("gcloud alpha", format, args...) } From ea06eb4164db79b71470cc0f4bcaf59b7ecde31a Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Mon, 14 Aug 2023 10:56:58 +0000 Subject: [PATCH 36/46] Fix kokoro job failure for gzip integration tests (#1276) Kokoro VM is based on a specialized linux image created by the Kokoro team for running jobs on it. This has an old version of gcloud on it, and thus the gcsfuse integration test jobs use `gcloud alpha` on it as opposed to `gcloud` command. To make matters worse, on the kokoro VM, `gcloud alpha` doesn't support the `-Z` option as well as the options for updating object metadata. Both these features are required for integration tests for GCS objects with content-encoding gzip in GCSFuse. Hence, for both these features, switching from `gcloud alpha` to equivalent `gsutil` commands. 
--- .../util/operations/file_operations.go | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index 0944954ccf..faee31dfd3 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -384,16 +384,21 @@ func executeToolCommandf(tool string, format string, args ...any) ([]byte, error // Executes any given gcloud command with given args. // Using `gcloud alpha` instead of `gcloud` as the latter isn't supported // on some VMs e.e. kokoro VMs and rhel/centos VMs etc. -func ExecuteGcloudCommandf(format string, args ...any) ([]byte, error) { +func executeGcloudCommandf(format string, args ...any) ([]byte, error) { return executeToolCommandf("gcloud alpha", format, args...) } +// Executes any given gsutil command with given args. +func executeGsutilCommandf(format string, args ...any) ([]byte, error) { + return executeToolCommandf("gsutil", format, args...) +} + // Returns size of a give GCS object with path (without 'gs://'). // Fails if the object doesn't exist or permission to read object's metadata is not // available. // Uses 'gcloud storage du -s gs://gcsObjPath'. func GetGcsObjectSize(gcsObjPath string) (int, error) { - stdout, err := ExecuteGcloudCommandf("storage du -s gs://%s", gcsObjPath) + stdout, err := executeGcloudCommandf("storage du -s gs://%s", gcsObjPath) if err != nil { return 0, err } @@ -414,7 +419,7 @@ func GetGcsObjectSize(gcsObjPath string) (int, error) { // available. 
// Uses 'gcloud storage cp gs://gcsObjPath localPath' func DownloadGcsObject(gcsObjPath, localPath string) error { - _, err := ExecuteGcloudCommandf("storage cp gs://%s %s", gcsObjPath, localPath) + _, err := executeGcloudCommandf("storage cp gs://%s %s", gcsObjPath, localPath) if err != nil { return err } @@ -429,9 +434,11 @@ func DownloadGcsObject(gcsObjPath, localPath string) error { func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error { var err error if uploadGzipEncoded { - _, err = ExecuteGcloudCommandf("storage cp -Z %s gs://%s", localPath, gcsObjPath) + // Using gsutil instead of `gcloud alpha` here as `gcloud alpha` + // option `-Z` isn't supported on the kokoro VM. + _, err = executeGsutilCommandf("cp -Z %s gs://%s", localPath, gcsObjPath) } else { - _, err = ExecuteGcloudCommandf("storage cp %s gs://%s", localPath, gcsObjPath) + _, err = executeGcloudCommandf("storage cp %s gs://%s", localPath, gcsObjPath) } return err @@ -442,15 +449,18 @@ func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error // available. // Uses 'gcloud storage rm gs://gcsObjPath' func DeleteGcsObject(gcsObjPath string) error { - _, err := ExecuteGcloudCommandf("storage rm gs://%s", gcsObjPath) + _, err := executeGcloudCommandf("storage rm gs://%s", gcsObjPath) return err } // Clears cache-control attributes on given GCS object (with path without 'gs://'). // Fails if the file doesn't exist or permission to modify object's metadata is not // available. -// Uses 'gcloud storage objects update gs://gs://gcsObjPath --cache-control=' ' ' +// Uses 'gsutil setmeta -h "Cache-Control:" gs://' +// Preferred approach is 'gcloud storage objects update gs://gs://gcsObjPath --cache-control=' ' ' but it doesn't work on kokoro VM. 
func ClearCacheControlOnGcsObject(gcsObjPath string) error { - _, err := ExecuteGcloudCommandf("storage objects update gs://%s --cache-control=''", gcsObjPath) + // Using gsutil instead of `gcloud alpha` here as `gcloud alpha` + // implementation for updating object metadata is missing on the kokoro VM. + _, err := executeGsutilCommandf("setmeta -h \"Cache-Control:\" gs://%s ", gcsObjPath) return err } From 5555c257a6850eef1eb2309919043bbbc59bdf9f Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Mon, 14 Aug 2023 23:56:33 +0530 Subject: [PATCH 37/46] fixing requirements (#1281) --- perfmetrics/scripts/requirements.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/perfmetrics/scripts/requirements.txt b/perfmetrics/scripts/requirements.txt index 88106720b0..883c3f653b 100644 --- a/perfmetrics/scripts/requirements.txt +++ b/perfmetrics/scripts/requirements.txt @@ -93,6 +93,10 @@ dataclasses==0.6 \ --hash=sha256:454a69d788c7fda44efd71e259be79577822f5e3f53f029a22d08004e951dc9f \ --hash=sha256:6988bd2b895eef432d562370bb707d540f32f7360ab13da45340101bc2307d84 # via -r requirements.in +exceptiongroup==1.1.1 \ + --hash=sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e \ + --hash=sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785 + # via pytest google-api-core[grpc]==2.11.0 \ --hash=sha256:4b9bb5d5a380a0befa0573b302651b8a9a89262c1730e37bf423cec511804c22 \ --hash=sha256:ce222e27b0de0d7bc63eb043b956996d6dccab14cc3b690aaea91c9cc99dc16e @@ -273,6 +277,10 @@ six==1.16.0 \ # via # google-auth # google-auth-httplib2 +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via pytest testresources==2.0.1 \ --hash=sha256:67a361c3a2412231963b91ab04192209aa91a1aa052f0ab87245dbea889d1282 \ 
--hash=sha256:ee9d1982154a1e212d4e4bac6b610800bfb558e4fb853572a827bc14a96e4417 From 5e27bc2cee5a4f3857d30711eb2cb9fe65bf21f4 Mon Sep 17 00:00:00 2001 From: Nitin Garg <113666283+gargnitingoogle@users.noreply.github.com> Date: Wed, 16 Aug 2023 03:55:20 +0000 Subject: [PATCH 38/46] Replace gcloud with gsutil in file-op utils (#1280) Replaces `gcloud` or `gcloud alpha` with `gsutil` in all gcs related utilities in file_operations utils in integration_tests. This is to avoid failures related to compatibility in kokoro VMs which support an old version of gcloud cli. --- .../util/operations/file_operations.go | 27 +++++++++---------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/tools/integration_tests/util/operations/file_operations.go b/tools/integration_tests/util/operations/file_operations.go index faee31dfd3..fcff27eba9 100644 --- a/tools/integration_tests/util/operations/file_operations.go +++ b/tools/integration_tests/util/operations/file_operations.go @@ -381,13 +381,6 @@ func executeToolCommandf(tool string, format string, args ...any) ([]byte, error return stdout.Bytes(), nil } -// Executes any given gcloud command with given args. -// Using `gcloud alpha` instead of `gcloud` as the latter isn't supported -// on some VMs e.e. kokoro VMs and rhel/centos VMs etc. -func executeGcloudCommandf(format string, args ...any) ([]byte, error) { - return executeToolCommandf("gcloud alpha", format, args...) -} - // Executes any given gsutil command with given args. func executeGsutilCommandf(format string, args ...any) ([]byte, error) { return executeToolCommandf("gsutil", format, args...) @@ -396,9 +389,10 @@ func executeGsutilCommandf(format string, args ...any) ([]byte, error) { // Returns size of a give GCS object with path (without 'gs://'). // Fails if the object doesn't exist or permission to read object's metadata is not // available. -// Uses 'gcloud storage du -s gs://gcsObjPath'. +// Uses 'gsutil du -s gs://gcsObjPath'. 
+// Alternative 'gcloud storage du -s gs://gcsObjPath', but it doesn't work on kokoro VM. func GetGcsObjectSize(gcsObjPath string) (int, error) { - stdout, err := executeGcloudCommandf("storage du -s gs://%s", gcsObjPath) + stdout, err := executeGsutilCommandf("du -s gs://%s", gcsObjPath) if err != nil { return 0, err } @@ -417,9 +411,10 @@ func GetGcsObjectSize(gcsObjPath string) (int, error) { // Downloads given GCS object (with path without 'gs://') to localPath. // Fails if the object doesn't exist or permission to read object is not // available. -// Uses 'gcloud storage cp gs://gcsObjPath localPath' +// Uses 'gsutil cp gs://gcsObjPath localPath' +// Alternative 'gcloud storage cp gs://gcsObjPath localPath' but it doesn't work on kokoro VM. func DownloadGcsObject(gcsObjPath, localPath string) error { - _, err := executeGcloudCommandf("storage cp gs://%s %s", gcsObjPath, localPath) + _, err := executeGsutilCommandf("cp gs://%s %s", gcsObjPath, localPath) if err != nil { return err } @@ -430,7 +425,8 @@ func DownloadGcsObject(gcsObjPath, localPath string) error { // Uploads given local file to GCS object (with path without 'gs://'). // Fails if the file doesn't exist or permission to write to object/bucket is not // available. -// Uses 'gcloud storage cp localPath gs://gcsObjPath' +// Uses 'gsutil cp localPath gs://gcsObjPath' +// Alternative 'gcloud storage cp localPath gs://gcsObjPath' but it doesn't work on kokoro VM. func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error { var err error if uploadGzipEncoded { @@ -438,7 +434,7 @@ func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error // option `-Z` isn't supported on the kokoro VM. 
_, err = executeGsutilCommandf("cp -Z %s gs://%s", localPath, gcsObjPath) } else { - _, err = executeGcloudCommandf("storage cp %s gs://%s", localPath, gcsObjPath) + _, err = executeGsutilCommandf("cp %s gs://%s", localPath, gcsObjPath) } return err @@ -447,9 +443,10 @@ func UploadGcsObject(localPath, gcsObjPath string, uploadGzipEncoded bool) error // Deletes a given GCS object (with path without 'gs://'). // Fails if the object doesn't exist or permission to delete object is not // available. -// Uses 'gcloud storage rm gs://gcsObjPath' +// Uses 'gsutil rm gs://gcsObjPath' +// Alternative 'gcloud storage rm gs://gcsObjPath' but it doesn't work on kokoro VM. func DeleteGcsObject(gcsObjPath string) error { - _, err := executeGcloudCommandf("storage rm gs://%s", gcsObjPath) + _, err := executeGsutilCommandf("rm gs://%s", gcsObjPath) return err } From 91e0fe0652fb5d60e14b26483fcf3d3e5698bbde Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Wed, 16 Aug 2023 12:38:29 +0530 Subject: [PATCH 39/46] Deprecate --enable-storage-client-library flag (#1258) * local changes * local changes * local changes * local changes * removed token bucket and added unit tests * fixing lint * small fix- renaming * small fix- renaming * fix lint * adding licence * testing * back to changes * fixing comments * removing throttle_test * depricating from enable storage client library flag * temporary using env variable to set end point * fixing linux tests * adding throttle test * fixing comment * fixing lint * fixed comment * lint tests * lint tests * removing endpoint changes * removing endpoint changes * removing unnecessary changes * removing connection * fixing comment * empty commit * empty commit * removing endponti changes * fixing comments * formating * removing debug http * fixing linux tests * adding back debug_http flag * fixing comments * renaming function * fixing comments * fixing comments * fixing comments in integration test 
scrip * fixing comments in integration test scrip * fixing comments in integration test scrip * fixing comments in integration test script * updating vendor client * fixing comment * removing concurrent read * empty commit * replacing mountWithConn to mountWithStorageHandle * empty commit * testing * testing * back to original changes --- benchmarks/concurrent_read/job/job.go | 173 ---------------- benchmarks/concurrent_read/main.go | 192 ------------------ benchmarks/concurrent_read/readers/google.go | 71 ------- benchmarks/concurrent_read/readers/util.go | 40 ---- benchmarks/concurrent_read/readers/vendor.go | 68 ------- docs/troubleshooting.md | 8 +- flags.go | 25 +-- flags_test.go | 15 +- internal/gcsx/bucket_manager.go | 32 +-- internal/gcsx/bucket_manager_test.go | 33 +-- internal/gcsx/connection.go | 61 ------ internal/storage/storage_handle.go | 2 + main.go | 90 +------- main_test.go | 10 - mount.go | 6 +- .../continuous_test/gcp_ubuntu/build.sh | 2 +- .../scripts/ls_metrics/listing_benchmark.py | 2 +- .../resnet/setup_scripts/setup_container.sh | 2 +- .../explicit_dir/explicit_dir_test.go | 2 +- .../implicit_dir/implicit_dir_test.go | 2 +- .../operations/operations_test.go | 4 +- .../read_large_files/read_large_files_test.go | 2 +- .../run_tests_mounted_directory.sh | 168 +++++---------- .../perisistent_mounting.go | 6 +- .../write_large_files_test.go | 2 +- tools/mount_gcsfuse/main.go | 1 - 26 files changed, 97 insertions(+), 922 deletions(-) delete mode 100644 benchmarks/concurrent_read/job/job.go delete mode 100644 benchmarks/concurrent_read/main.go delete mode 100644 benchmarks/concurrent_read/readers/google.go delete mode 100644 benchmarks/concurrent_read/readers/util.go delete mode 100644 benchmarks/concurrent_read/readers/vendor.go delete mode 100644 internal/gcsx/connection.go diff --git a/benchmarks/concurrent_read/job/job.go b/benchmarks/concurrent_read/job/job.go deleted file mode 100644 index d2619b1679..0000000000 --- 
a/benchmarks/concurrent_read/job/job.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package job - -import ( - "context" - "fmt" - "io" - "runtime/trace" - "time" - - "github.com/googlecloudplatform/gcsfuse/benchmarks/concurrent_read/readers" - "github.com/googlecloudplatform/gcsfuse/internal/logger" -) - -const ( - KB = 1024 - MB = 1024 * KB -) - -type Job struct { - // Choose from HTTP/1.1, HTTP/2, GRPC - Protocol string - // Max connections for this job - Connections int - // Choose from vendor, google. 
- Implementation string -} - -type Stats struct { - Job *Job - TotalBytes int64 - TotalFiles int - Mbps []float32 - Duration time.Duration -} - -func (s *Stats) Throughput() float32 { - mbs := float32(s.TotalBytes) / float32(MB) - seconds := float32(s.Duration) / float32(time.Second) - return mbs / seconds -} - -func (s *Stats) Report() { - logger.Infof( - "# TEST READER %s\n"+ - "Protocol: %s (%v connections per host)\n"+ - "Total bytes: %d\n"+ - "Total files: %d\n"+ - "Avg Throughput: %.1f MB/s\n\n", - s.Job.Protocol, - s.Job.Implementation, - s.Job.Connections, - s.TotalBytes, - s.TotalFiles, - s.Throughput(), - ) -} - -func (s *Stats) Query(key string) string { - switch key { - case "Protocol": - return s.Job.Protocol - case "Implementation": - return s.Job.Implementation - case "Connections": - return fmt.Sprintf("%d", s.Job.Connections) - case "TotalBytes (MB)": - return fmt.Sprintf("%d", s.TotalBytes/MB) - case "TotalFiles": - return fmt.Sprintf("%d", s.TotalFiles) - case "Throughput (MB/s)": - return fmt.Sprintf("%.1f", s.Throughput()) - default: - return "" - } -} - -type Client interface { - NewReader(objectName string) (io.ReadCloser, error) -} - -func (job *Job) Run(ctx context.Context, bucketName string, objects []string) (*Stats, error) { - var client Client - var err error - - switch job.Implementation { - case "vendor": - client, err = readers.NewVendorClient(ctx, job.Protocol, job.Connections, bucketName) - case "google": - client, err = readers.NewGoogleClient(ctx, job.Protocol, job.Connections, bucketName) - default: - panic(fmt.Errorf("Unknown reader implementation: %q", job.Implementation)) - } - - if err != nil { - return nil, err - } - stats := job.testReader(ctx, client, objects) - return stats, nil -} - -func (job *Job) testReader(ctx context.Context, client Client, objectNames []string) *Stats { - stats := &Stats{Job: job} - reportDuration := 10 * time.Second - ticker := time.NewTicker(reportDuration) - defer ticker.Stop() - - doneBytes := 
make(chan int64) - doneFiles := make(chan int) - start := time.Now() - - // run readers concurrently - for _, objectName := range objectNames { - name := objectName - go func() { - region := trace.StartRegion(ctx, "NewReader") - reader, err := client.NewReader(name) - region.End() - if err != nil { - fmt.Printf("Skip %q: %s", name, err) - return - } - defer reader.Close() - - p := make([]byte, 128*1024) - region = trace.StartRegion(ctx, "ReadObject") - for { - n, err := reader.Read(p) - - doneBytes <- int64(n) - if err == io.EOF { - break - } else if err != nil { - panic(fmt.Errorf("read %q fails: %w", name, err)) - } - } - region.End() - doneFiles <- 1 - return - }() - } - - // collect test stats - var lastTotalBytes int64 - for stats.TotalFiles < len(objectNames) { - select { - case b := <-doneBytes: - stats.TotalBytes += b - case f := <-doneFiles: - stats.TotalFiles += f - case <-ticker.C: - readBytes := stats.TotalBytes - lastTotalBytes - lastTotalBytes = stats.TotalBytes - mbps := float32(readBytes/MB) / float32(reportDuration/time.Second) - stats.Mbps = append(stats.Mbps, mbps) - } - } - stats.Duration = time.Since(start) - return stats -} diff --git a/benchmarks/concurrent_read/main.go b/benchmarks/concurrent_read/main.go deleted file mode 100644 index b3375af419..0000000000 --- a/benchmarks/concurrent_read/main.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2020 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Concurrently read objects on GCS provided by stdin. The user must ensure -// (1) all the objects come from the same bucket, and -// (2) the script is authorized to read from the bucket. -// The stdin should contain N lines of object name, in the form of -// "gs://bucket-name/object-name". -// -// This benchmark only tests the internal reader implementation, which -// doesn't have FUSE involved. -// -// Usage Example: -// gsutil ls 'gs://bucket/prefix*' | go run ./benchmarks/concurrent_read/ -// - -package main - -import ( - "bufio" - "context" - "flag" - "fmt" - "io" - "log" - "os" - "runtime/trace" - "strings" - - "github.com/googlecloudplatform/gcsfuse/benchmarks/concurrent_read/job" - "github.com/googlecloudplatform/gcsfuse/internal/perf" -) - -type BenchmarkConfig struct { - // The GCS bucket storing the objects to be read. - Bucket string - // The GCS objects as 'gs://...' to be read from the bucket above. - Objects []string - // Each job reads all the objects. 
- Jobs []*job.Job -} - -func getJobs() []*job.Job { - return []*job.Job{ - &job.Job{ - Protocol: "HTTP/1.1", - Connections: 50, - Implementation: "vendor", - }, - &job.Job{ - Protocol: "HTTP/2", - Connections: 50, - Implementation: "vendor", - }, - &job.Job{ - Protocol: "HTTP/1.1", - Connections: 50, - Implementation: "google", - }, - &job.Job{ - Protocol: "HTTP/2", - Connections: 50, - Implementation: "google", - }, - } -} - -func run(cfg BenchmarkConfig) { - ctx := context.Background() - - ctx, traceTask := trace.NewTask(ctx, "ReadAllObjects") - defer traceTask.End() - - var statsList []*job.Stats - for _, job := range cfg.Jobs { - stats, err := job.Run(ctx, cfg.Bucket, cfg.Objects) - if err != nil { - fmt.Printf("Job failed: %v", job) - continue - } - stats.Report() - statsList = append(statsList, stats) - } - printSummary(statsList) -} - -func printSummary(statsList []*job.Stats) { - cols := []string{ - "Protocol", - "Implementation", - "Connections", - "TotalBytes (MB)", - "TotalFiles", - "Throughput (MB/s)", - } - for _, col := range cols { - fmt.Printf(" %s |", col) - } - fmt.Println("") - for _, col := range cols { - fmt.Printf(strings.Repeat("-", len(col)+6)) - } - fmt.Println("") - for _, stats := range statsList { - for _, col := range cols { - value := stats.Query(col) - padding := strings.Repeat(" ", len(col)+4-len(value)) - fmt.Printf("%s%s |", padding, value) - } - fmt.Println("") - } -} - -func getLinesFromStdin() (lines []string) { - reader := bufio.NewReader(os.Stdin) - for { - line, err := reader.ReadString('\n') - if err != nil { - if err == io.EOF { - err = nil - break - } - panic(fmt.Errorf("Stdin error: %w", err)) - } - lines = append(lines, line) - } - return -} - -func getObjectNames() (bucketName string, objectNames []string) { - uris := getLinesFromStdin() - for _, uri := range uris { - path := uri[len("gs://"):] - path = strings.TrimRight(path, "\n") - segs := strings.Split(path, "/") - if len(segs) <= 1 { - panic(fmt.Errorf("Not a file 
name: %q", uri)) - } - - if bucketName == "" { - bucketName = segs[0] - } else if bucketName != segs[0] { - panic(fmt.Errorf("Multiple buckets: %q, %q", bucketName, segs[0])) - } - - objectName := strings.Join(segs[1:], "/") - objectNames = append(objectNames, objectName) - } - return -} - -func main() { - flag.Parse() - - go perf.HandleCPUProfileSignals() - - // Enable trace - f, err := os.Create("/tmp/concurrent_read_trace.out") - if err != nil { - log.Fatalf("failed to create trace output file: %v", err) - } - defer func() { - if err := f.Close(); err != nil { - log.Fatalf("failed to close trace file: %v", err) - } - }() - if err := trace.Start(f); err != nil { - log.Fatalf("failed to start trace: %v", err) - } - defer trace.Stop() - - bucketName, objectNames := getObjectNames() - config := BenchmarkConfig{ - Bucket: bucketName, - Objects: objectNames, - Jobs: getJobs(), - } - run(config) - - return -} diff --git a/benchmarks/concurrent_read/readers/google.go b/benchmarks/concurrent_read/readers/google.go deleted file mode 100644 index 38ed90c20e..0000000000 --- a/benchmarks/concurrent_read/readers/google.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2020 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package readers - -import ( - "context" - "errors" - "io" - "net/http" - - "cloud.google.com/go/storage" - "github.com/jacobsa/gcloud/gcs" - "golang.org/x/oauth2" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" -) - -// Google reader depends on "cloud.google.com/go/storage" -type googleClient struct { - ctx context.Context - bucket *storage.BucketHandle -} - -func NewGoogleClient(ctx context.Context, protocol string, connections int, bucketName string) (*googleClient, error) { - client, err := getStorageClient(ctx, protocol, connections) - if err != nil { - return nil, err - } - bucket := client.Bucket(bucketName) - return &googleClient{ctx, bucket}, nil -} - -func (c *googleClient) NewReader(objectName string) (io.ReadCloser, error) { - return c.bucket.Object(objectName).NewReader(c.ctx) -} - -func getStorageClient(ctx context.Context, protocol string, connections int) (*storage.Client, error) { - if protocol == "GRPC" { - return getGRPCClient() - } - tokenSrc, err := google.DefaultTokenSource(ctx, gcs.Scope_FullControl) - if err != nil { - return nil, err - } - return storage.NewClient( - ctx, - option.WithUserAgent(userAgent), - option.WithHTTPClient(&http.Client{ - Transport: &oauth2.Transport{ - Base: getTransport(protocol, connections), - Source: tokenSrc, - }, - }), - ) -} - -func getGRPCClient() (*storage.Client, error) { - return nil, errors.New("GRPC is not supported") -} diff --git a/benchmarks/concurrent_read/readers/util.go b/benchmarks/concurrent_read/readers/util.go deleted file mode 100644 index 07b2a7a30f..0000000000 --- a/benchmarks/concurrent_read/readers/util.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package readers - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -const userAgent = "gcsfuse/dev Benchmark (concurrent_read)" - -func getTransport(protocol string, connections int) *http.Transport { - switch protocol { - case "HTTP/1.1": - return &http.Transport{ - MaxConnsPerHost: connections, - // This disables HTTP/2 in the transport. - TLSNextProto: make( - map[string]func(string, *tls.Conn) http.RoundTripper, - ), - } - case "HTTP/2": - return http.DefaultTransport.(*http.Transport).Clone() - default: - panic(fmt.Errorf("Unsupported protocol: %q", protocol)) - } -} diff --git a/benchmarks/concurrent_read/readers/vendor.go b/benchmarks/concurrent_read/readers/vendor.go deleted file mode 100644 index 644ea36b9e..0000000000 --- a/benchmarks/concurrent_read/readers/vendor.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package readers - -import ( - "context" - "fmt" - "io" - "net/url" - - "github.com/jacobsa/gcloud/gcs" - "golang.org/x/oauth2/google" -) - -// Vendor reader depends on "github.com/jacobsa/gcloud/gcs" -type vendorClient struct { - ctx context.Context - bucket gcs.Bucket -} - -func NewVendorClient(ctx context.Context, protocol string, connections int, bucketName string) (*vendorClient, error) { - tokenSrc, err := google.DefaultTokenSource(ctx, gcs.Scope_FullControl) - if err != nil { - return nil, err - } - endpoint, _ := url.Parse("https://storage.googleapis.com:443") - config := &gcs.ConnConfig{ - Url: endpoint, - TokenSource: tokenSrc, - UserAgent: userAgent, - Transport: getTransport(protocol, connections), - } - conn, err := gcs.NewConn(config) - if err != nil { - return nil, err - } - bucket, err := conn.OpenBucket( - ctx, - &gcs.OpenBucketOptions{ - Name: bucketName, - }, - ) - if err != nil { - panic(fmt.Errorf("Cannot open bucket %q: %w", bucketName, err)) - } - return &vendorClient{ctx, bucket}, nil -} - -func (c *vendorClient) NewReader(objectName string) (io.ReadCloser, error) { - return c.bucket.NewReader( - c.ctx, - &gcs.ReadObjectRequest{ - Name: objectName, - }, - ) -} diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index eebc50746a..268fdae477 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,4 +1,4 @@ -# Troubleshooting for production issues + # Troubleshooting for production issues This page enumerates some common user facing issues around GCSFuse and also discusses potential solutions to the same. | Issues | Fix | @@ -11,9 +11,9 @@ This page enumerates some common user facing issues around GCSFuse and also disc | Input/Output Error | It’s a generic error, but the most probable culprit is the bucket not having the right permission for Cloud Storage FUSE to operate on. 
Ref - [here](https://stackoverflow.com/questions/36382704/gcsfuse-input-output-error) | | Generic NO_PUBKEY Error - while installing Cloud Storage FUSE on ubuntu 22.04 | It happens while running - ```sudo apt-get update``` - working on installing Cloud Storage FUSE. You just have to add the pubkey you get in the error using the below command: ```sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys ``` And then try running ```sudo apt-get update``` | | Cloud Storage FUSE fails with Docker container | Though not tested extensively, the [community](https://stackoverflow.com/questions/65715624/permission-denied-with-gcsfuse-in-unprivileged-ubuntu-based-docker-container) reports that Cloud Storage FUSE works only in privileged mode when used with Docker. There are [solutions](https://cloud.google.com/iam/docs/service-account-overview) which exist and claim to do so without privileged mode, but these are not tested by the Cloud Storage FUSE team | -| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithConn: fs.NewServer: create file system: SetUpBucket: OpenBucket: Bad credentials for bucket BUCKET_NAME: permission denied | Check the bucket name. Make sure it is within your project. Make sure the applied roles on the bucket contain storage.objects.list permission. You can refer to them [here](https://cloud.google.com/storage/docs/access-control/iam-roles). | -| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithConn: fs.NewServer: create file system: SetUpBucket: OpenBucket: Unknown bucket BUCKET_NAME: no such file or directory | Check the bucket name. Make sure the [service account](https://www.google.com/url?q=https://cloud.google.com/iam/docs/service-accounts&sa=D&source=docs&ust=1679992003850814&usg=AOvVaw3nJ6wNQK4FZdgm8gBTS82l) has permissions to access the files. It must at least have the permissions of the Storage Object Viewer role. 
| -| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithConn: Mount: mount: running fusermount: exit status 1 stderr: /bin/fusermount: fuse device not found, try 'modprobe fuse' first | To run the container locally, add the --privilege flag to the docker run command: ```docker run --privileged gcr.io/PROJECT/my-fs-app ```
  • You must create a local mount directory
  • If you want all the logs from the mount process use the --foreground flag in combination with the mount command: ```gcsfuse --foreground --debug_gcs --debug_fuse $GCSFUSE_BUCKET $MNT_DIR ```
  • Add --debug_http for HTTP request/response debug output.
  • Add --debug_fuse to enable fuse-related debugging output.
  • Add --debug_gcs to print GCS request and timing information.
| +| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithStorageHandle: fs.NewServer: create file system: SetUpBucket: OpenBucket: Bad credentials for bucket BUCKET_NAME: permission denied | Check the bucket name. Make sure it is within your project. Make sure the applied roles on the bucket contain storage.objects.list permission. You can refer to them [here](https://cloud.google.com/storage/docs/access-control/iam-roles). | +| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithStorageHandle: fs.NewServer: create file system: SetUpBucket: OpenBucket: Unknown bucket BUCKET_NAME: no such file or directory | Check the bucket name. Make sure the [service account](https://www.google.com/url?q=https://cloud.google.com/iam/docs/service-accounts&sa=D&source=docs&ust=1679992003850814&usg=AOvVaw3nJ6wNQK4FZdgm8gBTS82l) has permissions to access the files. It must at least have the permissions of the Storage Object Viewer role. | +| daemonize.Run: readFromProcess: sub-process: mountWithArgs: mountWithStorageHandle: Mount: mount: running fusermount: exit status 1 stderr: /bin/fusermount: fuse device not found, try 'modprobe fuse' first | To run the container locally, add the --privilege flag to the docker run command: ```docker run --privileged gcr.io/PROJECT/my-fs-app ```
  • You must create a local mount directory
  • If you want all the logs from the mount process use the --foreground flag in combination with the mount command: ```gcsfuse --foreground --debug_gcs --debug_fuse $GCSFUSE_BUCKET $MNT_DIR ```
  • Add --debug_http for HTTP request/response debug output.
  • Add --debug_fuse to enable fuse-related debugging output.
  • Add --debug_gcs to print GCS request and timing information.
| | Cloud Storage FUSE installation fails with an error at build time. | Only specific OS distributions are currently supported, learn more about [Installing Cloud Storage FUSE](https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/installing.md). | | Cloud Storage FUSE not mounting after reboot when entry is present in ```/etc/fstab``` with 1 or 2 as fsck order | Pass [_netdev option](https://github.com/GoogleCloudPlatform/gcsfuse/blob/master/docs/mounting.md#persisting-a-mount) in fstab entry (reference issue [here](https://github.com/GoogleCloudPlatform/gcsfuse/issues/1043)). With this option, mount will be attempted on reboot only when network is connected. | | Cloud Storage FUSE get stuck when using it to concurrently work with a large number of opened files (reference issue [here](https://github.com/GoogleCloudPlatform/gcsfuse/issues/1043)) | This happens when gcsfuse is mounted with http1 client (default) and the application using gcsfuse tries to keep more than value of `--max-conns-per-host` number of files opened. You can try (a) Passing a value higher than the number of files you want to keep open to `--max-conns-per-host` flag. (b) Adding some timeout for http client connections using `--http-client-timeout` flag. 
| diff --git a/flags.go b/flags.go index 0174d5cab2..9625b6c6ae 100644 --- a/flags.go +++ b/flags.go @@ -26,6 +26,7 @@ import ( "github.com/googlecloudplatform/gcsfuse/internal/logger" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" + "github.com/googlecloudplatform/gcsfuse/internal/storage" "github.com/urfave/cli" ) @@ -135,7 +136,7 @@ func newApp() (app *cli.App) { cli.StringFlag{ Name: "endpoint", - Value: "https://storage.googleapis.com:443", + Value: storage.GcsEndPoint, Usage: "The endpoint to connect to.", }, @@ -322,9 +323,8 @@ func newApp() (app *cli.App) { }, cli.BoolFlag{ - Name: "debug_http", - Usage: "Dump HTTP requests and responses to/from GCS, " + - "doesn't work when enable-storage-client-library flag is true.", + Name: "debug_http", + Usage: "This flag is currently unused.", }, cli.BoolFlag{ @@ -336,15 +336,6 @@ func newApp() (app *cli.App) { Name: "debug_mutex", Usage: "Print debug messages when a mutex is held too long.", }, - - ///////////////////////// - // Client - ///////////////////////// - - cli.BoolTFlag{ - Name: "enable-storage-client-library", - Usage: "If true, will use go storage client library otherwise jacobsa/gcloud", - }, }, } @@ -404,9 +395,6 @@ type flagStorage struct { DebugHTTP bool DebugInvariants bool DebugMutex bool - - // client - EnableStorageClientLibrary bool } const GCSFUSE_PARENT_PROCESS_DIR = "gcsfuse-parent-process-dir" @@ -534,13 +522,10 @@ func populateFlags(c *cli.Context) (flags *flagStorage, err error) { DebugFuseErrors: c.BoolT("debug_fuse_errors"), DebugFuse: c.Bool("debug_fuse"), DebugGCS: c.Bool("debug_gcs"), - DebugFS: c.Bool("debug_fs"), DebugHTTP: c.Bool("debug_http"), + DebugFS: c.Bool("debug_fs"), DebugInvariants: c.Bool("debug_invariants"), DebugMutex: c.Bool("debug_mutex"), - - // Client, - EnableStorageClientLibrary: c.Bool("enable-storage-client-library"), } // Handle the repeated "-o" flag. 
diff --git a/flags_test.go b/flags_test.go index 000aa5e2e0..a95c263a3d 100644 --- a/flags_test.go +++ b/flags_test.go @@ -23,6 +23,7 @@ import ( "time" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" + "github.com/googlecloudplatform/gcsfuse/internal/storage" . "github.com/jacobsa/oglematchers" . "github.com/jacobsa/ogletest" "github.com/urfave/cli" @@ -80,6 +81,7 @@ func (t *FlagsTest) Defaults() { ExpectEq(-1, f.EgressBandwidthLimitBytesPerSecond) ExpectEq(-1, f.OpRateLimitHz) ExpectTrue(f.ReuseTokenFromUrl) + ExpectEq(storage.GcsEndPoint, f.Endpoint.String()) // Tuning ExpectEq(4096, f.StatCacheCapacity) @@ -106,10 +108,9 @@ func (t *FlagsTest) Bools() { "reuse-token-from-url", "debug_fuse_errors", "debug_fuse", - "debug_gcs", "debug_http", + "debug_gcs", "debug_invariants", - "enable-storage-client-library", "enable-nonexistent-type-cache", } @@ -130,7 +131,6 @@ func (t *FlagsTest) Bools() { ExpectTrue(f.DebugGCS) ExpectTrue(f.DebugHTTP) ExpectTrue(f.DebugInvariants) - ExpectTrue(f.EnableStorageClientLibrary) ExpectTrue(f.EnableNonexistentTypeCache) // --foo=false form @@ -147,7 +147,6 @@ func (t *FlagsTest) Bools() { ExpectFalse(f.DebugGCS) ExpectFalse(f.DebugHTTP) ExpectFalse(f.DebugInvariants) - ExpectFalse(f.EnableStorageClientLibrary) ExpectFalse(f.EnableNonexistentTypeCache) // --foo=true form @@ -164,7 +163,6 @@ func (t *FlagsTest) Bools() { ExpectTrue(f.DebugGCS) ExpectTrue(f.DebugHTTP) ExpectTrue(f.DebugInvariants) - ExpectTrue(f.EnableStorageClientLibrary) ExpectTrue(f.EnableNonexistentTypeCache) } @@ -455,10 +453,3 @@ func (t *FlagsTest) TestValidateFlagsForValidSequentialReadSizeAndHTTP2ClientPro AssertEq(nil, err) } - -func (t *FlagsTest) TestDefaultValueOfEnableStorageClientLibraryFlag() { - var args []string = nil - f := parseArgs(args) - - ExpectTrue(f.EnableStorageClientLibrary) -} diff --git a/internal/gcsx/bucket_manager.go b/internal/gcsx/bucket_manager.go index 
d645564fba..335137778f 100644 --- a/internal/gcsx/bucket_manager.go +++ b/internal/gcsx/bucket_manager.go @@ -40,7 +40,6 @@ type BucketConfig struct { StatCacheCapacity int StatCacheTTL time.Duration EnableMonitoring bool - EnableStorageClientLibrary bool DebugGCS bool // Files backed by on object of length at least AppendThreshold that have @@ -75,7 +74,6 @@ type BucketManager interface { type bucketManager struct { config BucketConfig - conn *Connection storageHandle storage.StorageHandle // Garbage collector @@ -83,10 +81,9 @@ type bucketManager struct { stopGarbageCollecting func() } -func NewBucketManager(config BucketConfig, conn *Connection, storageHandle storage.StorageHandle) BucketManager { +func NewBucketManager(config BucketConfig, storageHandle storage.StorageHandle) BucketManager { bm := &bucketManager{ config: config, - conn: conn, storageHandle: storageHandle, } bm.gcCtx, bm.stopGarbageCollecting = context.WithCancel(context.Background()) @@ -151,25 +148,14 @@ func setUpRateLimiting( // // Special case: if the bucket name is canned.FakeBucketName, set up a fake // bucket as described in that package. 
-func (bm *bucketManager) SetUpGcsBucket(ctx context.Context, name string) (b gcs.Bucket, err error) { - if bm.config.EnableStorageClientLibrary { - b = bm.storageHandle.BucketHandle(name, bm.config.BillingProject) +func (bm *bucketManager) SetUpGcsBucket(name string) (b gcs.Bucket, err error) { + b = bm.storageHandle.BucketHandle(name, bm.config.BillingProject) - if reqtrace.Enabled() { - b = gcs.GetWrappedWithReqtraceBucket(b) - } - if bm.config.DebugGCS { - b = gcs.NewDebugBucket(b, logger.NewDebug("gcs: ")) - } - } else { - logger.Infof("OpenBucket(%q, %q)\n", name, bm.config.BillingProject) - b, err = bm.conn.OpenBucket( - ctx, - &gcs.OpenBucketOptions{ - Name: name, - BillingProject: bm.config.BillingProject, - }, - ) + if reqtrace.Enabled() { + b = gcs.GetWrappedWithReqtraceBucket(b) + } + if bm.config.DebugGCS { + b = gcs.NewDebugBucket(b, logger.NewDebug("gcs: ")) } return } @@ -182,7 +168,7 @@ func (bm *bucketManager) SetUpBucket( if name == canned.FakeBucketName { b = canned.MakeFakeBucket(ctx) } else { - b, err = bm.SetUpGcsBucket(ctx, name) + b, err = bm.SetUpGcsBucket(name) if err != nil { err = fmt.Errorf("OpenBucket: %w", err) return diff --git a/internal/gcsx/bucket_manager_test.go b/internal/gcsx/bucket_manager_test.go index c05ed315b5..35cd732b70 100644 --- a/internal/gcsx/bucket_manager_test.go +++ b/internal/gcsx/bucket_manager_test.go @@ -7,9 +7,7 @@ import ( "github.com/googlecloudplatform/gcsfuse/internal/storage" "github.com/jacobsa/gcloud/gcs" - "github.com/jacobsa/gcloud/gcs/gcsfake" . 
"github.com/jacobsa/ogletest" - "github.com/jacobsa/timeutil" ) func TestBucketManager(t *testing.T) { RunTests(t) } @@ -56,36 +54,19 @@ func (t *BucketManagerTest) TestNewBucketManagerMethod() { DebugGCS: true, AppendThreshold: 2, TmpObjectPrefix: "TmpObjectPrefix", - EnableStorageClientLibrary: true, } - bm := NewBucketManager(bucketConfig, nil, t.storageHandle) + bm := NewBucketManager(bucketConfig, t.storageHandle) ExpectNe(nil, bm) } -func (t *BucketManagerTest) TestSetupGcsBucketWhenEnableStorageClientLibraryIsTrue() { +func (t *BucketManagerTest) TestSetupGcsBucket() { var bm bucketManager bm.storageHandle = t.storageHandle - bm.config.EnableStorageClientLibrary = true bm.config.DebugGCS = true - bucket, err := bm.SetUpGcsBucket(context.Background(), TestBucketName) - - ExpectNe(nil, bucket) - ExpectEq(nil, err) -} - -func (t *BucketManagerTest) TestSetupGcsBucketWhenEnableStorageClientLibraryIsFalse() { - var bm bucketManager - bm.storageHandle = t.storageHandle - bm.config.EnableStorageClientLibrary = false - bm.config.BillingProject = "BillingProject" - bm.conn = &Connection{ - wrapped: gcsfake.NewConn(timeutil.RealClock()), - } - - bucket, err := bm.SetUpGcsBucket(context.Background(), "fake@bucket") + bucket, err := bm.SetUpGcsBucket(TestBucketName) ExpectNe(nil, bucket) ExpectEq(nil, err) @@ -104,15 +85,11 @@ func (t *BucketManagerTest) TestSetUpBucketMethod() { DebugGCS: true, AppendThreshold: 2, TmpObjectPrefix: "TmpObjectPrefix", - EnableStorageClientLibrary: true, } ctx := context.Background() bm.storageHandle = t.storageHandle bm.config = bucketConfig bm.gcCtx = ctx - bm.conn = &Connection{ - wrapped: gcsfake.NewConn(timeutil.RealClock()), - } bucket, err := bm.SetUpBucket(context.Background(), TestBucketName) @@ -133,15 +110,11 @@ func (t *BucketManagerTest) TestSetUpBucketMethodWhenBucketDoesNotExist() { DebugGCS: true, AppendThreshold: 2, TmpObjectPrefix: "TmpObjectPrefix", - EnableStorageClientLibrary: true, } ctx := 
context.Background() bm.storageHandle = t.storageHandle bm.config = bucketConfig bm.gcCtx = ctx - bm.conn = &Connection{ - wrapped: gcsfake.NewConn(timeutil.RealClock()), - } bucket, err := bm.SetUpBucket(context.Background(), invalidBucketName) diff --git a/internal/gcsx/connection.go b/internal/gcsx/connection.go deleted file mode 100644 index 87ab6c54d0..0000000000 --- a/internal/gcsx/connection.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gcsx - -import ( - "fmt" - "strings" - "syscall" - - "github.com/jacobsa/gcloud/gcs" - "golang.org/x/net/context" -) - -type Connection struct { - wrapped gcs.Conn -} - -func NewConnection(cfg *gcs.ConnConfig) (c *Connection, err error) { - wrapped, err := gcs.NewConn(cfg) - if err != nil { - err = fmt.Errorf("Cannot create Conn: %w", err) - return - } - - c = &Connection{ - wrapped: wrapped, - } - return -} - -func (c *Connection) OpenBucket( - ctx context.Context, - options *gcs.OpenBucketOptions) (b gcs.Bucket, err error) { - b, err = c.wrapped.OpenBucket(ctx, options) - - // The gcs.Conn.OpenBucket returns converted errors without the underlying - // googleapi.Error, which is impossible to use errors.As to match the error - // type. To interpret the errors in syscall, here we use string matching. 
- if err != nil { - if strings.Contains(err.Error(), "Bad credentials") { - return b, fmt.Errorf("Bad credentials for bucket %q: %w", options.Name, syscall.EACCES) - } - if strings.Contains(err.Error(), "Unknown bucket") { - return b, fmt.Errorf("Unknown bucket %q: %w", options.Name, syscall.ENOENT) - } - } - - return -} diff --git a/internal/storage/storage_handle.go b/internal/storage/storage_handle.go index fe434c5022..830bb6afc9 100644 --- a/internal/storage/storage_handle.go +++ b/internal/storage/storage_handle.go @@ -29,6 +29,8 @@ import ( "google.golang.org/api/option" ) +const GcsEndPoint = "https://storage.googleapis.com:443" + type StorageHandle interface { // In case of non-empty billingProject, this project is set as user-project for // all subsequent calls on the bucket. Calls with user-project will be billed diff --git a/main.go b/main.go index 7526061330..4c8a0f7f2f 100644 --- a/main.go +++ b/main.go @@ -20,33 +20,25 @@ package main import ( - "crypto/tls" "fmt" "log" - "net/http" "os" "os/signal" "path" "strings" - "time" - - "github.com/googlecloudplatform/gcsfuse/internal/storage" - "golang.org/x/net/context" - "golang.org/x/oauth2" "github.com/googlecloudplatform/gcsfuse/internal/auth" "github.com/googlecloudplatform/gcsfuse/internal/canned" - "github.com/googlecloudplatform/gcsfuse/internal/gcsx" "github.com/googlecloudplatform/gcsfuse/internal/locker" "github.com/googlecloudplatform/gcsfuse/internal/logger" "github.com/googlecloudplatform/gcsfuse/internal/monitor" - mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" "github.com/googlecloudplatform/gcsfuse/internal/perf" + "github.com/googlecloudplatform/gcsfuse/internal/storage" "github.com/jacobsa/daemonize" "github.com/jacobsa/fuse" - "github.com/jacobsa/gcloud/gcs" "github.com/kardianos/osext" "github.com/urfave/cli" + 
"golang.org/x/net/context" ) //////////////////////////////////////////////////////////////////////// @@ -87,67 +79,6 @@ func getUserAgent(appName string) string { } } -func getConn(flags *flagStorage) (c *gcsx.Connection, err error) { - var tokenSrc oauth2.TokenSource - if flags.Endpoint.Hostname() == "storage.googleapis.com" { - tokenSrc, err = auth.GetTokenSource( - context.Background(), - flags.KeyFile, - flags.TokenUrl, - flags.ReuseTokenFromUrl, - ) - if err != nil { - err = fmt.Errorf("GetTokenSource: %w", err) - return - } - } else { - // Do not use OAuth with non-Google hosts. - tokenSrc = oauth2.StaticTokenSource(&oauth2.Token{}) - } - - // Create the connection. - cfg := &gcs.ConnConfig{ - Url: flags.Endpoint, - TokenSource: tokenSrc, - UserAgent: getUserAgent(flags.AppName), - MaxBackoffSleep: flags.MaxRetrySleep, - } - - // The default HTTP transport uses HTTP/2 with TCP multiplexing, which - // does not create new TCP connections even when the idle connections - // run out. To specify multiple connections per host, HTTP/2 is disabled - // on purpose. - if flags.ClientProtocol == mountpkg.HTTP1 { - cfg.Transport = &http.Transport{ - MaxConnsPerHost: flags.MaxConnsPerHost, - // This disables HTTP/2 in the transport. 
- TLSNextProto: make( - map[string]func(string, *tls.Conn) http.RoundTripper, - ), - } - } - - if flags.DebugHTTP { - cfg.HTTPDebugLogger = logger.NewDebug("http: ") - } - - if flags.DebugGCS { - cfg.GCSDebugLogger = logger.NewDebug("gcs: ") - } - - return gcsx.NewConnection(cfg) -} - -func getConnWithRetry(flags *flagStorage) (c *gcsx.Connection, err error) { - c, err = getConn(flags) - for delay := 1 * time.Second; delay <= flags.MaxRetrySleep && err != nil; delay = delay/2 + delay { - logger.Infof("Waiting for connection: %v\n", err) - time.Sleep(delay) - c, err = getConn(flags) - } - return -} - func createStorageHandle(flags *flagStorage) (storageHandle storage.StorageHandle, err error) { tokenSrc, err := auth.GetTokenSource(context.Background(), flags.KeyFile, flags.TokenUrl, true) if err != nil { @@ -192,35 +123,28 @@ func mountWithArgs( // // Special case: if we're mounting the fake bucket, we don't need an actual // connection. - var conn *gcsx.Connection var storageHandle storage.StorageHandle if bucketName != canned.FakeBucketName { - mountStatus.Println("Opening GCS connection...") - - if flags.EnableStorageClientLibrary { - storageHandle, err = createStorageHandle(flags) - } else { - conn, err = getConnWithRetry(flags) - } + mountStatus.Println("Creating Storage handle...") + storageHandle, err = createStorageHandle(flags) if err != nil { - err = fmt.Errorf("failed to open connection - getConnWithRetry: %w", err) + err = fmt.Errorf("Failed to create storage handle using createStorageHandle: %w", err) return } } // Mount the file system. 
logger.Infof("Creating a mount at %q\n", mountPoint) - mfs, err = mountWithConn( + mfs, err = mountWithStorageHandle( context.Background(), bucketName, mountPoint, flags, - conn, storageHandle, mountStatus) if err != nil { - err = fmt.Errorf("mountWithConn: %w", err) + err = fmt.Errorf("mountWithStorageHandle: %w", err) return } diff --git a/main_test.go b/main_test.go index 36dc0b3644..1bfeac0582 100644 --- a/main_test.go +++ b/main_test.go @@ -21,16 +21,6 @@ type MainTest struct { func init() { RegisterTestSuite(&MainTest{}) } -func (t *MainTest) TestCreateStorageHandleEnableStorageClientLibraryIsTrue() { - storageHandle, err := createStorageHandle(&flagStorage{ - EnableStorageClientLibrary: true, - KeyFile: "testdata/test_creds.json", - }) - - ExpectNe(nil, storageHandle) - ExpectEq(nil, err) -} - func (t *MainTest) TestCreateStorageHandle() { flags := &flagStorage{ ClientProtocol: mountpkg.HTTP1, diff --git a/mount.go b/mount.go index 9bad034d5c..f5b7eac525 100644 --- a/mount.go +++ b/mount.go @@ -33,12 +33,11 @@ import ( // Mount the file system based on the supplied arguments, returning a // fuse.MountedFileSystem that can be joined to wait for unmounting. -func mountWithConn( +func mountWithStorageHandle( ctx context.Context, bucketName string, mountPoint string, flags *flagStorage, - conn *gcsx.Connection, storageHandle storage.StorageHandle, status *log.Logger) (mfs *fuse.MountedFileSystem, err error) { // Sanity check: make sure the temporary directory exists and is writable @@ -95,9 +94,8 @@ be interacting with the file system.`) AppendThreshold: 1 << 21, // 2 MiB, a total guess. TmpObjectPrefix: ".gcsfuse_tmp/", DebugGCS: flags.DebugGCS, - EnableStorageClientLibrary: flags.EnableStorageClientLibrary, } - bm := gcsx.NewBucketManager(bucketCfg, conn, storageHandle) + bm := gcsx.NewBucketManager(bucketCfg, storageHandle) // Create a file system server. 
serverCfg := &fs.ServerConfig{ diff --git a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh index cb2d3d38ed..810f99edb4 100644 --- a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh +++ b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh @@ -45,7 +45,7 @@ cd "./perfmetrics/scripts/" echo "Mounting gcs bucket" mkdir -p gcs LOG_FILE=${KOKORO_ARTIFACTS_DIR}/gcsfuse-logs.txt -GCSFUSE_FLAGS="--implicit-dirs --max-conns-per-host 100 --enable-storage-client-library --debug_fuse --debug_gcs --log-file $LOG_FILE --log-format \"text\" --stackdriver-export-interval=30s" +GCSFUSE_FLAGS="--implicit-dirs --max-conns-per-host 100 --debug_fuse --debug_gcs --log-file $LOG_FILE --log-format \"text\" --stackdriver-export-interval=30s" BUCKET_NAME=periodic-perf-tests MOUNT_POINT=gcs # The VM will itself exit if the gcsfuse mount fails. diff --git a/perfmetrics/scripts/ls_metrics/listing_benchmark.py b/perfmetrics/scripts/ls_metrics/listing_benchmark.py index 6bda6b1496..7c99de82cf 100644 --- a/perfmetrics/scripts/ls_metrics/listing_benchmark.py +++ b/perfmetrics/scripts/ls_metrics/listing_benchmark.py @@ -375,7 +375,7 @@ def _mount_gcs_bucket(bucket_name) -> str: subprocess.call('mkdir {}'.format(gcs_bucket), shell=True) exit_code = subprocess.call( - 'gcsfuse --implicit-dirs --enable-storage-client-library --max-conns-per-host 100 {} {}'.format( + 'gcsfuse --implicit-dirs --max-conns-per-host 100 {} {}'.format( bucket_name, gcs_bucket), shell=True) if exit_code != 0: log.error('Cannot mount the GCS bucket due to exit code %s.\n', exit_code) diff --git a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh index f6299d6b61..c72a85f0bc 100644 --- a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh @@ -17,7 +17,7 @@ cd 
- # Mount the bucket and run in background so that docker doesn't keep running after resnet_runner.py fails echo "Mounting the bucket" -nohup gcsfuse/gcsfuse --foreground --implicit-dirs --enable-storage-client-library --debug_fuse --debug_gcs --max-conns-per-host 100 --log-format "text" --log-file /home/logs/gcsfuse.log --stackdriver-export-interval 60s ml-models-data-gcsfuse myBucket > /home/output/gcsfuse.out 2> /home/output/gcsfuse.err & +nohup gcsfuse/gcsfuse --foreground --implicit-dirs --debug_fuse --debug_gcs --max-conns-per-host 100 --log-format "text" --log-file /home/logs/gcsfuse.log --stackdriver-export-interval 60s ml-models-data-gcsfuse myBucket > /home/output/gcsfuse.out 2> /home/output/gcsfuse.err & # Install tensorflow model garden library pip3 install --user tf-models-official==2.10.0 diff --git a/tools/integration_tests/explicit_dir/explicit_dir_test.go b/tools/integration_tests/explicit_dir/explicit_dir_test.go index 06c69ff6de..01e675271b 100644 --- a/tools/integration_tests/explicit_dir/explicit_dir_test.go +++ b/tools/integration_tests/explicit_dir/explicit_dir_test.go @@ -22,7 +22,7 @@ import ( ) func TestMain(m *testing.M) { - flags := [][]string{{"--enable-storage-client-library=true"}, {"--enable-storage-client-library=false"}} + flags := [][]string{{"--implicit-dirs=false"}} implicit_and_explicit_dir_setup.RunTestsForImplicitDirAndExplicitDir(flags, m) } diff --git a/tools/integration_tests/implicit_dir/implicit_dir_test.go b/tools/integration_tests/implicit_dir/implicit_dir_test.go index e710445dee..fd83b37392 100644 --- a/tools/integration_tests/implicit_dir/implicit_dir_test.go +++ b/tools/integration_tests/implicit_dir/implicit_dir_test.go @@ -29,7 +29,7 @@ const NumberOfFilesInExplicitDirInImplicitSubDir = 1 const NumberOfFilesInExplicitDirInImplicitDir = 1 func TestMain(m *testing.M) { - flags := [][]string{{"--implicit-dirs"}, {"--enable-storage-client-library=false", "--implicit-dirs"}} + flags := [][]string{{"--implicit-dirs"}} 
implicit_and_explicit_dir_setup.RunTestsForImplicitDirAndExplicitDir(flags, m) } diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index 11df5ee42e..a3af827726 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -86,9 +86,7 @@ const ContentInFileInDirThreeInCreateThreeLevelDirTest = "Hello world!!" func TestMain(m *testing.M) { setup.ParseSetUpFlags() - flags := [][]string{{"--enable-storage-client-library=true", "--implicit-dirs=true"}, - {"--enable-storage-client-library=false"}, - {"--implicit-dirs=true"}, + flags := [][]string{{"--implicit-dirs=true"}, {"--implicit-dirs=false"}} setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() diff --git a/tools/integration_tests/read_large_files/read_large_files_test.go b/tools/integration_tests/read_large_files/read_large_files_test.go index 3ebb4d381f..60c9199623 100644 --- a/tools/integration_tests/read_large_files/read_large_files_test.go +++ b/tools/integration_tests/read_large_files/read_large_files_test.go @@ -37,7 +37,7 @@ const MaxReadableByteFromFile = 500 * OneMB func TestMain(m *testing.M) { setup.ParseSetUpFlags() - flags := [][]string{{"--implicit-dirs"}, {"--enable-storage-client-library=false", "--implicit-dirs"}} + flags := [][]string{{"--implicit-dirs"}} setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index c359880100..e6f88ac8b4 100755 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -23,257 +23,191 @@ TEST_BUCKET_NAME=$1 MOUNT_DIR=$2 export CGO_ENABLED=0 -# Run integration tests for operations directory with static mounting -gcsfuse --enable-storage-client-library=true --implicit-dirs=true $TEST_BUCKET_NAME 
$MOUNT_DIR +# package operations +# Run test with static mounting. (flags: --implicit-dirs=true) +gcsfuse --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=true,implicit_dirs=true -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -gcsfuse --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -gcsfuse --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=true) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with static mounting. (flags: --implicit-dirs=false) gcsfuse --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=false) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=false GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run integration tests for operations with --only-dir mounting. -gcsfuse --only-dir testDir --enable-storage-client-library=true --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=true,implicit_dirs=true +# Run tests with static mounting. (flags: --implicit-dirs=true, --only-dir testDir) +gcsfuse --only-dir testDir --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -gcsfuse --only-dir testDir --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -gcsfuse --only-dir testDir --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run test with persistent mounting +# Run tests with persistent mounting. (flags: --implicit-dirs=true, --only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run tests with static mounting. (flags: --implicit-dirs=false, --only-dir testDir) gcsfuse --only-dir testDir --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run tests with persistent mounting. (flags: --implicit-dirs=false, --only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=false GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run integration tests for readonly directory with static mounting +# package readonly +# Run tests with static mounting. (flags: --implicit-dirs=true,--o=ro) gcsfuse --o=ro --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=true,--o=ro) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o ro,implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR +# Run tests with static mounting. 
(flags: --implicit-dirs=true, --file-mode=544, --dir-mode=544) gcsfuse --file-mode=544 --dir-mode=544 --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=true, --file-mode=544, --dir-mode=544) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o file_mode=544,dir_mode=544,implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run integration tests for readonly with --only-dir mounting. +# Run tests with static mounting. (flags: --implicit-dirs=true, --o=ro, --only-dir testDir) gcsfuse --only-dir testDir --o=ro --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=true,--o=ro,--only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o ro,only_dir=testDir,implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR +# Run test with static mounting. (flags: --implicit-dirs=true, --file-mode=544, --dir-mode=544, --only-dir testDir) gcsfuse --only-dir testDir --file-mode=544 --dir-mode=544 --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs=true, --file-mode=544, --dir-mode=544, --only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,file_mode=544,dir_mode=544,implicit_dirs=true GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/readonly/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run integration tests for rename_dir_limit directory with static mounting +# package rename_dir_limit +# Run tests with static mounting. (flags: --rename-dir-limit=3, --implicit-dirs) gcsfuse --rename-dir-limit=3 --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --rename-dir-limit=3, --implicit-dirs) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o rename_dir_limit=3,implicit_dirs GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run tests with static mounting. (flags: --rename-dir-limit=3) gcsfuse --rename-dir-limit=3 $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --rename-dir-limit=3) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o rename_dir_limit=3 GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run integration tests for rename_dir_limit with --only-dir mounting. +# Run test with static mounting. (flags: --rename-dir-limit=3, --implicit-dirs, --only-dir testDir) gcsfuse --only-dir testDir --rename-dir-limit=3 --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting . (flags: --rename-dir-limit=3, --implicit-dirs) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,rename_dir_limit=3,implicit_dirs GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with static mounting. (flags: --rename-dir-limit=3, --only-dir testDir) gcsfuse --only-dir testDir --rename-dir-limit=3 $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting . (flags: --rename-dir-limit=3, --implicit-dirs, --only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,rename_dir_limit=3,implicit_dirs GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/rename_dir_limit/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -# Run integration tests for implicit_dir directory with static mounting +# package implicit_dir +# Run tests with static mounting. (flags: --implicit-dirs) gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -gcsfuse --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false,implicit_dirs -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME -sudo umount $MOUNT_DIR - - -# Run integration tests for implicit_dir with --only-dir mounting. +# Run tests with static mounting. (flags: --implicit-dirs, --only-dir testDir) gcsfuse --only-dir testDir --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run test with persistent mounting +# Run test with persistent mounting. (flags: --implicit-dirs,--only-dir=testDir) mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -gcsfuse --only-dir testDir --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false,implicit_dirs -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/implicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir -sudo umount $MOUNT_DIR - -# Run integration tests for explicit_dir directory with static mounting -gcsfuse --enable-storage-client-library=true $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=true -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME -sudo umount $MOUNT_DIR - -gcsfuse --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR +# package explicit_dir +# Run tests with static mounting. (flags: --implicit-dirs=false) +gcsfuse --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o enable_storage_client_library=false +# Run test with persistent mounting. 
(flags: --implicit-dirs=false) +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=false GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run integration tests for explicit_dir with --only-dir mounting. -gcsfuse --only-dir testDir --enable-storage-client-library=true $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir -sudo umount $MOUNT_DIR - -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=true -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir -sudo umount $MOUNT_DIR - -gcsfuse --only-dir testDir --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR +# Run tests with static mounting. (flags: --implicit-dirs=false, --only-dir testDir) +gcsfuse --only-dir testDir --implicit-dirs=false $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run test with persistent mounting -mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,enable_storage_client_library=false +# Run test with persistent mounting. (flags: --implicit-dirs=false, --only-dir=testDir) +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=false GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/explicit_dir/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME/testDir sudo umount $MOUNT_DIR -# Run integration tests for list_large_dir directory with static mounting +# package list_large_dir +# Run tests with static mounting. (flags: --implicit-dirs) gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/list_large_dir/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR -# Run integration tests for read_large_files directory with static mounting +# package read_large_files +# Run tests with static mounting. (flags: --implicit-dirs) gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/read_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -gcsfuse --enable-storage-client-library=false --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/read_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run integration tests for write_large_files directory with static mounting +# package write_large_files +# Run tests with static mounting. (flags: --implicit-dirs) gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR -gcsfuse --implicit-dirs --enable-storage-client-library=false $TEST_BUCKET_NAME $MOUNT_DIR -GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/write_large_files/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR -sudo umount $MOUNT_DIR - -# Run integration tests from gzip package/directory with static mounting +# package gzip +# Run tests with static mounting. 
(flags: --implicit-dirs) gcsfuse --implicit-dirs $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/gzip/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR --testbucket=$TEST_BUCKET_NAME sudo umount $MOUNT_DIR diff --git a/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go index 688d85e9bd..02a773db45 100644 --- a/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go +++ b/tools/integration_tests/util/mounting/persistent_mounting/perisistent_mounting.go @@ -24,15 +24,15 @@ import ( "github.com/googlecloudplatform/gcsfuse/tools/integration_tests/util/setup" ) -// make e.g --enable-storage-client-library in enable_storage_client_library +// make e.g --debug_gcs in debug_gcs func makePersistentMountingArgs(flags []string) (args []string, err error) { var s string for i := range flags { // We are already passing flags with -o flag. s = strings.Replace(flags[i], "--o=", "", -1) - // e.g. Convert --enable-storage-client-library to __enable_storage_client_library + // e.g. Convert --debug_gcs to __debug_gcs s = strings.Replace(s, "-", "_", -1) - // e.g. Convert __enable_storage_client_library to enable_storage_client_library + // e.g. 
Convert __debug_gcs to debug_gcs s = strings.Replace(s, "__", "", -1) args = append(args, s) } diff --git a/tools/integration_tests/write_large_files/write_large_files_test.go b/tools/integration_tests/write_large_files/write_large_files_test.go index 7002780abe..21b9f1c96d 100644 --- a/tools/integration_tests/write_large_files/write_large_files_test.go +++ b/tools/integration_tests/write_large_files/write_large_files_test.go @@ -27,7 +27,7 @@ import ( func TestMain(m *testing.M) { setup.ParseSetUpFlags() - flags := [][]string{{"--implicit-dirs"}, {"--enable-storage-client-library=false", "--implicit-dirs"}} + flags := [][]string{{"--implicit-dirs"}} setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() diff --git a/tools/mount_gcsfuse/main.go b/tools/mount_gcsfuse/main.go index 3c50898c3e..bcfefe5843 100644 --- a/tools/mount_gcsfuse/main.go +++ b/tools/mount_gcsfuse/main.go @@ -83,7 +83,6 @@ func makeGcsfuseArgs( case "implicit_dirs", "foreground", "experimental_local_file_cache", - "enable_storage_client_library", "reuse_token_from_url", "enable_nonexistent_type_cache": if value == "" { From 8517709ae7682c4407f1e6bc6403024d23d7abf6 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Wed, 16 Aug 2023 20:15:12 +0530 Subject: [PATCH 40/46] Enabling --endpoint flag for go-storage-client library (#1264) * Enabling --endpoint flag for go-storage-client library * Fixing lint issue * Added more unit test * fixing endpoint description * Refactoring and resolving comments * refactoring * Reverting previous refactoring, created client_helper for all client creation logic * treating passing actual GCSURL like custom point, by design in go-storage-lib * Fixing unit test, removing unused token-source * Minor refactoring, adding dummy key-file * Making the default client non-nil so that we can get dummy token-source * Fixing unit test * Review comments updated * Review comments * Incorporating review
comments * Renamed client helper to client * Incorporating review comments * Adding more unit test * Review comments * Fixing flag unit test * Fixed gcsfuse hang * Triggering performance test and fixing unit test --- flags.go | 28 ++++-- flags_test.go | 3 +- internal/storage/storage_handle.go | 65 ++++-------- internal/storage/storage_handle_test.go | 69 +++++++++---- internal/storage/storageutil/client.go | 99 +++++++++++++++++++ internal/storage/storageutil/client_test.go | 78 +++++++++++++++ internal/storage/storageutil/test_util.go | 44 +++++++++ .../user_agent_round_tripper.go | 2 +- main.go | 15 ++- tools/mount_gcsfuse/main.go | 2 +- 10 files changed, 314 insertions(+), 91 deletions(-) create mode 100644 internal/storage/storageutil/client.go create mode 100644 internal/storage/storageutil/client_test.go create mode 100644 internal/storage/storageutil/test_util.go rename internal/storage/{ => storageutil}/user_agent_round_tripper.go (98%) diff --git a/flags.go b/flags.go index 9625b6c6ae..53debf06e4 100644 --- a/flags.go +++ b/flags.go @@ -26,7 +26,6 @@ import ( "github.com/googlecloudplatform/gcsfuse/internal/logger" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" - "github.com/googlecloudplatform/gcsfuse/internal/storage" "github.com/urfave/cli" ) @@ -135,9 +134,10 @@ func newApp() (app *cli.App) { ///////////////////////// cli.StringFlag{ - Name: "endpoint", - Value: storage.GcsEndPoint, - Usage: "The endpoint to connect to.", + Name: "custom-endpoint", + Usage: "Alternate endpoint for fetching data. Should be used only for testing purposes. " + + "The endpoint should be equivalent to the base endpoint of GCS JSON API (https://storage.googleapis.com/storage/v1). " + + "If not specified GCS endpoint will be used. 
Auth will be skipped for custom endpoint.", }, cli.StringFlag{ @@ -357,7 +357,7 @@ type flagStorage struct { RenameDirLimit int64 // GCS - Endpoint *url.URL + CustomEndpoint *url.URL BillingProject string KeyFile string TokenUrl string @@ -466,11 +466,19 @@ func resolvePathForTheFlagsInContext(c *cli.Context) (err error) { // Add the flags accepted by run to the supplied flag set, returning the // variables into which the flags will parse. func populateFlags(c *cli.Context) (flags *flagStorage, err error) { - endpoint, err := url.Parse(c.String("endpoint")) - if err != nil { - fmt.Printf("Could not parse endpoint") - return + customEndpointStr := c.String("custom-endpoint") + var customEndpoint *url.URL + + if customEndpointStr == "" { + customEndpoint = nil + } else { + customEndpoint, err = url.Parse(customEndpointStr) + if err != nil { + fmt.Printf("Could not parse endpoint") + return + } } + clientProtocolString := strings.ToLower(c.String("client-protocol")) clientProtocol := mountpkg.ClientProtocol(clientProtocolString) flags = &flagStorage{ @@ -488,7 +496,7 @@ func populateFlags(c *cli.Context) (flags *flagStorage, err error) { RenameDirLimit: int64(c.Int("rename-dir-limit")), // GCS, - Endpoint: endpoint, + CustomEndpoint: customEndpoint, BillingProject: c.String("billing-project"), KeyFile: c.String("key-file"), TokenUrl: c.String("token-url"), diff --git a/flags_test.go b/flags_test.go index a95c263a3d..eb560b756e 100644 --- a/flags_test.go +++ b/flags_test.go @@ -23,7 +23,6 @@ import ( "time" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" - "github.com/googlecloudplatform/gcsfuse/internal/storage" . "github.com/jacobsa/oglematchers" . 
"github.com/jacobsa/ogletest" "github.com/urfave/cli" @@ -81,7 +80,7 @@ func (t *FlagsTest) Defaults() { ExpectEq(-1, f.EgressBandwidthLimitBytesPerSecond) ExpectEq(-1, f.OpRateLimitHz) ExpectTrue(f.ReuseTokenFromUrl) - ExpectEq(storage.GcsEndPoint, f.Endpoint.String()) + ExpectEq(nil, f.CustomEndpoint) // Tuning ExpectEq(4096, f.StatCacheCapacity) diff --git a/internal/storage/storage_handle.go b/internal/storage/storage_handle.go index 830bb6afc9..65f62387a3 100644 --- a/internal/storage/storage_handle.go +++ b/internal/storage/storage_handle.go @@ -15,22 +15,17 @@ package storage import ( - "crypto/tls" "fmt" "net/http" - "time" "cloud.google.com/go/storage" "github.com/googleapis/gax-go/v2" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" "github.com/googlecloudplatform/gcsfuse/internal/storage/storageutil" "golang.org/x/net/context" - "golang.org/x/oauth2" - "google.golang.org/api/option" + option "google.golang.org/api/option" ) -const GcsEndPoint = "https://storage.googleapis.com:443" - type StorageHandle interface { // In case of non-empty billingProject, this project is set as user-project for // all subsequent calls on the bucket. Calls with user-project will be billed @@ -44,57 +39,31 @@ type storageClient struct { client *storage.Client } -type StorageClientConfig struct { - ClientProtocol mountpkg.ClientProtocol - MaxConnsPerHost int - MaxIdleConnsPerHost int - TokenSrc oauth2.TokenSource - HttpClientTimeout time.Duration - MaxRetryDuration time.Duration - RetryMultiplier float64 - UserAgent string -} - // NewStorageHandle returns the handle of Go storage client containing // customized http client. We can configure the http client using the // storageClientConfig parameter. -func NewStorageHandle(ctx context.Context, clientConfig StorageClientConfig) (sh StorageHandle, err error) { - var transport *http.Transport - // Using http1 makes the client more performant. 
- if clientConfig.ClientProtocol == mountpkg.HTTP1 { - transport = &http.Transport{ - MaxConnsPerHost: clientConfig.MaxConnsPerHost, - MaxIdleConnsPerHost: clientConfig.MaxIdleConnsPerHost, - // This disables HTTP/2 in transport. - TLSNextProto: make( - map[string]func(string, *tls.Conn) http.RoundTripper, - ), - } - } else { - // For http2, change in MaxConnsPerHost doesn't affect the performance. - transport = &http.Transport{ - DisableKeepAlives: true, - MaxConnsPerHost: clientConfig.MaxConnsPerHost, - ForceAttemptHTTP2: true, +func NewStorageHandle(ctx context.Context, clientConfig storageutil.StorageClientConfig) (sh StorageHandle, err error) { + + var clientOpts []option.ClientOption + // Add WithHttpClient option. + if clientConfig.ClientProtocol == mountpkg.HTTP1 || clientConfig.ClientProtocol == mountpkg.HTTP2 { + var httpClient *http.Client + httpClient, err = storageutil.CreateHttpClient(&clientConfig) + if err != nil { + err = fmt.Errorf("while creating http endpoint: %w", err) + return } - } - // Custom http client for Go Client. - httpClient := &http.Client{ - Transport: &oauth2.Transport{ - Base: transport, - Source: clientConfig.TokenSrc, - }, - Timeout: clientConfig.HttpClientTimeout, + clientOpts = append(clientOpts, option.WithHTTPClient(httpClient)) } - // Setting UserAgent through RoundTripper middleware - httpClient.Transport = &userAgentRoundTripper{ - wrapped: httpClient.Transport, - UserAgent: clientConfig.UserAgent, + // Add Custom endpoint option. + if clientConfig.CustomEndpoint != nil { + clientOpts = append(clientOpts, option.WithEndpoint(clientConfig.CustomEndpoint.String())) } + var sc *storage.Client - sc, err = storage.NewClient(ctx, option.WithHTTPClient(httpClient)) + sc, err = storage.NewClient(ctx, clientOpts...) 
if err != nil { err = fmt.Errorf("go storage client creation failed: %w", err) return diff --git a/internal/storage/storage_handle_test.go b/internal/storage/storage_handle_test.go index 841d1b6c07..2c856359d4 100644 --- a/internal/storage/storage_handle_test.go +++ b/internal/storage/storage_handle_test.go @@ -16,30 +16,18 @@ package storage import ( "context" + "net/url" "testing" - "time" mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" + "github.com/googlecloudplatform/gcsfuse/internal/storage/storageutil" + "github.com/jacobsa/oglematchers" . "github.com/jacobsa/ogletest" - "golang.org/x/oauth2" ) const invalidBucketName string = "will-not-be-present-in-fake-server" const projectID string = "valid-project-id" -func getDefaultStorageClientConfig() (clientConfig StorageClientConfig) { - return StorageClientConfig{ - ClientProtocol: mountpkg.HTTP1, - MaxConnsPerHost: 10, - MaxIdleConnsPerHost: 100, - TokenSrc: oauth2.StaticTokenSource(&oauth2.Token{}), - HttpClientTimeout: 800 * time.Millisecond, - MaxRetryDuration: 30 * time.Second, - RetryMultiplier: 2, - UserAgent: "gcsfuse/unknown (Go version go1.20-pre3 cl/474093167 +a813be86df) (GCP:gcsfuse)", - } -} - func TestStorageHandle(t *testing.T) { RunTests(t) } type StorageHandleTest struct { @@ -61,7 +49,7 @@ func (t *StorageHandleTest) TearDown() { t.fakeStorage.ShutDown() } -func (t *StorageHandleTest) invokeAndVerifyStorageHandle(sc StorageClientConfig) { +func (t *StorageHandleTest) invokeAndVerifyStorageHandle(sc storageutil.StorageClientConfig) { handleCreated, err := NewStorageHandle(context.Background(), sc) AssertEq(nil, err) AssertNe(nil, handleCreated) @@ -98,28 +86,69 @@ func (t *StorageHandleTest) TestBucketHandleWhenBucketDoesNotExistWithNonEmptyBi } func (t *StorageHandleTest) TestNewStorageHandleHttp2Disabled() { - sc := getDefaultStorageClientConfig() // by default http1 enabled + sc := storageutil.GetDefaultStorageClientConfig() // by default 
http1 enabled t.invokeAndVerifyStorageHandle(sc) } func (t *StorageHandleTest) TestNewStorageHandleHttp2Enabled() { - sc := getDefaultStorageClientConfig() + sc := storageutil.GetDefaultStorageClientConfig() sc.ClientProtocol = mountpkg.HTTP2 t.invokeAndVerifyStorageHandle(sc) } func (t *StorageHandleTest) TestNewStorageHandleWithZeroMaxConnsPerHost() { - sc := getDefaultStorageClientConfig() + sc := storageutil.GetDefaultStorageClientConfig() sc.MaxConnsPerHost = 0 t.invokeAndVerifyStorageHandle(sc) } func (t *StorageHandleTest) TestNewStorageHandleWhenUserAgentIsSet() { - sc := getDefaultStorageClientConfig() + sc := storageutil.GetDefaultStorageClientConfig() sc.UserAgent = "gcsfuse/unknown (Go version go1.20-pre3 cl/474093167 +a813be86df) appName (GPN:Gcsfuse-DLC)" t.invokeAndVerifyStorageHandle(sc) } +func (t *StorageHandleTest) TestNewStorageHandleWithCustomEndpoint() { + url, err := url.Parse(storageutil.CustomEndpoint) + AssertEq(nil, err) + sc := storageutil.GetDefaultStorageClientConfig() + sc.CustomEndpoint = url + + t.invokeAndVerifyStorageHandle(sc) +} + +// This will fail while fetching the token-source, since key-file doesn't exist. 
+func (t *StorageHandleTest) TestNewStorageHandleWhenCustomEndpointIsNil() { + sc := storageutil.GetDefaultStorageClientConfig() + sc.CustomEndpoint = nil + + handleCreated, err := NewStorageHandle(context.Background(), sc) + + AssertNe(nil, err) + ExpectThat(err, oglematchers.Error(oglematchers.HasSubstr("no such file or directory"))) + AssertEq(nil, handleCreated) +} + +func (t *StorageHandleTest) TestNewStorageHandleWhenKeyFileIsEmpty() { + sc := storageutil.GetDefaultStorageClientConfig() + sc.KeyFile = "" + + t.invokeAndVerifyStorageHandle(sc) +} + +func (t *StorageHandleTest) TestNewStorageHandleWhenReuseTokenUrlFalse() { + sc := storageutil.GetDefaultStorageClientConfig() + sc.ReuseTokenFromUrl = false + + t.invokeAndVerifyStorageHandle(sc) +} + +func (t *StorageHandleTest) TestNewStorageHandleWhenTokenUrlIsSet() { + sc := storageutil.GetDefaultStorageClientConfig() + sc.TokenUrl = storageutil.CustomTokenUrl + + t.invokeAndVerifyStorageHandle(sc) +} diff --git a/internal/storage/storageutil/client.go b/internal/storage/storageutil/client.go new file mode 100644 index 0000000000..7c43b7a031 --- /dev/null +++ b/internal/storage/storageutil/client.go @@ -0,0 +1,99 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storageutil + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/googlecloudplatform/gcsfuse/internal/auth" + mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +type StorageClientConfig struct { + ClientProtocol mountpkg.ClientProtocol + MaxConnsPerHost int + MaxIdleConnsPerHost int + HttpClientTimeout time.Duration + MaxRetryDuration time.Duration + RetryMultiplier float64 + UserAgent string + CustomEndpoint *url.URL + KeyFile string + TokenUrl string + ReuseTokenFromUrl bool + ExperimentalEnableJasonRead bool +} + +func CreateHttpClient(storageClientConfig *StorageClientConfig) (httpClient *http.Client, err error) { + var transport *http.Transport + // Using http1 makes the client more performant. + if storageClientConfig.ClientProtocol == mountpkg.HTTP1 { + transport = &http.Transport{ + MaxConnsPerHost: storageClientConfig.MaxConnsPerHost, + MaxIdleConnsPerHost: storageClientConfig.MaxIdleConnsPerHost, + // This disables HTTP/2 in transport. + TLSNextProto: make( + map[string]func(string, *tls.Conn) http.RoundTripper, + ), + } + } else { + // For http2, change in MaxConnsPerHost doesn't affect the performance. + transport = &http.Transport{ + DisableKeepAlives: true, + MaxConnsPerHost: storageClientConfig.MaxConnsPerHost, + ForceAttemptHTTP2: true, + } + } + + tokenSrc, err := createTokenSource(storageClientConfig) + if err != nil { + err = fmt.Errorf("while fetching tokenSource: %w", err) + return + } + + // Custom http client for Go Client. 
+ httpClient = &http.Client{ + Transport: &oauth2.Transport{ + Base: transport, + Source: tokenSrc, + }, + Timeout: storageClientConfig.HttpClientTimeout, + } + + // Setting UserAgent through RoundTripper middleware + httpClient.Transport = &userAgentRoundTripper{ + wrapped: httpClient.Transport, + UserAgent: storageClientConfig.UserAgent, + } + + return httpClient, err +} + +// It creates dummy token-source in case of non-nil custom url. If the custom-endpoint +// is nil, it creates the token-source from the provided key-file or using ADC search +// order (https://cloud.google.com/docs/authentication/application-default-credentials#order). +func createTokenSource(storageClientConfig *StorageClientConfig) (tokenSrc oauth2.TokenSource, err error) { + if storageClientConfig.CustomEndpoint == nil { + return auth.GetTokenSource(context.Background(), storageClientConfig.KeyFile, storageClientConfig.TokenUrl, storageClientConfig.ReuseTokenFromUrl) + } else { + return oauth2.StaticTokenSource(&oauth2.Token{}), nil + } +} diff --git a/internal/storage/storageutil/client_test.go b/internal/storage/storageutil/client_test.go new file mode 100644 index 0000000000..c345359c50 --- /dev/null +++ b/internal/storage/storageutil/client_test.go @@ -0,0 +1,78 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storageutil + +import ( + "net/url" + "testing" + + "github.com/jacobsa/oglematchers" + . 
"github.com/jacobsa/ogletest" +) + +func TestClient(t *testing.T) { RunTests(t) } + +type clientTest struct { +} + +func init() { RegisterTestSuite(&clientTest{}) } + +func (t *clientTest) TestCreateTokenSrcWithCustomEndpoint() { + url, err := url.Parse(CustomEndpoint) + AssertEq(nil, err) + sc := GetDefaultStorageClientConfig() + sc.CustomEndpoint = url + + tokenSrc, err := createTokenSource(&sc) + + ExpectEq(nil, err) + ExpectNe(nil, &tokenSrc) +} + +func (t *clientTest) TestCreateTokenSrcWhenCustomEndpointIsNil() { + sc := GetDefaultStorageClientConfig() + sc.CustomEndpoint = nil + + // It will try to create the actual auth token and fail since key-file doesn't exist. + tokenSrc, err := createTokenSource(&sc) + + ExpectNe(nil, err) + ExpectThat(err, oglematchers.Error(oglematchers.HasSubstr("no such file or directory"))) + ExpectEq(nil, tokenSrc) +} + +func (t *clientTest) TestCreateHttpClientWithHttp1() { + sc := GetDefaultStorageClientConfig() // By default http1 enabled + + // Act: this method add tokenSource and clientOptions. + httpClient, err := CreateHttpClient(&sc) + + ExpectEq(nil, err) + ExpectNe(nil, httpClient) + ExpectNe(nil, httpClient.Transport) + ExpectEq(sc.HttpClientTimeout, httpClient.Timeout) +} + +func (t *clientTest) TestCreateHttpClientWithHttp2() { + sc := GetDefaultStorageClientConfig() + + // Act: this method add tokenSource and clientOptions. + httpClient, err := CreateHttpClient(&sc) + + ExpectEq(nil, err) + ExpectNe(nil, httpClient) + ExpectNe(nil, httpClient.Transport) + ExpectEq(sc.HttpClientTimeout, httpClient.Timeout) +} diff --git a/internal/storage/storageutil/test_util.go b/internal/storage/storageutil/test_util.go new file mode 100644 index 0000000000..0ae500cb24 --- /dev/null +++ b/internal/storage/storageutil/test_util.go @@ -0,0 +1,44 @@ +// Copyright 2023 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storageutil + +import ( + "net/url" + "time" + + mountpkg "github.com/googlecloudplatform/gcsfuse/internal/mount" +) + +const CustomEndpoint = "https://localhost:9000" +const DummyKeyFile = "test/test_creds.json" +const CustomTokenUrl = "http://custom-token-url" + +// GetDefaultStorageClientConfig is only for test, making the default endpoint +// non-nil, so that we can create dummy tokenSource while unit test. +func GetDefaultStorageClientConfig() (clientConfig StorageClientConfig) { + return StorageClientConfig{ + ClientProtocol: mountpkg.HTTP1, + MaxConnsPerHost: 10, + MaxIdleConnsPerHost: 100, + HttpClientTimeout: 800 * time.Millisecond, + MaxRetryDuration: 30 * time.Second, + RetryMultiplier: 2, + UserAgent: "gcsfuse/unknown (Go version go1.20-pre3 cl/474093167 +a813be86df) (GCP:gcsfuse)", + CustomEndpoint: &url.URL{}, + KeyFile: DummyKeyFile, + TokenUrl: "", + ReuseTokenFromUrl: true, + } +} diff --git a/internal/storage/user_agent_round_tripper.go b/internal/storage/storageutil/user_agent_round_tripper.go similarity index 98% rename from internal/storage/user_agent_round_tripper.go rename to internal/storage/storageutil/user_agent_round_tripper.go index 78cd36cbcc..fad6855a21 100644 --- a/internal/storage/user_agent_round_tripper.go +++ b/internal/storage/storageutil/user_agent_round_tripper.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and 
// limitations under the License. -package storage +package storageutil import "net/http" diff --git a/main.go b/main.go index 4c8a0f7f2f..df9cf4fae8 100644 --- a/main.go +++ b/main.go @@ -27,13 +27,13 @@ import ( "path" "strings" - "github.com/googlecloudplatform/gcsfuse/internal/auth" "github.com/googlecloudplatform/gcsfuse/internal/canned" "github.com/googlecloudplatform/gcsfuse/internal/locker" "github.com/googlecloudplatform/gcsfuse/internal/logger" "github.com/googlecloudplatform/gcsfuse/internal/monitor" "github.com/googlecloudplatform/gcsfuse/internal/perf" "github.com/googlecloudplatform/gcsfuse/internal/storage" + "github.com/googlecloudplatform/gcsfuse/internal/storage/storageutil" "github.com/jacobsa/daemonize" "github.com/jacobsa/fuse" "github.com/kardianos/osext" @@ -80,21 +80,18 @@ func getUserAgent(appName string) string { } func createStorageHandle(flags *flagStorage) (storageHandle storage.StorageHandle, err error) { - tokenSrc, err := auth.GetTokenSource(context.Background(), flags.KeyFile, flags.TokenUrl, true) - if err != nil { - err = fmt.Errorf("get token source: %w", err) - return - } - - storageClientConfig := storage.StorageClientConfig{ + storageClientConfig := storageutil.StorageClientConfig{ ClientProtocol: flags.ClientProtocol, MaxConnsPerHost: flags.MaxConnsPerHost, MaxIdleConnsPerHost: flags.MaxIdleConnsPerHost, - TokenSrc: tokenSrc, HttpClientTimeout: flags.HttpClientTimeout, MaxRetryDuration: flags.MaxRetryDuration, RetryMultiplier: flags.RetryMultiplier, UserAgent: getUserAgent(flags.AppName), + CustomEndpoint: flags.CustomEndpoint, + KeyFile: flags.KeyFile, + TokenUrl: flags.TokenUrl, + ReuseTokenFromUrl: flags.ReuseTokenFromUrl, } storageHandle, err = storage.NewStorageHandle(context.Background(), storageClientConfig) diff --git a/tools/mount_gcsfuse/main.go b/tools/mount_gcsfuse/main.go index bcfefe5843..d5c60f330a 100644 --- 
a/tools/mount_gcsfuse/main.go +++ b/tools/mount_gcsfuse/main.go @@ -119,7 +119,7 @@ func makeGcsfuseArgs( "experimental_opentelemetry_collector_address", "log_format", "log_file", - "endpoint": + "custom_endpoint": args = append(args, "--"+strings.Replace(name, "_", "-", -1), value) // Special case: support mount-like formatting for gcsfuse debug flags. From e13d4bba7662517a7e22f99ca70403007c47e0f6 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Wed, 16 Aug 2023 22:29:48 +0530 Subject: [PATCH 41/46] Changing the timeout from 15m to 24m for integraotin test of one package (#1284) * Changing the timeout from 15m to 24m for integraotin test of one package * Adding the timout flag for continuous test also --- perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh | 2 +- perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh index 810f99edb4..c47908abd7 100644 --- a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh +++ b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh @@ -28,7 +28,7 @@ commitId=$(git log --before='yesterday 23:59:59' --max-count=1 --pretty=%H) git checkout $commitId echo "Executing integration tests" -GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 15m +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 24m # Checkout back to master branch to use latest CI test scripts in master. 
git checkout master diff --git a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh index 55e998a7fa..454bf98274 100644 --- a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh +++ b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh @@ -53,7 +53,7 @@ echo checkout PR branch git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER # Executing integration tests -GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 15m +GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 24m # Executing perf tests echo Mounting gcs bucket from pr branch From 52e6a3c0490f7b47229f19d73ea844caafca750a Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Thu, 17 Aug 2023 08:09:11 +0530 Subject: [PATCH 42/46] Added an experimental flag to trigger JSON flow of go-storage-client (#1265) * Enabling --endpoint flag for go-storage-client library * Fixing lint issue * Added more unit test * fixing end pointd escription * Refactoring and resolving comments * refactoring * Reverting previous refactoring, created client_helper for all client creation logic * treating passing actual GCSURL like custom point, by design in go-storage-lib * Fixing unit test, removing unused token-source * Minor refactoring, adding dummy key-file * Making the default client non-nil so that we can get dummy token-source * Fixing unit test * Review comments updated * Review comments * Incorporating review comments * Fixing build issue * Incorporating review comments * Renamed client helper to client * Incorporating review comments * Adding more unit test * Review comments * Support json read in GCSFuse * Rebased with support endpoint branch * Incorporating review comments * reverting formatting issue * Fixing build issue * Fixing build failure * Correcting a 
comment * Fixing typo * Enabling --endpoint flag for go-storage-client library * Fixing lint issue * Added more unit test * fixing end pointd escription * Refactoring and resolving comments * refactoring * Reverting previous refactoring, created client_helper for all client creation logic * treating passing actual GCSURL like custom point, by design in go-storage-lib * Fixing unit test, removing unused token-source * Minor refactoring, adding dummy key-file * Making the default client non-nil so that we can get dummy token-source * Fixing unit test * Review comments updated * Review comments * Incorporating review comments * Fixing build issue * Incorporating review comments * Renamed client helper to client * Incorporating review comments * Adding more unit test * Review comments * Fixing flag unit test * Fixed gcsfuse hang * Triggering performance test and fixing unit test * Fixing linting issue * Trigger perf test * Added equivalent command in mounted-directory run test --- flags.go | 25 ++++++++++++------- flags_test.go | 2 ++ internal/storage/storage_handle.go | 5 ++++ internal/storage/storage_handle_test.go | 8 ++++++ internal/storage/storageutil/client.go | 24 +++++++++--------- internal/storage/storageutil/test_util.go | 23 +++++++++-------- main.go | 23 +++++++++-------- .../operations/operations_test.go | 3 ++- .../run_tests_mounted_directory.sh | 20 +++++++++++++++ tools/mount_gcsfuse/main.go | 3 ++- 10 files changed, 91 insertions(+), 45 deletions(-) diff --git a/flags.go b/flags.go index 53debf06e4..94dbb3b27e 100644 --- a/flags.go +++ b/flags.go @@ -297,6 +297,11 @@ func newApp() (app *cli.App) { Usage: "The format of the log file: 'text' or 'json'.", }, + cli.BoolFlag{ + Name: "experimental-enable-json-read", + Usage: "By default read flow uses xml media, this flag will enable the json path for read operation.", + }, + ///////////////////////// // Debugging ///////////////////////// @@ -382,11 +387,12 @@ type flagStorage struct { 
EnableNonexistentTypeCache bool // Monitoring & Logging - StackdriverExportInterval time.Duration - OtelCollectorAddress string - LogFile string - LogFormat string - DebugFuseErrors bool + StackdriverExportInterval time.Duration + OtelCollectorAddress string + LogFile string + LogFormat string + ExperimentalEnableJsonRead bool + DebugFuseErrors bool // Debugging DebugFuse bool @@ -521,10 +527,11 @@ func populateFlags(c *cli.Context) (flags *flagStorage, err error) { EnableNonexistentTypeCache: c.Bool("enable-nonexistent-type-cache"), // Monitoring & Logging - StackdriverExportInterval: c.Duration("stackdriver-export-interval"), - OtelCollectorAddress: c.String("experimental-opentelemetry-collector-address"), - LogFile: c.String("log-file"), - LogFormat: c.String("log-format"), + StackdriverExportInterval: c.Duration("stackdriver-export-interval"), + OtelCollectorAddress: c.String("experimental-opentelemetry-collector-address"), + LogFile: c.String("log-file"), + LogFormat: c.String("log-format"), + ExperimentalEnableJsonRead: c.Bool("experimental-enable-json-read"), // Debugging, DebugFuseErrors: c.BoolT("debug_fuse_errors"), diff --git a/flags_test.go b/flags_test.go index eb560b756e..24fe2e3c26 100644 --- a/flags_test.go +++ b/flags_test.go @@ -111,6 +111,7 @@ func (t *FlagsTest) Bools() { "debug_gcs", "debug_invariants", "enable-nonexistent-type-cache", + "experimental-enable-json-read", } var args []string @@ -131,6 +132,7 @@ func (t *FlagsTest) Bools() { ExpectTrue(f.DebugHTTP) ExpectTrue(f.DebugInvariants) ExpectTrue(f.EnableNonexistentTypeCache) + ExpectTrue(f.ExperimentalEnableJsonRead) // --foo=false form args = nil diff --git a/internal/storage/storage_handle.go b/internal/storage/storage_handle.go index 65f62387a3..4c263c8fd5 100644 --- a/internal/storage/storage_handle.go +++ b/internal/storage/storage_handle.go @@ -57,6 +57,11 @@ func NewStorageHandle(ctx context.Context, clientConfig storageutil.StorageClien clientOpts = append(clientOpts, 
option.WithHTTPClient(httpClient)) } + // Create client with JSON read flow, if EnableJasonRead flag is set. + if clientConfig.ExperimentalEnableJsonRead { + clientOpts = append(clientOpts, storage.WithJSONReads()) + } + // Add Custom endpoint option. if clientConfig.CustomEndpoint != nil { clientOpts = append(clientOpts, option.WithEndpoint(clientConfig.CustomEndpoint.String())) diff --git a/internal/storage/storage_handle_test.go b/internal/storage/storage_handle_test.go index 2c856359d4..b69c9cd857 100644 --- a/internal/storage/storage_handle_test.go +++ b/internal/storage/storage_handle_test.go @@ -111,6 +111,7 @@ func (t *StorageHandleTest) TestNewStorageHandleWhenUserAgentIsSet() { t.invokeAndVerifyStorageHandle(sc) } + func (t *StorageHandleTest) TestNewStorageHandleWithCustomEndpoint() { url, err := url.Parse(storageutil.CustomEndpoint) AssertEq(nil, err) @@ -152,3 +153,10 @@ func (t *StorageHandleTest) TestNewStorageHandleWhenTokenUrlIsSet() { t.invokeAndVerifyStorageHandle(sc) } + +func (t *StorageHandleTest) TestNewStorageHandleWhenJsonReadEnabled() { + sc := storageutil.GetDefaultStorageClientConfig() + sc.ExperimentalEnableJsonRead = true + + t.invokeAndVerifyStorageHandle(sc) +} diff --git a/internal/storage/storageutil/client.go b/internal/storage/storageutil/client.go index 7c43b7a031..3459bab17d 100644 --- a/internal/storage/storageutil/client.go +++ b/internal/storage/storageutil/client.go @@ -28,18 +28,18 @@ import ( ) type StorageClientConfig struct { - ClientProtocol mountpkg.ClientProtocol - MaxConnsPerHost int - MaxIdleConnsPerHost int - HttpClientTimeout time.Duration - MaxRetryDuration time.Duration - RetryMultiplier float64 - UserAgent string - CustomEndpoint *url.URL - KeyFile string - TokenUrl string - ReuseTokenFromUrl bool - ExperimentalEnableJasonRead bool + ClientProtocol mountpkg.ClientProtocol + MaxConnsPerHost int + MaxIdleConnsPerHost int + HttpClientTimeout time.Duration + MaxRetryDuration time.Duration + RetryMultiplier float64 
+ UserAgent string + CustomEndpoint *url.URL + KeyFile string + TokenUrl string + ReuseTokenFromUrl bool + ExperimentalEnableJsonRead bool } func CreateHttpClient(storageClientConfig *StorageClientConfig) (httpClient *http.Client, err error) { diff --git a/internal/storage/storageutil/test_util.go b/internal/storage/storageutil/test_util.go index 0ae500cb24..56c60a2dc2 100644 --- a/internal/storage/storageutil/test_util.go +++ b/internal/storage/storageutil/test_util.go @@ -29,16 +29,17 @@ const CustomTokenUrl = "http://custom-token-url" // non-nil, so that we can create dummy tokenSource while unit test. func GetDefaultStorageClientConfig() (clientConfig StorageClientConfig) { return StorageClientConfig{ - ClientProtocol: mountpkg.HTTP1, - MaxConnsPerHost: 10, - MaxIdleConnsPerHost: 100, - HttpClientTimeout: 800 * time.Millisecond, - MaxRetryDuration: 30 * time.Second, - RetryMultiplier: 2, - UserAgent: "gcsfuse/unknown (Go version go1.20-pre3 cl/474093167 +a813be86df) (GCP:gcsfuse)", - CustomEndpoint: &url.URL{}, - KeyFile: DummyKeyFile, - TokenUrl: "", - ReuseTokenFromUrl: true, + ClientProtocol: mountpkg.HTTP1, + MaxConnsPerHost: 10, + MaxIdleConnsPerHost: 100, + HttpClientTimeout: 800 * time.Millisecond, + MaxRetryDuration: 30 * time.Second, + RetryMultiplier: 2, + UserAgent: "gcsfuse/unknown (Go version go1.20-pre3 cl/474093167 +a813be86df) (GCP:gcsfuse)", + CustomEndpoint: &url.URL{}, + KeyFile: DummyKeyFile, + TokenUrl: "", + ReuseTokenFromUrl: true, + ExperimentalEnableJsonRead: false, } } diff --git a/main.go b/main.go index df9cf4fae8..85de27cc78 100644 --- a/main.go +++ b/main.go @@ -81,17 +81,18 @@ func getUserAgent(appName string) string { func createStorageHandle(flags *flagStorage) (storageHandle storage.StorageHandle, err error) { storageClientConfig := storageutil.StorageClientConfig{ - ClientProtocol: flags.ClientProtocol, - MaxConnsPerHost: flags.MaxConnsPerHost, - MaxIdleConnsPerHost: flags.MaxIdleConnsPerHost, - HttpClientTimeout: 
flags.HttpClientTimeout, - MaxRetryDuration: flags.MaxRetryDuration, - RetryMultiplier: flags.RetryMultiplier, - UserAgent: getUserAgent(flags.AppName), - CustomEndpoint: flags.CustomEndpoint, - KeyFile: flags.KeyFile, - TokenUrl: flags.TokenUrl, - ReuseTokenFromUrl: flags.ReuseTokenFromUrl, + ClientProtocol: flags.ClientProtocol, + MaxConnsPerHost: flags.MaxConnsPerHost, + MaxIdleConnsPerHost: flags.MaxIdleConnsPerHost, + HttpClientTimeout: flags.HttpClientTimeout, + MaxRetryDuration: flags.MaxRetryDuration, + RetryMultiplier: flags.RetryMultiplier, + UserAgent: getUserAgent(flags.AppName), + CustomEndpoint: flags.CustomEndpoint, + KeyFile: flags.KeyFile, + TokenUrl: flags.TokenUrl, + ReuseTokenFromUrl: flags.ReuseTokenFromUrl, + ExperimentalEnableJsonRead: flags.ExperimentalEnableJsonRead, } storageHandle, err = storage.NewStorageHandle(context.Background(), storageClientConfig) diff --git a/tools/integration_tests/operations/operations_test.go b/tools/integration_tests/operations/operations_test.go index a3af827726..a3d85ef6a3 100644 --- a/tools/integration_tests/operations/operations_test.go +++ b/tools/integration_tests/operations/operations_test.go @@ -87,7 +87,8 @@ func TestMain(m *testing.M) { setup.ParseSetUpFlags() flags := [][]string{{"--implicit-dirs=true"}, - {"--implicit-dirs=false"}} + {"--implicit-dirs=false"}, + {"--experimental-enable-json-read=true", "--implicit-dirs=true"}} setup.ExitWithFailureIfBothTestBucketAndMountedDirectoryFlagsAreNotSet() diff --git a/tools/integration_tests/run_tests_mounted_directory.sh b/tools/integration_tests/run_tests_mounted_directory.sh index e6f88ac8b4..1ee8910439 100755 --- a/tools/integration_tests/run_tests_mounted_directory.sh +++ b/tools/integration_tests/run_tests_mounted_directory.sh @@ -44,6 +44,16 @@ mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=false GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... 
-p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run test with static mounting. (flags: --experimental-enable-json-read --implicit-dirs=true) +gcsfuse --experimental-enable-json-read --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + +# Run test with persistent mounting. (flags: --experimental-enable-json-read, --implicit-dirs=true) +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o implicit_dirs=true,experimental_enable_json_read=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # Run tests with static mounting. (flags: --implicit-dirs=true, --only-dir testDir) gcsfuse --only-dir testDir --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR @@ -64,6 +74,16 @@ mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=fal GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR sudo umount $MOUNT_DIR +# Run tests with static mounting. (flags: --experimental-enable-json-read, --implicit-dirs=true, --only-dir testDir) +gcsfuse --experimental-enable-json-read --only-dir testDir --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + +# Run tests with persistent mounting. 
(flags: --experimental-enable-json-read, --implicit-dirs=true, --only-dir=testDir) +mount.gcsfuse $TEST_BUCKET_NAME $MOUNT_DIR -o only_dir=testDir,implicit_dirs=true,experimental_enable_json_read=true +GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/operations/... -p 1 --integrationTest -v --mountedDirectory=$MOUNT_DIR +sudo umount $MOUNT_DIR + # package readonly # Run tests with static mounting. (flags: --implicit-dirs=true,--o=ro) gcsfuse --o=ro --implicit-dirs=true $TEST_BUCKET_NAME $MOUNT_DIR diff --git a/tools/mount_gcsfuse/main.go b/tools/mount_gcsfuse/main.go index d5c60f330a..64e75c3fa7 100644 --- a/tools/mount_gcsfuse/main.go +++ b/tools/mount_gcsfuse/main.go @@ -84,7 +84,8 @@ func makeGcsfuseArgs( "foreground", "experimental_local_file_cache", "reuse_token_from_url", - "enable_nonexistent_type_cache": + "enable_nonexistent_type_cache", + "experimental_enable_json_read": if value == "" { value = "true" } From 3a4d19aca6ce095ef5fa7fc6ed9e9f7923e11c44 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Thu, 17 Aug 2023 11:25:48 +0530 Subject: [PATCH 43/46] Fixing review comments: replacing printf error log to returning error (#1285) --- flags.go | 2 +- flags_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/flags.go b/flags.go index 94dbb3b27e..db3c6d2548 100644 --- a/flags.go +++ b/flags.go @@ -480,7 +480,7 @@ func populateFlags(c *cli.Context) (flags *flagStorage, err error) { } else { customEndpoint, err = url.Parse(customEndpointStr) if err != nil { - fmt.Printf("Could not parse endpoint") + err = fmt.Errorf("could not parse custom-endpoint: %w", err) return } } diff --git a/flags_test.go b/flags_test.go index 24fe2e3c26..e826cacc9e 100644 --- a/flags_test.go +++ b/flags_test.go @@ -47,6 +47,7 @@ func parseArgs(args []string) (flags *flagStorage) { var err error app.Action = func(appCtx *cli.Context) { flags, err = populateFlags(appCtx) + AssertEq(nil, err) } // Simulate argv. 
From 64409ef46b72668b0d3a9fae3d8d7c30e2549824 Mon Sep 17 00:00:00 2001 From: Prince Kumar Date: Thu, 17 Aug 2023 12:54:20 +0530 Subject: [PATCH 44/46] Updating gcsufse --help (#1286) --- flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flags.go b/flags.go index db3c6d2548..9c29e46833 100644 --- a/flags.go +++ b/flags.go @@ -299,7 +299,7 @@ func newApp() (app *cli.App) { cli.BoolFlag{ Name: "experimental-enable-json-read", - Usage: "By default read flow uses xml media, this flag will enable the json path for read operation.", + Usage: "By default read flow uses xml api, this flag will enable the json path for read operation.", }, ///////////////////////// From fe1242da29520019229138697e4921e9a2085396 Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Fri, 18 Aug 2023 09:34:57 +0530 Subject: [PATCH 45/46] Adding changes to run integration and perf tests with label (#1283) * adding changes to separate integration tests and perf tests * fix braces error * fix random string error * testing * remove changes * merge * merge * fixing comments * fixing comments * fixing comments * fixing comments * testing * testing * echo bucket name * testing by adding git stash * removing testing commanda * testing by adding git stash * testing by adding git stash * testing by adding git stash * testing by adding git stash * testing kokoro * testing kokoro * testing kokoro * testing kokoro * undo testing changes * fixing comment * testing with both the labels * removing testing changes * removing comment --- .../presubmit_test/pr_perf_test/build.sh | 125 +++++++++++------- 1 file changed, 79 insertions(+), 46 deletions(-) diff --git a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh index 454bf98274..e6dab6a2e0 100644 --- a/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh +++ b/perfmetrics/scripts/presubmit_test/pr_perf_test/build.sh @@ 
-1,69 +1,102 @@ #!/bin/bash -# Running test only for when PR contains execute-perf-test label +# Running test only for when PR contains execute-perf-test or execute-integration-tests label +readonly EXECUTE_PERF_TEST_LABEL="execute-perf-test" +readonly EXECUTE_INTEGRATION_TEST_LABEL="execute-integration-tests" +readonly INTEGRATION_TEST_EXECUTION_TIME=24m + curl https://api.github.com/repos/GoogleCloudPlatform/gcsfuse/pulls/$KOKORO_GITHUB_PULL_REQUEST_NUMBER >> pr.json -perfTest=$(cat pr.json | grep "execute-perf-test") +perfTest=$(grep "$EXECUTE_PERF_TEST_LABEL" pr.json) +integrationTests=$(grep "$EXECUTE_INTEGRATION_TEST_LABEL" pr.json) rm pr.json perfTestStr="$perfTest" -if [[ "$perfTestStr" != *"execute-perf-test"* ]] +integrationTestsStr="$integrationTests" +if [[ "$perfTestStr" != *"$EXECUTE_PERF_TEST_LABEL"* && "$integrationTestsStr" != *"$EXECUTE_INTEGRATION_TEST_LABEL"* ]] then echo "No need to execute tests" exit 0 fi -# It will take approx 80 minutes to run the script. set -e sudo apt-get update echo Installing git sudo apt-get install git -echo Installing python3-pip -sudo apt-get -y install python3-pip -echo Installing libraries to run python script -pip install google-cloud -pip install google-cloud-vision -pip install google-api-python-client -pip install prettytable echo Installing go-lang 1.20.5 -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz -q sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin -echo Installing fio -sudo apt-get install fio -y - -# Run on master branch +export CGO_ENABLED=0 cd "${KOKORO_ARTIFACTS_DIR}/github/gcsfuse" -git checkout master -echo Mounting gcs bucket for master branch -mkdir -p gcs -GCSFUSE_FLAGS="--implicit-dirs --max-conns-per-host 100" -BUCKET_NAME=presubmit-perf-tests -MOUNT_POINT=gcs -# The VM will itself exit if the gcsfuse mount fails. 
-CGO_ENABLED=0 go run . $GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT -touch result.txt -# Running FIO test -chmod +x perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh -./perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh -sudo umount gcs - # Fetch PR branch echo '[remote "origin"] fetch = +refs/pull/*/head:refs/remotes/origin/pr/*' >> .git/config -git fetch origin -echo checkout PR branch -git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER +git fetch origin -q + +function execute_perf_test() { + mkdir -p gcs + GCSFUSE_FLAGS="--implicit-dirs --max-conns-per-host 100" + BUCKET_NAME=presubmit-perf-tests + MOUNT_POINT=gcs + # The VM will itself exit if the gcsfuse mount fails. + go run . $GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT + # Running FIO test + chmod +x perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh + ./perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh + sudo umount gcs +} + +# execute perf tests. +if [[ "$perfTestStr" == *"$EXECUTE_PERF_TEST_LABEL"* ]]; +then + # Installing requirements + echo Installing python3-pip + sudo apt-get -y install python3-pip + echo Installing libraries to run python script + pip install google-cloud + pip install google-cloud-vision + pip install google-api-python-client + pip install prettytable + echo Installing fio + sudo apt-get install fio -y + + # Executing perf tests for master branch + git checkout master + # Store results + touch result.txt + echo Mounting gcs bucket for master branch and execute tests + execute_perf_test -# Executing integration tests -GODEBUG=asyncpreemptoff=1 CGO_ENABLED=0 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=gcsfuse-integration-test -timeout 24m -# Executing perf tests -echo Mounting gcs bucket from pr branch -mkdir -p gcs -# The VM will itself exit if the gcsfuse mount fails. -CGO_ENABLED=0 go run . 
$GCSFUSE_FLAGS $BUCKET_NAME $MOUNT_POINT -# Running FIO test -chmod +x perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh -./perfmetrics/scripts/presubmit/run_load_test_on_presubmit.sh -sudo umount gcs + # Executing perf tests for PR branch + echo checkout PR branch + git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER + echo Mounting gcs bucket from pr branch and execute tests + execute_perf_test -echo showing results... -python3 ./perfmetrics/scripts/presubmit/print_results.py + # Show results + echo showing results... + python3 ./perfmetrics/scripts/presubmit/print_results.py +fi + +# Execute integration tests. +if [[ "$integrationTestsStr" == *"$EXECUTE_INTEGRATION_TEST_LABEL"* ]]; +then + echo checkout PR branch + git checkout pr/$KOKORO_GITHUB_PULL_REQUEST_NUMBER + + # Create bucket for integration tests. + # The prefix for the random string + bucketPrefix="gcsfuse-integration-test-" + # The length of the random string + length=5 + # Generate the random string + random_string=$(tr -dc 'a-z0-9' < /dev/urandom | head -c $length) + BUCKET_NAME=$bucketPrefix$random_string + echo 'bucket name = '$BUCKET_NAME + gcloud alpha storage buckets create gs://$BUCKET_NAME --project=gcs-fuse-test-ml --location=us-west1 --uniform-bucket-level-access + + # Executing integration tests + GODEBUG=asyncpreemptoff=1 go test ./tools/integration_tests/... -p 1 --integrationTest -v --testbucket=$BUCKET_NAME -timeout $INTEGRATION_TEST_EXECUTION_TIME + + # Delete bucket after testing. 
+ gcloud alpha storage rm --recursive gs://$BUCKET_NAME/ +fi From 9053fe3b80e9ec9a3c26213891d345b539a5ffa4 Mon Sep 17 00:00:00 2001 From: Tulsi Shah <46474643+Tulsishah@users.noreply.github.com> Date: Mon, 21 Aug 2023 22:51:26 +0530 Subject: [PATCH 46/46] Removing extra logs in kokoro continous test (#1289) * removing extra logs of go tar * removing extra logs of go tar --- perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh | 2 +- perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh | 2 +- .../scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh index c47908abd7..0f8ce8e3bf 100644 --- a/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh +++ b/perfmetrics/scripts/continuous_test/gcp_ubuntu/build.sh @@ -7,7 +7,7 @@ sudo apt-get install git echo "Installing pip" sudo apt-get install pip -y echo "Installing go-lang 1.20.5" -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz -q sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin echo "Installing fio" diff --git a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh index be82b16994..884c633b8d 100644 --- a/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/pytorch/dino/setup_container.sh @@ -1,7 +1,7 @@ #!/bin/bash # Install golang -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz -q rm -rf /usr/local/go && tar -C /usr/local -xzf go_tar.tar.gz export PATH=$PATH:/usr/local/go/bin diff --git a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh 
b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh index c72a85f0bc..d31c3cc22c 100644 --- a/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh +++ b/perfmetrics/scripts/ml_tests/tf/resnet/setup_scripts/setup_container.sh @@ -5,7 +5,7 @@ # and epochs functionality, and runs the model # Install go lang -wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz +wget -O go_tar.tar.gz https://go.dev/dl/go1.20.5.linux-amd64.tar.gz -q sudo rm -rf /usr/local/go && tar -xzf go_tar.tar.gz && sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin