diff --git a/.gitignore b/.gitignore
index fc41556f7..70a46ce78 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,7 +26,6 @@ dist/
 downloads/
 eggs/
 .eggs/
-lib/
 lib64/
 parts/
 sdist/
@@ -106,3 +105,6 @@ openshift/templates/projects/mem-tfrs-prod/*
 # dev
 dev/
+node_modules/
+
+.sql.gz
\ No newline at end of file
diff --git a/.jenkins/Jenkinsfile b/.jenkins/Jenkinsfile
deleted file mode 100644
index dfb66becc..000000000
--- a/.jenkins/Jenkinsfile
+++ /dev/null
@@ -1,14 +0,0 @@
-pipeline {
-  agent none
-  options {
-    disableResume()
-  }
-  stages {
-    stage('Build') {
-      agent { label 'build' }
-      steps {
-        echo "hello world ..."
-      }
-    }
-  }
-}
\ No newline at end of file
diff --git a/.jenkins/README.md b/.jenkins/README.md
deleted file mode 100644
index 2348e52a1..000000000
--- a/.jenkins/README.md
+++ /dev/null
@@ -1,143 +0,0 @@
-# Introduction
-
-So you want to build out a pull-request based pipeline. This document will guide you through the process and advise on some caveats and best practices; you'll see these in the **ProTip** notes along the way.
-
-# Setup, Build, Deployments & Your Pipeline
-
-In this section we'll get all the config you need into your repository and run the necessary templates in OpenShift to build out and deploy your PR based Jenkins.
-
-We have to use a bespoke Jenkins because the one that ships with OpenShift only does branch based builds and, between you and me, it's overly loaded with plugins that cause instability.
-
-# Setup
-
-We need to build a bespoke Jenkins image that packs in some custom configuration. We recommend creating a `.jenkins` directory at the root of your project to hold all of this config.
-
-Run the following commands in the root of your repository:
-
-```console
-mkdir -p .jenkins/docker/contrib/jenkins/configuration/jobs/<REPO_NAME>/
-mkdir .jenkins/openshift
-```
-
-| Parameter    | Optional | Description                                 |
-| ------------ | -------- | ------------------------------------------- |
-| REPO_NAME    | NO       | The case-sensitive name of your repository. |
-
-## Configuration
-
-**OpenShift**
-
-Copy the OpenShift templates from `sample/openshift` to the directory you created above called `.jenkins/openshift`.
-
-**GitHub**
-
-Jenkins needs a way to interact with GitHub. You can do it by using your own account (not recommended), creating a new account, or creating an organization account. You can read more about the different types of accounts [here](https://help.github.com/en/articles/differences-between-user-and-organization-accounts).
-
-Once your account is created you will need the *username* and *token*. You can create a new token by going to `GitHub -> Settings -> Developer settings` and choosing `Generate new token`.
-
-Make sure the token has the following scopes set:
-
-| Parameter       | Optional | Description                 |
-| --------------- | -------- | --------------------------- |
-| repo:status     | NO       | Access commit status.       |
-| public_repo     | NO       | Access public repositories. |
-| write:repo_hook | NO       | Write repository hooks.     |
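A quick, optional sanity check before wiring the token into Jenkins: classic GitHub tokens report their scopes in the `X-OAuth-Scopes` response header of any API call, so you can confirm the three scopes above are present (the token below is a placeholder):

```console
curl -sI -H "Authorization: token YOUR_TOKEN_HERE" https://api.github.com/user | grep -i x-oauth-scopes
```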
-**Jenkins**
-
-The next step is to modify a Jenkins config file with some info from GitHub.
-Copy the sample Jenkins `sample/config.xml` from this directory to the directory you created above called `.jenkins/docker/contrib/jenkins/configuration/jobs/<REPO_NAME>/`.
-
-Next, edit this file and find the following lines:
-
-```xml
-A_UUID_GOES_HERE
-github-account
-GITHUB_USERNAME
-REPOSITORY_NAME
-```
-
-| Parameter         | Optional | Description                    |
-| ----------------- | -------- | ------------------------------ |
-| A_UUID_GOES_HERE  | NO       | A UUID (see ProTip).           |
-| GITHUB_USERNAME   | NO       | The GitHub user ID from above. |
-| REPOSITORY_NAME   | NO       | The name of the repository, matching case, **without** the `.git` extension. |
-
-Go ahead and commit your `.jenkins` directory and all of the files and directories inside it.
-
-### ProTip
-
-* This is what a UUID should look like: b358bd09-d6b8-4836-ac58-c32f6b356c3e
-* You can use [this](https://www.uuidgenerator.net/) site to generate a UUID, or generate one locally as shown below.
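If you'd rather not use a website, either of these commands produces a suitable UUID on most Linux and macOS systems:

```console
uuidgen
python3 -c 'import uuid; print(uuid.uuid4())'
```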
-## Build
-
-Now we're going to mint a bespoke Jenkins image using the built-in OpenShift mechanics. It's going to use a Dockerfile to do this; copy the file `sample/Dockerfile` to `.jenkins/docker` in your repo, then commit and push these changes. The Dockerfile needs to be in your repo so the OpenShift build can use it.
-
-Run the following command to trigger an OpenShift image build:
-
-```console
-oc process -f .jenkins/openshift/build.yaml \
-SOURCE_REPOSITORY_URL=https://github.com/bcgov/blarb.git \
-SOURCE_REPOSITORY_REF=master \
-| oc create -f -
-```
-
-| Parameter              | Optional | Description                      |
-| ---------------------- | -------- | -------------------------------- |
-| SOURCE_REPOSITORY_URL  | NO       | The full URL of your repository. |
-| SOURCE_REPOSITORY_REF  | NO       | The main branch. It needs to contain the `.jenkins` directory you just committed. |
-
-### ProTip
-
-* Add the label flag `-l "app=jenkins-basic-bc"` to the `oc process` command so that all the objects created by this template share a common label and can be easily identified or deleted. The `bc` at the end is meant to distinguish build-config objects from deploy-config objects.
-
-## Deploy
-
-We will use an OpenShift template to create two secrets: the first will contain GitHub credentials so that Jenkins can interact with GitHub; the second is the Jenkins credentials the builder nodes will use to contact the master node.
-
-```
-oc process -f .jenkins/openshift/secret.yaml \
-GITHUB_USERNAME=foouser \
-GITHUB_PASSWORD=6dc7f2532350ca20e86b05 \
-| oc create -f -
-```
-
-| Parameter        | Optional | Description                                             |
-| ---------------- | -------- | ------------------------------------------------------- |
-| GITHUB_USERNAME  | NO       | The GitHub account you created earlier in the process.  |
-| GITHUB_PASSWORD  | NO       | The access token you created earlier in the process.    |
-
-Next, we need to deploy the newly minted Jenkins image. This will create two Jenkins nodes: a `master` node that will serve to orchestrate your builder images, and a single builder image to do all the heavy lifting. It will also create two webhooks in your repository: one will be used to send pull-request notifications and the other to invoke a build on the given pull-request.
-
-```console
-oc process -f .jenkins/openshift/deploy.yaml \
-NAME=jenkins \
-ROUTE_HOST=jenkins-devex-mpf-secure-tools.pathfinder.gov.bc.ca \
-| oc create -f -
-```
-
-| Parameter   | Optional | Description                             |
-| ----------- | -------- | --------------------------------------- |
-| NAME        | NO       | The name used for the node deployments. |
-| ROUTE_HOST  | NO       | The route used to access Jenkins.       |
-
-When this command finishes you'll see two deployments start, one for each node mentioned above. The deployments will start two pods, again representing the two nodes mentioned above.
-
-### ProTip
-
-* Add the label flag `-l 'app=jenkins-basic-dc'` to the `oc process` command(s) so that all the objects created by this template share a common label and can be easily identified or deleted. The `dc` at the end is meant to distinguish deploy-config objects from build-config objects.
-
-## Pipeline
-
-The final step is to create a `Jenkinsfile` that will be used by Jenkins to do your build. This project has one that echoes `Hello World`. Start with it so that you can be sure all the plumbing is working before you start customizing.
-
-Copy the `sample/Jenkinsfile` to the root of your repository. Make sure it's on the `master` branch or wherever you fork from when creating a new pull-request. It must be present in any PR you wish to build.
-
-## Testing
-
-Create a pull request. You should be able to
-
diff --git a/.jenkins/docker/Dockerfile b/.jenkins/docker/Dockerfile
deleted file mode 100644
index f9b548282..000000000
--- a/.jenkins/docker/Dockerfile
+++ /dev/null
@@ -1,17 +0,0 @@
-FROM bcgov/jenkins-basic:v1-stable
-
-USER 0
-
-COPY ./contrib/jenkins/configuration $JENKINS_REF_HOME
-COPY ./contrib/jenkins/configuration/config.xml $JENKINS_REF_HOME
-COPY ./contrib/jenkins/configuration/init.groovy.d/099-setup-tfrs-credentials.groovy $JENKINS_REF_HOME/init.groovy.d
-COPY ./contrib/jenkins/configuration/plugins/embeddable-build-status.jpi $JENKINS_REF_HOME/plugins
-
-RUN echo $'\nembeddable-build-status:2.0.2' >> $JENKINS_REF_HOME/plugins.txt
-
-RUN set -x && \
-    chgrp -R 0 $JENKINS_REF_HOME && \
-    chmod -R 644 $JENKINS_REF_HOME && \
-    chmod -R g+rwX $JENKINS_REF_HOME
-
-USER 1001
diff --git a/.jenkins/docker/contrib/jenkins/configuration/config.xml b/.jenkins/docker/contrib/jenkins/configuration/config.xml
deleted file mode 100644
index 539daa890..000000000
--- a/.jenkins/docker/contrib/jenkins/configuration/config.xml
+++ /dev/null
@@ -1,195 +0,0 @@
- - - - GitHubHookRegisterProblemMonitor - - 2.164.3 - RUNNING - 0 - EXCLUSIVE - true - - com.cloudbees.plugins.credentials.CredentialsProvider.Create:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.Create:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.Delete:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.Delete:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.Update:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.Update:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.UseItem:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.UseItem:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.UseOwn:kuanfandevops-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.UseOwn:plasticviking-admin-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.View:amichard-edit-view - com.cloudbees.plugins.credentials.CredentialsProvider.View:dkelsey-view - com.cloudbees.plugins.credentials.CredentialsProvider.View:kuanfandevops-admin-edit-view
- com.cloudbees.plugins.credentials.CredentialsProvider.View:plasticviking-admin-edit-view - hudson.model.Computer.Build:kuanfandevops-admin-edit-view - hudson.model.Computer.Build:plasticviking-admin-edit-view - hudson.model.Computer.Configure:kuanfandevops-admin-edit-view - hudson.model.Computer.Configure:plasticviking-admin-edit-view - hudson.model.Computer.Connect:jenkins-slave - hudson.model.Computer.Connect:kuanfandevops-admin-edit-view - hudson.model.Computer.Connect:plasticviking-admin-edit-view - hudson.model.Computer.Create:jenkins-slave - hudson.model.Computer.Create:kuanfandevops-admin-edit-view - hudson.model.Computer.Create:plasticviking-admin-edit-view - hudson.model.Computer.Delete:kuanfandevops-admin-edit-view - hudson.model.Computer.Delete:plasticviking-admin-edit-view - hudson.model.Computer.Disconnect:kuanfandevops-admin-edit-view - hudson.model.Computer.Disconnect:plasticviking-admin-edit-view - hudson.model.Computer.ExtendedRead:kuanfandevops-admin-edit-view - hudson.model.Computer.ExtendedRead:plasticviking-admin-edit-view - hudson.model.Computer.Provision:kuanfandevops-admin-edit-view - hudson.model.Computer.Provision:plasticviking-admin-edit-view - hudson.model.Hudson.Administer:kuanfandevops-admin-edit-view - hudson.model.Hudson.Administer:plasticviking-admin-edit-view - hudson.model.Hudson.ConfigureUpdateCenter:kuanfandevops-admin-edit-view - hudson.model.Hudson.ConfigureUpdateCenter:plasticviking-admin-edit-view - hudson.model.Hudson.Read:amichard-edit-view - hudson.model.Hudson.Read:dkelsey-view - hudson.model.Hudson.Read:jenkins-slave - hudson.model.Hudson.Read:kuanfandevops-admin-edit-view - hudson.model.Hudson.Read:plasticviking-admin-edit-view - hudson.model.Hudson.RunScripts:amichard-edit-view - hudson.model.Hudson.RunScripts:kuanfandevops-admin-edit-view - hudson.model.Hudson.RunScripts:plasticviking-admin-edit-view - hudson.model.Hudson.UploadPlugins:kuanfandevops-admin-edit-view - hudson.model.Hudson.UploadPlugins:plasticviking-admin-edit-view - hudson.model.Item.Build:amichard-edit-view - hudson.model.Item.Build:kuanfandevops-admin-edit-view - hudson.model.Item.Build:plasticviking-admin-edit-view - hudson.model.Item.Cancel:amichard-edit-view - hudson.model.Item.Cancel:kuanfandevops-admin-edit-view - hudson.model.Item.Cancel:plasticviking-admin-edit-view - hudson.model.Item.Configure:amichard-edit-view - hudson.model.Item.Configure:kuanfandevops-admin-edit-view - hudson.model.Item.Configure:plasticviking-admin-edit-view - hudson.model.Item.Create:amichard-edit-view - hudson.model.Item.Create:kuanfandevops-admin-edit-view - hudson.model.Item.Create:plasticviking-admin-edit-view - hudson.model.Item.Delete:amichard-edit-view - hudson.model.Item.Delete:kuanfandevops-admin-edit-view - hudson.model.Item.Delete:plasticviking-admin-edit-view - hudson.model.Item.Discover:amichard-edit-view - hudson.model.Item.Discover:dkelsey-view - hudson.model.Item.Discover:kuanfandevops-admin-edit-view - hudson.model.Item.Discover:plasticviking-admin-edit-view - hudson.model.Item.ExtendedRead:kuanfandevops-admin-edit-view - hudson.model.Item.ExtendedRead:plasticviking-admin-edit-view - hudson.model.Item.Move:kuanfandevops-admin-edit-view - hudson.model.Item.Move:plasticviking-admin-edit-view - hudson.model.Item.Read:amichard-edit-view - hudson.model.Item.Read:dkelsey-view - hudson.model.Item.Read:kuanfandevops-admin-edit-view - hudson.model.Item.Read:plasticviking-admin-edit-view - hudson.model.Item.Workspace:amichard-edit-view - 
hudson.model.Item.Workspace:kuanfandevops-admin-edit-view - hudson.model.Item.Workspace:plasticviking-admin-edit-view - hudson.model.Run.Delete:kuanfandevops-admin-edit-view - hudson.model.Run.Delete:plasticviking-admin-edit-view - hudson.model.Run.Replay:kuanfandevops-admin-edit-view - hudson.model.Run.Replay:plasticviking-admin-edit-view - hudson.model.Run.Update:kuanfandevops-admin-edit-view - hudson.model.Run.Update:plasticviking-admin-edit-view - hudson.model.View.Configure:kuanfandevops-admin-edit-view - hudson.model.View.Configure:plasticviking-admin-edit-view - hudson.model.View.Create:kuanfandevops-admin-edit-view - hudson.model.View.Create:plasticviking-admin-edit-view - hudson.model.View.Delete:kuanfandevops-admin-edit-view - hudson.model.View.Delete:plasticviking-admin-edit-view - hudson.model.View.Read:kuanfandevops-admin-edit-view - hudson.model.View.Read:plasticviking-admin-edit-view - hudson.scm.SCM.Tag:amichard-edit-view - hudson.scm.SCM.Tag:kuanfandevops-admin-edit-view - hudson.scm.SCM.Tag:plasticviking-admin-edit-view - hudson.security.Permission.FullControl:kuanfandevops-admin-edit-view - hudson.security.Permission.FullControl:plasticviking-admin-edit-view - hudson.security.Permission.GenericConfigure:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericConfigure:plasticviking-admin-edit-view - hudson.security.Permission.GenericCreate:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericCreate:plasticviking-admin-edit-view - hudson.security.Permission.GenericDelete:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericDelete:plasticviking-admin-edit-view - hudson.security.Permission.GenericRead:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericRead:plasticviking-admin-edit-view - hudson.security.Permission.GenericUpdate:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericUpdate:plasticviking-admin-edit-view - hudson.security.Permission.GenericWrite:kuanfandevops-admin-edit-view - hudson.security.Permission.GenericWrite:plasticviking-admin-edit-view - org.jenkins.plugins.lockableresources.LockableResourcesManager.Reserve:kuanfandevops-admin-edit-view - org.jenkins.plugins.lockableresources.LockableResourcesManager.Reserve:plasticviking-admin-edit-view - org.jenkins.plugins.lockableresources.LockableResourcesManager.Unlock:kuanfandevops-admin-edit-view - org.jenkins.plugins.lockableresources.LockableResourcesManager.Unlock:plasticviking-admin-edit-view - - - /run/secrets/kubernetes.io/serviceaccount - jenkins-basic - https://openshift.default.svc - https://console.pathfinder.gov.bc.ca:8443 - system:serviceaccount:mem-tfrs-tools:jenkins-basic - 
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJtZW0tdGZycy10b29scyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJqZW5raW5zLWJhc2ljLXRva2VuLTdjbXJsIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImplbmtpbnMtYmFzaWMiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIwYzRkOTVkYi01NTkxLTExZTktOWM5Yi0wMDUwNTY4MzQ4Y2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6bWVtLXRmcnMtdG9vbHM6amVua2lucy1iYXNpYyJ9.r3LZUzhYL_nS5Jg48O9lC1gzM4JzFZTQnLYg0oSyPyYgDMhJQNI9hawf4Hii7i7VKq3h9F94mg4xyusECBE89_uXxBkIeZce6iAGPlFfrgeRQLrPEH7njoF2FDpEF-VCKgrcXGsuyMXo325hKMye_BPRkiG9PYHuzbWIaaZ3OnbPJWJXOF6e6f_-7h7iddYoGSPG_DFy-0BPPN_231fXaeJlwXrkFNIFJ_Cb1lBVaC5p81eC87rbRkP_62NwqsJcfy-9x8zkUTc25X7-DJIzcCmPg-cbWxIJFyQgsn_ApXw2GYYCtPyS6asi2Iufyu28qxW2wwsVarlOLoD-Gx8IUA - mem-tfrs-tools - - https://console.pathfinder.gov.bc.ca:8443 - https://console.pathfinder.gov.bc.ca:8443/oauth/authorize - https://console.pathfinder.gov.bc.ca:8443/oauth/token - - - - true - - ${JENKINS_HOME}/workspace/${ITEM_FULL_NAME} - ${JENKINS_HOME}/builds/${ITEM_FULL_NAME} - - - - - - - openshift - - - - false - false - false - http://jenkins-basic.mem-tfrs-tools.svc:8080 - jenkins-basic.mem-tfrs-tools.svc:50000 - 10 - 5 - 5 - 15 - false - 32 - 600 - - - - 5 - 0 - - - - all - false - false - - - - all - 50000 - - JNLP-connect - JNLP2-connect - JNLP3-connect - - - - true - - - - true - \ No newline at end of file diff --git a/.jenkins/docker/contrib/jenkins/configuration/init.groovy.d/099-setup-tfrs-credentials.groovy b/.jenkins/docker/contrib/jenkins/configuration/init.groovy.d/099-setup-tfrs-credentials.groovy deleted file mode 100644 index bfdef34bb..000000000 --- a/.jenkins/docker/contrib/jenkins/configuration/init.groovy.d/099-setup-tfrs-credentials.groovy +++ /dev/null @@ -1,36 +0,0 @@ -import jenkins.model.Jenkins - -import com.cloudbees.plugins.credentials.*; -import com.cloudbees.plugins.credentials.impl.*; -import com.cloudbees.plugins.credentials.domains.*; -import com.cloudbees.jenkins.* -import org.jenkinsci.plugins.plaincredentials.impl.*; -import java.nio.file.*; - -if (new File('/var/run/secrets/browserstack/username').exists()){ - String browserstackUsername = new File('/var/run/secrets/browserstack/username').getText('UTF-8').trim() - String browserstackAccesskey = new File('/var/run/secrets/browserstack/accesskey').getText('UTF-8').trim() - - Credentials c1 = (Credentials) new UsernamePasswordCredentialsImpl( - CredentialsScope.GLOBAL, - "browserstack", - "Browserstack Automate username and accesskey", - browserstackUsername, - browserstackAccesskey); - - SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), c1); - -} - -String functionaTestUsersFile = '/var/run/secrets/functional-test-users/functional_test_users_v2' -if (new File(functionaTestUsersFile).exists()){ - Path fileLocation = Paths.get(functionaTestUsersFile); - def secretBytes = SecretBytes.fromBytes(Files.readAllBytes(fileLocation)) - Credentials c2 = new FileCredentialsImpl( - CredentialsScope.GLOBAL, - "functional_test_users_v2", - "Functiona Test Users", - "functional_test_users_v2", - secretBytes); - SystemCredentialsProvider.getInstance().getStore().addCredentials(Domain.global(), c2); -} diff --git a/.jenkins/docker/contrib/jenkins/configuration/jobs/tfrs/config.xml b/.jenkins/docker/contrib/jenkins/configuration/jobs/tfrs/config.xml deleted file mode 100644 index 4665e5f69..000000000 --- 
a/.jenkins/docker/contrib/jenkins/configuration/jobs/tfrs/config.xml
+++ /dev/null
@@ -1,65 +0,0 @@
- - - - Transportation Fuels Reporting System - - - - - - - - - - - - - - - false - - - - - - - true - -1 - -1 - - - false - - - - - 6809510e-2eee-4a66-908d-c06ca4646bea - github-account - bcgov - tfrs - - - 1 - - - - 2 - - - - - - - - - - - - - - - - - Jenkinsfile-pr - -
\ No newline at end of file
diff --git a/.jenkins/docker/contrib/jenkins/configuration/plugins/embeddable-build-status.jpi b/.jenkins/docker/contrib/jenkins/configuration/plugins/embeddable-build-status.jpi
deleted file mode 100644
index e6d0adb12..000000000
Binary files a/.jenkins/docker/contrib/jenkins/configuration/plugins/embeddable-build-status.jpi and /dev/null differ
diff --git a/.jenkins/openshift/README.md b/.jenkins/openshift/README.md
deleted file mode 100644
index bfce834cb..000000000
--- a/.jenkins/openshift/README.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-### Create image stream bcgov-jenkins-basic and build the bcgov-jenkins-basic image as the base image for jenkins
-oc process -f ./bcgov-jenkins-basic.yaml | oc create -f -
-
-### Create secrets used by Jenkins
-1. Create the secrets below from the yaml file:
-jenkins-basic-slave-user
-jenkins-basic-github
-jenkins-basic-browserstack
-jenkins-basic-functional-test-users
-
-$ oc process -f secrets.yaml \
--l app=jenkins-basic-secret \
-NAME=jenkins-basic \
-GITHUB_USERNAME=***** \
-GITHUB_PASSWORD=***** \
-BROWSERSTACK_USERNAME=**** \
-BROWSERSTACK_ACCESSKEY=**** \
-| oc create -f -
-
-2. Create jenkins-basic-functional-test-users; you need to have functional_test_users.txt ready
-$ oc create secret generic jenkins-basic-functional-test-users --from-file=functional_test_users_v2=./functional_test_users.txt
-
-### Build and deploy jenkins-basic
-
-1. Build the jenkins-basic image
-$ oc process -f .jenkins/openshift/build.yaml -l app=jenkins-basic-bc | oc create -f -
-
-2.
deploy jenkins-basic -$ oc process -f .jenkins/openshift/deploy.yaml \ --l app=jenkins-basic-dc \ -NAME=jenkins-basic \ -ROUTE_HOST=jenkins-basic.pathfinder.gov.bc.ca \ -| oc create -f - -persistentvolumeclaim/jenkins-basic created -serviceaccount/jenkins-basic created -rolebinding.authorization.openshift.io/jenkins-basic_edit created -rolebinding.authorization.openshift.io/jenkins-basic_admin created -deploymentconfig.apps.openshift.io/jenkins-basic created -deploymentconfig.apps.openshift.io/jenkins-basic-slave-build created -service/jenkins-basic created -route.route.openshift.io/jenkins-basic created - diff --git a/.jenkins/openshift/bcgov-jenkins-basic.yaml b/.jenkins/openshift/bcgov-jenkins-basic.yaml deleted file mode 100644 index 1e27d8d86..000000000 --- a/.jenkins/openshift/bcgov-jenkins-basic.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: bcgov-jenkins-basic -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: bcgov-jenkins-basic - spec: - lookupPolicy: - local: true -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - name: bcgov-jenkins-basic - spec: - failedBuildsHistoryLimit: 2 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: bcgov-jenkins-basic:v2-20200303 - postCommit: {} - resources: - limits: - cpu: "2" - memory: 6Gi - requests: - cpu: "1" - memory: 2Gi - runPolicy: SerialLatestOnly - source: - contextDir: cicd/jenkins-basic/docker - git: - ref: update-jenkins-rhel-atomic - uri: https://github.com/kuanfandevops/openshift-components.git - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: rhel-atomic:7.7-371 - type: Docker - successfulBuildsHistoryLimit: 5 - triggers: - - type: ConfigChange - - imageChange: {} - type: ImageChange - status: - lastVersion: 0 diff --git a/.jenkins/openshift/build.yaml b/.jenkins/openshift/build.yaml deleted file mode 100644 index 189aec30c..000000000 --- a/.jenkins/openshift/build.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: jenkins - namespace: default -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: jenkins-basic - spec: - lookupPolicy: - local: false -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - labels: - app: jenkins-basic-bc - name: jenkins-basic - spec: - failedBuildsHistoryLimit: 2 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: jenkins-basic:v1.0.0 - postCommit: {} - resources: - limits: - cpu: "1" - memory: 1Gi - requests: - cpu: "1" - memory: 1Gi - runPolicy: SerialLatestOnly - source: - contextDir: .jenkins/docker - git: - ref: master - uri: https://github.com/bcgov/tfrs.git - type: Git - strategy: - dockerStrategy: - from: - kind: ImageStreamTag - name: bcgov-jenkins-basic:v2-20200303 - type: Docker - successfulBuildsHistoryLimit: 2 - triggers: - - imageChange: {} - type: ImageChange - - type: ConfigChange - status: - lastVersion: 0 \ No newline at end of file diff --git a/.jenkins/openshift/deploy.yaml b/.jenkins/openshift/deploy.yaml deleted file mode 100644 index b9fff410e..000000000 --- a/.jenkins/openshift/deploy.yaml +++ /dev/null @@ -1,451 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: 
null - name: jenkins -objects: -- apiVersion: v1 - kind: ImageStream - metadata: - creationTimestamp: null - labels: - shared: "true" - name: ${NAME} - spec: - lookupPolicy: - local: false -# - apiVersion: v1 -# kind: Secret -# metadata: -# annotations: -# as-copy-of: template.${NAME}-slave-user -# as-copy-of/preserve: password -# name: ${NAME}${SUFFIX}-slave-user -# stringData: -# metadata.name: "" -# username: "" -# password: "" -# type: kubernetes.io/basic-auth -# - apiVersion: v1 -# kind: Secret -# metadata: -# annotations: -# as-copy-of: template.${NAME}-github -# name: ${NAME}${SUFFIX}-github -# stringData: -# metadata.name: "" -# username: "" -# password: "" -# type: kubernetes.io/basic-auth -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - annotations: - volume.beta.kubernetes.io/storage-class: gluster-file - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs - name: ${NAME}${SUFFIX} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi -- apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - annotations: - volume.beta.kubernetes.io/storage-class: gluster-file - volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs - name: ${NAME}${SUFFIX}-builds - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi -- apiVersion: v1 - kind: ServiceAccount - metadata: - annotations: - serviceaccounts.openshift.io/oauth-redirectreference.jenkins: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"${NAME}${SUFFIX}"}}' - name: ${NAME}${SUFFIX} -- apiVersion: v1 - groupNames: null - kind: RoleBinding - metadata: - name: ${NAME}${SUFFIX}_edit - roleRef: - name: edit - subjects: - - kind: ServiceAccount - name: ${NAME}${SUFFIX} -- apiVersion: v1 - groupNames: null - kind: RoleBinding - metadata: - name: ${NAME}${SUFFIX}_admin - roleRef: - name: admin - subjects: - - kind: ServiceAccount - name: ${NAME}${SUFFIX} -- apiVersion: v1 - kind: DeploymentConfig - metadata: - annotations: - template.alpha.openshift.io/wait-for-ready: "true" - creationTimestamp: null - name: ${NAME}${SUFFIX} - spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}${SUFFIX} - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}${SUFFIX} - spec: - containers: - - command: - - /usr/local/bin/container-entrypoint - - /usr/local/bin/jenkins-run - env: - - name: USE_JAVA_DIAGNOSTICS - value: "true" - - name: JENKINS_URL - value: https://${ROUTE_HOST}${ROUTE_PATH} - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - image: ' ' - imagePullPolicy: Always - livenessProbe: - failureThreshold: 3 - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 420 - periodSeconds: 360 - timeoutSeconds: 240 - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - readinessProbe: - httpGet: - path: /login - port: 8080 - initialDelaySeconds: 3 - timeoutSeconds: 240 - resources: - limits: - cpu: "1" - memory: 2Gi - requests: - cpu: 300m - memory: 2Gi - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/lib/jenkins/jobs - name: jenkins-jobs - readOnly: false - - mountPath: /var/lib/jenkins/builds - name: jenkins-builds - readOnly: false - - mountPath: /var/run/pod - name: pod-metadata - - 
mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - - mountPath: /run/secrets/browserstack - name: browserstack - readOnly: true - - mountPath: /run/secrets/functional-test-users - name: functional-test-users - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME}${SUFFIX} - serviceAccountName: ${NAME}${SUFFIX} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-jobs - persistentVolumeClaim: - claimName: ${NAME}${SUFFIX} - - name: jenkins-builds - persistentVolumeClaim: - claimName: ${NAME}${SUFFIX}-builds - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-github - - name: browserstack - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-browserstack - - name: functional-test-users - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-functional-test-users - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - from: - kind: ImageStreamTag - name: ${NAME}:${VERSION} - type: ImageChange - - type: ConfigChange -- apiVersion: v1 - kind: DeploymentConfig - metadata: - creationTimestamp: null - name: ${NAME}${SUFFIX}-slave-build - spec: - replicas: 1 - revisionHistoryLimit: 10 - selector: - deploymentconfig: ${NAME}${SUFFIX}-slave-build - strategy: - activeDeadlineSeconds: 21600 - recreateParams: - timeoutSeconds: 600 - resources: {} - type: Recreate - template: - metadata: - creationTimestamp: null - labels: - deploymentconfig: ${NAME}${SUFFIX}-slave-build - spec: - containers: - - command: - - bash - - -c - - cd $HOME && java -XshowSettings:vm -version && exec java -jar /usr/lib/jenkins/swarm-client.jar - -name "$(cat /etc/hostname)" -deleteExistingClients -fsroot "$JENKINS_HOME/$(cat - /etc/hostname)" -master http://$JENKINS_MASTER_SERVICE:8080 -disableSslVerification - -username "$(cat /var/run/secrets/jenkins-slave-user/username)" -passwordFile - /var/run/secrets/jenkins-slave-user/password -description "$(cat /etc/hostname)" - -executors 3 -labels 'Linux rhel rhel7 build test deploy light' -mode - 'normal' -retry 10 -tunnel $JENKINS_MASTER_SERVICE:50000 -disableClientsUniqueId - env: - - name: JENKINS_MASTER_SERVICE - value: ${NAME}${SUFFIX} - - name: JAVA_TOOL_OPTIONS - value: -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap - -XX:MaxRAMFraction=5 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10 - -XX:+UseParallelGC -XX:ParallelGCThreads=2 - - name: ENV_NAME - value: ${ENV_NAME} - - name: ENV_ID - value: ${ENV_ID} - image: ' ' - imagePullPolicy: Always - livenessProbe: - exec: - command: - - curl - - -sSf - - http://${NAME}${SUFFIX}:8080/login - failureThreshold: 3 - initialDelaySeconds: 420 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 360 - name: jenkins - ports: - - containerPort: 50000 - protocol: TCP - - containerPort: 8080 - protocol: TCP - readinessProbe: - exec: - command: - - curl - - -sSf - - 
http://${NAME}${SUFFIX}:8080/login - failureThreshold: 3 - initialDelaySeconds: 3 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 240 - resources: - limits: - cpu: 500m - memory: 2Gi - requests: - cpu: 300m - memory: 2Gi - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /var/run/pod - name: pod-metadata - - mountPath: /run/secrets/jenkins-slave-user - name: jenkins-slave-user - readOnly: true - - mountPath: /run/secrets/github - name: github - readOnly: true - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: ${NAME}${SUFFIX} - serviceAccountName: ${NAME}${SUFFIX} - terminationGracePeriodSeconds: 30 - volumes: - - name: jenkins-home - persistentVolumeClaim: - claimName: ${NAME}${SUFFIX} - - downwardAPI: - items: - - fieldRef: - fieldPath: metadata.name - name: name - path: name - - fieldRef: - fieldPath: metadata.namespace - name: namespace - path: namespace - - fieldRef: - fieldPath: metadata.labels - name: labels - path: labels - - fieldRef: - fieldPath: metadata.annotations - name: annotations - path: annotations - name: pod-metadata - - name: jenkins-slave-user - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-slave-user - - name: github - secret: - defaultMode: 420 - secretName: ${NAME}${SUFFIX}-github - test: false - triggers: - - imageChangeParams: - automatic: true - containerNames: - - jenkins - from: - kind: ImageStreamTag - name: ${NAME}:${VERSION} - type: ImageChange - - type: ConfigChange -- apiVersion: v1 - kind: Service - metadata: - creationTimestamp: null - name: ${NAME}${SUFFIX} - spec: - ports: - - name: 8080-tcp - port: 8080 - protocol: TCP - targetPort: 8080 - - name: 50000-tcp - port: 50000 - protocol: TCP - targetPort: 50000 - selector: - deploymentconfig: ${NAME}${SUFFIX} - sessionAffinity: None - type: ClusterIP -- apiVersion: v1 - kind: Route - metadata: - creationTimestamp: null - name: ${NAME}${SUFFIX} - spec: - host: ${ROUTE_HOST} - path: ${ROUTE_PATH} - port: - targetPort: 8080-tcp - tls: - termination: edge - to: - kind: Service - name: ${NAME}${SUFFIX} - weight: 100 - wildcardPolicy: None -parameters: -- description: A name used for all objects - displayName: Name - name: NAME - required: true -- description: A name suffix used for all objects - displayName: Suffix - name: SUFFIX - required: false - value: "" -- description: A version used for the image tags - displayName: version - name: VERSION - required: true - value: v1.0.0 -- description: ROUTE_HOST - displayName: ROUTE_HOST - name: ROUTE_HOST - required: true -- description: ROUTE_PATH - displayName: ROUTE_PATH - name: ROUTE_PATH - required: true - value: / -- description: Environment Name - displayName: ENV_NAME - name: ENV_NAME - required: true - value: prod -- description: Environment ID - displayName: ENV_ID - name: ENV_ID - required: true - value: prod diff --git a/.jenkins/openshift/jenkins-basic.yaml b/.jenkins/openshift/jenkins-basic.yaml deleted file mode 100644 index ed9d5f136..000000000 --- a/.jenkins/openshift/jenkins-basic.yaml +++ /dev/null @@ -1,48 +0,0 @@ -apiVersion: template.openshift.io/v1 -kind: Template -metadata: - creationTimestamp: null - name: jenkins-basic -objects: -- apiVersion: build.openshift.io/v1 - kind: BuildConfig - metadata: - creationTimestamp: null - labels: - app: jenkins-basic-bc - name: jenkins-basic - spec: - failedBuildsHistoryLimit: 2 - nodeSelector: null - output: - to: - kind: ImageStreamTag - name: 
jenkins-basic:v1.0.0
-    postCommit: {}
-    resources:
-      limits:
-        cpu: "1"
-        memory: 1Gi
-      requests:
-        cpu: "1"
-        memory: 1Gi
-    runPolicy: SerialLatestOnly
-    source:
-      contextDir: .jenkins/docker
-      git:
-        ref: master
-        uri: https://github.com/bcgov/tfrs.git
-      type: Git
-    strategy:
-      dockerStrategy:
-        from:
-          kind: ImageStreamTag
-          name: bcgov-jenkins-basic:v2-20200303
-      type: Docker
-    successfulBuildsHistoryLimit: 2
-    triggers:
-    - imageChange: {}
-      type: ImageChange
-    - type: ConfigChange
-  status:
-    lastVersion: 0
diff --git a/.jenkins/openshift/secrets.yaml b/.jenkins/openshift/secrets.yaml
deleted file mode 100644
index 49942e462..000000000
--- a/.jenkins/openshift/secrets.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
-  creationTimestamp: null
-  name: jenkins
-objects:
-- apiVersion: v1
-  kind: Secret
-  type: kubernetes.io/basic-auth
-  metadata:
-    annotations: null
-    name: ${NAME}-slave-user
-  stringData:
-    metadata.name: ${NAME}-slave-user
-    password: ${SLAVE_USER_PASSWORD}
-    username: jenkins-slave
-- apiVersion: v1
-  kind: Secret
-  type: kubernetes.io/basic-auth
-  metadata:
-    annotations: null
-    name: ${NAME}-github
-  stringData:
-    metadata.name: ${NAME}-github
-    password: ${GITHUB_PASSWORD}
-    username: ${GITHUB_USERNAME}
-- apiVersion: v1
-  kind: Secret
-  type: kubernetes.io/basic-auth
-  metadata:
-    annotations: null
-    name: ${NAME}-browserstack
-  stringData:
-    metadata.name: ${NAME}-browserstack
-    accesskey: ${BROWSERSTACK_ACCESSKEY}
-    username: ${BROWSERSTACK_USERNAME}
-parameters:
-- name: NAME
-  description: >-
-    This name will be given to all objects so they can be uniquely referenced.
-  displayName: Name
-  required: true
-  value: jenkins
-- name: GITHUB_USERNAME
-  description: >-
-    The name of the GitHub user that Jenkins will use to access the GitHub API
-  required: true
-- name: GITHUB_PASSWORD
-  description: >-
-    The Personal Access Token associated with the GitHub user. This will function as a
-    password for the user.
-  required: true
-- name: SLAVE_USER_PASSWORD
-  description: >-
-    The password for the Jenkins slave user. This will be updated by the deployment
-    so it's best to let it be auto-generated for now.
-  from: '[a-zA-Z0-9]{16}'
-  generate: expression
-- name: BROWSERSTACK_USERNAME
-  description: >-
-    BrowserStack Automate username.
-  required: true
-- name: BROWSERSTACK_ACCESSKEY
-  description: >-
-    BrowserStack Automate accesskey.
-  required: true
\ No newline at end of file
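Note the `SLAVE_USER_PASSWORD` parameter in the template above: because it has `generate: expression`, `oc process` fills it with a random 16-character alphanumeric string matching the `from` pattern each time the template is processed. You can dry-run the template and inspect the rendered secrets before piping anything to `oc create -f -` (placeholder values shown):

```console
oc process -f .jenkins/openshift/secrets.yaml \
NAME=jenkins-basic \
GITHUB_USERNAME=foo GITHUB_PASSWORD=bar \
BROWSERSTACK_USERNAME=foo BROWSERSTACK_ACCESSKEY=bar
```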
diff --git a/.pipeline/.nvmrc b/.pipeline/.nvmrc
new file mode 100644
index 000000000..6b12bc745
--- /dev/null
+++ b/.pipeline/.nvmrc
@@ -0,0 +1 @@
+v10.15.2
\ No newline at end of file
diff --git a/.pipeline/build.js b/.pipeline/build.js
new file mode 100755
index 000000000..3ac899f86
--- /dev/null
+++ b/.pipeline/build.js
@@ -0,0 +1,5 @@
+'use strict';
+const task = require('./lib/build.js')
+const settings = require('./lib/config.js')
+
+task(Object.assign(settings, { phase: 'build'}))
diff --git a/.pipeline/clean-nsps.js b/.pipeline/clean-nsps.js
new file mode 100755
index 000000000..1c779a387
--- /dev/null
+++ b/.pipeline/clean-nsps.js
@@ -0,0 +1,5 @@
+'use strict';
+const settings = require('./lib/config.js')
+const task = require('./lib/clean-nsps.js')
+
+task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline/clean.js b/.pipeline/clean.js
new file mode 100755
index 000000000..42231d7ff
--- /dev/null
+++ b/.pipeline/clean.js
@@ -0,0 +1,5 @@
+'use strict';
+const settings = require('./lib/config.js')
+const task = require('./lib/clean.js')
+
+task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline/deploy-nsps.js b/.pipeline/deploy-nsps.js
new file mode 100755
index 000000000..cdfb7480b
--- /dev/null
+++ b/.pipeline/deploy-nsps.js
@@ -0,0 +1,5 @@
+'use strict';
+const settings = require('./lib/config.js')
+const task = require('./lib/deploy-nsps.js')
+
+task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline/deploy.js b/.pipeline/deploy.js
new file mode 100755
index 000000000..595509459
--- /dev/null
+++ b/.pipeline/deploy.js
@@ -0,0 +1,5 @@
+'use strict';
+const settings = require('./lib/config.js')
+const task = require('./lib/deploy.js')
+
+task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline/lib/build.js b/.pipeline/lib/build.js
new file mode 100755
index 000000000..51a737ae7
--- /dev/null
+++ b/.pipeline/lib/build.js
@@ -0,0 +1,100 @@
+"use strict";
+const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
+const path = require("path");
+
+module.exports = settings => {
+  const phases = settings.phases;
+  const options = settings.options;
+  const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
+  const phase = "build";
+  let objects = [];
+  const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v4"));
+
+  // The building of your cool app goes here ▼▼▼
+  // build frontend
+  console.log( oc.git.http_url);
+  console.log( oc.git.ref);
+
+  //build backend
+  objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-bc.yaml`, {
+    'param':{
+      'NAME': 'tfrs',
+      'SUFFIX': phases[phase].suffix,
+      'VERSION': phases[phase].tag,
+      'GIT_URL': oc.git.http_url,
+      'GIT_REF': oc.git.ref
+    }
+  }))
+
+  //build frontend
+  objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-angular-app-bc.yaml`, {
+    'param':{
+      'NAME': phases[phase].name,
+      'SUFFIX': phases[phase].suffix,
+      'VERSION': phases[phase].tag,
+      'GIT_URL': oc.git.http_url,
+      'GIT_REF': oc.git.ref
+    }
+  }))
+  objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, {
+    'param':{
+      'NAME': phases[phase].name,
+      'SUFFIX': phases[phase].suffix,
+      'VERSION': phases[phase].tag,
+      'GIT_URL': oc.git.http_url,
+      'GIT_REF': oc.git.ref
+    }
+  }))
+
+ 
//build celery + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/celery/celery-bc.yaml`, { + 'param':{ + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'GIT_URL': oc.git.http_url, + 'RELEASE_BRANCH': phases[phase].releaseBranch + } +})) + + //build notification server + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/notification/notification-server-bc.yaml`, { + 'param':{ + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'GIT_URL': oc.git.http_url, + 'GIT_REF': oc.git.ref + } +})) + + //build scan coordinator server + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/scan-coordinator/scan-coordinator-bc.yaml`, { + 'param':{ + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'GIT_URL': oc.git.http_url, + 'GIT_REF': oc.git.ref + } +})) + +//build scan handler server +objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/scan-handler/scan-handler-bc.yaml`, { + 'param':{ + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'RELEASE_BRANCH': phases[phase].releaseBranch + } +})) + + oc.applyRecommendedLabels( + objects, + phases[phase].name, + phase, + phases[phase].changeId, + phases[phase].instance, + ); + oc.applyAndBuild(objects); +}; diff --git a/.pipeline/lib/clean-nsps.js b/.pipeline/lib/clean-nsps.js new file mode 100755 index 000000000..4d1c7aa3c --- /dev/null +++ b/.pipeline/lib/clean-nsps.js @@ -0,0 +1,46 @@ +"use strict"; +const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); +const KeyCloakClient = require('./keycloak'); + +const getTargetPhases = (env, phases) => { + let target_phase = []; + for (const phase in phases) { + if (env.match(/^(all|transient)$/) && phases[phase].transient) { + target_phase.push(phase); + } else if (env === phase) { + target_phase.push(phase); + break; + } + } + + return target_phase; +}; + +module.exports = settings => { + const phases = settings.phases; + const options = settings.options; + const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); + const target_phases = getTargetPhases(options.env, phases); + + target_phases.forEach(k => { + if (phases.hasOwnProperty(k)) { + + const phase = phases[k]; + oc.namespace(phase.namespace); + + //remove all custom security policies create for specific pull request + const nsps = oc.get("networksecuritypolicies", { + selector: `app=${phase.name}${phase.suffix}`, + namespace: phase.namespace, + }); + nsps.forEach(nsp => { + oc.delete([`networksecuritypolicy/${nsp.metadata.name}`], { + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); + }); + + } + }); +}; diff --git a/.pipeline/lib/clean.js b/.pipeline/lib/clean.js new file mode 100755 index 000000000..b47a62135 --- /dev/null +++ b/.pipeline/lib/clean.js @@ -0,0 +1,135 @@ +"use strict"; +const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); +const KeyCloakClient = require('./keycloak'); + +const getTargetPhases = (env, phases) => { + let target_phase = []; + for (const phase in phases) { + if (env.match(/^(all|transient)$/) && phases[phase].transient) { + target_phase.push(phase); + } else if (env === phase) { + target_phase.push(phase); + break; + } + } + + return target_phase; +}; + +module.exports = settings => { + const phases = 
settings.phases; + const options = settings.options; + const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options)); + const target_phases = getTargetPhases(options.env, phases); + + target_phases.forEach(k => { + if (phases.hasOwnProperty(k)) { + + const phase = phases[k]; + oc.namespace(phase.namespace); + /** + if(k === 'dev') { + const kc = new KeyCloakClient(settings, oc); + kc.removeUris(); + } + */ + + let buildConfigs = oc.get("bc", { + selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, + namespace: phase.namespace, + }); + buildConfigs.forEach(bc => { + if (bc.spec.output.to.kind == "ImageStreamTag") { + oc.delete([`ImageStreamTag/${bc.spec.output.to.name}`], { + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); + } + }); + + let deploymentConfigs = oc.get("dc", { + selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, + namespace: phase.namespace, + }); + deploymentConfigs.forEach(dc => { + dc.spec.triggers.forEach(trigger => { + if ( + trigger.type == "ImageChange" && + trigger.imageChangeParams.from.kind == "ImageStreamTag" + ) { + oc.delete([`ImageStreamTag/${trigger.imageChangeParams.from.name}`], { + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); + } + }); + }); + + //get all statefulsets before they are deleted + const statefulsets = oc.get("statefulset", { + selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, + namespace: phase.namespace, + }); + + oc.raw("delete", ["all"], { + selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, + wait: "true", + namespace: phase.namespace, + }); + oc.raw( + "delete", + ["pvc,Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints"], + { + selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, + wait: "true", + namespace: phase.namespace, + }, + ); + + //remove all the PVCs associated with each statefulset, after they get deleted by above delete all operation + statefulsets.forEach(statefulset => { + //delete PVCs mounted for statfulset + oc.raw("delete", ["pvc"], { + selector: `statefulset=${statefulset.metadata.name}`, + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); + + //delete configmaps create by patroni + let patroniConfigmaps = oc.get("configmap", { + selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, + namespace: phase.namespace, + }); + if(Object.entries(patroniConfigmaps).length > 0) { + oc.raw( + "delete", + ["configmap"], + { + selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`, + wait: "true", + "ignore-not-found": "true", + namespace: phase.namespace, + }, + ); + } + }); + + //remove all custom security policies create for specific pull request + const nsps = oc.get("networksecuritypolicies", { + selector: `app=${phase.name}${phase.suffix}`, + namespace: phase.namespace, + }); + nsps.forEach(nsp => { + oc.delete([`networksecuritypolicy/${nsp.metadata.name}`], { + "ignore-not-found": "true", + wait: "true", + namespace: phase.namespace, + }); + }); + + } + }); +}; diff --git a/.pipeline/lib/config.js b/.pipeline/lib/config.js 
new file mode 100644 index 000000000..92b982857 --- /dev/null +++ b/.pipeline/lib/config.js @@ -0,0 +1,104 @@ +'use strict'; +const options= require('@bcgov/pipeline-cli').Util.parseArguments() +const changeId = options.pr //aka pull-request +const version = '1.0.0' +const name = 'tfrs' +const ocpName = 'apps.silver.devops' +const phases = { + build: { namespace:'0ab226-tools' , name: `${name}`, phase: 'build' , changeId:changeId, suffix: `-build-${changeId}` , + instance: `${name}-build-${changeId}` , version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, + releaseBranch: 'openshift-v4-migration' + }, + dev: {namespace:'0ab226-dev' , name: `${name}`, phase: 'dev' , changeId:changeId, suffix: `-dev-${changeId}` , + instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`, + frontendCpuRequest: '100m', frontendCpuLimit: '700m', frontendMemoryRequest: '300M', frontendMemoryLimit: '4G', frontendReplicas: 1, + frontendKeycloakAuthority: 'https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev', frontendKeycloakClientId: 'tfrs-dev', frontendKeycloakCallbackUrl: `https://tfrs-frontend-dev-${changeId}.${ocpName}.gov.bc.ca/authCallback`, + frontendKeycloakLogoutUrl: `https://logontest.gov.bc.ca/clp-cgi/logoff.cgi?returl=https://tfrs-frontend-dev-${changeId}.${ocpName}.gov.bc.ca`, + frontendHost: `tfrs-frontend-dev-${changeId}.${ocpName}.gov.bc.ca`, + frontendCpuRequest: '200m', frontendCpuLimit: '500m', frontendMemoryRequest: '250Mi', frontendMemoryLimit: '500Mi', + backendCpuRequest: '300m', backendCpuLimit: '600m', backendMemoryRequest: '1Gi', backendMemoryLimit: '2Gi', backendHealthCheckDelay: 30, + backendHost: `tfrs-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1, + backendKeycloakSaBaseurl: 'https://dev.oidc.gov.bc.ca', + backendKeycloakSaClientId: 'tfrs-dev-django-sa', + backendKeycloakSaRealm: 'tfrs-dev', + backendKeycloakAudience: 'tfrs-dev', + backendKeycloakCertsUrl: 'https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev/protocol/openid-connect/certs', + backendKeycloakClientId: 'tfrs-dev', + backendKeycloakIssuer: 'https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev', + backendKeycloakRealm: 'https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev', + celeryCpuRequest: '100m', celeryCpuLimit: '250m', celeryMemoryRequest: '1600Mi', celeryMemoryLimit: '3Gi', + scanHandlerCpuRequest: '100m', scanHandlerCpuLimit: '250m', scanHandlerMemoryRequest: '255Mi', scanHandlerMemoryLimit: '512Mi', + scanCoordinatorCpuRequest: '100m', scanCoordinatorCpuLimit: '250m', scanCoordinatorMemoryRequest: '255Mi', scanCoordinatorMemoryLimit: '512Mi', + notificationServerCpuRequest: '100m', notificationServerCpuLimit: '400m', notificationServerMemoryRequest: '256Mi', notificationServerMemoryLimit: '512Mi', + patroniCpuRequest: '500m', patroniCpuLimit: '1000m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '1Gi', patroniPvcSize: '2Gi', + patroniReplica: 1, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`, + rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500Mi', rabbitmqMemoryLimit: '1Gi', rabbitmqPvcSize: '1Gi', + rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', + schemaSpyPublicCpuRequest: '50m', schemaSpyPublicCpuLimit: '500m', schemaSpyPublicMemoryRequest: '512Mi', schemaSpyPublicMemoryLimit: '2Gi', + schemaSpyAuditCpuRequest: '50m', schemaSpyAuditCpuLimit: '300m', schemaSpyAuditMemoryRequest: '256Mi', schemaSpyAuditMemoryLimit: '512Mi' + }, + test: 
{namespace:'0ab226-test' , name: `${name}`, phase: 'test' , changeId:changeId, suffix: `-test` , + instance: `${name}-test` , version:`${version}`, tag:`test-${version}`, + frontendCpuRequest: '100m', frontendCpuLimit: '700m', frontendMemoryRequest: '300M', frontendMemoryLimit: '4G', frontendReplicas: 1, + frontendKeycloakAuthority: 'https://test.oidc.gov.bc.ca/auth/realms/tfrs', frontendKeycloakClientId: 'tfrs', frontendKeycloakCallbackUrl: `https://tfrs-frontend-test.${ocpName}.gov.bc.ca/authCallback`, + frontendKeycloakLogoutUrl: `https://logontest.gov.bc.ca/clp-cgi/logoff.cgi?returl=https://tfrs-frontend-test.${ocpName}.gov.bc.ca`, + frontendHost: `tfrs-frontend-test.${ocpName}.gov.bc.ca`, + frontendCpuRequest: '200m', frontendCpuLimit: '500m', frontendMemoryRequest: '250Mi', frontendMemoryLimit: '500Mi', + backendCpuRequest: '250m', backendCpuLimit: '500m', backendMemoryRequest: '1Gi', backendMemoryLimit: '2Gi', backendHealthCheckDelay: 30, + backendHost: `tfrs-backend-test.${ocpName}.gov.bc.ca`, backendReplicas: 1, + backendKeycloakSaBaseurl: 'https://test.oidc.gov.bc.ca', + backendKeycloakSaClientId: 'tfrs-django-sa', + backendKeycloakSaRealm: 'tfrs', + backendKeycloakAudience: 'tfrs', + backendKeycloakCertsUrl: 'https://test.oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs', + backendKeycloakClientId: 'tfrs', + backendKeycloakIssuer: 'https://test.oidc.gov.bc.ca/auth/realms/tfrs', + backendKeycloakRealm: 'https://test.oidc.gov.bc.ca/auth/realms/tfrs', + celeryCpuRequest: '100m', celeryCpuLimit: '250m', celeryMemoryRequest: '1600Mi', celeryMemoryLimit: '3Gi', + scanHandlerCpuRequest: '100m', scanHandlerCpuLimit: '250m', scanHandlerMemoryRequest: '255Mi', scanHandlerMemoryLimit: '512Mi', + scanCoordinatorCpuRequest: '100m', scanCoordinatorCpuLimit: '250m', scanCoordinatorMemoryRequest: '255Mi', scanCoordinatorMemoryLimit: '512Mi', + notificationServerCpuRequest: '100m', notificationServerCpuLimit: '400m', notificationServerMemoryRequest: '256Mi', notificationServerMemoryLimit: '512Mi', + patroniCpuRequest: '300m', patroniCpuLimit: '700m', patroniMemoryRequest: '250Mi', patroniMemoryLimit: '1Gi', patroniPvcSize: '3Gi', + patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`, + rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '400m', rabbitmqMemoryRequest: '500Mi', rabbitmqMemoryLimit: '1Gi', rabbitmqPvcSize: '1Gi', + rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard', + schemaSpyPublicCpuRequest: '50m', schemaSpyPublicCpuLimit: '500m', schemaSpyPublicMemoryRequest: '512Mi', schemaSpyPublicMemoryLimit: '2Gi', + schemaSpyAuditCpuRequest: '50m', schemaSpyAuditCpuLimit: '300m', schemaSpyAuditMemoryRequest: '256Mi', schemaSpyAuditMemoryLimit: '512Mi' + }, + prod: {namespace:'0ab226-prod' , name: `${name}`, phase: 'prod' , changeId:changeId, suffix: `-prod` , + instance: `${name}-prod` , version:`${version}`, tag:`prod-${version}`, + frontendCpuRequest: '200m', frontendCpuLimit: '700m', frontendMemoryRequest: '300M', frontendMemoryLimit: '4G', frontendReplicas: 2, + frontendKeycloakAuthority: 'https://oidc.gov.bc.ca/auth/realms/tfrs', frontendKeycloakClientId: 'tfrs', frontendKeycloakCallbackUrl: `https://tfrs-frontend-prod.${ocpName}.gov.bc.ca/authCallback`, + frontendKeycloakLogoutUrl: `https://logon7.gov.bc.ca/clp-cgi/logoff.cgi?returl=https://tfrs-frontend-prod.${ocpName}.gov.bc.ca`, + frontendHost: `tfrs-frontend-prod.${ocpName}.gov.bc.ca`, + frontendCpuRequest: '200m', frontendCpuLimit: '500m', 
frontendMemoryRequest: '250Mi', frontendMemoryLimit: '500Mi',
+    backendCpuRequest: '300m', backendCpuLimit: '600m', backendMemoryRequest: '1Gi', backendMemoryLimit: '2Gi', backendHealthCheckDelay: 30,
+    backendHost: `tfrs-backend-prod.${ocpName}.gov.bc.ca`, backendReplicas: 2,
+    backendKeycloakSaBaseurl: 'https://oidc.gov.bc.ca',
+    backendKeycloakSaClientId: 'tfrs-django-sa',
+    backendKeycloakSaRealm: 'tfrs',
+    backendKeycloakAudience: 'tfrs',
+    backendKeycloakCertsUrl: 'https://oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs',
+    backendKeycloakClientId: 'tfrs',
+    backendKeycloakIssuer: 'https://oidc.gov.bc.ca/auth/realms/tfrs',
+    backendKeycloakRealm: 'https://oidc.gov.bc.ca/auth/realms/tfrs',
+    celeryCpuRequest: '200m', celeryCpuLimit: '300m', celeryMemoryRequest: '1600Mi', celeryMemoryLimit: '3Gi',
+    scanHandlerCpuRequest: '200m', scanHandlerCpuLimit: '300m', scanHandlerMemoryRequest: '255Mi', scanHandlerMemoryLimit: '512Mi',
+    scanCoordinatorCpuRequest: '200m', scanCoordinatorCpuLimit: '300m', scanCoordinatorMemoryRequest: '255Mi', scanCoordinatorMemoryLimit: '512Mi',
+    notificationServerCpuRequest: '200m', notificationServerCpuLimit: '400m', notificationServerMemoryRequest: '256Mi', notificationServerMemoryLimit: '512Mi',
+    patroniCpuRequest: '300m', patroniCpuLimit: '600m', patroniMemoryRequest: '500Mi', patroniMemoryLimit: '2Gi', patroniPvcSize: '10Gi',
+    patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`,
+    rabbitmqCpuRequest: '200m', rabbitmqCpuLimit: '400m', rabbitmqMemoryRequest: '500Mi', rabbitmqMemoryLimit: '2Gi', rabbitmqPvcSize: '1Gi',
+    rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard',
+    schemaSpyPublicCpuRequest: '50m', schemaSpyPublicCpuLimit: '500m', schemaSpyPublicMemoryRequest: '512Mi', schemaSpyPublicMemoryLimit: '2Gi',
+    schemaSpyAuditCpuRequest: '50m', schemaSpyAuditCpuLimit: '300m', schemaSpyAuditMemoryRequest: '256Mi', schemaSpyAuditMemoryLimit: '512Mi'
+  },
+};
+
+// This callback forces the node process to exit as failure.
+ +// This callback forces the node process to exit as failure. +process.on('unhandledRejection', (reason) => { + console.log(reason); + process.exit(1); +}); + +module.exports = exports = {phases, options}; \ No newline at end of file diff --git a/.pipeline/lib/deploy-nsps.js b/.pipeline/lib/deploy-nsps.js new file mode 100755 index 000000000..873c47275 --- /dev/null +++ b/.pipeline/lib/deploy-nsps.js @@ -0,0 +1,36 @@ +"use strict"; +const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); +const path = require("path"); +const KeyCloakClient = require('./keycloak'); + +module.exports = settings => { + const phases = settings.phases; + const options = settings.options; + const phase = options.env; + const changeId = phases[phase].changeId; + const oc = new OpenShiftClientX(Object.assign({ namespace: phases[phase].namespace }, options)); + + const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v4")); + var objects = []; + + //The deployment of your cool app goes here ▼▼▼ + + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/nsp/nsp-env.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'ENV_NAME': phases[phase].phase, + 'SUFFIX': phases[phase].suffix, + 'API_VERSION': 'security.devops.gov.bc.ca/v1alpha1' + } + })) + + oc.applyRecommendedLabels( + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); + oc.applyAndDeploy(objects, phases[phase].instance); +};
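Every module in `.pipeline/lib` exports a single function that receives the settings object. The top-level entry scripts that package.json (later in this diff) points at are not part of this change set; presumably they follow the standard BCDK wrapper, roughly:

```js
// .pipeline/deploy-nsps.js -- hypothetical entry point assuming the usual
// BCDK layout; the actual file is not included in this diff.
'use strict';
const settings = require('./lib/config.js');
const task = require('./lib/deploy-nsps.js');

task(settings); // settings carries the { phases, options } exported above
```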
diff --git a/.pipeline/lib/deploy.js b/.pipeline/lib/deploy.js new file mode 100755 index 000000000..cab70ea28 --- /dev/null +++ b/.pipeline/lib/deploy.js @@ -0,0 +1,228 @@ +"use strict"; +const { OpenShiftClientX } = require("@bcgov/pipeline-cli"); +const path = require("path"); +const KeyCloakClient = require('./keycloak'); + +module.exports = settings => { + const phases = settings.phases; + const options = settings.options; + const phase = options.env; + const changeId = phases[phase].changeId; + const oc = new OpenShiftClientX(Object.assign({ namespace: phases[phase].namespace }, options)); + + const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v4")); + var objects = []; + + //The deployment of your cool app goes here ▼▼▼ +/* + if(phases[phase].phase === 'dev') { + + //deploy Patroni + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni/deployment-prereq.yaml`, { + 'param': { + 'NAME': 'patroni', + 'SUFFIX': phases[phase].suffix + } + })) + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni/deployment.yaml`, { + 'param': { + 'NAME': 'patroni', + 'ENV_NAME': phases[phase].phase, + 'SUFFIX': phases[phase].suffix, + 'CPU_REQUEST': phases[phase].patroniCpuRequest, + 'CPU_LIMIT': phases[phase].patroniCpuLimit, + 'MEMORY_REQUEST': phases[phase].patroniMemoryRequest, + 'MEMORY_LIMIT': phases[phase].patroniMemoryLimit, + 'IMAGE_REGISTRY': 'image-registry.openshift-image-registry.svc:5000', + 'IMAGE_STREAM_NAMESPACE': phases[phase].namespace, + 'IMAGE_STREAM_TAG': 'patroni:v10-stable', + 'REPLICA': phases[phase].patroniReplica, + 'PVC_SIZE': phases[phase].patroniPvcSize, + 'STORAGE_CLASS': phases[phase].storageClass + } + })) + + //deploy rabbitmq, use docker image directly + //POST_START_SLEEP is hard-coded in the rabbitmq template; replacement was not successful + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/rabbitmq/rabbitmq-cluster-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'ENV_NAME': phases[phase].phase, + 'SUFFIX': phases[phase].suffix, + 'NAMESPACE': phases[phase].namespace, + 'CLUSTER_NAME': 'rabbitmq-cluster', + 'ISTAG': 'rabbitmq:3.8.3-management', + 'SERVICE_ACCOUNT': 'rabbitmq-discovery', + 'VOLUME_SIZE': phases[phase].rabbitmqPvcSize, + 'CPU_REQUEST': phases[phase].rabbitmqCpuRequest, + 'CPU_LIMIT': phases[phase].rabbitmqCpuLimit, + 'MEMORY_REQUEST': phases[phase].rabbitmqMemoryRequest, + 'MEMORY_LIMIT': phases[phase].rabbitmqMemoryLimit, + 'REPLICA': phases[phase].rabbitmqReplica, + 'POST_START_SLEEP': phases[phase].rabbitmqPostStartSleep, + 'STORAGE_CLASS': phases[phase].storageClass + } + })) + } +*/ + + //deploy backend + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'ENV_NAME': phases[phase].phase, + 'NAMESPACE': phases[phase].namespace, + 'VERSION': phases[phase].tag, + 'KEYCLOAK_SA_BASEURL': phases[phase].backendKeycloakSaBaseurl, + 'KEYCLOAK_SA_CLIENT_ID': phases[phase].backendKeycloakSaClientId, + 'KEYCLOAK_SA_REALM': phases[phase].backendKeycloakSaRealm, + 'KEYCLOAK_AUDIENCE': phases[phase].backendKeycloakAudience, + 'KEYCLOAK_CERTS_URL': phases[phase].backendKeycloakCertsUrl, + 'KEYCLOAK_CLIENT_ID': phases[phase].backendKeycloakClientId, + 'KEYCLOAK_ISSUER': phases[phase].backendKeycloakIssuer, + 'KEYCLOAK_REALM':phases[phase].backendKeycloakRealm, + 'CPU_REQUEST':phases[phase].backendCpuRequest, + 'CPU_LIMIT':phases[phase].backendCpuLimit, + 'MEMORY_REQUEST':phases[phase].backendMemoryRequest, + 'MEMORY_LIMIT':phases[phase].backendMemoryLimit, + 'REPLICAS':phases[phase].backendReplicas + } + })) + +/* + //deploy backend others + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-dc-others.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'BACKEND_HOST':phases[phase].backendHost + } + })) + + //deploy frontend + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc-others.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'KEYCLOAK_AUTHORITY': phases[phase].frontendKeycloakAuthority, + 'KEYCLOAK_CLIENT_ID': phases[phase].frontendKeycloakClientId, + 'KEYCLOAK_CALLBACK_URL': phases[phase].frontendKeycloakCallbackUrl, + 'KEYCLOAK_LOGOUT_URL': phases[phase].frontendKeycloakLogoutUrl, + 'FRONTEND_HOST': phases[phase].frontendHost, + 'BACKEND_HOST': phases[phase].backendHost + } + })) + + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'CPU_REQUEST': phases[phase].frontendCpuRequest, + 'CPU_LIMIT': phases[phase].frontendCpuLimit, + 'MEMORY_REQUEST': phases[phase].frontendMemoryRequest, + 'MEMORY_LIMIT': phases[phase].frontendMemoryLimit, + 'REPLICAS':phases[phase].frontendReplicas + } + })) + + + //deploy celery + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/celery/celery-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, +
'ENV_NAME': phases[phase].phase, + 'NAMESPACE': phases[phase].namespace, + 'CPU_REQUEST': phases[phase].celeryCpuRequest, + 'CPU_LIMIT': phases[phase].celeryCpuLimit, + 'MEMORY_REQUEST': phases[phase].celeryMemoryRequest, + 'MEMORY_LIMIT': phases[phase].celeryMemoryLimit + } + })) + + //deploy notification server others + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/notification/notification-server-others-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'FRONTEND_HOST': phases[phase].frontendHost + } + })) + + //deploy notification server + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/notification/notification-server-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'NAMESPACE': phases[phase].namespace, + 'VERSION': phases[phase].tag, + 'KEYCLOAK_CERTS_URL': phases[phase].backendKeycloakCertsUrl, + 'CPU_REQUEST':phases[phase].notificationServerCpuRequest, + 'CPU_LIMIT':phases[phase].notificationServerCpuLimit, + 'MEMORY_REQUEST':phases[phase].notificationServerMemoryRequest, + 'MEMORY_LIMIT':phases[phase].notificationServerMemoryLimit + } + })) + + //deploy scan coordinator + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/scan-coordinator/scan-coordinator-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'NAMESPACE': phases[phase].namespace, + 'VERSION': phases[phase].tag, + 'ENV_NAME': phases[phase].phase, + 'CPU_REQUEST':phases[phase].scanCoordinatorCpuRequest, + 'CPU_LIMIT':phases[phase].scanCoordinatorCpuLimit, + 'MEMORY_REQUEST':phases[phase].scanCoordinatorMemoryRequest, + 'MEMORY_LIMIT':phases[phase].scanCoordinatorMemoryLimit + } + })) + + //deploy scan handler + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/scan-handler/scan-handler-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'NAMESPACE': phases[phase].namespace, + 'VERSION': phases[phase].tag, + 'CPU_REQUEST':phases[phase].scanHandlerCpuRequest, + 'CPU_LIMIT':phases[phase].scanHandlerCpuLimit, + 'MEMORY_REQUEST':phases[phase].scanHandlerMemoryRequest, + 'MEMORY_LIMIT':phases[phase].scanHandlerMemoryLimit + } + })) + + //only deploy schemaspy for test and prod + if(phases[phase].phase === 'test' || phases[phase].phase === 'prod') { + objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/schema-spy/schemaspy-dc.yaml`, { + 'param': { + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'ENV_NAME': phases[phase].phase, + 'CPU_REQUEST_PUBLIC': phases[phase].schemaSpyPublicCpuRequest, + 'CPU_LIMIT_PUBLIC': phases[phase].schemaSpyPublicCpuLimit, + 'MEMORY_REQUEST_PUBLIC': phases[phase].schemaSpyPublicMemoryRequest, + 'MEMORY_LIMIT_PUBLIC': phases[phase].schemaSpyPublicMemoryLimit, + 'CPU_REQUEST_AUDIT': phases[phase].schemaSpyAuditCpuRequest, + 'CPU_LIMIT_AUDIT': phases[phase].schemaSpyAuditCpuLimit, + 'MEMORY_REQUEST_AUDIT': phases[phase].schemaSpyAuditMemoryRequest, + 'MEMORY_LIMIT_AUDIT': phases[phase].schemaSpyAuditMemoryLimit + } + })) + } +*/ + oc.applyRecommendedLabels( + objects, + phases[phase].name, + phase, + `${changeId}`, + phases[phase].instance, + ); + oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); + oc.applyAndDeploy(objects, phases[phase].instance); +};
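Note that both deploy tasks import `KeyCloakClient` without ever instantiating it in this revision. The class in the next hunk is evidently meant to be driven from a deploy or clean task; a hedged sketch of that wiring (none of it appears in the commit itself):

```js
// Hypothetical usage of the KeyCloakClient defined below.
const KeyCloakClient = require('./keycloak');

const registerDevUris = async (settings, oc) => {
  const kc = new KeyCloakClient(settings, oc);
  await kc.addUris();    // registers https://<appHost>/* on the public client
};

const unregisterDevUris = async (settings, oc) => {
  const kc = new KeyCloakClient(settings, oc);
  await kc.removeUris(); // drops the PR-specific redirect URIs again
};
```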
diff --git a/.pipeline/lib/keycloak.js b/.pipeline/lib/keycloak.js new file mode 100644 index 000000000..ea6a1b2b0 --- /dev/null +++ b/.pipeline/lib/keycloak.js @@ -0,0 +1,146 @@ +"use strict"; +const axios = require("axios"); +const _ = require("lodash"); +//code reference https://github.com/bcgov/HMCR/blob/0.7/.pipeline/lib/keycloak.js +module.exports = class KeyCloakClient { + constructor(settings, oc) { + this.phases = settings.phases; + this.options = settings.options; + this.oc = oc; + this.appHost = this.phases.dev.host; + } + + async init() { + + this.getSecrets(); + + this.apiTokenPath = `/auth/realms/${this.realmId}/protocol/openid-connect/token`; + this.tfrsPublicClientPath = `/auth/admin/realms/${this.realmId}/clients/${this.tfrsClientId}`; + + this.api = axios.create({ + baseURL: `https://${this.ssoHost}` + }); + + const token = await this.getAccessToken(); + + this.api.defaults.headers.common = { + Authorization: `Bearer ${token}` + }; + } + + getSecrets() { + const keycloakSecret = this.oc.raw("get", [ + "secret", + "tfrs-keycloak", + "-o", + "json" + ]); + const secret = JSON.parse(keycloakSecret.stdout).data; + + this.clientId = Buffer.from(secret.clientId, "base64").toString(); + this.clientSecret = Buffer.from(secret.clientSecret, "base64").toString(); + this.tfrsClientId = Buffer.from(secret.tfrsPublic, "base64").toString(); + this.realmId = Buffer.from(secret.realmId, "base64").toString(); + this.ssoHost = Buffer.from(secret.host, "base64").toString(); + + if (!this.clientId || !this.clientSecret || !this.tfrsClientId) + throw new Error( + "Unable to retrieve Keycloak service account info from OpenShift" + ); + } + + getAccessToken() { + + return this.api + .post(this.apiTokenPath, "grant_type=client_credentials", { + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + auth: { + username: this.clientId, + password: this.clientSecret + } + }) + .then(function(response) { + if (!response.data.access_token) + throw new Error( + "Unable to retrieve Keycloak service account access token" + ); + + return Promise.resolve(response.data.access_token); + }); + } + + async getUris() { + const response = await this.api.get(this.tfrsPublicClientPath); + const data = { ...response.data }; + const redirectUris = data.redirectUris; + + return { data, redirectUris }; + } + + async addUris() { + await this.init(); + + console.log("Attempting to add RedirectUri and WebOrigins"); + + const { data, redirectUris } = await this.getUris(); + + const putData = { id: data.id, clientId: data.clientId }; + + const hasRedirectUris = redirectUris.find(item => + item.includes(this.appHost) + ); + + if (!hasRedirectUris) { + redirectUris.push(`https://${this.appHost}/*`); + putData.redirectUris = redirectUris; + this.api + .put(this.tfrsPublicClientPath, putData) + .then(() => console.log("RedirectUri and WebOrigins added.")); + } else { + console.log("RedirectUri and WebOrigins add skipped."); + } + }
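+ + // removeUris mirrors addUris: it filters this deployment's host back out of + // the client's redirect URIs (presumably invoked from a clean task when a + // pull-request environment is torn down).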
+ async removeUris() { + await this.init(); + + console.log("Attempting to remove RedirectUri and WebOrigins"); + + const { data, redirectUris } = await this.getUris(); + + const putData = { id: data.id, clientId: data.clientId }; + + const hasRedirectUris = redirectUris.find(item => + item.includes(this.appHost) + ); + + if (hasRedirectUris) { + putData.redirectUris = redirectUris.filter( + item => !item.includes(this.appHost) + ); + this.api + .put(this.tfrsPublicClientPath, putData) + .then(() => console.log("RedirectUri and WebOrigins removed.")); + } else { + console.log("RedirectUri and WebOrigins remove skipped."); + } + } +}; diff --git a/.pipeline/npmw b/.pipeline/npmw new file mode 100755 index 000000000..1eed7c953 --- /dev/null +++ b/.pipeline/npmw @@ -0,0 +1,12 @@ +#!/bin/sh +set +x +type -t nvm && nvm deactivate +export NVM_DIR="$(git rev-parse --show-toplevel)/.nvm" +if [ ! -f "$NVM_DIR/nvm.sh" ]; then + mkdir -p "${NVM_DIR}" + curl -sSL -o- https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh | bash &>/dev/null +fi +source "$NVM_DIR/nvm.sh" &>/dev/null +METHOD=script nvm install --no-progress &>/dev/null +nvm use &>/dev/null +exec npm "$@" diff --git a/.pipeline/package.json b/.pipeline/package.json new file mode 100644 index 000000000..cd953c07f --- /dev/null +++ b/.pipeline/package.json @@ -0,0 +1,29 @@ +{ + "name": "pipeline", + "version": "1.0.0", + "description": "This is a pipeline script", + "engines": { + "node": ">=8" + }, + "scripts": { + "build": "node build.js", + "clean": "node clean.js", + "clean-nsps": "node clean-nsps.js", + "deploy": "node deploy.js", + "deploy-nsps": "node deploy-nsps.js", + "deploy-unittest": "node deploy-unittest.js", + "version": "echo \"node@$(node --version) ($(which node))\" && echo \"npm@$(npm --version) ($(which npm))\" && npm ls" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/bcgov/ocp-sso.git" + }, + "author": "", + "license": "Apache-2.0", + "dependencies": { + "@bcgov/gh-deploy": "^1.1.4", + "@bcgov/pipeline-cli": "^1.0.1-0", + "axios": "^0.21.1", + "lodash": "^4.17.15" + } +} diff --git a/.yo-rc.json b/.yo-rc.json new file mode 100644 index 000000000..4d5c45bb7 --- /dev/null +++ b/.yo-rc.json @@ -0,0 +1,27 @@ +{ + "@bcgov/generator-bcdk": { + "promptValues": { + "modules": { + "tfrs": { + "name": "tfrs", + "version": "1.0.0", + "path": ".", + "environments": { + "build": { + "namespace": "mem-tfrs-tools" + }, + "dev": { + "namespace": "mem-tfrs-dev" + }, + "test": { + "namespace": "mem-tfrs-test" + }, + "prod": { + "namespace": "mem-tfrs-prod" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 000000000..3e1994fd9 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,65 @@ +pipeline { + agent none + options { + disableResume() + } + stages { + stage('Build') { + agent { label 'build' } + steps { + script { + def filesInThisCommitAsString = sh(script:"git diff --name-only HEAD~1..HEAD | grep -v '^.jenkins/' || echo -n ''", returnStatus: false, returnStdout: true).trim() + def hasChangesInPath = (filesInThisCommitAsString.length() > 0) + echo "${filesInThisCommitAsString}" + if (!currentBuild.rawBuild.getCauses()[0].toString().contains('UserIdCause') && !hasChangesInPath){ + currentBuild.rawBuild.delete() + error("No changes detected outside of '.jenkins/'") + } + } + echo "Aborting all running jobs ..." + script { + abortAllPreviousBuildInProgress(currentBuild) + } + echo "Building ..." + sh "cd .pipeline && ./npmw ci && ./npmw run build -- --pr=${CHANGE_ID}" + } + } + stage('Deploy (DEV)') { + agent { label 'deploy' } + steps { + echo "Deploying ..."
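+ // npmw (added above) pins a Node.js version via nvm before delegating to npm, so every agent runs the same toolchain; --pr scopes the processed objects to this pull request.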
+ sh "cd .pipeline && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=dev" + } + } + stage('Deploy (TEST)') { + agent { label 'deploy' } + when { + expression { return env.CHANGE_TARGET == 'master';} + beforeInput true + } + input { + message "Should we continue with deployment to TEST?" + ok "Yes!" + } + steps { + echo "Deploying ..." + sh "cd .pipeline && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=test" + } + } + stage('Deploy (PROD)') { + agent { label 'deploy' } + when { + expression { return env.CHANGE_TARGET == 'master';} + beforeInput true + } + input { + message "Should we continue with deployment to TEST?" + ok "Yes!" + } + steps { + echo "Deploying ..." + sh "cd .pipeline && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=prod" + } + } + } +} \ No newline at end of file diff --git a/backend/requirements.txt b/backend/requirements.txt index 391b930e4..e62e98945 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -18,7 +18,7 @@ whitenoise==3.0 nose>=1.3.7 django-nose>=1.4.6 coverage>=4.4.2 -minio>=4.0.2 +minio==4.0.21 xlwt>=1.3.0 pika==0.12.0 pyjwt==1.6.4 diff --git a/backup-container-2.0.0/openshift/templates/backup/README.md b/backup-container-2.0.0/openshift/templates/backup/README.md index 86afb1343..b2acaaa68 100644 --- a/backup-container-2.0.0/openshift/templates/backup/README.md +++ b/backup-container-2.0.0/openshift/templates/backup/README.md @@ -57,7 +57,7 @@ oc -n mem-tfrs-prod process -f ./templates/backup/backup-deploy.json \ -p IMAGE_NAMESPACE=mem-tfrs-tools \ -p TAG_NAME=2.0.0 \ -p DATABASE_SERVICE_NAME=patroni-master-prod \ - -p DATABASE_NAME=zeva \ + -p DATABASE_NAME=tfrs \ -p DATABASE_DEPLOYMENT_NAME=patroni-prod \ -p DATABASE_USER_KEY_NAME=app-db-username \ -p DATABASE_PASSWORD_KEY_NAME=app-db-password \ @@ -72,8 +72,8 @@ oc -n mem-tfrs-prod process -f ./templates/backup/backup-deploy.json \ -p VERIFICATION_VOLUME_SIZE=2G \ -p VERIFICATION_VOLUME_CLASS=netapp-file-standard \ -p ENVIRONMENT_FRIENDLY_NAME='ZEVA Database Backup' \ - -p ENVIRONMENT_NAME=zeva-prod \ - -p MINIO_DATA_VOLUME_NAME=zeva-minio-prod | \ + -p ENVIRONMENT_NAME=tfrs-prod \ + -p MINIO_DATA_VOLUME_NAME=tfrs-minio-prod | \ oc create -f - -n mem-tfrs-prod 7. If need to remove, only keeps configmap/backup-conf and the the nfs storage oc -n mem-tfrs-prod delete secret/patroni-backup secret/ftp-secret dc/patroni-backup pvc/backup-verification diff --git a/backup-container/config/backup.conf b/backup-container/config/backup.conf deleted file mode 100644 index 150d879d4..000000000 --- a/backup-container/config/backup.conf +++ /dev/null @@ -1,13 +0,0 @@ -# ========================================================= -# List the databases you want backed up here. -# Databases will be backed up in the order they are listed. -# -# The entries must be in one of the following forms: -# - / -# - :/ -# -# Examples: -# - postgresql/my_database -# - postgresql:5432/my_database -# -------------------------------------------------------- -postgresql:tfrs \ No newline at end of file diff --git a/backup-container/docker/Dockerfile b/backup-container/docker/Dockerfile deleted file mode 100644 index fbf468425..000000000 --- a/backup-container/docker/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -# This image provides a postgres installation from which to run backups -FROM registry.access.redhat.com/rhscl/postgresql-10-rhel7 - -# Set the workdir to be root -WORKDIR / - -# Load the backup script into the container (must be executable). 
-COPY backup.sh / -COPY webhook-template.json / - -# Set the default CMD to print the usage of the language image. -CMD sh /backup.sh - diff --git a/backup-container/docker/backup.sh b/backup-container/docker/backup.sh deleted file mode 100755 index f15de840f..000000000 --- a/backup-container/docker/backup.sh +++ /dev/null @@ -1,762 +0,0 @@ - #!/bin/bash - -# ================================================================================================================= -# Usage: -# ----------------------------------------------------------------------------------------------------------------- -usage () { - cat <<-EOF - - Automated backup script for Postgresql databases - - Refer to the project documentation for additional details on how to use this script. - - https://github.com/BCDevOps/backup-container - - Usage: - $0 [options] - - Standard Options: - ======== - -h prints this usage documentation. - - -1 run once. - Performs a single set of backups and exits. - - -l lists existing backups. - Great for listing the available backups for a restore. - - -c lists the current configuration settings and exits. - Great for confirming the current settings, and listing the databases included in the backup schedule. - - Restore Options: - ======== - The restore process performs the following basic operations: - - Drop and recreate the selected database. - - Grant the database user access to the recreated database - - Restore the database from the selected backup file - - Have the 'Admin' (postgres) password handy; the script will ask you for it during the restore. - - When in restore mode, the script will list the settings it will use and wait for your confirmation to continue. - This provides you with an opportunity to ensure you have selected the correct database and backup file - for the job. - - Restore mode will allow you to restore a database to a different location (host, and/or database name) provided - it can contact the host and you can provide the appropriate credentials. If you choose to do this, you will need - to provide a file filter using the '-f' option, since the script will likely not be able to determine which backup - file you would want to use. This functionality provides a convenient way to test your backups or migrate your - database/data without affecting the original database. - - -r <DatabaseSpec/>; in the form <Hostname/>/<DatabaseName/>, or <Hostname/>:<Port/>/<DatabaseName/> - Triggers restore mode and starts restore mode on the specified database. - - Example: - $0 -r postgresql:5432/TheOrgBook_Database - - Would start the restore process on the database using the most recent backup for the database. - - -f <BackupFileFilter/>; the filter to use to find/identify the backup file to restore. - This can be a full or partial file specification. When only part of a filename is specified the restore process - attempts to find the most recent backup matching the filter. - If not specified, the restore process attempts to locate the most recent backup file for the specified database. - - Examples: - $0 -r wallet-db/test_db -f wallet-db-tob_holder - - Would try to find the latest backup matching on the partial file name provided. - - $0 -r wallet-db/test_db -f /backups/daily/2018-11-07/wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz - - Would use the specific backup file. - - $0 -r wallet-db/test_db -f wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz - - Would use the specific backup file regardless of its location in the root backup folder.
- -EOF -exit 1 -} -# ================================================================================================================= - -# ================================================================================================================= -# Functions: -# ----------------------------------------------------------------------------------------------------------------- -echoRed (){ - _msg=${1} - _red='\e[31m' - _nc='\e[0m' # No Color - echo -e "${_red}${_msg}${_nc}" -} - -echoYellow (){ - _msg=${1} - _yellow='\e[33m' - _nc='\e[0m' # No Color - echo -e "${_yellow}${_msg}${_nc}" -} - -echoBlue (){ - _msg=${1} - _blue='\e[34m' - _nc='\e[0m' # No Color - echo -e "${_blue}${_msg}${_nc}" -} - -echoGreen (){ - _msg=${1} - _green='\e[32m' - _nc='\e[0m' # No Color - echo -e "${_green}${_msg}${_nc}" -} - -echoMagenta (){ - _msg=${1} - _magenta='\e[35m' - _nc='\e[0m' # No Color - echo -e "${_magenta}${_msg}${_nc}" -} - -logInfo(){ - ( - infoMsg="${1}" - echo "${infoMsg}" - postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \ - "${ENVIRONMENT_NAME}" \ - "INFO" \ - "${infoMsg}" - ) -} - -logError(){ - ( - errorMsg="${1}" - echoRed "[!!ERROR!!] - ${errorMsg}" - postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \ - "${ENVIRONMENT_NAME}" \ - "ERROR" \ - "${errorMsg}" - ) -} - -getWebhookPayload(){ - _payload=$(eval "cat <<-EOF -$(<${WEBHOOK_TEMPLATE}) -EOF -") - echo "${_payload}" -} - -postMsgToWebhook(){ - ( - if [ -z "${WEBHOOK_URL}" ] && [ -f ${WEBHOOK_TEMPLATE} ]; then - return 0 - fi - - projectFriendlyName=${1} - projectName=${2} - statusCode=${3} - message=${4} - curl -s -X POST -H 'Content-Type: application/json' --data "$(getWebhookPayload)" "${WEBHOOK_URL}" > /dev/null - ) -} - -waitForAnyKey() { - read -n1 -s -r -p $'\e[33mWould you like to continue?\e[0m Press Ctrl-C to exit, or any other key to continue ...' key - echo -e \\n - - # If we get here the user did NOT press Ctrl-C ... - return 0 -} - -runOnce() { - if [ ! -z "${RUN_ONCE}" ]; then - return 0 - else - return 1 - fi -} - -getDatabaseName(){ - ( - _databaseSpec=${1} - _databaseName=$(echo ${_databaseSpec} | sed 's~^.*/\(.*$\)~\1~') - echo "${_databaseName}" - ) -} - -getPort(){ - ( - _databaseSpec=${1} - _port=$(echo ${_databaseSpec} | sed "s~\(^.*:\)\(.*\)/\(.*$\)~\2~;s~${_databaseSpec}~~g;") - if [ -z ${_port} ]; then - _port=${DEFAULT_PORT} - fi - echo "${_port}" - ) -} - -getHostname(){ - ( - _databaseSpec=${1} - _hostname=$(echo ${_databaseSpec} | sed 's~\(^.*\)/.*$~\1~;s~\(^.*\):.*$~\1~;') - echo "${_hostname}" - ) -} - -getHostPrefix(){ - ( - _hostname=${1} - _hostPrefix=$(echo ${_hostname} | tr '[:lower:]' '[:upper:]' | sed "s~-~_~g") - echo "${_hostPrefix}" - ) -} - -getHostUserParam(){ - ( - _hostname=${1} - _hostUser=$(getHostPrefix ${_hostname})_USER - echo "${_hostUser}" - ) -} - -getHostPasswordParam(){ - ( - _hostname=${1} - _hostPassword=$(getHostPrefix ${_hostname})_PASSWORD - echo "${_hostPassword}" - ) -} - -readConf(){ - ( - if [ -f ${BACKUP_CONF} ]; then - # Read in the config minus any comments ... - echo "Reading backup config from ${BACKUP_CONF} ..." >&2 - _value=$(sed '/^[[:blank:]]*#/d;s/#.*//' ${BACKUP_CONF}) - fi - - if [ -z "${_value}" ]; then - # Backward compatibility - echo "Reading backup config from environment variables ..." >&2 - _value="${DATABASE_SERVICE_NAME}:${DEFAULT_PORT}/${POSTGRESQL_DATABASE}" - fi - echo "${_value}" - ) -} - -makeDirectory() -{ - ( - # Creates directories with permissions recursively.
- # ${1} is the directory to be created - # Inspired by https://unix.stackexchange.com/questions/49263/recursive-mkdir - directory="${1}" - - test $# -eq 1 || { echo "Function 'makeDirectory' can create only one directory (with its parent directories)."; exit 1; } - test -d "${directory}" && return 0 - test -d "$(dirname "${directory}")" || { makeDirectory "$(dirname "${directory}")" || return 1; } - test -d "${directory}" || { mkdir --mode=g+w "${directory}" || return 1; } - return 0 - ) -} - -finalizeBackup(){ - ( - _filename=${1} - _inProgressFilename="${_filename}${IN_PROGRESS_BACKUP_FILE_EXTENSION}" - _finalFilename="${_filename}${BACKUP_FILE_EXTENSION}" - - if [ -f ${_inProgressFilename} ]; then - mv "${_inProgressFilename}" "${_finalFilename}" - logInfo "Backup written to ${_finalFilename} ..." - fi - ) -} - -ftpBackup(){ - ( - if [ -z "${FTP_URL}" ] ; then - return 0 - fi - - _filename=${1} - _filenameWithExtension="${_filename}${BACKUP_FILE_EXTENSION}" - echo "Transferring ${_filenameWithExtension} to ${FTP_URL}" - curl --ftp-ssl -T ${_filenameWithExtension} --user ${FTP_USER}:${FTP_PASSWORD} ${FTP_URL} - - if [ ${?} -eq 0 ]; then - logInfo "Successfully transferred ${_filenameWithExtension} to the FTP server" - else - logError "Failed to transfer ${_filenameWithExtension} with the exit code ${?}" - fi - ) -} - -listExistingBackups(){ - ( - _backupDir=${1:-${ROOT_BACKUP_DIR}} - echoMagenta "\n================================================================================================================================" - echoMagenta "Current Backups:" - echoMagenta "--------------------------------------------------------------------------------------------------------------------------------" - du -ah --time ${_backupDir} - echoMagenta "================================================================================================================================\n" - ) -} - -getNumBackupsToRetain(){ - ( - _count=0 - _backupType=$(getBackupType) - - case "${_backupType}" in - daily) - _count=${DAILY_BACKUPS} - ;; - weekly) - _count=${WEEKLY_BACKUPS} - ;; - monthly) - _count=${MONTHLY_BACKUPS} - ;; - *) - _count=${NUM_BACKUPS} - ;; - esac - - echo "${_count}" - ) -} - -pruneBackups(){ - ( - _backupDir=${1} - _databaseSpec=${2} - _pruneDir="$(dirname "${_backupDir}")" - _numBackupsToRetain=$(getNumBackupsToRetain) - _coreFilename=$(generateCoreFilename ${_databaseSpec}) - - let _index=${_numBackupsToRetain}+1 - _filesToPrune=$(find ${_pruneDir}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort -r | tail -n +${_index} | sed 's~^.* \(.*$\)~\1~') - - if [ ! -z "${_filesToPrune}" ]; then - echoYellow "\nPruning ${_coreFilename} backups from ${_pruneDir} ..." - echo "${_filesToPrune}" | xargs rm -rfvd - - # Quietly delete any empty directories that are left behind ... - find ${ROOT_BACKUP_DIR} -type d -empty -delete > /dev/null 2>&1 - fi - ) -} - -getUsername(){ - ( - _databaseSpec=${1} - _hostname=$(getHostname ${_databaseSpec}) - _paramName=$(getHostUserParam ${_hostname}) - # Backward compatibility ... - _username="${!_paramName:-${POSTGRESQL_USER}}" - echo ${_username} - ) -} - -getPassword(){ - ( - _databaseSpec=${1} - _hostname=$(getHostname ${_databaseSpec}) - _paramName=$(getHostPasswordParam ${_hostname}) - # Backward compatibility ...
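- # ${!_paramName} is bash indirect expansion: it resolves e.g. WALLET_DB_PASSWORD - # when that variable is set, and falls back to the generic POSTGRESQL_PASSWORD otherwise.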
- _password="${!_paramName:-${POSTGRESQL_PASSWORD}}" - echo ${_password} - ) -} - -backupDatabase(){ - ( - _databaseSpec=${1} - _fileName=${2} - - _hostname=$(getHostname ${_databaseSpec}) - _port=$(getPort ${_databaseSpec}) - _database=$(getDatabaseName ${_databaseSpec}) - _username=$(getUsername ${_databaseSpec}) - _password=$(getPassword ${_databaseSpec}) - _backupFile="${_fileName}${IN_PROGRESS_BACKUP_FILE_EXTENSION}" - - echoGreen "\nBacking up ${_databaseSpec} ..." - - export PGPASSWORD=${_password} - SECONDS=0 - touchBackupFile "${_backupFile}" - - pg_dump -Fp -h "${_hostname}" -p "${_port}" -U "${_username}" "${_database}" | gzip > ${_backupFile} - # Get the status code from pg_dump. ${?} would provide the status of the last command, gzip in this case. - _rtnCd=${PIPESTATUS[0]} - - if (( ${_rtnCd} != 0 )); then - rm -rfvd ${_backupFile} - fi - - duration=$SECONDS - echo "Elapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s - Status Code: ${_rtnCd}" - return ${_rtnCd} - ) -} - -touchBackupFile() { - ( - # For safety, make absolutely certain the directory and file exist. - # The pruning process removes empty directories, so if there is an error - # during a backup the backup directory could be deleted. - _backupFile=${1} - _backupDir="${_backupFile%/*}" - makeDirectory ${_backupDir} && touch ${_backupFile} - ) -} - -restoreDatabase(){ - ( - _databaseSpec=${1} - _fileName=${2} - - # If no backup file was specified, find the most recent for the database. - # Otherwise treat the value provided as a filter to find the most recent backup file matching the filter. - if [ -z "${_fileName}" ]; then - _coreFilename=$(generateCoreFilename ${_databaseSpec}) - _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~') - else - _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_fileName} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~') - fi - - echoBlue "\nRestoring database ..." - echo -e "\nSettings:" - echo "- Database: ${_databaseSpec}" - - if [ ! -z "${_fileName}" ]; then - echo -e "- Backup file: ${_fileName}\n" - else - echoRed "- Backup file: No backup file found or specified. Cannot continue with the restore.\n" - exit 0 - fi - waitForAnyKey - - _hostname=$(getHostname ${_databaseSpec}) - _port=$(getPort ${_databaseSpec}) - _database=$(getDatabaseName ${_databaseSpec}) - _username=$(getUsername ${_databaseSpec}) - _password=$(getPassword ${_databaseSpec}) - - # Ask for the Admin Password for the database - _msg="Admin password (${_databaseSpec}):" - _yellow='\033[1;33m' - _nc='\033[0m' # No Color - _message=$(echo -e "${_yellow}${_msg}${_nc}") - read -r -s -p $"${_message}" _adminPassword - echo -e "\n" - - export PGPASSWORD=${_adminPassword} - SECONDS=0 - - # Drop - psql -h "${_hostname}" -p "${_port}" -ac "DROP DATABASE \"${_database}\";" - echo - - # Create - psql -h "${_hostname}" -p "${_port}" -ac "CREATE DATABASE \"${_database}\";" - echo - - # Grant User Access - psql -h "${_hostname}" -p "${_port}" -ac "GRANT ALL ON DATABASE \"${_database}\" TO \"${_username}\";" - echo - - # Restore - echo "Restoring from backup ..." 
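- # Stream the compressed dump back through gunzip into psql; backupDatabase - # wrote it via pg_dump | gzip, so no intermediate file is needed.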
- gunzip -c "${_fileName}" | psql -h "${_hostname}" -p "${_port}" -d "${_database}" - - duration=$SECONDS - echo -e "Restore complete - Elapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s"\\n - - # List tables - psql -h "${_hostname}" -p "${_port}" -d "${_database}" -c "\d" - ) -} - -isLastDayOfMonth(){ - ( - _date=${1:-$(date)} - _day=$(date -d "${_date}" +%-d) - _month=$(date -d "${_date}" +%-m) - _lastDayOfMonth=$(date -d "${_month}/1 + 1 month - 1 day" "+%-d") - - if (( ${_day} == ${_lastDayOfMonth} )); then - return 0 - else - return 1 - fi - ) -} - -isLastDayOfWeek(){ - ( - # We're calling Sunday the last dayt of the week in this case. - _date=${1:-$(date)} - _dayOfWeek=$(date -d "${_date}" +%u) - - if (( ${_dayOfWeek} == 7 )); then - return 0 - else - return 1 - fi - ) -} - -getBackupType(){ - ( - _backupType="" - if rollingStrategy; then - if isLastDayOfMonth && (( "${MONTHLY_BACKUPS}" > 0 )); then - _backupType="monthly" - elif isLastDayOfWeek; then - _backupType="weekly" - else - _backupType="daily" - fi - fi - echo "${_backupType}" - ) -} - -createBackupFolder(){ - ( - _backupTypeDir="$(getBackupType)" - if [ ! -z "${_backupTypeDir}" ]; then - _backupTypeDir=${_backupTypeDir}/ - fi - - _backupDir="${ROOT_BACKUP_DIR}${_backupTypeDir}`date +\%Y-\%m-\%d`/" - - # Don't actually create the folder if we're just printing the configuation. - if [ -z "${PRINT_CONFIG}" ]; then - echo "Making backup directory ${_backupDir} ..." >&2 - if ! makeDirectory ${_backupDir}; then - echo $(logError "Failed to create backup directory ${_backupDir}.") >&2 - exit 1; - fi; - fi - - echo ${_backupDir} - ) -} - -generateFilename(){ - ( - _backupDir=${1} - _databaseSpec=${2} - _coreFilename=$(generateCoreFilename ${_databaseSpec}) - _filename="${_backupDir}${_coreFilename}_`date +\%Y-\%m-\%d_%H-%M-%S`" - echo ${_filename} - ) -} - -generateCoreFilename(){ - ( - _databaseSpec=${1} - _hostname=$(getHostname ${_databaseSpec}) - _database=$(getDatabaseName ${_databaseSpec}) - _coreFilename="${_hostname}-${_database}" - echo ${_coreFilename} - ) -} - -rollingStrategy(){ - if [[ "${BACKUP_STRATEGY}" == "rolling" ]] && (( "${WEEKLY_BACKUPS}" > 0 )) && (( "${MONTHLY_BACKUPS}" >= 0 )); then - return 0 - else - return 1 - fi -} - -dailyStrategy(){ - if [[ "${BACKUP_STRATEGY}" == "daily" ]] || (( "${WEEKLY_BACKUPS}" <= 0 )); then - return 0 - else - return 1 - fi -} - -listSettings(){ - _backupDirectory=${1} - _databaseList=${2} - _yellow='\e[33m' - _nc='\e[0m' # No Color - _notConfigured="${_yellow}not configured${_nc}" - - echo -e \\n"Settings:" - if runOnce; then - echo "- Run mode: Once" - else - echo "- Run mode: Continuous" - fi - if rollingStrategy; then - echo "- Backup strategy: rolling" - fi - if dailyStrategy; then - echo "- Backup strategy: daily" - fi - if ! rollingStrategy && ! 
dailyStrategy; then - echoYellow "- Backup strategy: Unknown backup strategy; ${BACKUP_STRATEGY}" - _configurationError=1 - fi - backupType=$(getBackupType) - if [ -z "${backupType}" ]; then - echo "- Backup type: flat daily" - else - echo "- Backup type: ${backupType}" - fi - echo "- Number of each backup to retain: $(getNumBackupsToRetain)" - echo "- Backup folder: ${_backupDirectory}" - echo "- Databases:" - for _db in ${_databaseList}; do - echo " - ${_db}" - done - echo - if [ -z "${FTP_URL}" ]; then - echo -e "- FTP server: ${_notConfigured}" - else - echo "- FTP server: ${FTP_URL}" - fi - if [ -z "${WEBHOOK_URL}" ]; then - echo -e "- Webhook Endpoint: ${_notConfigured}" - else - echo "- Webhook Endpoint: ${WEBHOOK_URL}" - fi - if [ -z "${ENVIRONMENT_FRIENDLY_NAME}" ]; then - echo -e "- Environment Friendly Name: ${_notConfigured}" - else - echo -e "- Environment Friendly Name: ${ENVIRONMENT_FRIENDLY_NAME}" - fi - if [ -z "${ENVIRONMENT_NAME}" ]; then - echo -e "- Environment Name (Id): ${_notConfigured}" - else - echo "- Environment Name (Id): ${ENVIRONMENT_NAME}" - fi - - if [ ! -z "${_configurationError}" ]; then - logError "\nConfiguration error! The script will exit." - sleep 5 - exit 1 - fi - echo -} -# ====================================================================================== - -# ====================================================================================== -# Set Defaults -# -------------------------------------------------------------------------------------- -export BACKUP_FILE_EXTENSION=".sql.gz" -export IN_PROGRESS_BACKUP_FILE_EXTENSION=".sql.gz.in_progress" -export DEFAULT_PORT=${POSTGRESQL_PORT_NUM:-5432} -export DATABASE_SERVICE_NAME=${DATABASE_SERVICE_NAME:-postgresql} -export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-my_postgres_db} - -# Supports: -# - daily -# - rolling -export BACKUP_STRATEGY=$(echo "${BACKUP_STRATEGY:-daily}" | tr '[:upper:]' '[:lower:]') -export BACKUP_PERIOD=${BACKUP_PERIOD:-1d} -export ROOT_BACKUP_DIR=${ROOT_BACKUP_DIR:-${BACKUP_DIR:-/backups/}} -export BACKUP_CONF=${BACKUP_CONF:-backup.conf} - -# Used to prune the total number of backups when using the daily backup strategy. -# Default provides for one full month of backups -export NUM_BACKUPS=${NUM_BACKUPS:-31} - -# Used to prune the total number of backups when using the rolling backup strategy. -# Defaults provide for: -# - A week's worth of daily backups -# - A month's worth of weekly backups -# - The previous month's backup -export DAILY_BACKUPS=${DAILY_BACKUPS:-6} -export WEEKLY_BACKUPS=${WEEKLY_BACKUPS:-4} -export MONTHLY_BACKUPS=${MONTHLY_BACKUPS:-1} - -# Webhook defaults -WEBHOOK_TEMPLATE=${WEBHOOK_TEMPLATE:-webhook-template.json} -# ====================================================================================== - -# ================================================================================================================= -# Initialization: -# ----------------------------------------------------------------------------------------------------------------- -while getopts clr:f:1h FLAG; do - case $FLAG in - c) - export PRINT_CONFIG=1 - ;; - l) - listExistingBackups ${ROOT_BACKUP_DIR} - exit 0 - ;; - r) - # Trigger restore mode ... - export _restoreDatabase=${OPTARG} - ;; - f) - # Optionally specify the backup file to restore from ... - export _fromBackup=${OPTARG} - ;; - 1) - export RUN_ONCE=1 - ;; - h) - usage - ;; - \?)
- echo -e \\n"Invalid option: -${OPTARG}"\\n - usage - ;; - esac -done -shift $((OPTIND-1)) -# ================================================================================================================= - -# ================================================================================================================= -# Main Script -# ----------------------------------------------------------------------------------------------------------------- -# If we are in restore mode, restore the database and exit. -if [ ! -z "${_restoreDatabase}" ]; then - restoreDatabase "${_restoreDatabase}" "${_fromBackup}" - exit 0 -fi - -# Otherwise enter backup mode. -while true; do - if [ -z "${PRINT_CONFIG}" ]; then - echoBlue "\nStarting backup process ..." - else - echoBlue "\nListing configuration settings ..." - fi - - databases=$(readConf) - backupDir=$(createBackupFolder) - listSettings "${backupDir}" "${databases}" - - if [ ! -z "${PRINT_CONFIG}" ]; then - exit 0 - fi - - for database in ${databases}; do - filename=$(generateFilename "${backupDir}" "${database}") - if backupDatabase "${database}" "${filename}"; then - finalizeBackup "${filename}" - ftpBackup "${filename}" - pruneBackups "${backupDir}" "${database}" - else - logError "Failed to backup ${database}." - fi - done - - listExistingBackups ${ROOT_BACKUP_DIR} - - if runOnce; then - echoGreen "Single backup run complete.\n" - exit 0 - fi - - echoYellow "Sleeping for ${BACKUP_PERIOD} ...\n" - sleep ${BACKUP_PERIOD} -done -# ================================================================================================================= diff --git a/backup-container/docs/ExampleLog.md b/backup-container/docs/ExampleLog.md deleted file mode 100644 index e42df1efb..000000000 --- a/backup-container/docs/ExampleLog.md +++ /dev/null @@ -1,50 +0,0 @@ - -## An example of the backup container in action -``` -Starting backup process ... -Reading backup config from backup.conf ... -Making backup directory /backups/daily/2018-10-04/ ... - -Settings: -- Backup strategy: rolling -- Backup type: daily -- Number of each backup to retain: 6 -- Backup folder: /backups/daily/2018-10-04/ -- Databases: - - wallet-db:5432/tob_verifier - - postgresql:5432/TheOrgBook_Database - - wallet-db:5432/tob_holder - -Backing up wallet-db:5432/tob_verifier ... -Elapsed time: 0h:0m:1s -Backup written to /backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_22-49-39.sql.gz ... - -Backing up postgresql:5432/TheOrgBook_Database ... -Elapsed time: 0h:2m:48s -Backup written to /backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_22-49-41.sql.gz ... - -Backing up wallet-db:5432/tob_holder ... -Elapsed time: 0h:24m:34s -Backup written to /backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_22-52-29.sql.gz ... 
- -================================================================================================================================ -Current Backups: --------------------------------------------------------------------------------------------------------------------------------- -4.0K 2018-10-04 17:10 /backups/.trashcan/internal_op -8.0K 2018-10-04 17:10 /backups/.trashcan -3.5K 2018-10-04 17:17 /backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_17-17-02.sql.gz -687M 2018-10-04 17:20 /backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_17-17-03.sql.gz -9.1G 2018-10-04 17:44 /backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_17-20-06.sql.gz -3.5K 2018-10-04 17:48 /backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_17-48-42.sql.gz -687M 2018-10-04 17:51 /backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_17-48-44.sql.gz -9.1G 2018-10-04 18:16 /backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_17-51-36.sql.gz -3.5K 2018-10-04 22:49 /backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_22-49-39.sql.gz -687M 2018-10-04 22:52 /backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_22-49-41.sql.gz -9.1G 2018-10-04 23:17 /backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_22-52-29.sql.gz -30G 2018-10-04 23:17 /backups/daily/2018-10-04 -30G 2018-10-04 23:17 /backups/daily -30G 2018-10-04 23:17 /backups/ -================================================================================================================================ - -Sleeping for 1d ... -``` \ No newline at end of file diff --git a/backup-container/docs/SampleRocketChatMessage.png b/backup-container/docs/SampleRocketChatMessage.png deleted file mode 100644 index 823559ff7..000000000 Binary files a/backup-container/docs/SampleRocketChatMessage.png and /dev/null differ diff --git a/backup-container/openshift/backup-conf-configmap_DeploymentConfig.json b/backup-container/openshift/backup-conf-configmap_DeploymentConfig.json deleted file mode 100644 index f9d49bf67..000000000 --- a/backup-container/openshift/backup-conf-configmap_DeploymentConfig.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "kind": "ConfigMap", - "apiVersion": "v1", - "metadata": { - "name": "backup-conf", - "creationTimestamp": null - }, - "data": { - "backup.conf": "# =========================================================\n# List the databases you want backed up here.\n# Databases will be backed up in the order they are listed.\n#\n# The entries must be in one of the following forms:\n# - \u003cHostname/\u003e/\u003cDatabaseName/\u003e\n# - \u003cHostname/\u003e:\u003cPort/\u003e/\u003cDatabaseName/\u003e\n#\n# Examples:\n# - postgresql/my_database\n# - postgresql:5432/my_database\n# --------------------------------------------------------\npostgresql/tfrs" - } -} diff --git a/backup-container/openshift/backup-deploy.overrides.param b/backup-container/openshift/backup-deploy.overrides.param deleted file mode 100644 index a22775263..000000000 --- a/backup-container/openshift/backup-deploy.overrides.param +++ /dev/null @@ -1,4 +0,0 @@ -FTP_URL= -FTP_USER= -FTP_PASSWORD= -WEBHOOK_URL= diff --git a/backup-container/openshift/backup-deploy.overrides.sh b/backup-container/openshift/backup-deploy.overrides.sh deleted file mode 100755 index e024cbd47..000000000 --- a/backup-container/openshift/backup-deploy.overrides.sh +++ /dev/null @@ -1,95 +0,0 @@ -# ======================================================================== -# Special Deployment Parameters needed for the backup instance. 
-# ------------------------------------------------------------------------ -# The generated config map is used to update the Backup configuration. -# ======================================================================== - -CONFIG_MAP_NAME=backup-conf -SOURCE_FILE=../config/backup.conf -OUTPUT_FORMAT=json -OUTPUT_FILE=backup-conf-configmap_DeploymentConfig.json - -generateConfigMap() { - _config_map_name=${1} - _source_file=${2} - _output_format=${3} - _output_file=${4} - if [ -z "${_config_map_name}" ] || [ -z "${_source_file}" ] || [ -z "${_output_format}" ] || [ -z "${_output_file}" ]; then - echo -e \\n"generateConfigMap; Missing parameter!"\\n - exit 1 - fi - - oc create configmap ${_config_map_name} --from-file ${_source_file} --dry-run -o ${_output_format} > ${_output_file} -} - -printStatusMsg(){ - ( - _msg=${1} - _yellow='\033[1;33m' - _nc='\033[0m' # No Color - printf "\n${_yellow}${_msg}\n${_nc}" >&2 - ) -} - -readParameter(){ - ( - _msg=${1} - _paramName=${2} - _defaultValue=${3} - _encode=${4} - - _yellow='\033[1;33m' - _nc='\033[0m' # No Color - _message=$(echo -e "\n${_yellow}${_msg}\n${_nc}") - - read -r -p $"${_message}" ${_paramName} - - writeParameter "${_paramName}" "${_defaultValue}" "${_encode}" - ) -} - -writeParameter(){ - ( - _paramName=${1} - _defaultValue=${2} - _encode=${3} - - if [ -z "${_encode}" ]; then - echo "${_paramName}=${!_paramName:-${_defaultValue}}" >> ${_overrideParamFile} - else - # The key/value pair must be contained on a single line - _encodedValue=$(echo -n "${!_paramName:-${_defaultValue}}"|base64 -w 0) - echo "${_paramName}=${_encodedValue}" >> ${_overrideParamFile} - fi - ) -} - -initialize(){ - # Define the name of the override param file. - _scriptName=$(basename ${0%.*}) - export _overrideParamFile=${_scriptName}.param - - printStatusMsg "Initializing ${_scriptName} ..." - - # Remove any previous version of the file ... - if [ -f ${_overrideParamFile} ]; then - printStatusMsg "Removing previous copy of ${_overrideParamFile} ..." - rm -f ${_overrideParamFile} - fi -} - -initialize - -generateConfigMap "${CONFIG_MAP_NAME}" "${SOURCE_FILE}" "${OUTPUT_FORMAT}" "${OUTPUT_FILE}" - -# Get the FTP URL and credentials -readParameter "FTP_URL - Please provide the FTP server URL. If left blank, the FTP backup feature will be disabled:" FTP_URL "" -readParameter "FTP_USER - Please provide the FTP user name:" FTP_USER "" -readParameter "FTP_PASSWORD - Please provide the FTP password:" FTP_PASSWORD "" - -# Get the webhook URL -readParameter "WEBHOOK_URL - Please provide the webhook endpoint URL. 
If left blank, the webhook integration feature will be disabled:" WEBHOOK_URL "" - -SPECIALDEPLOYPARMS="--param-file=${_overrideParamFile}" -echo ${SPECIALDEPLOYPARMS} - diff --git a/backup-container/openshift/templates/backup/backup-build.json b/backup-container/openshift/templates/backup/backup-build.json deleted file mode 100644 index cb1843e6c..000000000 --- a/backup-container/openshift/templates/backup/backup-build.json +++ /dev/null @@ -1,127 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}-build-template", - "creationTimestamp": null - }, - "objects": [ - { - "kind": "ImageStream", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}" - } - }, - { - "kind": "BuildConfig", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "labels": { - "app": "${NAME}" - } - }, - "spec": { - "triggers": [ - { - "type": "ImageChange" - }, - { - "type": "ConfigChange" - } - ], - "runPolicy": "Serial", - "source": { - "type": "Git", - "git": { - "uri": "${GIT_REPO_URL}", - "ref": "${GIT_REF}" - }, - "contextDir": "${SOURCE_CONTEXT_DIR}" - }, - "strategy": { - "type": "Docker", - "dockerStrategy": { - "from": { - "kind": "${SOURCE_IMAGE_KIND}", - "name": "${SOURCE_IMAGE_NAME}:${SOURCE_IMAGE_TAG}" - }, - "dockerfilePath": "${DOCKER_FILE_PATH}" - } - }, - "output": { - "to": { - "kind": "ImageStreamTag", - "name": "${NAME}:${OUTPUT_IMAGE_TAG}" - } - } - } - } - ], - "parameters": [ - { - "name": "NAME", - "displayName": "Name", - "description": "The name assigned to all of the resources defined in this template.", - "required": true, - "value": "backup" - }, - { - "name": "GIT_REPO_URL", - "displayName": "Git Repo URL", - "description": "The URL to your GIT repo.", - "required": true, - "value": "https://github.com/BCDevOps/backup-container.git" - }, - { - "name": "GIT_REF", - "displayName": "Git Reference", - "description": "The git reference or branch.", - "required": true, - "value": "master" - }, - { - "name": "SOURCE_CONTEXT_DIR", - "displayName": "Source Context Directory", - "description": "The source context directory.", - "required": false, - "value": "/docker" - }, - { - "name": "SOURCE_IMAGE_KIND", - "displayName": "Source Image Kind", - "description": "The 'kind' (type) of the source image; typically ImageStreamTag, or DockerImage.", - "required": true, - "value": "DockerImage" - }, - { - "name": "SOURCE_IMAGE_NAME", - "displayName": "Source Image Name", - "description": "The name of the source image.", - "required": true, - "value": "registry.access.redhat.com/rhscl/postgresql-10-rhel7" - }, - { - "name": "SOURCE_IMAGE_TAG", - "displayName": "Source Image Tag", - "description": "The tag of the source image.", - "required": true, - "value": "latest" - }, - { - "name": "DOCKER_FILE_PATH", - "displayName": "Docker File Path", - "description": "The path to the docker file defining the build.", - "required": false, - "value": "Dockerfile" - }, - { - "name": "OUTPUT_IMAGE_TAG", - "displayName": "Output Image Tag", - "description": "The tag given to the built image.", - "required": true, - "value": "latest" - } - ] -} \ No newline at end of file diff --git a/backup-container/openshift/templates/backup/backup-cronjob.yaml b/backup-container/openshift/templates/backup/backup-cronjob.yaml deleted file mode 100644 index ab04d713a..000000000 --- a/backup-container/openshift/templates/backup/backup-cronjob.yaml +++ /dev/null @@ -1,228 +0,0 @@ ---- -kind: "Template" -apiVersion: "v1" -metadata: - name: "${JOB_NAME}-cronjob-template" -
annotations: - description: "Scheduled Task to perform a Database Backup" - tags: "cronjob,backup" -parameters: - - name: "JOB_NAME" - displayName: "Job Name" - description: "Name of the Scheduled Job to Create." - value: "backup" - required: true - - name: "JOB_PERSISTENT_STORAGE_NAME" - displayName: "Backup Persistent Storage Name" - description: "Pre-Created PVC to use for backup target" - value: "bk-devex-von-tools-a9vlgd1jpsg1" - required: true - - name: "SCHEDULE" - displayName: "Cron Schedule" - description: "Cron Schedule to Execute the Job (in UTC)" -# Currently targeting 5:00 AM Daily - value: "0 13 * * *" - required: true - - name: "SOURCE_IMAGE_NAME" - displayName: "Source Image Name" - description: "The name of the image to use for this resource." - required: true - value: "backup" - - name: "IMAGE_NAMESPACE" - displayName: "Image Namespace" - description: "The namespace of the OpenShift project containing the imagestream for the application." - required: true - value: "backup-container" - - name: "TAG_NAME" - displayName: "Environment TAG name" - description: "The TAG name for this environment, e.g., dev, test, prod" - required: true - value: "dev" - - name: "DATABASE_SERVICE_NAME" - displayName: "Database Service Name" - description: "The name of the database service." - required: true - value: "postgresql" - - name: "DATABASE_DEFAULT_PORT" - displayName: "Database Service Port" - description: "The configured port for the database service" - required: true - value: "5432" - - name: "DATABASE_NAME" - displayName: "Database Name" - description: "The name of the database." - required: true - value: "MyDatabase" - - name: "DATABASE_DEPLOYMENT_NAME" - displayName: "Database Deployment Name" - description: "The name associated to the database deployment resources. In particular, this is used to wire up the credentials associated to the database." - required: true - value: "postgresql" - - name: "BACKUP_STRATEGY" - displayName: "Backup Strategy" - description: "The strategy to use for backups; for example daily, or rolling." - required: true - value: "rolling" - - name: "BACKUP_DIR" - displayName: "The root backup directory" - description: "The name of the root backup directory" - required: true - value: "/backups/" - - name: "NUM_BACKUPS" - displayName: "The number of backup files to be retained" - description: "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy." - required: false - value: "5" - - name: "DAILY_BACKUPS" - displayName: "Number of Daily Backups to Retain" - description: "The number of daily backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "7" - - name: "WEEKLY_BACKUPS" - displayName: "Number of Weekly Backups to Retain" - description: "The number of weekly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "4" - - name: "MONTHLY_BACKUPS" - displayName: "Number of Monthly Backups to Retain" - description: "The number of monthly backup files to be retained. Used for the `rolling` backup strategy." - required: false - value: "1" - - name: "JOB_SERVICE_ACCOUNT" - displayName: "Service Account Name" - description: "Name of the Service Account To Execute the Job As."
- value: "default" - required: true - - name: "SUCCESS_JOBS_HISTORY_LIMIT" - displayName: "Successful Job History Limit" - description: "The number of successful jobs that will be retained" - value: "5" - required: true - - name: "FAILED_JOBS_HISTORY_LIMIT" - displayName: "Failed Job History Limit" - description: "The number of failed jobs that will be retained" - value: "2" - required: true - - name: "JOB_BACKOFF_LIMIT" - displayName: "Job Backoff Limit" - description: "The number of attempts to try for a successful job outcome" - value: "0" - required: false -objects: -- kind: ConfigMap - apiVersion: v1 - metadata: - name: "${JOB_NAME}-config" - labels: - template: "${JOB_NAME}-config-template" - cronjob: "${JOB_NAME}" - data: - DATABASE_SERVICE_NAME: "${DATABASE_SERVICE_NAME}" - DEFAULT_PORT: "${DATABASE_DEFAULT_PORT}" - POSTGRESQL_DATABASE: "${DATABASE_NAME}" -# BACKUP_STRATEGY: "daily" - RETENTION.NUM_BACKUPS: "${NUM_BACKUPS}" - BACKUP_STRATEGY: "rolling" - RETENTION.DAILY_BACKUPS: "${DAILY_BACKUPS}" - RETENTION.WEEKLY_BACKUPS: "${WEEKLY_BACKUPS}" - RETENTION.MONTHLY_BACKUPS: "${MONTHLY_BACKUPS}" -- kind: "CronJob" - apiVersion: "batch/v1beta1" - metadata: - name: "${JOB_NAME}" - labels: - template: "${JOB_NAME}-cronjob" - cronjob: "${JOB_NAME}" - spec: - schedule: "${SCHEDULE}" - concurrencyPolicy: "Forbid" - successfulJobsHistoryLimit: "${{SUCCESS_JOBS_HISTORY_LIMIT}}" - failedJobsHistoryLimit: "${{FAILED_JOBS_HISTORY_LIMIT}}" - jobTemplate: - metadata: - labels: - template: "${JOB_NAME}-job" - cronjob: "${JOB_NAME}" - spec: - backoffLimit: ${JOB_BACKOFF_LIMIT} - template: - spec: - containers: - - name: "${JOB_NAME}-cronjob" - image: "docker-registry.default.svc:5000/${IMAGE_NAMESPACE}/${SOURCE_IMAGE_NAME}:${TAG_NAME}" -# image: backup - command: - - "/bin/bash" - - "-c" - - "/backup.sh -1" - volumeMounts: - - mountPath: "${BACKUP_DIR}" - name: "backup" - env: - - name: BACKUP_DIR - value: "${BACKUP_DIR}" - - name: BACKUP_STRATEGY - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: BACKUP_STRATEGY - - name: NUM_BACKUPS - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: NUM_BACKUPS - optional: true - - name: DAILY_BACKUPS - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: DAILY_BACKUPS - optional: true - - name: WEEKLY_BACKUPS - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: WEEKLY_BACKUPS - optional: true - - name: MONTHLY_BACKUPS - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: MONTHLY_BACKUPS - optional: true - - name: DATABASE_SERVICE_NAME - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: DATABASE_SERVICE_NAME - - name: DEFAULT_PORT - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: DEFAULT_PORT - optional: true - - name: POSTGRESQL_DATABASE - valueFrom: - configMapKeyRef: - name: "${JOB_NAME}-config" - key: POSTGRESQL_DATABASE - - name: POSTGRESQL_USER - valueFrom: - secretKeyRef: - name: "${DATABASE_DEPLOYMENT_NAME}" - key: database-user - - name: POSTGRESQL_PASSWORD - valueFrom: - secretKeyRef: - name: "${DATABASE_DEPLOYMENT_NAME}" - key: database-password - volumes: - - name: backup - persistentVolumeClaim: - claimName: "${JOB_PERSISTENT_STORAGE_NAME}" - restartPolicy: "Never" - terminationGracePeriodSeconds: 30 - activeDeadlineSeconds: 1600 - dnsPolicy: "ClusterFirst" - serviceAccountName: "${JOB_SERVICE_ACCOUNT}" - serviceAccount: "${JOB_SERVICE_ACCOUNT}" diff --git a/backup-container/openshift/templates/backup/backup-deploy.json 
b/backup-container/openshift/templates/backup/backup-deploy.json deleted file mode 100644 index b3bb92c2a..000000000 --- a/backup-container/openshift/templates/backup/backup-deploy.json +++ /dev/null @@ -1,468 +0,0 @@ -{ - "kind": "Template", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}-deployment-template" - }, - "objects": [ - { - "kind": "PersistentVolumeClaim", - "apiVersion": "v1", - "metadata": { - "name": "${PERSISTENT_VOLUME_NAME}", - "annotations": { - "volume.beta.kubernetes.io/storage-class": "gluster-file" - }, - "labels": { - "app": "${NAME}-persistent", - "template": "${NAME}-persistent-template" - } - }, - "spec": { - "storageClassName": "${PERSISTENT_VOLUME_CLASS}", - "accessModes": [ - "ReadWriteOnce" - ], - "resources": { - "requests": { - "storage": "${PERSISTENT_VOLUME_SIZE}" - } - } - } - }, - { - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": "${FTP_SECRET_KEY}" - }, - "type": "Opaque", - "stringData": { - "ftp-user": "${FTP_USER}", - "ftp-password": "${FTP_PASSWORD}" - } - }, - { - "kind": "DeploymentConfig", - "apiVersion": "v1", - "metadata": { - "name": "${NAME}", - "labels": { - "template": "${NAME}-deployment" - }, - "annotations": { - "description": "Defines how to deploy the ${NAME} server" - } - }, - "spec": { - "strategy": { - "type": "Recreate" - }, - "triggers": [ - { - "type": "ConfigChange" - }, - { - "type": "ImageChange", - "imageChangeParams": { - "automatic": true, - "containerNames": [ - "${NAME}" - ], - "from": { - "kind": "ImageStreamTag", - "namespace": "${IMAGE_NAMESPACE}", - "name": "${SOURCE_IMAGE_NAME}:${TAG_NAME}" - } - } - } - ], - "replicas": 1, - "selector": { - "name": "${NAME}" - }, - "template": { - "metadata": { - "name": "${NAME}", - "labels": { - "name": "${NAME}" - } - }, - "spec": { - "volumes": [ - { - "name": "${NAME}", - "persistentVolumeClaim": { - "claimName": "${PERSISTENT_VOLUME_NAME}" - } - }, - { - "name": "${NAME}-config-volume", - "configMap": { - "name": "${CONFIG_MAP_NAME}", - "items": [ - { - "key": "${CONFIG_FILE_NAME}", - "path": "${CONFIG_FILE_NAME}" - } - ] - } - } - ], - "containers": [ - { - "name": "${NAME}", - "image": "", - "ports": [], - "env": [ - { - "name": "BACKUP_STRATEGY", - "value": "${BACKUP_STRATEGY}" - }, - { - "name": "BACKUP_DIR", - "value": "${BACKUP_DIR}" - }, - { - "name": "NUM_BACKUPS", - "value": "${NUM_BACKUPS}" - }, - { - "name": "DAILY_BACKUPS", - "value": "${DAILY_BACKUPS}" - }, - { - "name": "WEEKLY_BACKUPS", - "value": "${WEEKLY_BACKUPS}" - }, - { - "name": "MONTHLY_BACKUPS", - "value": "${MONTHLY_BACKUPS}" - }, - { - "name": "BACKUP_PERIOD", - "value": "${BACKUP_PERIOD}" - }, - { - "name": "DATABASE_SERVICE_NAME", - "value": "${DATABASE_SERVICE_NAME}" - }, - { - "name": "POSTGRESQL_DATABASE", - "value": "${DATABASE_NAME}" - }, - { - "name": "POSTGRESQL_USER", - "valueFrom": { - "secretKeyRef": { - "name": "${DATABASE_DEPLOYMENT_NAME}", - "key": "${DATABASE_USER_KEY_NAME}" - } - } - }, - { - "name": "POSTGRESQL_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "${DATABASE_DEPLOYMENT_NAME}", - "key": "${DATABASE_PASSWORD_KEY_NAME}" - } - } - }, - { - "name": "FTP_URL", - "value": "${FTP_URL}" - }, - { - "name": "FTP_USER", - "valueFrom": { - "secretKeyRef": { - "name": "${FTP_SECRET_KEY}", - "key": "ftp-user" - } - } - }, - { - "name": "FTP_PASSWORD", - "valueFrom": { - "secretKeyRef": { - "name": "${FTP_SECRET_KEY}", - "key": "ftp-password" - } - } - }, - { - "name": "WEBHOOK_URL", - "value": "${WEBHOOK_URL}" - }, - { - "name": 
"ENVIRONMENT_FRIENDLY_NAME", - "value": "${ENVIRONMENT_FRIENDLY_NAME}" - }, - { - "name": "ENVIRONMENT_NAME", - "value": "${ENVIRONMENT_NAME}" - } - ], - "resources": { - "requests": { - "cpu": "${CPU_REQUEST}", - "memory": "${MEMORY_REQUEST}" - }, - "limits": { - "cpu": "${CPU_LIMIT}", - "memory": "${MEMORY_LIMIT}" - } - }, - "volumeMounts": [ - { - "name": "${NAME}", - "mountPath": "${BACKUP_DIR}" - }, - { - "name": "${NAME}-config-volume", - "mountPath": "${CONFIG_MOUNT_PATH}${CONFIG_FILE_NAME}", - "subPath": "${CONFIG_FILE_NAME}" - } - ] - } - ] - } - } - } - } - ], - "parameters": [ - { - "name": "NAME", - "displayName": "Name", - "description": "The name assigned to all of the resources defined in this template.", - "required": true, - "value": "backup" - }, - { - "name": "SOURCE_IMAGE_NAME", - "displayName": "Source Image Name", - "description": "The name of the image to use for this resource.", - "required": true, - "value": "backup" - }, - { - "name": "IMAGE_NAMESPACE", - "displayName": "Image Namespace", - "description": "The namespace of the OpenShift project containing the imagestream for the application.", - "required": true, - "value": "backup-container" - }, - { - "name": "TAG_NAME", - "displayName": "Environment TAG name", - "description": "The TAG name for this environment, e.g., dev, test, prod", - "required": true, - "value": "dev" - }, - { - "name": "DATABASE_SERVICE_NAME", - "displayName": "Database Service Name", - "description": "The name of the database service.", - "required": true, - "value": "postgresql" - }, - { - "name": "DATABASE_NAME", - "displayName": "Database Name", - "description": "The name of the database.", - "required": true, - "value": "MyDatabase" - }, - { - "name": "DATABASE_DEPLOYMENT_NAME", - "displayName": "Database Deployment Name", - "description": "The name associated to the database deployment resources. In particular, this is used to wire up the credentials associated to the database.", - "required": true, - "value": "postgresql" - }, - { - "name": "DATABASE_USER_KEY_NAME", - "displayName": "Database User Key Name", - "description": "The datbase user key name stoed in database deployment resources specified by DATABASE_DEPLOYMENT_NAME.", - "required": true, - "value": "database-user" - }, - { - "name": "DATABASE_PASSWORD_KEY_NAME", - "displayName": "Database Password Key Name", - "description": "The database password key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME.", - "required": true, - "value": "database-password" - }, - { - "name": "BACKUP_STRATEGY", - "displayName": "Backup Strategy", - "description": "The strategy to use for backups; for example daily, or rolling.", - "required": true, - "value": "rolling" - }, - { - "name": "FTP_SECRET_KEY", - "displayName": "FTP Secret Key", - "description": "The FTP secret key is used to wire up the credentials associated to the FTP.", - "required": true, - "value": "ftp-secret" - }, - { - "name": "FTP_URL", - "displayName": "FTP Server URL", - "description": "The URL of the backup FTP server", - "required": false, - "value": "" - }, - { - "name": "FTP_USER", - "displayName": "FTP user name", - "description": "FTP user name", - "required": false, - "value": "" - }, - { - "name": "FTP_PASSWORD", - "displayName": "FTP password", - "description": "FTP password", - "required": false, - "value": "" - }, - { - "name": "WEBHOOK_URL", - "displayName": "Webhook URL", - "description": "The URL of the webhook to use for notifications. 
If not specified, the webhook integration feature is disabled.", - "required": false, - "value": "" - }, - { - "name": "ENVIRONMENT_FRIENDLY_NAME", - "displayName": "Friendly Environment Name", - "description": "The human readable name of the environment. This variable is used by the webhook integration to identify the environment in which the backup notifications originate.", - "required": false, - "value": "" - }, - { - "name": "ENVIRONMENT_NAME", - "displayName": "Environment Name (Environment Id)", - "description": "The name or Id of the environment. This variable is used by the webhook integration to identify the environment in which the backup notifications originate.", - "required": false, - "value": "" - }, - { - "name": "BACKUP_DIR", - "displayName": "The root backup directory", - "description": "The name of the root backup directory", - "required": true, - "value": "/backups/" - }, - { - "name": "NUM_BACKUPS", - "displayName": "The number of backup files to be retained", - "description": "The number of backup files to be retained. Used for the `daily` backup strategy. Ignored when using the `rolling` backup strategy.", - "required": false, - "value": "" - }, - { - "name": "DAILY_BACKUPS", - "displayName": "Number of Daily Backups to Retain", - "description": "The number of daily backup files to be retained. Used for the `rolling` backup strategy.", - "required": false, - "value": "" - }, - { - "name": "WEEKLY_BACKUPS", - "displayName": "Number of Weekly Backups to Retain", - "description": "The number of weekly backup files to be retained. Used for the `rolling` backup strategy.", - "required": false, - "value": "" - }, - { - "name": "MONTHLY_BACKUPS", - "displayName": "Number of Monthly Backups to Retain", - "description": "The number of monthly backup files to be retained. Used for the `rolling` backup strategy.", - "required": false, - "value": "" - }, - { - "name": "BACKUP_PERIOD", - "displayName": "Period (d,m,s) between backups in a format used by the sleep command", - "description": "Period (d,m,s) between backups in a format used by the sleep command", - "required": true, - "value": "1d" - }, - { - "name": "CONFIG_FILE_NAME", - "displayName": "Config File Name", - "description": "The name of the configuration file.", - "required": true, - "value": "backup.conf" - }, - { - "name": "CONFIG_MAP_NAME", - "displayName": "Config Map Name", - "description": "The name of the configuration map.", - "required": true, - "value": "backup-conf" - }, - { - "name": "CONFIG_MOUNT_PATH", - "displayName": "Config Mount Path", - "description": "The path to use to mount the config file.", - "required": true, - "value": "/" - }, - { - "name": "PERSISTENT_VOLUME_NAME", - "displayName": "Persistent Volume Name", - "description": "The name of the persistent volume associated with the deployment.", - "required": true, - "value": "backup-pvc" - }, - { - "name": "PERSISTENT_VOLUME_SIZE", - "displayName": "Persistent Volume Size", - "description": "The size of the persistent volume , e.g. 
512Mi, 1Gi, 2Gi.", - "required": true, - "value": "1Gi" - }, - { - "name": "PERSISTENT_VOLUME_CLASS", - "displayName": "Persistent Volume Class name", - "description": "The class of the volume; gluster-file, gluster-block, gluster-file-db", - "required": false, - "value": "" - }, - { - "name": "CPU_REQUEST", - "displayName": "Resources CPU Request", - "description": "The resources CPU request (in cores) for this build.", - "required": true, - "value": "0" - }, - { - "name": "CPU_LIMIT", - "displayName": "Resources CPU Limit", - "description": "The resources CPU limit (in cores) for this build.", - "required": true, - "value": "0" - }, - { - "name": "MEMORY_REQUEST", - "displayName": "Resources Memory Request", - "description": "The resources Memory request (in Mi, Gi, etc) for this build.", - "required": true, - "value": "0Mi" - }, - { - "name": "MEMORY_LIMIT", - "displayName": "Resources Memory Limit", - "description": "The resources Memory limit (in Mi, Gi, etc) for this build.", - "required": true, - "value": "0Mi" - } - ] -} \ No newline at end of file diff --git a/backup-container/openshift/templates/backup/create-backup.sh b/backup-container/openshift/templates/backup/create-backup.sh deleted file mode 100644 index 5fc6e1c0e..000000000 --- a/backup-container/openshift/templates/backup/create-backup.sh +++ /dev/null @@ -1,71 +0,0 @@ -## tools -oc process -f ./backup-build.json \ -GIT_REPO_URL=https://github.com/bcgov/tfrs.git \ -GIT_REF=master \ -SOURCE_CONTEXT_DIR=/backup-container/docker | oc create -f - - -## dev -cd backup-container/openshift -oc create -f backup-conf-configmap_DeploymentConfig.json -n mem-tfrs-dev -cd backup-container/openshift/templates/backup -oc process -f ./backup-deploy.json \ -IMAGE_NAMESPACE=mem-tfrs-tools \ -BACKUP_STRATEGY=daily \ -NUM_BACKUPS=31 \ -DAILY_BACKUPS=6 \ -WEEKLY_BACKUPS=4 \ -MONTHLY_BACKUPS=1 \ -BACKUP_PERIOD=1d \ -DATABASE_SERVICE_NAME=postgresql \ -DATABASE_NAME=tfrs \ -DATABASE_DEPLOYMENT_NAME=tfrs-postgresql \ -DATABASE_USER_KEY_NAME=DATABASE_USER \ -DATABASE_PASSWORD_KEY_NAME=DATABASE_PASSWORD \ -ENVIRONMENT_FRIENDLY_NAME=dev \ -ENVIRONMENT_NAME=dev | oc create -f - - - - -## test -cd backup-container/openshift -oc create -f backup-conf-configmap_DeploymentConfig.json -n mem-tfrs-test -cd backup-container/openshift/templates/backup -oc process -f ./backup-deploy.json \ -IMAGE_NAMESPACE=mem-tfrs-tools \ -BACKUP_STRATEGY=daily \ -NUM_BACKUPS=31 \ -DAILY_BACKUPS=6 \ -WEEKLY_BACKUPS=4 \ -MONTHLY_BACKUPS=1 \ -BACKUP_PERIOD=1d \ -DATABASE_SERVICE_NAME=postgresql \ -DATABASE_NAME=tfrs \ -DATABASE_DEPLOYMENT_NAME=tfrs-postgresql \ -DATABASE_USER_KEY_NAME=DATABASE_USER \ -DATABASE_PASSWORD_KEY_NAME=DATABASE_PASSWORD \ -ENVIRONMENT_FRIENDLY_NAME=test \ -ENVIRONMENT_NAME=test | oc create -f - - -under backup-containers/openshift -run command: oc create -f backup-conf-configmap_DeploymentConfig.json -n mem-tfrs-test - -## prod -cd backup-container/openshift -oc create -f backup-conf-configmap_DeploymentConfig.json -n mem-tfrs-prod -cd backup-container/openshift/templates/backup -oc process -f ./backup-deploy.json \ -IMAGE_NAMESPACE=mem-tfrs-tools \ -BACKUP_STRATEGY=daily \ -NUM_BACKUPS=31 \ -DAILY_BACKUPS=6 \ -WEEKLY_BACKUPS=4 \ -MONTHLY_BACKUPS=1 \ -BACKUP_PERIOD=1d \ -DATABASE_SERVICE_NAME=postgresql \ -DATABASE_NAME=tfrs \ -DATABASE_DEPLOYMENT_NAME=tfrs-postgresql \ -DATABASE_USER_KEY_NAME=DATABASE_USER \ -DATABASE_PASSWORD_KEY_NAME=DATABASE_PASSWORD \ -ENVIRONMENT_FRIENDLY_NAME=prod \ -ENVIRONMENT_NAME=prod | oc create -f - - diff 
--git a/dummyfile.txt b/dummyfile.txt new file mode 100644 index 000000000..e69de29bb diff --git a/frontend/.s2i/bin/assemble b/frontend/.s2i/bin/assemble index 28737538b..2730fde77 100644 --- a/frontend/.s2i/bin/assemble +++ b/frontend/.s2i/bin/assemble @@ -77,6 +77,9 @@ echo "---> Building your Node application from source" # -d means --loglevel info npm install -d +# fix vulnerabilities +npm audit fix + # run webpack # create public/build folder under /opt/app-root/src # /opt/app-root/src/public/build has bundle.js and tokenRenewl.js diff --git a/maintenance/Caddyfile b/maintenance/Caddyfile index 8ec47ceb3..8f3266e6a 100644 --- a/maintenance/Caddyfile +++ b/maintenance/Caddyfile @@ -1,18 +1,7 @@ 0.0.0.0:2015 -root /var/www/html +root * /var/www/html log stdout -errors stdout - -rewrite / { - regexp .* - to /maintenance.html -} - -header / { - Cache-Control "no-cache, no-store, must-revalidate" - Pragma "no-cache" - Expires "0" -} +rewrite * /maintenance.html diff --git a/nagios/nagios3/conf.d/contacts.cfg b/nagios/nagios3/conf.d/contacts.cfg index 578661a14..ebde7df25 100644 --- a/nagios/nagios3/conf.d/contacts.cfg +++ b/nagios/nagios3/conf.d/contacts.cfg @@ -23,6 +23,6 @@ define contact { host_notification_options d,u,r service_notification_commands notify-service-by-email host_notification_commands notify-host-by-email - email tfrs@gov.bc.ca + email Kuan.Fan@gov.bc.ca can_submit_commands 1 } \ No newline at end of file diff --git a/nagios/nagios3/conf.d/hosts-test.cfg b/nagios/nagios3/conf.d/hosts-test.cfg index 7c2ed5cde..f8520cd1f 100644 --- a/nagios/nagios3/conf.d/hosts-test.cfg +++ b/nagios/nagios3/conf.d/hosts-test.cfg @@ -1,5 +1,5 @@ define host { - host_name client-test + host_name tfrs-frontend-test address client.mem-tfrs-test.svc hostgroups hosts-test check_command check_host diff --git a/nagios/nagios3/conf.d/services-other-prod.cfg b/nagios/nagios3/conf.d/services-other-prod.cfg index c867f649d..6d5fb79f1 100644 --- a/nagios/nagios3/conf.d/services-other-prod.cfg +++ b/nagios/nagios3/conf.d/services-other-prod.cfg @@ -7,7 +7,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -20,7 +20,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -33,7 +33,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -46,7 +46,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -59,7 +59,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -72,7 +72,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } \ No newline at end of file diff --git a/nagios/nagios3/conf.d/services-replica-prod.cfg b/nagios/nagios3/conf.d/services-replica-prod.cfg index b7f8882e5..60ad53525 100644 --- a/nagios/nagios3/conf.d/services-replica-prod.cfg +++ 
b/nagios/nagios3/conf.d/services-replica-prod.cfg @@ -7,7 +7,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -20,7 +20,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -33,7 +33,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -46,7 +46,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -59,7 +59,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -72,7 +72,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -85,7 +85,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -98,7 +98,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -111,7 +111,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } @@ -124,7 +124,7 @@ define service { max_check_attempts 5 check_period 24x7 contact_groups tfrs-devops - notification_interval 5 + notification_interval 0 notification_period 24x7 notifications_enabled 1 } \ No newline at end of file diff --git a/openshift-v4/templates/Openshift-v4-migration.txt b/openshift-v4/templates/Openshift-v4-migration.txt new file mode 100644 index 000000000..2fbdd4d8d --- /dev/null +++ b/openshift-v4/templates/Openshift-v4-migration.txt @@ -0,0 +1,73 @@ + +# TFRS Openshift Setup - migration from Openshift v3 to v4 + +## 1. Create generic network policies + +openshift-v4/templates/nsp/README.MD + +## 2. Setup Keycloak secret used by pipeline + +openshift-v4/templates/keycloak/README.MD + +## 3. Setup Minio + +openshift-v4/templates/minio/README.md + +## 4. Setup Clamav + +openshift-v4/templates/clamav/README.md + +## 5. Setup Nginx + +openshift-v4/templates/nginx-runtime/Readme.md + +## 6. Pipeline prepare + +Only deploy patroni and rabbitmq through the pipeline first; disable the other deployments in deploy.js. + +### 6.1 Setup RabbitMQ + +openshift-v4/templates/rabbitmq/README.md +Note: remember to create the tfrs user after rabbitmq is up and running (see the sketch after step 6.2) + +### 6.2 Patroni + +openshift-v4/templates/patroni/README.md +Note: remember to sync the passwords in the template so they match the v3 values
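A sketch of the one-time RabbitMQ step referenced in 6.1, run from inside a rabbitmq pod once the cluster is up. The user and vhost names match the values wired into the backend deployment config (`tfrs`, `/tfrs`); the password shown is a placeholder you would keep in sync with the rabbitmq cluster secret:

```console
rabbitmqctl add_user tfrs <password>
rabbitmqctl add_vhost /tfrs
rabbitmqctl set_permissions -p /tfrs tfrs ".*" ".*" ".*"
```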
## 7. Pipeline +### 7.1 Backend + +openshift-v4/templates/backend/README.md + +### 7.2 Frontend + +openshift-v4/templates/frontend/README.md + +### 7.3 Celery + +openshift-v4/templates/celery/README.md + +### 7.4 Notification Server + +openshift-v4/templates/notification/README.md + +### 7.5 Scan Coordinator + +openshift-v4/templates/scan-coordinator/README.md + +### 7.6 Scan Handler + +openshift-v4/templates/scan-handler/README.md + +### 7.7 SchemaSpy + +openshift-v4/templates/schema-spy/README.md + +## 8. Backup container + +openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/README.md + +## 9. Nagios + +openshift-v4/templates/nagios/README.md \ No newline at end of file diff --git a/openshift-v4/templates/backend/README.md b/openshift-v4/templates/backend/README.md new file mode 100644 index 000000000..9ba08171f --- /dev/null +++ b/openshift-v4/templates/backend/README.md @@ -0,0 +1,15 @@ +### Files included + +backend-bc.yaml build config +backend-dc.yaml deployment config +backend-dc-others.yaml create service and route +backend-secrets.yaml create keycloak-sa-client-secret and django-secret-key + +#### Before triggering pipeline + +* create keycloak-sa-client-secret and django-secret-key +oc process -f ./backend-secrets.yaml KEYCLOAK_SA_CLIENT_SECRET=<keycloak sa client secret> | oc create -f - -n [env namespace] + +#### After pipeline completes + +After the pipeline completes, create the autoscaler for the backend and check that DJANGO_DEBUG is set to false on prod. diff --git a/openshift-v4/templates/backend/backend-bc.yaml b/openshift-v4/templates/backend/backend-bc.yaml new file mode 100644 index 000000000..eb3ddea24 --- /dev/null +++ b/openshift-v4/templates/backend/backend-bc.yaml @@ -0,0 +1,80 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: tfrs-bc + creationTimestamp: +parameters: +- name: NAME + displayName: + description: the module name entered when running yo bcdk:pipeline, which is tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: GIT_URL + displayName: + description: tfrs repo + required: true +- name: GIT_REF + displayName: + description: tfrs branch name of the pr + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the backend image + labels: + shared: "true" + creationTimestamp: null + generation: 643 + name: ${NAME}-backend + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-backend${SUFFIX} + creationTimestamp: + annotations: + description: Defines how to build the application + spec: + triggers: + - type: ImageChange + imageChange: {} + - type: ConfigChange + runPolicy: SerialLatestOnly + source: + type: Git + git: + uri: ${GIT_URL} + ref: ${GIT_REF} + contextDir: backend + strategy: + type: Source + sourceStrategy: + from: + kind: ImageStreamTag + namespace: openshift + name: python:3.6 + env: + - name: PIP_INDEX_URL + output: + to: + kind: ImageStreamTag + name: ${NAME}-backend:${VERSION} + resources: {} + postCommit: {} + nodeSelector: + status: + lastVersion: 0
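For reference, this build config can also be processed by hand outside the pipeline. The parameter values below (suffix, version, target namespace) are illustrative only; the template defines them as required inputs:

```console
oc process -f ./backend-bc.yaml \
NAME=tfrs \
SUFFIX=-pr-0 \
VERSION=pr-0 \
GIT_URL=https://github.com/bcgov/tfrs.git \
GIT_REF=master \
| oc create -f - -n <tools namespace>
```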
diff --git a/openshift-v4/templates/backend/backend-dc-others.yaml b/openshift-v4/templates/backend/backend-dc-others.yaml new file mode 100644 index 000000000..904a0d4e3 --- /dev/null +++ b/openshift-v4/templates/backend/backend-dc-others.yaml @@ -0,0 +1,55 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: tfrs-dc-others + creationTimestamp: +labels: + template: tfrs-backend-template +parameters: +- name: NAME + displayName: + description: the module name entered when running yo bcdk:pipeline, which is tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: BACKEND_HOST + displayName: Route host name + description: Route host name + required: true +objects: +- kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-backend${SUFFIX} + spec: + ports: + - name: web + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + name: ${NAME}-backend${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: v1 + metadata: + name: ${NAME}-backend${SUFFIX} + creationTimestamp: + annotations: {} + spec: + host: ${BACKEND_HOST} + to: + kind: Service + name: ${NAME}-backend${SUFFIX} + weight: 100 + port: + targetPort: web + tls: + termination: edge + wildcardPolicy: None diff --git a/openshift-v4/templates/backend/backend-dc.yaml b/openshift-v4/templates/backend/backend-dc.yaml new file mode 100644 index 000000000..8ed9a9817 --- /dev/null +++ b/openshift-v4/templates/backend/backend-dc.yaml @@ -0,0 +1,304 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: tfrs-dc + creationTimestamp: +labels: + template: tfrs-backend-template +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev, etc. + required: true +- name: ENV_NAME + displayName: environment name + description: such as dev, test and prod + required: true +- name: NAMESPACE + displayName: Environment name + description: 'Sample values: 0ab226-dev, 0ab226-test and 0ab226-prod' + required: true +- name: VERSION + displayName: null + description: image tag name for output + required: true +- name: KEYCLOAK_SA_BASEURL + displayName: KEYCLOAK_SA_BASEURL + description: 'Valid values: https://dev.oidc.gov.bc.ca, https://test.oidc.gov.bc.ca, + https://oidc.gov.bc.ca' + required: true +- name: KEYCLOAK_SA_CLIENT_ID + displayName: KEYCLOAK_SA_CLIENT_ID + description: 'Valid values: tfrs-dev-django-sa, tfrs-django-sa, tfrs-django-sa' + required: true +- name: KEYCLOAK_SA_REALM + displayName: KEYCLOAK_SA_REALM + description: 'Valid values: tfrs-dev, tfrs, tfrs' + required: true +- name: KEYCLOAK_AUDIENCE + displayName: KEYCLOAK_AUDIENCE + description: 'Valid values: tfrs-dev, tfrs, tfrs' + required: true +- name: KEYCLOAK_CERTS_URL + displayName: KEYCLOAK_CERTS_URL + description: 'Valid values: https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev/protocol/openid-connect/certs, + https://test.oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs, https://oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs' + required: true +- name: KEYCLOAK_CLIENT_ID + displayName: KEYCLOAK_CLIENT_ID + description: 'Valid values: tfrs-dev, tfrs, tfrs' + required: true +- name: KEYCLOAK_ISSUER + displayName: KEYCLOAK_ISSUER + description: 'Valid values: https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev, https://test.oidc.gov.bc.ca/auth/realms/tfrs, + https://oidc.gov.bc.ca/auth/realms/tfrs' + required: true +- name: KEYCLOAK_REALM + displayName: KEYCLOAK_REALM + description: 'Valid values: https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev, https://test.oidc.gov.bc.ca/auth/realms/tfrs,
https://oidc.gov.bc.ca/auth/realms/tfrs' + required: true +- name: CPU_REQUEST + displayName: Requested CPU + description: Requested CPU + required: true +- name: CPU_LIMIT + displayName: CPU upper limit + description: CPU upper limit + required: true +- name: MEMORY_REQUEST + displayName: Requested memory + description: Requested memory + required: true +- name: MEMORY_LIMIT + displayName: Memory upper limit + description: Memory upper limit + required: true +- name: REPLICAS + displayName: replicas + description: replicas + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the backend image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-backend + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-backend${SUFFIX} + generation: 1 + creationTimestamp: + labels: + name: ${NAME}-backend${SUFFIX} + annotations: + description: Defines how to deploy the application server + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + mid: + failurePolicy: Abort + execNewPod: + command: + - "/bin/sh" + - "-c" + - python manage.py migrate; + containerName: tfrs-app + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - tfrs-app + from: + kind: ImageStreamTag + name: ${NAME}-backend:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: ${{REPLICAS}} + test: false + selector: + name: ${NAME}-backend${SUFFIX} + template: + metadata: + name: ${NAME}-backend${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-backend${SUFFIX} + spec: + containers: + - name: tfrs-app + image: '' + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: KEYCLOAK_SA_BASEURL + value: ${KEYCLOAK_SA_BASEURL} + - name: KEYCLOAK_SA_CLIENT_ID + value: ${KEYCLOAK_SA_CLIENT_ID} + - name: KEYCLOAK_SA_REALM + value: ${KEYCLOAK_SA_REALM} + - name: KEYCLOAK_AUDIENCE + value: ${KEYCLOAK_AUDIENCE} + - name: KEYCLOAK_CERTS_URL + value: ${KEYCLOAK_CERTS_URL} + - name: KEYCLOAK_CLIENT_ID + value: ${KEYCLOAK_CLIENT_ID} + - name: KEYCLOAK_ENABLED + value: 'true' + - name: KEYCLOAK_ISSUER + value: ${KEYCLOAK_ISSUER} + - name: KEYCLOAK_REALM + value: ${KEYCLOAK_REALM} + - name: KEYCLOAK_SA_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: keycloak-sa-client-secret + key: KEYCLOAK_SA_CLIENT_SECRET + - name: AMQP_HOST + value: ${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: AMQP_USER + value: tfrs + - name: AMQP_VHOST + value: "/tfrs" + - name: BYPASS_CLAMAV + value: 'false' + - name: EMAIL_FROM_ADDRESS + value: donotreply@gov.bc.ca + - name: EMAIL_SENDING_ENABLED + value: 'true' + - name: MINIO_BUCKET_NAME + value: tfrs + - name: SMTP_SERVER_HOST + value: apps.smtp.gov.bc.ca + - name: DATABASE_SERVICE_NAME + value: patroni-master${SUFFIX} + - name: DATABASE_ENGINE + value: postgresql + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-name + - name: APP_CONFIG + value: "/opt/app-root/src/gunicorn.cfg" + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-password + - name: RABBITMQ_USER + value: tfrs + - name: RABBITMQ_VHOST + value: "/tfrs" + - name: RABBITMQ_HOST + value: 
${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: MINIO_ENDPOINT + value: ${NAME}-minio-${ENV_NAME}.apps.silver.devops.gov.bc.ca:443 + - name: MINIO_USE_SSL + value: 'true' + - name: DOCUMENTS_API_ENABLED + value: 'true' + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_SECRET_KEY + - name: AMQP_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: CLAMAV_HOST + value: ${NAME}-clamav-${ENV_NAME}.${NAMESPACE}.svc.cluster.local + - name: FUEL_CODES_API_ENABLED + value: 'true' + - name: CREDIT_CALCULATION_API_ENABLED + value: 'true' + - name: COMPLIANCE_REPORTING_API_ENABLED + value: 'true' + - name: EXCLUSION_REPORTS_API_ENABLED + value: 'true' + - name: DJANGO_SECRET_KEY + valueFrom: + secretKeyRef: + name: django-secret-key + key: DJANGO_SECRET_KEY + resources: + limits: + cpu: "${CPU_LIMIT}" + memory: "${MEMORY_LIMIT}" + requests: + cpu: "${CPU_REQUEST}" + memory: "${MEMORY_REQUEST}" + livenessProbe: + httpGet: + path: "/health" + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: "/health" + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/backend/backend-secrets.yaml b/openshift-v4/templates/backend/backend-secrets.yaml new file mode 100644 index 000000000..ba4c573c4 --- /dev/null +++ b/openshift-v4/templates/backend/backend-secrets.yaml @@ -0,0 +1,28 @@ +apiVersion: template.openshift.io/v1 +kind: Template +parameters: +- name: KEYCLOAK_SA_CLIENT_SECRET + description: the keycloak sa client secret + required: true +- name: DJANGO_SECRET_KEY + description: "secret used by Django" + from: "[a-zA-Z0-9]{50}" + generate: expression + required: true +objects: +- apiVersion: v1 + kind: Secret + metadata: + annotations: null + name: keycloak-sa-client-secret + stringData: + KEYCLOAK_SA_CLIENT_SECRET: ${KEYCLOAK_SA_CLIENT_SECRET} +- apiVersion: v1 + kind: Secret + metadata: + annotations: null + name: django-secret-key + stringData: + DJANGO_SECRET_KEY: ${DJANGO_SECRET_KEY} + + \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/.gitattributes b/openshift-v4/templates/backup-container-2.0.0/.gitattributes new file mode 100644 index 000000000..a295ec358 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/.gitattributes @@ -0,0 +1,12 @@ +# Set the default behavior, in case people don't have core.autocrlf set. +* text=auto + +# Declare files that will always have LF line endings on checkout.
+backup.* text eol=lf +*.sh text eol=lf +*.md text eol=lf +*.json text eol=lf +*.conf text eol=lf +**/s2i/bin/* text eol=lf +**/root/**/* text eol=lf +**/.scripts/* text eol=lf \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/.gitignore b/openshift-v4/templates/backup-container-2.0.0/.gitignore new file mode 100644 index 000000000..1d1784a16 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/.gitignore @@ -0,0 +1,16 @@ +# See http://help.github.com/ignore-files/ for more about ignoring files. +.DS_Store + +# Files created by the scripts from; https://github.com/BCDevOps/openshift-project-tools +*_DeploymentConfig.json +*_BuildConfig.json +*.local.* +*.overrides.* + +# Visual Studio Code +.vscode + +# Local config +.env +backups +docker/backup.conf \ No newline at end of file diff --git a/backup-container/CODE_OF_CONDUCT.md b/openshift-v4/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md similarity index 100% rename from backup-container/CODE_OF_CONDUCT.md rename to openshift-v4/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md diff --git a/backup-container/CONTRIBUTING.md b/openshift-v4/templates/backup-container-2.0.0/CONTRIBUTING.md similarity index 100% rename from backup-container/CONTRIBUTING.md rename to openshift-v4/templates/backup-container-2.0.0/CONTRIBUTING.md diff --git a/backup-container/LICENSE b/openshift-v4/templates/backup-container-2.0.0/LICENSE similarity index 100% rename from backup-container/LICENSE rename to openshift-v4/templates/backup-container-2.0.0/LICENSE diff --git a/backup-container/README.md b/openshift-v4/templates/backup-container-2.0.0/README.md similarity index 60% rename from backup-container/README.md rename to openshift-v4/templates/backup-container-2.0.0/README.md index 21a70a09e..eba2501ef 100644 --- a/backup-container/README.md +++ b/openshift-v4/templates/backup-container-2.0.0/README.md @@ -1,14 +1,76 @@ - +--- +title: Backup Container +description: A simple containerized backup solution for backing up one or more postgres or mongo databases to a secondary location. +author: WadeBarnes +resourceType: Components +personas: + - Developer + - Product Owner + - Designer +labels: + - backup + - backups + - postgres + - mongo + - database +--- [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) # Backup Container -This is a simple containerized backup solution for backing up one or more postgres databases to a secondary location. _Code and documentation was originally pulled from the [HETS Project](https://github.com/bcgov/hets)_ +[Backup Container](https://github.com/BCDevOps/backup-container) is a simple containerized backup solution for backing up one or more postgres or mongo databases to a secondary location. _Code and documentation was originally pulled from the [HETS Project](https://github.com/bcgov/hets)_ + +# Backup Container Options +You can run the Backup Container for postgres and mongo databases separately or in a mixed environment. +For a mixed environment: +1) You MUST use the recommended `backup.conf` configuration. +2) Within the `backup.conf`, you MUST specify the `DatabaseType` for each listed database. +3) You will need to create two builds and two deployment configs. One for a postgres backup container and the other for a mongo backup container. +4) Mount the same `backup.conf` file (ConfigMap) to each deployed container. 
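To illustrate, a shared `backup.conf` for a mixed environment lists each database with an explicit `DatabaseType` prefix; the hosts and database names below are the same illustrative values used in the config file's own examples:

```
postgres=postgresql:5432/my_database
mongo=mongodb:27017/my_database
```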
-## Postgres Backups in OpenShift -This project provides you with a starting point for integrating backups into your OpenShift projects. The scripts and templates provided in the [openshift](./openshift) directory are compatible with the [openshift-developer-tools](https://github.com/BCDevOps/openshift-developer-tools) scripts. They help you create an OpenShift deployment or cronjob called `backup` in your projects that runs backups on a Postgres database(s) within the project environment. You only need to integrate the scripts and templates into your project(s), the builds can be done with this repository as the source. +## Backups in OpenShift +This project provides you with a starting point for integrating backups into your OpenShift projects. The scripts and templates provided in the [openshift](./openshift) directory are compatible with the [openshift-developer-tools](https://github.com/BCDevOps/openshift-developer-tools) scripts. They help you create an OpenShift deployment or cronjob called `backup` in your projects that runs backups on databases within the project environment. You only need to integrate the scripts and templates into your project(s); the builds can be done with this repository as the source. Following are the instructions for running the backups and a restore. +## Storage +*Before we get too far into the details, we're going to take a moment to discuss the most important part of the whole process - **The Storage**.* The backup container uses two volumes, one for storing the backups and the other for restore/verification testing. The deployment template separates them intentionally. + +The following sections on storage discuss the recommendations and limitations of the storage classes created specifically for the BC Government's environment. + + +### Backup Storage Volume +The recommended storage class for the backup volume is `nfs-backup`. This class of storage **cannot** be auto-provisioned through the use of a deployment template. The `PersistentVolumeClaim` declared in the supplied deployment template for the *backup volume* will purposely fail to properly provision and wire an `nfs-backup` volume if published before you manually provision your `nfs-backup` claim. + +When using `nfs-backup` you will need to provision your claims **before** you publish your deployment configuration, through either the [service catalog](https://github.com/BCDevOps/provision-nfs-apb#provision-via-gui-catalog) using the [BC Gov NFS Storage](https://github.com/BCDevOps/provision-nfs-apb/blob/master/docs/usage-gui.md) wizard, or by using the [svcat cli](https://github.com/BCDevOps/provision-nfs-apb#provision-via-svcat-cli). + +You'll note the name of the resulting storage claim has a random component to it (example, `bk-devex-von-bc-tob-test-xjrmkhsnshay`). This name needs to be injected into the default value of the `BACKUP_VOLUME_NAME` parameter of the template **before** publishing the deployment configuration in order for the storage to be correctly mounted to the `/backups/` directory of the container.
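For example, once the claim exists you would inject its generated name when publishing the deployment configuration. A minimal sketch, assuming the deployment template in this directory is named `backup-deploy.yaml` (adjust the path and file name to your copy) and reusing the sample claim name from above:

```console
oc process -f ./openshift/templates/backup/backup-deploy.yaml \
BACKUP_VOLUME_NAME=bk-devex-von-bc-tob-test-xjrmkhsnshay \
| oc create -f -
```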
`nfs-backup` storageClass is a lower tier of storage and not considered highly available. `read: don't use this for live application storage`. The storageClass **IS** covered by the default enterprise backup policies, and can be directly referenced for restores using the PVC name when opening a restore ticket with 7700. + +`nfs-backup` PVCs **cannot** be used for restore/verification. The permissions on the underlying volume do not allow the PostgreSQL server to host its configuration and data files on a directory backed by this class of storage. + +Ensure you review and plan your storage requirements before provisioning. + +More information on provisioning `nfs-backup` storage is available here: [provision-nfs-apb](https://github.com/BCDevOps/provision-nfs-apb) + +#### NFS Storage Backup and Retention Policy +NFS backed storage is covered by the following backup and retention policies: +- Backup + - Daily: Incremental + - Monthly: Full +- Retention + - 90 days + +### Restore/Verification Storage Volume +The default storage class for the restore/verification volume is `netapp-file-standard`. The supplied deployment template will auto-provision this volume for you when it is published. Refer to the *Storage Performance* section for performance considerations. + +This volume should be large enough to host your largest database. Set the size by updating/overriding the `VERIFICATION_VOLUME_SIZE` value within the template. + +### Storage Performance +The performance of `netapp-block-standard` for restore/verification is far superior to that of `netapp-file-standard`; however, it should only be used in cases where the time it takes to verify a backup begins to encroach on the overall timing and verification cycle. You want the verification(s) to complete before another backup and verification cycle begins, and you want a bit of idle time between the end of one cycle and the beginning of another in case things take a little longer now and again. + +*There are currently no performance stats for the `netapp` storage types.* + ## Deployment / Configuration Together, the scripts and templates provided in the [openshift](./openshift) directory will automatically deploy the `backup` app as described below. The [backup-deploy.overrides.sh](./openshift/backup-deploy.overrides.sh) script generates the deployment configuration necessary for the [backup.conf](config/backup.conf) file to be mounted as a ConfigMap by the `backup` container. @@ -18,17 +80,19 @@ The following environment variables are defaults used by the `backup` app. | Name | Default (if not set) | Purpose | | ---- | ------- | ------- | -| BACKUP_STRATEGY | daily | To control the backup strategy used for backups. This is explained more below. | +| BACKUP_STRATEGY | rolling | To control the backup strategy used for backups. This is explained more below. | | BACKUP_DIR | /backups/ | The directory under which backups will be stored. The deployment configuration mounts the persistent volume claim to this location when first deployed. | -| NUM_BACKUPS | 31 | For backward compatibility this value is used with the daily backup strategy to set the number of backups to retain before pruning. | +| NUM_BACKUPS | 31 | Used for backward compatibility only, this value is used with the daily backup strategy to set the number of backups to retain before pruning. | | DAILY_BACKUPS | 6 | When using the rolling backup strategy this value is used to determine the number of daily (Mon-Sat) backups to retain before pruning. | | WEEKLY_BACKUPS | 4 | When using the rolling backup strategy this value is used to determine the number of weekly (Sun) backups to retain before pruning. | | MONTHLY_BACKUPS | 1 | When using the rolling backup strategy this value is used to determine the number of monthly (last day of the month) backups to retain before pruning. | -| BACKUP_PERIOD | 1d | The schedule on which to run the backups.
The value is used by a sleep command and can be defined in d, h, m, or s. | -| DATABASE_SERVICE_NAME | postgresql | The name of the service/host for the *default* database target. | -| POSTGRESQL_DATABASE | my_postgres_db | The name of the *default* database target; the name of the database you want to backup. | -| POSTGRESQL_USER | *wired to a secret* | The username for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-user`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | -| POSTGRESQL_PASSWORD | *wired to a secret* | The password for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-password`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | +| BACKUP_PERIOD | 1d | Only used for Legacy Mode. Ignored when running in Cron Mode. The schedule on which to run the backups. The value is used by a sleep command and can be defined in d, h, m, or s. | +| DATABASE_SERVICE_NAME | postgresql | Used for backward compatibility only. The name of the service/host for the *default* database target. | +| DATABASE_USER_KEY_NAME | database-user | The database user key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. | +| DATABASE_PASSWORD_KEY_NAME | database-password | The database password key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. | +| DATABASE_NAME | my_postgres_db | Used for backward compatibility only. The name of the *default* database target; the name of the database you want to backup. | +| DATABASE_USER | *wired to a secret* | The username for the database(s) hosted by the database server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-user`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | +| DATABASE_PASSWORD | *wired to a secret* | The password for the database(s) hosted by the database server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the password is `database-password`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | | FTP_URL | | The FTP server URL. If not specified, the FTP backup feature is disabled. The default value in the deployment configuration is an empty value - not specified. | | FTP_USER | *wired to a secret* | The username for the FTP server. The deployment configuration creates a secret with the name specified in the FTP_SECRET_KEY parameter (default: `ftp-secret`). The key for the username is `ftp-user` and the value is an empty value by default. | | FTP_PASSWORD | *wired to a secret* | The password for the FTP server. The deployment configuration creates a secret with the name specified in the FTP_SECRET_KEY parameter (default: `ftp-secret`). The key for the password is `ftp-password` and the value is an empty value by default.
| @@ -36,12 +100,20 @@ The following environment variables are defaults used by the `backup` app. | ENVIRONMENT_FRIENDLY_NAME | | A friendly (human readable) name of the environment. This variable is used by the webhook integration to identify the environment from which the backup notifications originate. The default value in the deployment configuration is an empty value - not specified. | | ENVIRONMENT_NAME | | A name or ID of the environment. This variable is used by the webhook integration to identify the environment from which the backup notifications originate. The default value in the deployment configuration is an empty value - not specified. | -Using this default configuration you can easily back up a single postgres database, however you can extend the configuration and use the `backup.conf` file to list a number of databases for backup. +### backup.conf + +Using this default configuration you can easily back up a single postgres database; however, we recommend you extend the configuration and use the `backup.conf` file to list a number of databases for backup and even set a cron schedule for the backups. + +When using the `backup.conf` file the following environment variables are ignored, since you list all of your `host`/`database` pairs in the file: `DATABASE_SERVICE_NAME`, `DATABASE_NAME`. To provide the credentials needed for the listed databases you extend the deployment configuration to include `hostname_USER` and `hostname_PASSWORD` credential pairs which are wired to the appropriate secrets (where hostname matches the hostname/servicename, in all caps and underscores, of the database). For example, if you are backing up a database named `wallet-db/my_wallet`, you would have to extend the deployment configuration to include a `WALLET_DB_USER` and `WALLET_DB_PASSWORD` credential pair, wired to the appropriate secrets, to access the database(s) on the `wallet-db` server. -When using the `backup.conf` file the following environment variables are ignored, since you list all of your `host`/`database` pairs in the file; `DATABASE_SERVICE_NAME`, `POSTGRESQL_DATABASE`. To provide the credentials needed for the listed databases you extend the deployment configuration to include `hostname_USER` and `hostname_PASSWORD` credential pairs which are wired to the appropriate secrets (where hostname matches the hostname/servicename, in all caps and underscores, of the database). For example, if you are backing up a database named `wallet-db/my_wallet`, you would have to extend the deployment configuration to include a `WALLET_DB_USER` and `WALLET_DB_PASSWORD` credential pair, wired to the appropriate secrets, to access the database(s) on the `wallet-db` server. You may notice the default configuration is already wired for the host/service name `postgresql`, so you're already covered if all your databases are on a server of that name. +### Cron Mode + +The `backup` container supports running the backups on a cron schedule. The schedule is specified in the `backup.conf` file. Refer to the [backup.conf](./config/backup.conf) file for additional details and examples.
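To make Cron Mode concrete, a minimal `backup.conf` pairs a database entry with backup and verification schedules; the entry below mirrors this project's own config, and the times are illustrative:

```
patroni-master-prod:5432/tfrs
0 1 * * * default ./backup.sh -s
0 4 * * * default ./backup.sh -s -v all
```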
### Cronjob Deployment / Configuration / Constraints +*This section describes the configuration of an OpenShift CronJob; this is different from the Cron Mode supported by the container when deployed in "long running" mode.* + The cronjob object can be deployed in the same manner as the application, and will also have a dependency on the image built by the build config. The main constraint for the cronjob objects is that they will require a configmap in place of environment variables and do not support the `backup.conf` for multiple database backups in the same job. In order to back up multiple databases, create multiple cronjob objects with their associated configmaps and secrets. The following variables are supported in the first iteration of the backup cronjob: @@ -50,11 +122,14 @@ The following variables are supported in the first iteration of the backup cronj | ---- | -------------------- | ------- | | BACKUP_STRATEGY | daily | To control the backup strategy used for backups. This is explained more below. | | BACKUP_DIR | /backups/ | The directory under which backups will be stored. The deployment configuration mounts the persistent volume claim to this location when first deployed. | +| SCHEDULE | 0 1 * * * | Cron Schedule to Execute the Job (using local cluster system TZ). | | NUM_BACKUPS | 31 | For backward compatibility this value is used with the daily backup strategy to set the number of backups to retain before pruning. | | DAILY_BACKUPS | 6 | When using the rolling backup strategy this value is used to determine the number of daily (Mon-Sat) backups to retain before pruning. | | WEEKLY_BACKUPS | 4 | When using the rolling backup strategy this value is used to determine the number of weekly (Sun) backups to retain before pruning. | | MONTHLY_BACKUPS | 1 | When using the rolling backup strategy this value is used to determine the number of monthly (last day of the month) backups to retain before pruning. | | DATABASE_SERVICE_NAME | postgresql | The name of the service/host for the *default* database target. | +| DATABASE_USER_KEY_NAME | database-user | The database user key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. | +| DATABASE_PASSWORD_KEY_NAME | database-password | The database password key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. | | POSTGRESQL_DATABASE | my_postgres_db | The name of the *default* database target; the name of the database you want to backup. | | POSTGRESQL_USER | *wired to a secret* | The username for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-user`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | | POSTGRESQL_PASSWORD | *wired to a secret* | The password for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the password is `database-password`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. | @@ -73,6 +148,9 @@ The scheduled job does not yet support the FTP environment variables. | FTP_URL | | FTP_USER | | FTP_PASSWORD | +### Resources +The backup-container is assigned the `Best-effort` resource type (request and limit set to zero), which allows its resource usage to scale up and down, without an explicit limit, as resources on the node allow. It benefits from large bursts of resources for short periods of time to get things done more quickly. After running the backup-container for some time, you can then set the request and limit according to the average resource consumption.
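If you later decide to pin the resources, something like the following would set explicit values on the deployment config; the numbers are placeholders, not recommendations:

```console
oc set resources dc/backup \
--requests=cpu=20m,memory=256Mi \
--limits=cpu=250m,memory=512Mi
```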
## Multiple Databases When backing up multiple databases, the retention settings apply to each database individually. For instance if you use the `daily` strategy and set the retention number(s) to 5, you will retain 5 copies of each database. So plan your backup storage accordingly. @@ -152,6 +230,12 @@ Features include: - Running a single backup cycle, `backup.sh -1` - Restoring a database from backup, `backup.sh -r <databaseSpec> [-f <backupFileFilter>]` - Restore mode will allow you to restore a database to a different location (host, and/or database name) provided it can contact the host and you can provide the appropriate credentials. +- Verifying backups, `backup.sh [-s] -v <databaseSpec> [-f <backupFileFilter>]` + - Verify mode will restore a backup to the local server to ensure it can be restored without error. Once restored, a table query is performed to ensure there was at least one table restored and queries against the database succeed without error. All database files and configuration are destroyed following the tests. + +## Using Backup Verification + +The [backup script](./docker/backup.sh) supports running manual or scheduled verifications on your backups; `backup.sh [-s] -v <databaseSpec> [-f <backupFileFilter>]`. Refer to the script documentation `backup.sh -h`, and the configuration documentation, [backup.conf](config/backup.conf), for additional details on how to use this feature. ## Using the FTP backup @@ -165,7 +249,7 @@ Features include: The Webhook integration feature is enabled by specifying the webhook URL, `WEBHOOK_URL`, in your configuration. It's recommended that you also provide values for `ENVIRONMENT_FRIENDLY_NAME` and `ENVIRONMENT_NAME`, so you can better identify the environment from which the messages originate and do things like produce links to the environment. -The Webhook integration feature was built with Rocket.Chat in mind and an integration script for Rocket.Chat can be found in [rocket.chat.integration.js](./scripts/rocket.chat.integration.js). This script was developed to support the BC OpenShift Pathfinder environment and will format the notifications from the backup script into Rocket.Chat messages (examples below). If you provide values for the environment name (`ENVIRONMENT_FRIENDLY_NAME` and `ENVIRONMENT_NAME`) hyperlinks will be added to the messages to link you to the pathfinder project console. +The Webhook integration feature was built with Rocket.Chat in mind and an integration script for Rocket.Chat can be found in [rocket.chat.integration.js](./scripts/rocket.chat.integration.js). This script was developed to support the BC OpenShift environment and will format the notifications from the backup script into Rocket.Chat messages (examples below). If you provide values for the environment name (`ENVIRONMENT_FRIENDLY_NAME` and `ENVIRONMENT_NAME`) hyperlinks will be added to the messages to link you to the project console. Sample Message: @@ -177,8 +261,35 @@ Sample Error Message: For information on how to set up a webhook in Rocket.Chat refer to [Incoming WebHook Scripting](https://rocket.chat/docs/administrator-guides/integrations/). The **Webhook URL** created during this process is the URL you use for `WEBHOOK_URL` to enable the Webhook integration feature. +## Database Plugin Support + +The backup container uses a plugin architecture to perform the database-specific operations needed to support various database types. + +The plugins are loaded dynamically based on the container type. By default the `backup.null.plugin` will be loaded when the container type is not recognized.
+ +To add support for a new database type: +1) Update the `getContainerType` function in [backup.container.utils](./docker/backup.container.utils) to detect the new type of database. +2) Using the existing plugins as reference, implement the database-specific scripts for the new database type. +3) Using the existing docker files as reference, create a new one to build the new container type. +4) Update the build and deployment templates and their documentation as needed. +5) Update the project documentation as needed. +6) Test, test, test. +7) Submit a PR. + +Plugin Examples: +- [backup.postgres.plugin](./docker/backup.postgres.plugin) + - Postgres backup implementation. + +- [backup.mongo.plugin](./docker/backup.mongo.plugin) + - Mongo backup implementation. + +- [backup.null.plugin](./docker/backup.null.plugin) + - Sample/Template backup implementation that simply outputs log messages for the various operations. + ## Backup +*The following sections describe (some) Postgres-specific implementation details; however, the steps are generally the same across database implementations.* + The purpose of the backup app is to do automatic backups. Deploy the Backup app to do daily backups. Viewing the Logs for the Backup App will show a record of backups that have been completed. The Backup app performs the following sequence of operations: @@ -186,9 +297,9 @@ The Backup app performs the following sequence of operations: 1. Create a directory that will be used to store the backup. 2. Use the `pg_dump` and `gzip` commands to make a backup (sketched below). 3. Cull backups more than $NUM_BACKUPS (default 31 - configured in deployment script) -4. Sleep for a day and repeat +4. Wait/Sleep for a period of time and repeat -Note that with the pod deployment, we are just using a simple "sleep" to run the backup periodically. With the OpenShift Scheduled Job deployment, use the backup-cronjob.yaml template and set the schedule via the cronjob object SCHEDULE template parameter. +Note that with the pod deployment, we support cron schedule(s) or the legacy mode (which uses a simple "sleep") to run the backup periodically. With the OpenShift Scheduled Job deployment, use the backup-cronjob.yaml template and set the schedule via the OpenShift cronjob object SCHEDULE template parameter. A separate pod is used vs. having the backups run from the Postgres Pod for fault-tolerance purposes - to keep the backups separate from the database storage. We don't want to, for example, lose the storage of the database, or have the database and backups storage fill up, and lose both the database and the backups. @@ -243,6 +354,10 @@ Following are more detailed steps to perform a restore of a backup. Done! +## Tips and Tricks + +Please refer to the [Tips and Tricks](./docs/TipsAndTricks.md) document for solutions to known issues. + ## Getting Help or Reporting an Issue To report bugs/issues/feature requests, please file an [issue](../../issues). @@ -250,4 +365,4 @@ To report bugs/issues/feature requests, please file an [issue](../../issues). If you would like to contribute, please see our [CONTRIBUTING](./CONTRIBUTING.md) guidelines. Please note that this project is released with a [Contributor Code of Conduct](./CODE_OF_CONDUCT.md). -By participating in this project you agree to abide by its terms. \ No newline at end of file +By participating in this project you agree to abide by its terms.
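For concreteness, the dump step described above boils down to a `pg_dump`-to-`gzip` pipeline. A minimal sketch, assuming illustrative host, database, and file names - the real values come from `backup.conf` and the wired-in secrets, and the production implementation lives in `backup.postgres.plugin` below:

```bash
# Minimal sketch of one backup cycle's dump step - all values are illustrative.
_hostname=postgresql
_database=my_postgres_db
_backupFile=/backups/daily/2020-01-01/${_hostname}-${_database}_2020-01-01_01-00-00.sql.gz

PGPASSWORD=${_password} pg_dump -Fp -h "${_hostname}" -U "${_username}" "${_database}" | gzip > "${_backupFile}"

# Success is judged on pg_dump's exit status, not gzip's:
test ${PIPESTATUS[0]} -eq 0 || echo "backup failed" >&2
```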
diff --git a/openshift-v4/templates/backup-container-2.0.0/config/backup.conf b/openshift-v4/templates/backup-container-2.0.0/config/backup.conf new file mode 100644 index 000000000..3656e78c2 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/config/backup.conf @@ -0,0 +1,52 @@ +# ============================================================ +# Databases: +# ------------------------------------------------------------ +# List the databases you want backed up here. +# Databases will be backed up in the order they are listed. +# +# The entries must be in one of the following forms: +# - <Hostname/>/<DatabaseName/> +# - <Hostname/>:<Port/>/<DatabaseName/> +# - <DatabaseType/>=<Hostname/>/<DatabaseName/> +# - <DatabaseType/>=<Hostname/>:<Port/>/<DatabaseName/> +# <DatabaseType/> can be postgres or mongo +# <DatabaseType/> MUST be specified when you are sharing a +# single backup.conf file between postgres and mongo +# backup containers. If you do not specify <DatabaseType/> +# the listed databases are assumed to be valid for the +# backup container in which the configuration is mounted. +# +# Examples: +# - postgres=postgresql/my_database +# - postgres=postgresql:5432/my_database +# - mongo=mongodb/my_database +# - mongo=mongodb:27017/my_database +# ----------------------------------------------------------- +# Cron Scheduling: +# ----------------------------------------------------------- +# List your backup and verification schedule(s) here as well. +# The schedule(s) must be listed as cron tabs that +# execute the script in 'scheduled' mode: +# - ./backup.sh -s +# +# Examples (assuming system's TZ is set to PST): +# - 0 1 * * * default ./backup.sh -s +# - Run a backup at 1am Pacific every day. +# +# - 0 4 * * * default ./backup.sh -s -v all +# - Verify the most recent backups for all databases +# at 4am Pacific every day. +# ----------------------------------------------------------- +# Full Example: +# ----------------------------------------------------------- +# postgres=postgresql:5432/TheOrgBook_Database +# mongo=mender-mongodb:27017/useradm +# postgres=wallet-db/tob_issuer +# +# 0 1 * * * default ./backup.sh -s +# 0 4 * * * default ./backup.sh -s -v all +# ============================================================ patroni-master-prod:5432/tfrs 0 21 * * * default ./backup.sh -s 0 22 * * * default ./backup.sh -s -v all 0 20 * * * default find /backups/minio-backup/* -type d -ctime +7 | xargs rm -rf;mkdir -p /backups/minio-backup/$(date +%Y%m%d);cp -rp /minio-data/* /backups/minio-backup/$(date +%Y%m%d) \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile b/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile new file mode 100644 index 000000000..4cc88a758 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile @@ -0,0 +1,42 @@ +# This image provides a postgres installation from which to run backups +FROM registry.access.redhat.com/rhscl/postgresql-10-rhel7 + +# Change timezone to PST for convenience +ENV TZ=PST8PDT + +# Set the workdir to be root +WORKDIR / + +# Load the backup scripts into the container (must be executable).
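# (The 'backup.*' glob picks up backup.sh together with the backup.* utility scripts and database plugins introduced in this change.)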
+COPY backup.* / + +COPY webhook-template.json / + +# ======================================================================================================== +# Install go-crond (from https://github.com/BCDevOps/go-crond) +# - Adds some additional logging enhancements on top of the upstream project; +# https://github.com/webdevops/go-crond +# +# CRON Jobs in OpenShift: +# - https://blog.danman.eu/cron-jobs-in-openshift/ +# -------------------------------------------------------------------------------------------------------- +ARG SOURCE_REPO=BCDevOps +ARG GOCROND_VERSION=0.6.3 +ADD https://github.com/$SOURCE_REPO/go-crond/releases/download/$GOCROND_VERSION/go-crond-64-linux /usr/bin/go-crond + +USER root + +RUN chmod ug+x /usr/bin/go-crond +# ======================================================================================================== + +# ======================================================================================================== +# Perform operations that require root privileges here ... +# -------------------------------------------------------------------------------------------------------- +RUN echo $TZ > /etc/timezone +# ======================================================================================================== + +# Important - Reset to the base image's user account. +USER 26 + +# Set the default CMD. +CMD sh /backup.sh \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile_Mongo b/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile_Mongo new file mode 100644 index 000000000..4187c170f --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/Dockerfile_Mongo @@ -0,0 +1,42 @@ +# This image provides a mongo installation from which to run backups +FROM registry.access.redhat.com/rhscl/mongodb-36-rhel7 + +# Change timezone to PST for convenience +ENV TZ=PST8PDT + +# Set the workdir to be root +WORKDIR / + +# Load the backup scripts into the container (must be executable). +COPY backup.* / + +COPY webhook-template.json / + +# ======================================================================================================== +# Install go-crond (from https://github.com/BCDevOps/go-crond) +# - Adds some additional logging enhancements on top of the upstream project; +# https://github.com/webdevops/go-crond +# +# CRON Jobs in OpenShift: +# - https://blog.danman.eu/cron-jobs-in-openshift/ +# -------------------------------------------------------------------------------------------------------- +ARG SOURCE_REPO=BCDevOps +ARG GOCROND_VERSION=0.6.3 +ADD https://github.com/$SOURCE_REPO/go-crond/releases/download/$GOCROND_VERSION/go-crond-64-linux /usr/bin/go-crond + +USER root + +RUN chmod ug+x /usr/bin/go-crond +# ======================================================================================================== + +# ======================================================================================================== +# Perform operations that require root privileges here ... +# -------------------------------------------------------------------------------------------------------- +RUN echo $TZ > /etc/timezone +# ======================================================================================================== + +# Important - Reset to the base image's user account. +USER 26 + +# Set the default CMD.
+CMD sh /backup.sh \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.config.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.config.utils new file mode 100644 index 000000000..b933846b9 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.config.utils @@ -0,0 +1,485 @@ +#!/bin/bash +# ================================================================================================================= +# Configuration Utility Functions: +# ----------------------------------------------------------------------------------------------------------------- +function getDatabaseName(){ + ( + _databaseSpec=${1} + _databaseName=$(echo ${_databaseSpec} | sed -n 's~^.*/\(.*$\)~\1~p') + echo "${_databaseName}" + ) +} + +function getDatabaseType(){ + ( + _databaseSpec=${1} + _databaseType=$(echo ${_databaseSpec} | sed -n 's~^\(.*\)=.*$~\1~p' | tr '[:upper:]' '[:lower:]') + echo "${_databaseType}" + ) +} + +function getPort(){ + ( + local OPTIND + local localhost + unset localhost + while getopts :l FLAG; do + case $FLAG in + l ) localhost=1 ;; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + if [ -z "${localhost}" ]; then + portsed="s~^.*:\([[:digit:]]\+\)/.*$~\1~p" + _port=$(echo ${_databaseSpec} | sed -n "${portsed}") + fi + + echo "${_port}" + ) +} + +function getHostname(){ + ( + local OPTIND + local localhost + unset localhost + while getopts :l FLAG; do + case $FLAG in + l ) localhost=1 ;; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + if [ -z "${localhost}" ]; then + _hostname=$(echo ${_databaseSpec} | sed 's~^.\+[=]~~;s~[:/].*~~') + else + _hostname="127.0.0.1" + fi + + echo "${_hostname}" + ) +} + +function getHostPrefix(){ + ( + _hostname=${1} + _hostPrefix=$(echo ${_hostname} | tr '[:lower:]' '[:upper:]' | sed "s~-~_~g") + echo "${_hostPrefix}" + ) +} + +function getHostUserParam(){ + ( + _hostname=${1} + _hostUser=$(getHostPrefix ${_hostname})_USER + echo "${_hostUser}" + ) +} + +function getHostPasswordParam(){ + ( + _hostname=${1} + _hostPassword=$(getHostPrefix ${_hostname})_PASSWORD + echo "${_hostPassword}" + ) +} + +function readConf(){ + ( + local OPTIND + local readCron + local quiet + local all + unset readCron + unset quiet + while getopts cqa FLAG; do + case $FLAG in + c ) readCron=1 ;; + q ) quiet=1 ;; + a ) all=1 ;; + esac + done + shift $((OPTIND-1)) + + # Remove all comments and any blank lines + filters="/^[[:blank:]]*$/d;/^[[:blank:]]*#/d;/#.*/d;" + + if [ -z "${readCron}" ]; then + # Read in the database config ... + # - Remove any lines that do not match the expected database spec format(s) + # - [<DatabaseType/>=]<Hostname/>/<DatabaseName/> + # - [<DatabaseType/>=]<Hostname/>:<Port/>/<DatabaseName/> + filters+="/^[a-zA-Z0-9=_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$/!d;" + if [ -z "${all}" ]; then + # Remove any database configs that are not for the current container type + # Database configs that do not define the database type are assumed to be for the current container type + filters+="/\(^[a-zA-Z0-9_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$\)\|\(^${CONTAINER_TYPE}=\)/!d;" + fi + else + # Read in the cron config ... + # - Remove any lines that MATCH expected database spec format(s), + # leaving, what should be, cron tabs. + filters+="/^[a-zA-Z0-9=_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$/d;" + fi + + if [ -f ${BACKUP_CONF} ]; then + if [ -z "${quiet}" ]; then + echo "Reading backup config from ${BACKUP_CONF} ..."
>&2 + fi + _value=$(sed "${filters}" ${BACKUP_CONF}) + fi + + if [ -z "${_value}" ] && [ -z "${readCron}" ]; then + # Backward compatibility + if [ -z "${quiet}" ]; then + echo "Reading backup config from environment variables ..." >&2 + fi + _value="${DATABASE_SERVICE_NAME}${DEFAULT_PORT:+:${DEFAULT_PORT}}${POSTGRESQL_DATABASE:+/${POSTGRESQL_DATABASE}}" + fi + + echo "${_value}" + ) +} + +function getNumBackupsToRetain(){ + ( + _count=0 + _backupType=${1:-$(getBackupType)} + + case "${_backupType}" in + daily) + _count=${DAILY_BACKUPS} + if (( ${_count} <= 0 )) && (( ${WEEKLY_BACKUPS} <= 0 )) && (( ${MONTHLY_BACKUPS} <= 0 )); then + _count=1 + fi + ;; + weekly) + _count=${WEEKLY_BACKUPS} + ;; + monthly) + _count=${MONTHLY_BACKUPS} + ;; + *) + _count=${NUM_BACKUPS} + ;; + esac + + echo "${_count}" + ) +} + +function getUsername(){ + ( + _databaseSpec=${1} + _hostname=$(getHostname ${_databaseSpec}) + _paramName=$(getHostUserParam ${_hostname}) + # Backward compatibility ... + _username="${!_paramName:-${DATABASE_USER}}" + echo ${_username} + ) +} + +function getPassword(){ + ( + _databaseSpec=${1} + _hostname=$(getHostname ${_databaseSpec}) + _paramName=$(getHostPasswordParam ${_hostname}) + # Backward compatibility ... + _password="${!_paramName:-${DATABASE_PASSWORD}}" + echo ${_password} + ) +} + +function isLastDayOfMonth(){ + ( + _date=${1:-$(date)} + _day=$(date -d "${_date}" +%-d) + _month=$(date -d "${_date}" +%-m) + _lastDayOfMonth=$(date -d "${_month}/1 + 1 month - 1 day" "+%-d") + + if (( ${_day} == ${_lastDayOfMonth} )); then + return 0 + else + return 1 + fi + ) +} + +function isLastDayOfWeek(){ + ( + # We're calling Sunday the last day of the week in this case. + _date=${1:-$(date)} + _dayOfWeek=$(date -d "${_date}" +%u) + + if (( ${_dayOfWeek} == 7 )); then + return 0 + else + return 1 + fi + ) +} + +function getBackupType(){ + ( + _backupType="" + if rollingStrategy; then + if isLastDayOfMonth && (( "${MONTHLY_BACKUPS}" > 0 )); then + _backupType="monthly" + elif isLastDayOfWeek; then + _backupType="weekly" + else + _backupType="daily" + fi + fi + echo "${_backupType}" + ) +} + +function rollingStrategy(){ + if [[ "${BACKUP_STRATEGY}" == "rolling" ]] && (( "${WEEKLY_BACKUPS}" >= 0 )) && (( "${MONTHLY_BACKUPS}" >= 0 )); then + return 0 + else + return 1 + fi +} + +function dailyStrategy(){ + if [[ "${BACKUP_STRATEGY}" == "daily" ]] || (( "${WEEKLY_BACKUPS}" < 0 )); then + return 0 + else + return 1 + fi +} + +function listSettings(){ + _backupDirectory=${1:-$(createBackupFolder -g)} + _databaseList=${2:-$(readConf -q)} + _yellow='\e[33m' + _nc='\e[0m' # No Color + _notConfigured="${_yellow}not configured${_nc}" + + echo -e \\n"Settings:" + _mode=$(getMode 2>/dev/null) + echo -e "- Run mode: ${_mode}"\\n + + if rollingStrategy; then + echo "- Backup strategy: rolling" + fi + if dailyStrategy; then + echo "- Backup strategy: daily" + fi + if ! rollingStrategy && !
dailyStrategy; then + echoYellow "- Backup strategy: Unknown backup strategy; ${BACKUP_STRATEGY}" + _configurationError=1 + fi + backupType=$(getBackupType) + if [ -z "${backupType}" ]; then + echo "- Current backup type: flat daily" + else + echo "- Current backup type: ${backupType}" + fi + echo "- Backups to retain:" + if rollingStrategy; then + echo " - Daily: $(getNumBackupsToRetain daily)" + echo " - Weekly: $(getNumBackupsToRetain weekly)" + echo " - Monthly: $(getNumBackupsToRetain monthly)" + else + echo " - Total: $(getNumBackupsToRetain)" + fi + echo "- Current backup folder: ${_backupDirectory}" + + if [[ "${_mode}" != ${ONCE} ]]; then + if [[ "${_mode}" == ${CRON} ]] || [[ "${_mode}" == ${SCHEDULED} ]]; then + _backupSchedule=$(readConf -cq) + echo "- Time Zone: $(date +"%Z %z")" + fi + _backupSchedule=$(formatList "${_backupSchedule:-${BACKUP_PERIOD}}") + echo -e \\n"- Schedule:" + echo "${_backupSchedule}" + fi + + if [[ "${CONTAINER_TYPE}" == "${UNKNOWN_DB}" ]] && [ -z "${_allowNullPlugin}" ]; then + echoRed "\n- Container Type: ${CONTAINER_TYPE}" + _configurationError=1 + else + echo -e "\n- Container Type: ${CONTAINER_TYPE}" + fi + + _databaseList=$(formatList "${_databaseList}") + echo "- Databases (filtered by container type):" + echo "${_databaseList}" + echo + + if [ -z "${FTP_URL}" ]; then + echo -e "- FTP server: ${_notConfigured}" + else + echo "- FTP server: ${FTP_URL}" + fi + + if [ -z "${WEBHOOK_URL}" ]; then + echo -e "- Webhook Endpoint: ${_notConfigured}" + else + echo "- Webhook Endpoint: ${WEBHOOK_URL}" + fi + + if [ -z "${ENVIRONMENT_FRIENDLY_NAME}" ]; then + echo -e "- Environment Friendly Name: ${_notConfigured}" + else + echo -e "- Environment Friendly Name: ${ENVIRONMENT_FRIENDLY_NAME}" + fi + if [ -z "${ENVIRONMENT_NAME}" ]; then + echo -e "- Environment Name (Id): ${_notConfigured}" + else + echo "- Environment Name (Id): ${ENVIRONMENT_NAME}" + fi + + if [ ! -z "${_configurationError}" ]; then + echo + logError "Configuration error! The script will exit." + sleep 5 + exit 1 + fi + echo +} + +function isScheduled(){ + ( + if [ ! -z "${SCHEDULED_RUN}" ]; then + return 0 + else + return 1 + fi + ) +} + +function isScripted(){ + ( + if [ ! -z "${SCHEDULED_RUN}" ]; then + return 0 + else + return 1 + fi + ) +} + +function restoreMode(){ + ( + if [ ! -z "${_restoreDatabase}" ]; then + return 0 + else + return 1 + fi + ) +} + +function verifyMode(){ + ( + if [ ! -z "${_verifyBackup}" ]; then + return 0 + else + return 1 + fi + ) +} + +function pruneMode(){ + ( + if [ ! -z "${RUN_PRUNE}" ]; then + return 0 + else + return 1 + fi + ) +} + +function cronMode(){ + ( + cronTabs=$(readConf -cq) + if isInstalled "go-crond" && [ ! -z "${cronTabs}" ]; then + return 0 + else + return 1 + fi + ) +} + +function runOnce() { + if [ ! -z "${RUN_ONCE}" ]; then + return 0 + else + return 1 + fi +} + +function getMode(){ + ( + unset _mode + + if pruneMode; then + _mode="${PRUNE}" + fi + + if [ -z "${_mode}" ] && restoreMode; then + _mode="${RESTORE}" + fi + + if [ -z "${_mode}" ] && verifyMode; then + # Determine if this is a scheduled verification or a manual one. + if isScheduled; then + if cronMode; then + _mode="${SCHEDULED_VERIFY}" + else + _mode="${ERROR}" + logError "Scheduled mode cannot be used without cron being installed and at least one cron tab being defined in ${BACKUP_CONF}." 
+ fi + else + _mode="${VERIFY}" + fi + fi + + if [ -z "${_mode}" ] && runOnce; then + _mode="${ONCE}" + fi + + if [ -z "${_mode}" ] && isScheduled; then + if cronMode; then + _mode="${SCHEDULED}" + else + _mode="${ERROR}" + logError "Scheduled mode cannot be used without cron being installed and at least one cron tab being defined in ${BACKUP_CONF}." + fi + fi + + if [ -z "${_mode}" ] && cronMode; then + _mode="${CRON}" + fi + + if [ -z "${_mode}" ]; then + _mode="${LEGACY}" + fi + + echo "${_mode}" + ) +} + +function validateOperation(){ + ( + _databaseSpec=${1} + _mode=${2} + _rtnCd=0 + + if [[ "${_mode}" == ${RESTORE} ]] && ! isForContainerType ${_databaseSpec}; then + echoRed "\nYou are attempting to restore database '${_databaseSpec}' from a ${CONTAINER_TYPE} container." + echoRed "Cannot continue with the restore. It must be initiated from the matching container type." + _rtnCd=1 + fi + + return ${_rtnCd} + ) +} +# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.container.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.container.utils new file mode 100644 index 000000000..3bb4115b6 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.container.utils @@ -0,0 +1,57 @@ +#!/bin/bash +# ================================================================================================================= +# Container Utility Functions: +# ----------------------------------------------------------------------------------------------------------------- +function isPostgres(){ + ( + if isInstalled "psql"; then + return 0 + else + return 1 + fi + ) +} + +function isMongo(){ + ( + if isInstalled "mongo"; then + return 0 + else + return 1 + fi + ) +} + +function getContainerType(){ + ( + local _containerType=${UNKNOWN_DB} + _rtnCd=0 + + if isPostgres; then + _containerType=${POSTGRE_DB} + elif isMongo; then + _containerType=${MONGO_DB} + else + _containerType=${UNKNOWN_DB} + _rtnCd=1 + fi + + echo "${_containerType}" + return ${_rtnCd} + ) +} + +function isForContainerType(){ + ( + _databaseSpec=${1} + _databaseType=$(getDatabaseType ${_databaseSpec}) + + # If the database type has not been defined, assume the database spec is valid for the current database container type. + if [ -z "${_databaseType}" ] || [[ "${_databaseType}" == "${CONTAINER_TYPE}" ]]; then + return 0 + else + return 1 + fi + ) +} +# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.file.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.file.utils new file mode 100644 index 000000000..79dae39e6 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.file.utils @@ -0,0 +1,233 @@ +#!/bin/bash +# ================================================================================================================= +# File Utility Functions +# ----------------------------------------------------------------------------------------------------------------- +function makeDirectory() +{ + ( + # Creates directories with permissions recursively.
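# Note: each missing ancestor is created with 'mkdir --mode=g+w' (group-writable), presumably so the container user - which may run with an arbitrary, group-based UID on OpenShift - can write to it.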
+ # ${1} is the directory to be created + # Inspired by https://unix.stackexchange.com/questions/49263/recursive-mkdir + directory="${1}" + test $# -eq 1 || { echo "Function 'makeDirectory' can create only one directory (with its parent directories)."; exit 1; } + test -d "${directory}" && return 0 + test -d "$(dirname "${directory}")" || { makeDirectory "$(dirname "${directory}")" || return 1; } + test -d "${directory}" || { mkdir --mode=g+w "${directory}" || return 1; } + return 0 + ) +} + +function finalizeBackup(){ + ( + _filename=${1} + _inProgressFilename="${_filename}${IN_PROGRESS_BACKUP_FILE_EXTENSION}" + _finalFilename="${_filename}${BACKUP_FILE_EXTENSION}" + + if [ -f ${_inProgressFilename} ]; then + mv "${_inProgressFilename}" "${_finalFilename}" + echo "${_finalFilename}" + fi + ) +} + +function listExistingBackups(){ + ( + local _backupDir=${1:-${ROOT_BACKUP_DIR}} + local database + local databases=$(readConf -q) + local output="\nDatabase,Current Size" + + for database in ${databases}; do + if isForContainerType ${database}; then + output+="\n${database},$(getDbSize "${database}")" + fi + done + + echoMagenta "\n================================================================================================================================" + echoMagenta "Current Backups:" + echoMagenta "\n$(echo -ne "${output}" | column -t -s ,)" + echoMagenta "\n$(df -h ${_backupDir})" + echoMagenta "--------------------------------------------------------------------------------------------------------------------------------" + du -ah --time ${_backupDir} + echoMagenta "================================================================================================================================\n" + ) +} + +function getDirectoryName(){ + ( + local path=${1} + path="${path%"${path##*[!/]}"}" + local name="${path##*/}" + echo "${name}" + ) +} + +function getBackupTypeFromPath(){ + ( + local path=${1} + path="${path%"${path##*[!/]}"}" + path="$(dirname "${path}")" + local backupType=$(getDirectoryName "${path}") + echo "${backupType}" + ) +} + +function prune(){ + ( + local database + local backupDirs + local backupDir + local backupType + local backupTypes + local pruneBackup + unset backupTypes + unset backupDirs + unset pruneBackup + + local databases=$(readConf -q) + if rollingStrategy; then + backupTypes="daily weekly monthly" + for backupType in ${backupTypes}; do + backupDirs="${backupDirs} $(createBackupFolder -g ${backupType})" + done + else + backupDirs=$(createBackupFolder -g) + fi + + if [ ! -z "${_fromBackup}" ]; then + pruneBackup="$(findBackup "" "${_fromBackup}")" + while [ ! -z "${pruneBackup}" ]; do + echoYellow "\nAbout to delete backup file: ${pruneBackup}" + waitForAnyKey + rm -rfvd "${pruneBackup}" + + # Quietly delete any empty directories that are left behind ...
+ find ${ROOT_BACKUP_DIR} -type d -empty -delete > /dev/null 2>&1 + pruneBackup="$(findBackup "" "${_fromBackup}")" + done + else + for backupDir in ${backupDirs}; do + for database in ${databases}; do + unset backupType + if rollingStrategy; then + backupType=$(getBackupTypeFromPath "${backupDir}") + fi + pruneBackups "${backupDir}" "${database}" "${backupType}" + done + done + fi + ) +} + +function pruneBackups(){ + ( + _backupDir=${1} + _databaseSpec=${2} + _backupType=${3:-''} + _pruneDir="$(dirname "${_backupDir}")" + _numBackupsToRetain=$(getNumBackupsToRetain "${_backupType}") + _coreFilename=$(generateCoreFilename ${_databaseSpec}) + + if [ -d ${_pruneDir} ]; then + let _index=${_numBackupsToRetain}+1 + _filesToPrune=$(find ${_pruneDir}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort -r | tail -n +${_index} | sed 's~^.* \(.*$\)~\1~') + + if [ ! -z "${_filesToPrune}" ]; then + echoYellow "\nPruning ${_coreFilename} backups from ${_pruneDir} ..." + echo "${_filesToPrune}" | xargs rm -rfvd + + # Quietly delete any empty directories that are left behind ... + find ${ROOT_BACKUP_DIR} -type d -empty -delete > /dev/null 2>&1 + fi + fi + ) +} + +function touchBackupFile() { + ( + # For safety, make absolutely certain the directory and file exist. + # The pruning process removes empty directories, so if there is an error + # during a backup the backup directory could be deleted. + _backupFile=${1} + _backupDir="${_backupFile%/*}" + makeDirectory ${_backupDir} && touch ${_backupFile} + ) +} + +function findBackup(){ + ( + _databaseSpec=${1} + _fileName=${2} + + # If no backup file was specified, find the most recent for the database. + # Otherwise treat the value provided as a filter to find the most recent backup file matching the filter. + if [ -z "${_fileName}" ]; then + _coreFilename=$(generateCoreFilename ${_databaseSpec}) + _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~') + else + _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_fileName} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~') + fi + + echo "${_fileName}" + ) +} + +function createBackupFolder(){ + ( + local OPTIND + local genOnly + unset genOnly + while getopts g FLAG; do + case $FLAG in + g ) genOnly=1 ;; + esac + done + shift $((OPTIND-1)) + + _backupTypeDir="${1:-$(getBackupType)}" + if [ ! -z "${_backupTypeDir}" ]; then + _backupTypeDir=${_backupTypeDir}/ + fi + + _backupDir="${ROOT_BACKUP_DIR}${_backupTypeDir}`date +\%Y-\%m-\%d`/" + + # Don't actually create the folder if we're just generating it for printing the configuration. + if [ -z "${genOnly}" ]; then + echo "Making backup directory ${_backupDir} ..." >&2 + if ! makeDirectory ${_backupDir}; then + logError "Failed to create backup directory ${_backupDir}."
+ exit 1; + fi; + fi + + echo ${_backupDir} + ) +} + +function generateFilename(){ + ( + _backupDir=${1} + _databaseSpec=${2} + _coreFilename=$(generateCoreFilename ${_databaseSpec}) + _filename="${_backupDir}${_coreFilename}_`date +\%Y-\%m-\%d_%H-%M-%S`" + echo ${_filename} + ) +} + +function generateCoreFilename(){ + ( + _databaseSpec=${1} + _hostname=$(getHostname ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _coreFilename="${_hostname}-${_database}" + echo ${_coreFilename} + ) +} + +function getFileSize(){ + ( + _filename=${1} + echo $(du -h "${_filename}" | awk '{print $1}') + ) +} +# ================================================================================================================= \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.ftp b/openshift-v4/templates/backup-container-2.0.0/docker/backup.ftp new file mode 100644 index 000000000..d0a935cf9 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.ftp @@ -0,0 +1,23 @@ +#!/bin/bash +# ================================================================================================================= +# FTP Support Functions: +# ----------------------------------------------------------------------------------------------------------------- +function ftpBackup(){ + ( + if [ -z "${FTP_URL}" ] ; then + return 0 + fi + + _filename=${1} + _filenameWithExtension="${_filename}${BACKUP_FILE_EXTENSION}" + echo "Transferring ${_filenameWithExtension} to ${FTP_URL}" + curl --ftp-ssl -T ${_filenameWithExtension} --user ${FTP_USER}:${FTP_PASSWORD} ${FTP_URL} + + if [ ${?} -eq 0 ]; then + logInfo "Successfully transferred ${_filenameWithExtension} to the FTP server" + else + logError "Failed to transfer ${_filenameWithExtension} with the exit code ${?}" + fi + ) +} +# ================================================================================================================= diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.logging b/openshift-v4/templates/backup-container-2.0.0/docker/backup.logging new file mode 100644 index 000000000..50449f0ae --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.logging @@ -0,0 +1,111 @@ +#!/bin/bash +# ================================================================================================================= +# Logging Functions: +# ----------------------------------------------------------------------------------------------------------------- +function debugMsg (){ + _msg="${@}" + if [ "${BACKUP_LOG_LEVEL}" == "debug" ]; then + echoGreen "$(date) - [DEBUG] - ${@}" >&2 + fi +} + +function echoRed (){ + _msg="${@}" + _red='\e[31m' + _nc='\e[0m' # No Color + echo -e "${_red}${_msg}${_nc}" +} + +function echoYellow (){ + _msg="${@}" + _yellow='\e[33m' + _nc='\e[0m' # No Color + echo -e "${_yellow}${_msg}${_nc}" +} + +function echoBlue (){ + _msg="${@}" + _blue='\e[34m' + _nc='\e[0m' # No Color + echo -e "${_blue}${_msg}${_nc}" +} + +function echoGreen (){ + _msg="${@}" + _green='\e[32m' + _nc='\e[0m' # No Color + echo -e "${_green}${_msg}${_nc}" +} + +function echoMagenta (){ + _msg="${@}" + _magenta='\e[35m' + _nc='\e[0m' # No Color + echo -e "${_magenta}${_msg}${_nc}" +} + +function logInfo(){ + ( + infoMsg="${1}" + echo -e "${infoMsg}" + postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \ + "${ENVIRONMENT_NAME}" \ + "INFO" \ + "${infoMsg}" + ) +} + +function logWarn(){ + ( + warnMsg="${1}" + echoYellow "${warnMsg}" + postMsgToWebhook 
"${ENVIRONMENT_FRIENDLY_NAME}" \ + "${ENVIRONMENT_NAME}" \ + "WARN" \ + "${warnMsg}" + ) +} + +function logError(){ + ( + errorMsg="${1}" + echoRed "[!!ERROR!!] - ${errorMsg}" >&2 + postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \ + "${ENVIRONMENT_NAME}" \ + "ERROR" \ + "${errorMsg}" + ) +} + +function getWebhookPayload(){ + _payload=$(eval "cat <<-EOF +$(<${WEBHOOK_TEMPLATE}) +EOF +") + echo "${_payload}" +} + +function formatWebhookMsg(){ + ( + # Escape all double quotes + # Escape all newlines + filters='s~"~\\"~g;:a;N;$!ba;s~\n~\\n~g;' + _value=$(echo "${1}" | sed "${filters}") + echo "${_value}" + ) +} + +function postMsgToWebhook(){ + ( + if [ -z "${WEBHOOK_URL}" ] && [ -f ${WEBHOOK_TEMPLATE} ]; then + return 0 + fi + + projectFriendlyName=${1} + projectName=${2} + statusCode=${3} + message=$(formatWebhookMsg "${4}") + curl -s -X POST -H 'Content-Type: application/json' --data "$(getWebhookPayload)" "${WEBHOOK_URL}" > /dev/null + ) +} +# ================================================================================================================= \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.misc.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.misc.utils new file mode 100644 index 000000000..cab2ac3e3 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.misc.utils @@ -0,0 +1,30 @@ +#!/bin/bash +# ================================================================================================================= +# General Utility Functions: +# ----------------------------------------------------------------------------------------------------------------- +function waitForAnyKey() { + read -n1 -s -r -p $'\e[33mWould you like to continue?\e[0m Press Ctrl-C to exit, or any other key to continue ...' key + echo -e \\n + + # If we get here the user did NOT press Ctrl-C ... + return 0 +} + +function formatList(){ + ( + filters='s~^~ - ~;' + _value=$(echo "${1}" | sed "${filters}") + echo "${_value}" + ) +} + +function isInstalled(){ + rtnVal=$(type "$1" >/dev/null 2>&1) + rtnCd=$? + if [ ${rtnCd} -ne 0 ]; then + return 1 + else + return 0 + fi +} +# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.mongo.plugin b/openshift-v4/templates/backup-container-2.0.0/docker/backup.mongo.plugin new file mode 100644 index 000000000..0f5583c60 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.mongo.plugin @@ -0,0 +1,226 @@ +#!/bin/bash +# ================================================================================================================= +# Mongo Backup and Restore Functions: +# - Dynamically loaded as a plug-in +# ----------------------------------------------------------------------------------------------------------------- +function onBackupDatabase(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _backupFile=${2} + + _hostname=$(getHostname ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${_databaseSpec}) + _portArg=${_port:+"--port=${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..." 
+ + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + mongodump -h "${_hostname}" -d "${_database}" ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --quiet --gzip --archive=${_backupFile} + return ${?} + ) +} + +function onRestoreDatabase(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _fileName=${2} + _adminPassword=${3} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port=${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2 + + # ToDo: + # - Add support for restoring to a different database. + # The following implementation only supports restoring to a database of the same name, + # unlike the postgres implementation that allows the database to be restored to a database of a different + # name for testing. + # Ref: https://stackoverflow.com/questions/36321899/mongorestore-to-a-different-database + + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + mongorestore --drop -h ${_hostname} -d "${_database}" ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --gzip --archive=${_fileName} --nsInclude="*" + return ${?} + ) +} + +function onStartServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + # Start a local MongoDb instance + MONGODB_DATABASE=$(getDatabaseName "${_databaseSpec}") \ + MONGODB_USER=$(getUsername "${_databaseSpec}") \ + MONGODB_PASSWORD=$(getPassword "${_databaseSpec}") \ + MONGODB_ADMIN_PASSWORD=$(getPassword "${_databaseSpec}") \ + run-mongod >/dev/null 2>&1 & + ) +} + +function onStopServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _port=$(getPort ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=admin + _password=$(getPassword ${_databaseSpec}) + + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + mongo admin ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --quiet --eval "db.shutdownServer()" + + # Delete the database files and configuration + echo -e "Cleaning up ...\n" >&2 + rm -rf /var/lib/mongodb/data/* + ) +} + +function onPingDbServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? 
) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}} + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + if mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval='quit()' >/dev/null 2>&1; then + return 0 + else + return 1 + fi + ) +} + +function onVerifyBackup(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname -l ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort -l ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}} + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + collections=$(mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval 'var dbs = [];dbs = db.getCollectionNames();for (i in dbs){ print(db.dbs[i]);}';) + rtnCd=${?} + + # Get the size of the restored database + if (( ${rtnCd} == 0 )); then + size=$(getDbSize -l "${_databaseSpec}") + rtnCd=${?} + fi + + if (( ${rtnCd} == 0 )); then + numResults=$(echo "${collections}"| wc -l) + if [[ ! -z "${collections}" ]] && (( numResults >= 1 )); then + # All good + verificationLog="\nThe restored database contained ${numResults} collections, and is ${size} in size." + else + # Not so good + verificationLog="\nNo collections were found in the restored database ${_database}." + rtnCd="3" + fi + fi + + echo ${verificationLog} + return ${rtnCd} + ) +} + +function onGetDbSize(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? 
) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}} + _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"} + size=$(mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval 'printjson(db.stats().fsTotalSize)') + rtnCd=${?} + + echo ${size} + return ${rtnCd} + ) +} +# ================================================================================================================= \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.null.plugin b/openshift-v4/templates/backup-container-2.0.0/docker/backup.null.plugin new file mode 100644 index 000000000..14ceed050 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.null.plugin @@ -0,0 +1,195 @@ +#!/bin/bash +# ================================================================================================================= +# Null Backup and Restore Functions: +# - Dynamically loaded as a plug-in +# - Refer to existing plug-ins for implementation examples. +# ----------------------------------------------------------------------------------------------------------------- +function onBackupDatabase(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _backupFile=${2} + + _hostname=$(getHostname ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..." + + echoRed "[backup.null.plugin] onBackupDatabase - Not Implemented" + # echoGreen "Starting database backup ..." + # Add your database specific backup operation(s) here. + return ${?} + ) +} + +function onRestoreDatabase(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _fileName=${2} + _adminPassword=${3} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2 + + echoRed "[backup.null.plugin] onRestoreDatabase - Not Implemented" + # Add your database specific restore operation(s) here. + return ${?} + ) +} + +function onStartServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + echoRed "[backup.null.plugin] onStartServer - Not Implemented" + # Add your NON-BLOCKING database specific startup operation(s) here. + # - Start the database server as a background job. 
+ ) +} + +function onStopServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + echoRed "[backup.null.plugin] onStopServer - Not Implemented" + + # echo "Shutting down..." + # Add your database specific shutdown operation(s) here. + + # Delete the database files and configuration + # echo -e "Cleaning up ...\n" >&2 + # Add your database specific cleanup operation(s) here. + ) +} + +function onPingDbServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + echoRed "[backup.null.plugin] onPingDbServer - Not Implemented" + # Add your database specific ping operation(s) here. + # if ; then + # return 0 + # else + # return 1 + # fi + ) +} + +function onVerifyBackup(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname -l ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort -l ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + echoRed "[backup.null.plugin] onVerifyBackup - Not Implemented" + # Add your database specific verification operation(s) here. + + # echo ${verificationLog} + # return ${rtnCd} + ) +} + +function onGetDbSize(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"--port ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + echoRed "[backup.null.plugin] onGetDbSize - Not Implemented" + # Add your database specific get size operation(s) here. + + # echo ${size} + # return ${rtnCd} + ) +} +# ================================================================================================================= diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.postgres.plugin b/openshift-v4/templates/backup-container-2.0.0/docker/backup.postgres.plugin new file mode 100644 index 000000000..e5248ac17 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.postgres.plugin @@ -0,0 +1,247 @@ +#!/bin/bash +# ================================================================================================================= +# Postgres Backup and Restore Functions: +# - Dynamically loaded as a plug-in +# ----------------------------------------------------------------------------------------------------------------- +function onBackupDatabase(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? 
) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _backupFile=${2} + + _hostname=$(getHostname ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${_databaseSpec}) + _portArg=${_port:+"-p ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..." + + PGPASSWORD=${_password} pg_dump -Fp -h "${_hostname}" ${_portArg} -U "${_username}" "${_database}" | gzip > ${_backupFile} + return ${PIPESTATUS[0]} + ) +} + +function onRestoreDatabase(){ + ( + local OPTIND + local unset quiet + local unset flags + while getopts :q FLAG; do + case $FLAG in + q ) + quiet=1 + flags+="-${FLAG} " + ;; + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _fileName=${2} + _adminPassword=${3} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"-p ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2 + + export PGPASSWORD=${_adminPassword} + _rtnCd=0 + + # Drop + if (( ${_rtnCd} == 0 )); then + psql -h "${_hostname}" ${_portArg} -ac "DROP DATABASE \"${_database}\";" + _rtnCd=${?} + echo + fi + + # Create + if (( ${_rtnCd} == 0 )); then + psql -h "${_hostname}" ${_portArg} -ac "CREATE DATABASE \"${_database}\";" + _rtnCd=${?} + echo + fi + + # Grant User Access + if (( ${_rtnCd} == 0 )); then + psql -h "${_hostname}" ${_portArg} -ac "GRANT ALL ON DATABASE \"${_database}\" TO \"${_username}\";" + _rtnCd=${?} + echo + fi + + # Restore + if (( ${_rtnCd} == 0 )); then + gunzip -c "${_fileName}" | psql -v ON_ERROR_STOP=1 -x -h "${_hostname}" ${_portArg} -d "${_database}" + # Get the status code from psql specifically. ${?} would only provide the status of the last command, psql in this case. + _rtnCd=${PIPESTATUS[1]} + fi + + # List tables + if [ -z "${quiet}" ] && (( ${_rtnCd} == 0 )); then + psql -h "${_hostname}" ${_portArg} -d "${_database}" -c "\d" + _rtnCd=${?} + fi + + return ${_rtnCd} + ) +} + +function onStartServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + # Start a local PostgreSql instance + POSTGRESQL_DATABASE=$(getDatabaseName "${_databaseSpec}") \ + POSTGRESQL_USER=$(getUsername "${_databaseSpec}") \ + POSTGRESQL_PASSWORD=$(getPassword "${_databaseSpec}") \ + run-postgresql >/dev/null 2>&1 & + ) +} + +function onStopServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + # Stop the local PostgreSql instance + pg_ctl stop -D /var/lib/pgsql/data/userdata + + # Delete the database files and configuration + echo -e "Cleaning up ...\n" + rm -rf /var/lib/pgsql/data/userdata + ) +} + +function onPingDbServer(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? 
) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"-p ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + if PGPASSWORD=${_password} psql -h ${_hostname} ${_portArg} -U ${_username} -q -d ${_database} -c 'SELECT 1' >/dev/null 2>&1; then + return 0 + else + return 1 + fi + ) +} + +function onVerifyBackup(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname -l ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort -l ${_databaseSpec}) + _portArg=${_port:+"-p ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + debugMsg "backup.postgres.plugin - onVerifyBackup" + tables=$(psql -h "${_hostname}" ${_portArg} -d "${_database}" -t -c "SELECT table_name FROM information_schema.tables WHERE table_schema='${TABLE_SCHEMA}' AND table_type='BASE TABLE';") + rtnCd=${?} + + # Get the size of the restored database + if (( ${rtnCd} == 0 )); then + size=$(getDbSize -l "${_databaseSpec}") + rtnCd=${?} + fi + + if (( ${rtnCd} == 0 )); then + numResults=$(echo "${tables}"| wc -l) + if [[ ! -z "${tables}" ]] && (( numResults >= 1 )); then + # All good + verificationLog="\nThe restored database contained ${numResults} tables, and is ${size} in size." + else + # Not so good + verificationLog="\nNo tables were found in the restored database." + rtnCd="3" + fi + fi + + echo ${verificationLog} + return ${rtnCd} + ) +} + +function onGetDbSize(){ + ( + local OPTIND + local unset flags + while getopts : FLAG; do + case $FLAG in + ? ) flags+="-${OPTARG} ";; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + + _hostname=$(getHostname ${flags} ${_databaseSpec}) + _database=$(getDatabaseName ${_databaseSpec}) + _port=$(getPort ${flags} ${_databaseSpec}) + _portArg=${_port:+"-p ${_port}"} + _username=$(getUsername ${_databaseSpec}) + _password=$(getPassword ${_databaseSpec}) + + size=$(PGPASSWORD=${_password} psql -h "${_hostname}" ${_portArg} -U "${_username}" -d "${_database}" -t -c "SELECT pg_size_pretty(pg_database_size(current_database())) as size;") + rtnCd=${?} + + echo ${size} + return ${rtnCd} + ) +} +# ================================================================================================================= diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.server.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.server.utils new file mode 100644 index 000000000..9e938a150 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.server.utils @@ -0,0 +1,39 @@ +#!/bin/bash +# ================================================================================================================= +# Backup Server Utility Functions: +# ----------------------------------------------------------------------------------------------------------------- +function startCron(){ + logInfo "Starting backup server in cron mode ..." 
+ listSettings + echoBlue "Starting go-crond as a background task ...\n" + CRON_CMD="go-crond -v --default-user=${UID} --allow-unprivileged ${BACKUP_CONF}" + exec ${CRON_CMD} & + wait +} + +function startLegacy(){ + ( + while true; do + runBackups + + echoYellow "Sleeping for ${BACKUP_PERIOD} ...\n" + sleep ${BACKUP_PERIOD} + done + ) +} + +function shutDown(){ + jobIds=$(jobs | awk -F '[][]' '{print $2}' ) + for jobId in ${jobIds} ; do + echo "Shutting down background job '${jobId}' ..." + kill %${jobId} + done + + if [ ! -z "${jobIds}" ]; then + echo "Waiting for any background jobs to complete ..." + fi + wait + + exit 0 +} +# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.settings b/openshift-v4/templates/backup-container-2.0.0/docker/backup.settings new file mode 100644 index 000000000..7de738c8a --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.settings @@ -0,0 +1,55 @@ +#!/bin/bash +# ====================================================================================== +# Default Settings +# -------------------------------------------------------------------------------------- +export BACKUP_FILE_EXTENSION=".sql.gz" +export IN_PROGRESS_BACKUP_FILE_EXTENSION=".sql.gz.in_progress" +export DEFAULT_PORT=${POSTGRESQL_PORT_NUM:-5432} +export DATABASE_SERVICE_NAME=${DATABASE_SERVICE_NAME:-postgresql} +export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-my_postgres_db} +export TABLE_SCHEMA=${TABLE_SCHEMA:-public} + +# Supports: +# - daily +# - rolling +export BACKUP_STRATEGY=$(echo "${BACKUP_STRATEGY:-rolling}" | tr '[:upper:]' '[:lower:]') +export BACKUP_PERIOD=${BACKUP_PERIOD:-1d} +export ROOT_BACKUP_DIR=${ROOT_BACKUP_DIR:-${BACKUP_DIR:-/backups/}} +export BACKUP_CONF=${BACKUP_CONF:-backup.conf} + +# Used to prune the total number of backups when using the daily backup strategy. +# Default provides for one full month of backups +export NUM_BACKUPS=${NUM_BACKUPS:-31} + +# Used to prune the total number of backups when using the rolling backup strategy.
+# Defaults provide for: +# - A week's worth of daily backups +# - A month's worth of weekly backups +# - The previous month's backup +export DAILY_BACKUPS=${DAILY_BACKUPS:-6} +export WEEKLY_BACKUPS=${WEEKLY_BACKUPS:-4} +export MONTHLY_BACKUPS=${MONTHLY_BACKUPS:-1} + +# Webhook defaults +WEBHOOK_TEMPLATE=${WEBHOOK_TEMPLATE:-webhook-template.json} + +# Modes: +export ONCE="once" +export SCHEDULED="scheduled" +export RESTORE="restore" +export VERIFY="verify" +export CRON="cron" +export LEGACY="legacy" +export ERROR="error" +export SCHEDULED_VERIFY="scheduled-verify" +export PRUNE="prune" + +# Supported Database Containers +export UNKNOWN_DB="null" +export MONGO_DB="mongo" +export POSTGRE_DB="postgres" +export CONTAINER_TYPE="$(getContainerType)" + +# Other: +export DATABASE_SERVER_TIMEOUT=${DATABASE_SERVER_TIMEOUT:-120} +# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.sh b/openshift-v4/templates/backup-container-2.0.0/docker/backup.sh new file mode 100755 index 000000000..f1e17d87a --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.sh @@ -0,0 +1,140 @@ +#!/bin/bash + +# ====================================================================================== +# Imports +# -------------------------------------------------------------------------------------- +. ./backup.usage # Usage information +. ./backup.logging # Logging functions +. ./backup.config.utils # Configuration functions +. ./backup.container.utils # Container Utility Functions +. ./backup.ftp # FTP Support functions +. ./backup.misc.utils # General Utility Functions +. ./backup.file.utils # File Utility Functions +. ./backup.utils # Primary Database Backup and Restore Functions +. ./backup.server.utils # Backup Server Utility Functions +. ./backup.settings # Default Settings +# ====================================================================================== + +# ====================================================================================== +# Initialization: +# -------------------------------------------------------------------------------------- +trap shutDown EXIT TERM + +# Load database plug-in based on the container type ... +. ./backup.${CONTAINER_TYPE}.plugin > /dev/null 2>&1 +if [[ ${?} != 0 ]]; then + echoRed "backup.${CONTAINER_TYPE}.plugin not found." + + # Default to null plugin. + export CONTAINER_TYPE=${UNKNOWN_DB} + . ./backup.${CONTAINER_TYPE}.plugin > /dev/null 2>&1 +fi + +while getopts nclr:v:f:1spha: FLAG; do + case $FLAG in + n) + # Allow null database plugin ... + # Without this flag loading the null plugin is considered a configuration error. + # The null plugin can be used for testing. + export _allowNullPlugin=1 + ;; + c) + echoBlue "\nListing configuration settings ..." + listSettings + exit 0 + ;; + l) + listExistingBackups ${ROOT_BACKUP_DIR} + exit 0 + ;; + r) + # Trigger restore mode ... + export _restoreDatabase=${OPTARG} + ;; + v) + # Trigger verify mode ... + export _verifyBackup=${OPTARG} + ;; + f) + # Optionally specify the backup file to verify or restore from ... + export _fromBackup=${OPTARG} + ;; + 1) + export RUN_ONCE=1 + ;; + s) + export SCHEDULED_RUN=1 + ;; + p) + export RUN_PRUNE=1 + ;; + a) + export _adminPassword=${OPTARG} + ;; + h) + usage + ;; + \?) 
+ echo -e \\n"Invalid option: -${OPTARG}"\\n
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+# ======================================================================================
+
+# ======================================================================================
+# Main Script
+# --------------------------------------------------------------------------------------
+case $(getMode) in
+ ${ONCE})
+ runBackups
+ echoGreen "Single backup run complete.\n"
+ ;;
+
+ ${SCHEDULED})
+ runBackups
+ echoGreen "Scheduled backup run complete.\n"
+ ;;
+
+ ${RESTORE})
+ unset restoreFlags
+ if isScripted; then
+ restoreFlags="-q"
+ fi
+
+ if validateOperation "${_restoreDatabase}" "${RESTORE}"; then
+ restoreDatabase ${restoreFlags} "${_restoreDatabase}" "${_fromBackup}"
+ fi
+ ;;
+
+ ${VERIFY})
+ verifyBackups "${_verifyBackup}" "${_fromBackup}"
+ ;;
+
+ ${SCHEDULED_VERIFY})
+ verifyBackups -q "${_verifyBackup}" "${_fromBackup}"
+ ;;
+
+ ${CRON})
+ startCron
+ ;;
+
+ ${LEGACY})
+ startLegacy
+ ;;
+
+ ${PRUNE})
+ prune
+ ;;
+
+ ${ERROR})
+ echoRed "A configuration error has occurred, review the details above."
+ usage
+ ;;
+ *)
+ echoYellow "Unrecognized operational mode; ${_mode}"
+ usage
+ ;;
+esac
+# ====================================================================================== \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.usage b/openshift-v4/templates/backup-container-2.0.0/docker/backup.usage new file mode 100644 index 000000000..32238fdff --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.usage @@ -0,0 +1,133 @@ +#!/bin/bash
+# =================================================================================================================
+# Usage:
+# -----------------------------------------------------------------------------------------------------------------
+function usage () {
+ cat <<-EOF
+
+ Automated backup script for PostgreSQL and MongoDB databases.
+
+ There are two modes of scheduling backups:
+ - Cron Mode:
+ - Allows one or more schedules to be defined as cron tabs in ${BACKUP_CONF}.
+ - If cron (go-crond) is installed (which is handled by the Dockerfile) and at least one cron tab is defined, the script will start up in Cron Mode,
+ otherwise it will default to Legacy Mode.
+ - Refer to ${BACKUP_CONF} for additional details and examples of using cron scheduling.
+
+ - Legacy Mode:
+ - Uses a simple sleep command to set the schedule based on the setting of BACKUP_PERIOD; defaults to ${BACKUP_PERIOD}
+
+ Refer to the project documentation for additional details on how to use this script.
+ - https://github.com/BCDevOps/backup-container
+
+ Usage:
+ $0 [options]
+
+ Standard Options:
+ =================
+ -h prints this usage documentation.
+
+ -1 run once.
+ Performs a single set of backups and exits.
+
+ -s run in scheduled/silent (no questions asked) mode.
+ A flag to be used by cron scheduled backups to indicate they are being run on a schedule.
+ Requires cron (go-crond) to be installed and at least one cron tab to be defined in ${BACKUP_CONF}.
+ Refer to ${BACKUP_CONF} for additional details and examples of using cron scheduling.
+
+ -l lists existing backups.
+ Great for listing the available backups for a restore.
+
+ -c lists the current configuration settings and exits.
+ Great for confirming the current settings, and listing the databases included in the backup schedule.
+
+ -p prune backups
+ Used to manually prune backups. 
+ This can be used with the '-f' option, see below, to prune specific backups or sets of backups.
+ Use caution when using the '-f' option.
+
+ Verify Options:
+ ================
+ The verify process performs the following basic operations:
+ - Start a local database server instance.
+ - Restore the selected backup locally, watching for errors.
+ - Run a table query on the restored database as a simple test to ensure tables were restored
+ and queries against the database succeed without error.
+ - Stop the local database server instance.
+ - Delete the local database and configuration.
+
+ -v <DatabaseSpec/all>; in the form <database type>=<Hostname>/<DatabaseName>, or <database type>=<Hostname>:<Port>/<DatabaseName>
+ where <database type> defaults to the container database type if omitted
+ <database type> must be one of "postgres" or "mongo"
+ <database type> must be specified in a mixed database container project
+
+ Triggers verify mode and starts the verification process on the specified database.
+
+ Example:
+ $0 -v postgresql=postgresql:5432/TheOrgBook_Database
+ - Would start the verification process on the database using the most recent backup for the database.
+
+ $0 -v all
+ - Verify the most recent backup of all databases.
+
+ -f <BackupFileFilter>; an OPTIONAL filter to use to find/identify the backup file to restore.
+ Refer to the same option under 'Restore Options' for details.
+
+ Restore Options:
+ ================
+ The restore process performs the following basic operations:
+ - Drop and recreate the selected database.
+ - Grant the database user access to the recreated database.
+ - Restore the database from the selected backup file.
+
+ Have the 'Admin' (postgres or mongo) password handy; the script will ask you for it during the restore.
+
+ When in restore mode, the script will list the settings it will use and wait for your confirmation to continue.
+ This provides you with an opportunity to ensure you have selected the correct database and backup file
+ for the job.
+
+ Restore mode will allow you to restore a database to a different location (host, and/or database name) provided
+ it can contact the host and you can provide the appropriate credentials. If you choose to do this, you will need
+ to provide a file filter using the '-f' option, since the script will likely not be able to determine which backup
+ file you would want to use. This functionality provides a convenient way to test your backups or migrate your
+ database/data without affecting the original database.
+
+ -r <DatabaseSpec>; in the form <database type>=<Hostname>/<DatabaseName>, or <database type>=<Hostname>:<Port>/<DatabaseName>
+ where <database type> defaults to the container database type if omitted
+ <database type> must be one of "postgres" or "mongo"
+ <database type> must be specified in a mixed database container project
+
+ Triggers restore mode and starts the restore process on the specified database.
+
+ Example:
+ $0 -r postgresql:5432/TheOrgBook_Database/postgres
+ - Would start the restore process on the database using the most recent backup for the database.
+
+ -f <BackupFileFilter>; an OPTIONAL filter to use to find/identify the backup file to restore.
+ This can be a full or partial file specification. When only part of a filename is specified the restore process
+ attempts to find the most recent backup matching the filter.
+ If not specified, the restore process attempts to locate the most recent backup file for the specified database.
+
+ Examples:
+ $0 -r postgresql=wallet-db/test_db/postgres -f wallet-db-tob_holder
+ - Would try to find the latest backup matching on the partial file name provided.
+
+ $0 -r wallet-db/test_db/postgres -f /backups/daily/2018-11-07/wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz
+ - Would use the specific backup file. 
+
+ $0 -r wallet-db/test_db/postgres -f wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz
+ - Would use the specific backup file regardless of its location in the root backup folder.
+
+ -s OPTIONAL flag. Use with caution. Could cause unintentional data loss.
+ Run the restore in scripted/scheduled mode. In this mode the restore will not ask you to confirm the settings,
+ nor will it ask you for the 'Admin' password. It will simply attempt to restore a database from a backup.
+ It's up to you to ensure it's targeting the correct database and using the correct backup file.
+
+ -a <AdminPassword>; an OPTIONAL flag used to specify the 'Admin' password.
+ Use with the '-s' flag to specify the 'Admin' password. Under normal usage conditions it's better to supply the
+ password when prompted so it is not visible on the console.
+
+EOF
+exit 1
+}
+# ================================================================================================================= \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/docker/backup.utils b/openshift-v4/templates/backup-container-2.0.0/docker/backup.utils new file mode 100644 index 000000000..ed54af7d4 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docker/backup.utils @@ -0,0 +1,268 @@ +#!/bin/bash
+# =================================================================================================================
+# Primary Database Backup and Restore Functions:
+# -----------------------------------------------------------------------------------------------------------------
+function backupDatabase(){
+ (
+ _databaseSpec=${1}
+ _fileName=${2}
+
+ _backupFile="${_fileName}${IN_PROGRESS_BACKUP_FILE_EXTENSION}"
+
+ touchBackupFile "${_backupFile}"
+ onBackupDatabase "${_databaseSpec}" "${_backupFile}"
+ _rtnCd=${?}
+
+ if (( ${_rtnCd} != 0 )); then
+ rm -rfvd ${_backupFile}
+ fi
+
+ return ${_rtnCd}
+ )
+}
+
+function restoreDatabase(){
+ (
+ local OPTIND
+ local quiet
+ local localhost
+ unset quiet
+ unset localhost
+ unset flags
+ while getopts ql FLAG; do
+ case $FLAG in
+ q )
+ quiet=1
+ flags+="-${FLAG} "
+ ;;
+ * ) flags+="-${FLAG} ";;
+ esac
+ done
+ shift $((OPTIND-1))
+
+ _databaseSpec=${1}
+ _fileName=${2}
+ _fileName=$(findBackup "${_databaseSpec}" "${_fileName}")
+
+ if [ -z "${quiet}" ]; then
+ echoBlue "\nRestoring database ..."
+ echo -e "\nSettings:"
+ echo "- Database: ${_databaseSpec}"
+
+ if [ ! -z "${_fileName}" ]; then
+ echo -e "- Backup file: ${_fileName}\n"
+ else
+ echoRed "- Backup file: No backup file found or specified. Cannot continue with the restore.\n"
+ exit 1
+ fi
+ waitForAnyKey
+ fi
+
+ if [ -z "${quiet}" ] && [ -z "${_adminPassword}" ]; then
+ # Ask for the Admin Password for the database, if it has not already been provided.
+ _msg="Admin password (${_databaseSpec}):"
+ _yellow='\033[1;33m'
+ _nc='\033[0m' # No Color
+ _message=$(echo -e "${_yellow}${_msg}${_nc}")
+ read -r -s -p $"${_message}" _adminPassword
+ echo -e "\n"
+ fi
+
+ local startTime=${SECONDS}
+ onRestoreDatabase ${flags} "${_databaseSpec}" "${_fileName}" "${_adminPassword}"
+ _rtnCd=${?}
+
+ local duration=$(($SECONDS - $startTime))
+ if (( ${_rtnCd} == 0 )); then
+ echoGreen "\nRestore complete - Elapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s\n"
+ else
+ echoRed "\nRestore failed.\n" >&2
+ fi
+
+ return ${_rtnCd}
+ )
+}
+
+function runBackups(){
+ (
+ echoBlue "\nStarting backup process ..." 
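+ # Load the database specs from ${BACKUP_CONF}, create the dated backup folder for this run,
+ # then back up each database that matches this container's database type.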
+ databases=$(readConf) + backupDir=$(createBackupFolder) + listSettings "${backupDir}" "${databases}" + + for database in ${databases}; do + if isForContainerType ${database}; then + local startTime=${SECONDS} + filename=$(generateFilename "${backupDir}" "${database}") + backupDatabase "${database}" "${filename}" + rtnCd=${?} + local duration=$(($SECONDS - $startTime)) + local elapsedTime="\n\nElapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s - Status Code: ${rtnCd}" + + if (( ${rtnCd} == 0 )); then + backupPath=$(finalizeBackup "${filename}") + dbSize=$(getDbSize "${database}") + backupSize=$(getFileSize "${backupPath}") + logInfo "Successfully backed up ${database}.\nBackup written to ${backupPath}.\nDatabase Size: ${dbSize}\nBackup Size: ${backupSize}${elapsedTime}" + ftpBackup "${filename}" + pruneBackups "${backupDir}" "${database}" + else + logError "Failed to backup ${database}.${elapsedTime}" + fi + fi + done + + listExistingBackups ${ROOT_BACKUP_DIR} + ) +} + +function startServer(){ + ( + # Start a local server instance ... + onStartServer ${@} + + # Wait for server to start ... + local startTime=${SECONDS} + rtnCd=0 + printf "waiting for server to start" + while ! pingDbServer ${@}; do + printf "." + local duration=$(($SECONDS - $startTime)) + if (( ${duration} >= ${DATABASE_SERVER_TIMEOUT} )); then + echoRed "\nThe server failed to start within ${duration} seconds.\n" + rtnCd=1 + break + fi + sleep 1 + done + echo + return ${rtnCd} + ) +} + +function stopServer(){ + ( + onStopServer ${@} + ) +} + +function pingDbServer(){ + ( + onPingDbServer ${@} + return ${?} + ) +} + +function verifyBackups(){ + ( + local OPTIND + local flags + unset flags + while getopts q FLAG; do + case $FLAG in + * ) flags+="-${FLAG} " ;; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _fileName=${2} + if [[ "${_databaseSpec}" == "all" ]]; then + databases=$(readConf -q) + else + databases=${_databaseSpec} + fi + + for database in ${databases}; do + if isForContainerType ${database}; then + verifyBackup ${flags} "${database}" "${_fileName}" + fi + done + ) +} + +function verifyBackup(){ + ( + local OPTIND + local quiet + unset quiet + while getopts q FLAG; do + case $FLAG in + q ) quiet=1 ;; + esac + done + shift $((OPTIND-1)) + + _databaseSpec=${1} + _fileName=${2} + _fileName=$(findBackup "${_databaseSpec}" "${_fileName}") + + echoBlue "\nVerifying backup ..." + echo -e "\nSettings:" + echo "- Database: ${_databaseSpec}" + + if [ ! -z "${_fileName}" ]; then + echo -e "- Backup file: ${_fileName}\n" + else + echoRed "- Backup file: No backup file found or specified. Cannot continue with the backup verification.\n" + exit 0 + fi + + if [ -z "${quiet}" ]; then + waitForAnyKey + fi + + local startTime=${SECONDS} + startServer -l "${_databaseSpec}" + rtnCd=${?} + + # Restore the database + if (( ${rtnCd} == 0 )); then + if [ -z "${quiet}" ]; then + restoreDatabase -ql "${_databaseSpec}" "${_fileName}" + rtnCd=${?} + else + # Filter out stdout, keep stderr + echo "Restoring from backup ..." + restoreLog=$(restoreDatabase -ql "${_databaseSpec}" "${_fileName}" 2>&1 >/dev/null) + rtnCd=${?} + + if [ ! -z "${restoreLog}" ] && (( ${rtnCd} == 0 )); then + echo ${restoreLog} + unset restoreLog + elif [ ! 
-z "${restoreLog}" ] && (( ${rtnCd} != 0 )); then + restoreLog="\n\nThe following issues were encountered during backup verification;\n${restoreLog}" + fi + fi + fi + + # Ensure there are tables in the databse and general queries work + if (( ${rtnCd} == 0 )); then + verificationLog=$(onVerifyBackup "${_databaseSpec}") + rtnCd=${?} + fi + + # Stop the database server + stopServer "${_databaseSpec}" + local duration=$(($SECONDS - $startTime)) + local elapsedTime="\n\nElapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s - Status Code: ${rtnCd}" + + if (( ${rtnCd} == 0 )); then + logInfo "Successfully verified backup: ${_fileName}${verificationLog}${restoreLog}${elapsedTime}" + else + logError "Backup verification failed: ${_fileName}${verificationLog}${restoreLog}${elapsedTime}" + fi + return ${rtnCd} + ) +} + +function getDbSize(){ + ( + size=$(onGetDbSize ${@}) + rtnCd=${?} + + echo ${size} + return ${rtnCd} + ) +} +# ================================================================================================================= diff --git a/backup-container/docker/webhook-template.json b/openshift-v4/templates/backup-container-2.0.0/docker/webhook-template.json similarity index 100% rename from backup-container/docker/webhook-template.json rename to openshift-v4/templates/backup-container-2.0.0/docker/webhook-template.json diff --git a/openshift-v4/templates/backup-container-2.0.0/docs/ExampleLog.md b/openshift-v4/templates/backup-container-2.0.0/docs/ExampleLog.md new file mode 100644 index 000000000..c769467f1 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docs/ExampleLog.md @@ -0,0 +1,62 @@ + +## An example of the backup container in action +``` +Starting backup process ... +Reading backup config from backup.conf ... +Making backup directory /backups/daily/2020-02-28/ ... + +Settings: +- Run mode: scheduled + +- Backup strategy: rolling +- Current backup type: daily +- Backups to retain: + - Daily: 6 + - Weekly: 4 + - Monthly: 1 +- Current backup folder: /backups/daily/2020-02-28/ +- Time Zone: PST -0800 + +- Schedule: + - 0 1 * * * default ./backup.sh -s + - 0 4 * * * default ./backup.sh -s -v all + +- Container Type: mongo +- Databases (filtered by container type): + - mongo=identity-kit-db-bc/identity_kit_db + +- FTP server: not configured +- Webhook Endpoint: https://chat.[ocp name].gov.bc.ca/hooks/*** +- Environment Friendly Name: Verifiable Organizations Network (mongo-test) +- Environment Name (Id): devex-von-test + +Backing up 'identity-kit-db-bc/identity_kit_db' to '/backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz.in_progress' ... +Successfully backed up mongo=identity-kit-db-bc/identity_kit_db. +Backup written to /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz. 
+Database Size: 1073741824
+Backup Size: 4.0K
+
+Elapsed time: 0h:0m:0s - Status Code: 0
+
+================================================================================================================================
+Current Backups:
+
+Database Current Size
+mongo=identity-kit-db-bc/identity_kit_db 1073741824
+
+Filesystem Size Used Avail Use% Mounted on
+192.168.111.90:/trident_qtree_pool_file_standard_WKDMGDWTSQ/file_standard_devex_von_test_backup_mongo_54218 1.0G 0 1.0G 0% /backups
+--------------------------------------------------------------------------------------------------------------------------------
+4.0K 2020-02-27 13:26 /backups/daily/2020-02-27/identity-kit-db-bc-identity_kit_db_2020-02-27_13-26-21.sql.gz
+4.0K 2020-02-27 13:27 /backups/daily/2020-02-27/identity-kit-db-bc-identity_kit_db_2020-02-27_13-27-10.sql.gz
+12K 2020-02-27 13:27 /backups/daily/2020-02-27
+4.0K 2020-02-28 06:44 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_06-44-19.sql.gz
+4.0K 2020-02-28 07:12 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_07-12-29.sql.gz
+4.0K 2020-02-28 08:07 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz
+16K 2020-02-28 08:07 /backups/daily/2020-02-28
+32K 2020-02-28 08:07 /backups/daily
+36K 2020-02-28 08:07 /backups/
+================================================================================================================================
+
+Scheduled backup run complete.
+``` \ No newline at end of file diff --git a/backup-container/docs/SampleRocketChatErrorMessage.png b/openshift-v4/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png similarity index 100% rename from backup-container/docs/SampleRocketChatErrorMessage.png rename to openshift-v4/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png diff --git a/openshift-v4/templates/backup-container-2.0.0/docs/SampleRocketChatMessage.png b/openshift-v4/templates/backup-container-2.0.0/docs/SampleRocketChatMessage.png new file mode 100644 index 000000000..804548851 Binary files /dev/null and b/openshift-v4/templates/backup-container-2.0.0/docs/SampleRocketChatMessage.png differ diff --git a/openshift-v4/templates/backup-container-2.0.0/docs/TipsAndTricks.md b/openshift-v4/templates/backup-container-2.0.0/docs/TipsAndTricks.md new file mode 100644 index 000000000..7b5ed99e0 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/docs/TipsAndTricks.md @@ -0,0 +1,75 @@ +# Tips and Tricks
+
+## Verify Fails with - `error connecting to db server` or similar message
+
+### Issue
+
+The postgres and mongo containers used for the backup container have the following (simplified) startup sequence for the database server:
+- Start the server to perform initial server and database configuration.
+- Shut down the server.
+- Start the server with the created configuration.
+
+If memory and CPU requests and limits have been set for the container, it is possible for this sequence to be slowed down enough that the `pingDbServer` operation will return success during the initial startup and configuration, and the subsequent `restoreDatabase` operation will run while the database server is not running (before it's started the second time).
+
+### Example Logs
+
+For a Mongo backup-container the error looks like this:
+```
+sh-4.2$ ./backup.sh -s -v all
+
+Verifying backup ... 
+
+Settings:
+- Database: mongo=identity-kit-db-bc/identity_kit_db
+- Backup file: /backups/daily/2020-03-06/identity-kit-db-bc-identity_kit_db_2020-03-06_01-00-00.sql.gz
+
+waiting for server to start....
+Restoring from backup ...
+2020-03-06T07:28:31.299-0800 W NETWORK [thread1] Failed to connect to 127.0.0.1:27017, in(checking socket for error after poll), reason: Connection refused
+2020-03-06T07:28:31.299-0800 E QUERY [thread1] Error: couldn't connect to server 127.0.0.1:27017, connection attempt failed :
+connect@src/mongo/shell/mongo.js:251:13
+@(connect):1:21
+exception: connect failed
+Cleaning up ...
+
+rm: cannot remove '/var/lib/mongodb/data/journal': Directory not empty
+[!!ERROR!!] - Backup verification failed: /backups/daily/2020-03-06/identity-kit-db-bc-identity_kit_db_2020-03-06_01-00-00.sql.gz
+
+The following issues were encountered during backup verification;
+Restoring '/backups/daily/2020-03-06/identity-kit-db-bc-identity_kit_db_2020-03-06_01-00-00.sql.gz' to '127.0.0.1/identity_kit_db' ...
+
+2020-03-06T07:28:30.785-0800 Failed: error connecting to db server: no reachable servers
+
+Restore failed.
+
+Elapsed time: 0h:0m:16s - Status Code: 1
+```
+
+
+### Solution
+
+Configure the `backup-container` to use best effort resource allocation. **This IS the default for the supplied deployment configuration template**; [backup-deploy.json](../openshift/templates/backup/backup-deploy.json)
+
+Best effort resource allocation can only be set using a template or by directly editing the DC's yaml file.
+
+The resources section in the containers template in the resulting DC looks like this:
+```
+apiVersion: apps.openshift.io/v1
+kind: DeploymentConfig
+...
+spec:
+ ...
+ template:
+ ...
+ spec:
+ containers:
+ ...
+ resources:
+ limits:
+ cpu: '0'
+ memory: '0'
+ requests:
+ cpu: '0'
+ memory: '0'
+...
+``` \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/README.md b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/README.md new file mode 100644 index 000000000..56fb5f870 --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/README.md @@ -0,0 +1,106 @@ +## Setup Backup container
+Use backup container release 2.0.0 to run the backups; the current folder is backup-container-2.0.0.
+1. Request netapp-file-backup storage as the backup space.
+Follow https://github.com/BCDevOps/provision-nfs-apb/blob/master/docs/usage-gui.md to request nfs-backup storage.
+
+2. Add the schedules to ./config/backup.conf: the backup runs at 9pm and the verification at 10pm.
+patroni-master-test:5432/tfrs
+0 21 * * * default ./backup.sh -s
+0 22 * * * default ./backup.sh -s -v all
+Note: modify each line in backup.conf before deploying to the dev, test and prod environments.
+
+3. Build the patroni-backup image.
+oc -n 0ab226-tools process -f openshift/templates/backup/backup-build.yaml \
+-p NAME=patroni-backup OUTPUT_IMAGE_TAG=2.2.1 GIT_REF=2.2.1 \
+| oc -n 0ab226-tools create -f -
+
+4. Create the backup-conf configmap.
+oc -n 0ab226-test create configmap backup-conf --from-file=./config/backup.conf
+oc -n 0ab226-prod create configmap backup-conf --from-file=./config/backup.conf
+
+5. Mount the netapp-file-backup storage to the frontend pod, create /patroni-backup, /minio-backup and /rabbitmq-backup, then remove the mount; a sketch follows below.
+
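+One way to do step 5 (a sketch; assumes the claim is named backup-tfrs-test and the frontend deployment config is tfrs-frontend-test - adjust names and namespaces per environment):
+oc -n 0ab226-test set volume dc/tfrs-frontend-test --add --name=bk --claim-name=backup-tfrs-test --mount-path=/bk
+oc -n 0ab226-test rsh dc/tfrs-frontend-test mkdir -p /bk/patroni-backup /bk/minio-backup /bk/rabbitmq-backup
+oc -n 0ab226-test set volume dc/tfrs-frontend-test --remove --name=bk
+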
+6. Create the deployment config for the backup container.
+6.1 for dev
+BACKUP_VOLUME_NAME is the pvc name
+oc -n 0ab226-dev process -f ./templates/backup/backup-deploy.yaml \
+ -p NAME=patroni-backup \
+ -p SOURCE_IMAGE_NAME=patroni-backup \
+ -p IMAGE_NAMESPACE=0ab226-tools \
+ -p TAG_NAME=2.2.1 \
+ -p DATABASE_SERVICE_NAME=patroni-master-dev-1696 \
+ -p DATABASE_NAME=tfrs \
+ -p DATABASE_DEPLOYMENT_NAME=patroni-dev-1696 \
+ -p DATABASE_USER_KEY_NAME=app-db-username \
+ -p DATABASE_PASSWORD_KEY_NAME=app-db-password \
+ -p TABLE_SCHEMA=public \
+ -p BACKUP_STRATEGY=rolling \
+ -p DAILY_BACKUPS=31 \
+ -p WEEKLY_BACKUPS=12 \
+ -p MONTHLY_BACKUPS=3 \
+ -p BACKUP_PERIOD=1d \
+ -p BACKUP_VOLUME_NAME=backup-tfrs-dev \
+ -p VERIFICATION_VOLUME_NAME=backup-verification \
+ -p VERIFICATION_VOLUME_SIZE=2G \
+ -p VERIFICATION_VOLUME_CLASS=netapp-file-standard \
+ -p ENVIRONMENT_FRIENDLY_NAME='TFRS Database Backup' \
+ -p ENVIRONMENT_NAME=tfrs-dev \
+ -p MINIO_DATA_VOLUME_NAME=tfrs-minio-dev | \
+ oc create -f - -n 0ab226-dev
+
+6.2 for test
+BACKUP_VOLUME_NAME is the pvc name
+oc -n 0ab226-test process -f ./templates/backup/backup-deploy.yaml \
+ -p NAME=patroni-backup \
+ -p SOURCE_IMAGE_NAME=patroni-backup \
+ -p IMAGE_NAMESPACE=0ab226-tools \
+ -p TAG_NAME=2.2.1 \
+ -p DATABASE_SERVICE_NAME=patroni-master-test \
+ -p DATABASE_NAME=tfrs \
+ -p DATABASE_DEPLOYMENT_NAME=patroni-test \
+ -p DATABASE_USER_KEY_NAME=app-db-username \
+ -p DATABASE_PASSWORD_KEY_NAME=app-db-password \
+ -p TABLE_SCHEMA=public \
+ -p BACKUP_STRATEGY=rolling \
+ -p DAILY_BACKUPS=31 \
+ -p WEEKLY_BACKUPS=12 \
+ -p MONTHLY_BACKUPS=3 \
+ -p BACKUP_PERIOD=1d \
+ -p BACKUP_VOLUME_NAME=backup-tfrs-test \
+ -p VERIFICATION_VOLUME_NAME=backup-verification \
+ -p VERIFICATION_VOLUME_SIZE=2G \
+ -p VERIFICATION_VOLUME_CLASS=netapp-file-standard \
+ -p ENVIRONMENT_FRIENDLY_NAME='TFRS Database Backup' \
+ -p ENVIRONMENT_NAME=tfrs-test \
+ -p MINIO_DATA_VOLUME_NAME=tfrs-minio-test | \
+ oc create -f - -n 0ab226-test
+
+6.3 for production
+BACKUP_VOLUME_NAME is the nfs storage name
+oc -n 0ab226-prod process -f ./templates/backup/backup-deploy.yaml \
+ -p NAME=patroni-backup \
+ -p SOURCE_IMAGE_NAME=patroni-backup \
+ -p IMAGE_NAMESPACE=0ab226-tools \
+ -p TAG_NAME=2.2.1 \
+ -p DATABASE_SERVICE_NAME=patroni-master-prod \
+ -p DATABASE_NAME=tfrs \
+ -p DATABASE_DEPLOYMENT_NAME=patroni-prod \
+ -p DATABASE_USER_KEY_NAME=app-db-username \
+ -p DATABASE_PASSWORD_KEY_NAME=app-db-password \
+ -p TABLE_SCHEMA=public \
+ -p BACKUP_STRATEGY=rolling \
+ -p DAILY_BACKUPS=31 \
+ -p WEEKLY_BACKUPS=12 \
+ -p MONTHLY_BACKUPS=3 \
+ -p BACKUP_PERIOD=1d \
+ -p BACKUP_VOLUME_NAME=backup-tfrs-prod \
+ -p VERIFICATION_VOLUME_NAME=backup-verification \
+ -p VERIFICATION_VOLUME_SIZE=2Gi \
+ -p VERIFICATION_VOLUME_CLASS=netapp-file-standard \
+ -p ENVIRONMENT_FRIENDLY_NAME='TFRS Database Backup' \
+ -p ENVIRONMENT_NAME=tfrs-prod \
+ -p MINIO_DATA_VOLUME_NAME=tfrs-minio-prod | \
+ oc create -f - -n 0ab226-prod
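+
+Optionally, smoke test the deployment from inside the running pod (a sketch; assumes the dc name patroni-backup used above):
+oc -n 0ab226-test rsh dc/patroni-backup
+./backup.sh -c    # list the effective configuration and the databases to be backed up
+./backup.sh -1    # run a single set of backups immediately
+./backup.sh -l    # list the backups that were written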
+
+7. If you need to remove the deployment, keep only configmap/backup-conf and the nfs storage:
+oc -n 0ab226-prod delete secret/patroni-backup secret/ftp-secret dc/patroni-backup pvc/backup-verification \ No newline at end of file diff --git a/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-build.yaml b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-build.yaml new file mode 100644 index 000000000..a995522be --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-build.yaml @@ -0,0 +1,79 @@ +---
+kind: Template
+apiVersion: v1
+metadata:
+ name: ${NAME}-build-template
+ creationTimestamp:
+objects:
+- apiVersion: image.openshift.io/v1
+ kind: ImageStream
+ metadata:
+ annotations:
+ description: Keeps track of changes in the backup image
+ labels:
+ shared: "true"
+ creationTimestamp: null
+ name: ${NAME}
+ spec:
+ lookupPolicy:
+ local: false
+ status:
+ dockerImageRepository: ""
+- kind: BuildConfig
+ apiVersion: v1
+ metadata:
+ name: ${NAME}
+ labels:
+ app: ${NAME}
+ spec:
+ triggers:
+ - type: ImageChange
+ - type: ConfigChange
+ runPolicy: Serial
+ source:
+ type: Git
+ git:
+ uri: ${GIT_REPO_URL}
+ ref: ${GIT_REF}
+ contextDir: ${SOURCE_CONTEXT_DIR}
+ strategy:
+ type: Docker
+ dockerStrategy:
+ dockerfilePath: ${DOCKER_FILE_PATH}
+ output:
+ to:
+ kind: ImageStreamTag
+ name: ${NAME}:${OUTPUT_IMAGE_TAG}
+parameters:
+- name: NAME
+ displayName: Name
+ description: The name assigned to all of the resources. Use 'backup-postgres' for
+ Postgres builds or 'backup-mongo' for MongoDB builds.
+ required: true
+ value: backup-postgres
+- name: GIT_REPO_URL
+ displayName: Git Repo URL
+ description: The URL to your GIT repo.
+ required: true
+ value: https://github.com/BCDevOps/backup-container.git
+- name: GIT_REF
+ displayName: Git Reference
+ description: The git reference or branch.
+ required: true
+ value: master
+- name: SOURCE_CONTEXT_DIR
+ displayName: Source Context Directory
+ description: The source context directory.
+ required: false
+ value: "/docker"
+- name: DOCKER_FILE_PATH
+ displayName: Docker File
+ description: The path and file of the docker file defining the build. Choose either
+ 'Dockerfile' for Postgres builds or 'Dockerfile_Mongo' for MongoDB builds.
+ required: false
+ value: Dockerfile
+- name: OUTPUT_IMAGE_TAG
+ displayName: Output Image Tag
+ description: The tag given to the built image. 
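+ # e.g., the README in this directory uses OUTPUT_IMAGE_TAG=2.2.1 to match GIT_REF.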
+ required: true + value: latest diff --git a/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-deploy.yaml b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-deploy.yaml new file mode 100644 index 000000000..b5474facf --- /dev/null +++ b/openshift-v4/templates/backup-container-2.0.0/openshift/templates/backup/backup-deploy.yaml @@ -0,0 +1,384 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: "${NAME}-deployment-template" +objects: +- kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: "${VERIFICATION_VOLUME_NAME}" + labels: + app: "${NAME}-persistent" + template: "${NAME}-persistent-template" + spec: + storageClassName: "${VERIFICATION_VOLUME_CLASS}" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "${VERIFICATION_VOLUME_SIZE}" +- kind: Secret + apiVersion: v1 + metadata: + name: "${NAME}" + type: Opaque + stringData: + webhook-url: "${WEBHOOK_URL}" +- kind: Secret + apiVersion: v1 + metadata: + name: "${FTP_SECRET_KEY}" + type: Opaque + stringData: + ftp-url: "${FTP_URL}" + ftp-user: "${FTP_USER}" + ftp-password: "${FTP_PASSWORD}" +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: "${NAME}" + labels: + template: "${NAME}-deployment" + annotations: + description: Defines how to deploy the ${NAME} server + spec: + strategy: + type: Recreate + triggers: + - type: ConfigChange + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - "${NAME}" + from: + kind: ImageStreamTag + namespace: "${IMAGE_NAMESPACE}" + name: "${SOURCE_IMAGE_NAME}:${TAG_NAME}" + replicas: 1 + selector: + name: "${NAME}" + template: + metadata: + name: "${NAME}" + labels: + name: "${NAME}" + spec: + volumes: + - name: nfs-backup + persistentVolumeClaim: + claimName: "${BACKUP_VOLUME_NAME}" + - name: minio-data + persistentVolumeClaim: + claimName: "${MINIO_DATA_VOLUME_NAME}" + - name: "${VERIFICATION_VOLUME_NAME}" + persistentVolumeClaim: + claimName: "${VERIFICATION_VOLUME_NAME}" + - name: "${NAME}-config-volume" + configMap: + name: "${CONFIG_MAP_NAME}" + items: + - key: "${CONFIG_FILE_NAME}" + path: "${CONFIG_FILE_NAME}" + containers: + - name: "${NAME}" + image: '' + ports: [] + env: + - name: BACKUP_STRATEGY + value: "${BACKUP_STRATEGY}" + - name: BACKUP_DIR + value: "${BACKUP_DIR}" + - name: NUM_BACKUPS + value: "${NUM_BACKUPS}" + - name: DAILY_BACKUPS + value: "${DAILY_BACKUPS}" + - name: WEEKLY_BACKUPS + value: "${WEEKLY_BACKUPS}" + - name: MONTHLY_BACKUPS + value: "${MONTHLY_BACKUPS}" + - name: BACKUP_PERIOD + value: "${BACKUP_PERIOD}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_NAME + value: "${DATABASE_NAME}" + - name: MONGODB_AUTHENTICATION_DATABASE + value: "${MONGODB_AUTHENTICATION_DATABASE}" + - name: TABLE_SCHEMA + value: "${TABLE_SCHEMA}" + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: "${DATABASE_DEPLOYMENT_NAME}" + key: "${DATABASE_USER_KEY_NAME}" + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: "${DATABASE_DEPLOYMENT_NAME}" + key: "${DATABASE_PASSWORD_KEY_NAME}" + - name: FTP_URL + valueFrom: + secretKeyRef: + name: "${FTP_SECRET_KEY}" + key: ftp-url + - name: FTP_USER + valueFrom: + secretKeyRef: + name: "${FTP_SECRET_KEY}" + key: ftp-user + - name: FTP_PASSWORD + valueFrom: + secretKeyRef: + name: "${FTP_SECRET_KEY}" + key: ftp-password + - name: WEBHOOK_URL + valueFrom: + secretKeyRef: + name: "${NAME}" + key: webhook-url + - name: ENVIRONMENT_FRIENDLY_NAME + value: 
"${ENVIRONMENT_FRIENDLY_NAME}" + - name: ENVIRONMENT_NAME + value: "${ENVIRONMENT_NAME}" + resources: + requests: + cpu: "${CPU_REQUEST}" + memory: "${MEMORY_REQUEST}" + limits: + cpu: "${CPU_LIMIT}" + memory: "${MEMORY_LIMIT}" + volumeMounts: + - name: nfs-backup + mountPath: "/backups" + - name: minio-data + mountPath: "/minio-data" + - name: "${VERIFICATION_VOLUME_NAME}" + mountPath: "${VERIFICATION_VOLUME_MOUNT_PATH}" + - name: "${NAME}-config-volume" + mountPath: "${CONFIG_MOUNT_PATH}${CONFIG_FILE_NAME}" + subPath: "${CONFIG_FILE_NAME}" +parameters: +- name: NAME + displayName: Name + description: The name assigned to all of the resources. Use 'backup-postgres' for + Postgres deployments or 'backup-mongo' for MongoDB deployments. + required: true + value: backup-postgres +- name: SOURCE_IMAGE_NAME + displayName: Source Image Name + description: The name of the image to use for this resource. Use 'backup-postgres' + for Postgres deployments or 'backup-mongo' for MongoDB deployments. + required: true + value: backup-postgres +- name: IMAGE_NAMESPACE + displayName: Image Namespace + description: The namespace of the OpenShift project containing the imagestream for + the application. + required: true + value: backup-container +- name: TAG_NAME + displayName: Environment TAG name + description: The TAG name for this environment, e.g., dev, test, prod + required: true + value: dev +- name: DATABASE_SERVICE_NAME + displayName: Database Service Name + description: Used for backward compatibility only. Not needed when using the recommended + 'backup.conf' configuration. The name of the database service. + required: false + value: '' +- name: DATABASE_NAME + displayName: Database Name + description: Used for backward compatibility only. Not needed when using the recommended + 'backup.conf' configuration. The name of the database. + required: false + value: '' +- name: MONGODB_AUTHENTICATION_DATABASE + displayName: MongoDB Authentication Database + description: This is only required if you are backing up mongo database with a separate + authentication database. + required: false + value: '' +- name: DATABASE_DEPLOYMENT_NAME + displayName: Database Deployment Name + description: The name associated to the database deployment resources. In particular, + this is used to wire up the credentials associated to the database. + required: true + value: postgresql +- name: DATABASE_USER_KEY_NAME + displayName: Database User Key Name + description: The datbase user key name stoed in database deployment resources specified + by DATABASE_DEPLOYMENT_NAME. + required: true + value: database-user +- name: DATABASE_PASSWORD_KEY_NAME + displayName: Database Password Key Name + description: The database password key name stored in database deployment resources + specified by DATABASE_DEPLOYMENT_NAME. + required: true + value: database-password +- name: TABLE_SCHEMA + displayName: Table Schema + description: The table schema for your database. Used for Postgres backups. + required: true + value: public +- name: BACKUP_STRATEGY + displayName: Backup Strategy + description: The strategy to use for backups; for example daily, or rolling. + required: true + value: rolling +- name: FTP_SECRET_KEY + displayName: FTP Secret Key + description: The FTP secret key is used to wire up the credentials associated to + the FTP. 
+ required: false + value: ftp-secret +- name: FTP_URL + displayName: FTP Server URL + description: The URL of the backup FTP server + required: false + value: '' +- name: FTP_USER + displayName: FTP user name + description: FTP user name + required: false + value: '' +- name: FTP_PASSWORD + displayName: FTP password + description: FTP password + required: false + value: '' +- name: WEBHOOK_URL + displayName: Webhook URL + description: The URL of the webhook to use for notifications. If not specified, + the webhook integration feature is disabled. + required: false + value: '' +- name: ENVIRONMENT_FRIENDLY_NAME + displayName: Friendly Environment Name + description: The human readable name of the environment. This variable is used + by the webhook integration to identify the environment in which the backup notifications + originate. + required: false + value: '' +- name: ENVIRONMENT_NAME + displayName: Environment Name (Environment Id) + description: The name or Id of the environment. This variable is used by the webhook + integration to identify the environment in which the backup notifications originate. + required: false + value: '' +- name: BACKUP_DIR + displayName: The root backup directory + description: The name of the root backup directory. + required: true + value: "/backups/patroni-backup/" +- name: NUM_BACKUPS + displayName: The number of backup files to be retained + description: Used for backward compatibility only. Ignored when using the recommended + `rolling` backup strategy. The number of backup files to be retained. Used for + the `daily` backup strategy. + required: false + value: '' +- name: DAILY_BACKUPS + displayName: Number of Daily Backups to Retain + description: The number of daily backup files to be retained. Used for the `rolling` + backup strategy. + required: false + value: '' +- name: WEEKLY_BACKUPS + displayName: Number of Weekly Backups to Retain + description: The number of weekly backup files to be retained. Used for the `rolling` + backup strategy. + required: false + value: '' +- name: MONTHLY_BACKUPS + displayName: Number of Monthly Backups to Retain + description: The number of monthly backup files to be retained. Used for the `rolling` + backup strategy. + required: false + value: '' +- name: BACKUP_PERIOD + displayName: Period (d,m,s) between backups in a format used by the sleep command + description: Used for backward compatibility only. Ignored when using the recommended + `backup.conf` and cron backup strategy. Period (d,m,s) between backups in a format + used by the sleep command + required: false + value: '' +- name: CONFIG_FILE_NAME + displayName: Config File Name + description: The name of the configuration file. + required: true + value: backup.conf +- name: CONFIG_MAP_NAME + displayName: Config Map Name + description: The name of the configuration map. + required: true + value: backup-conf +- name: CONFIG_MOUNT_PATH + displayName: Config Mount Path + description: The path to use to mount the config file. + required: true + value: "/" +- name: BACKUP_VOLUME_NAME + displayName: Backup Volume Name + description: The name of the persistent volume used to store the backups. Please + note, when using the recommended nfs-backup storage class the name of the pvc + MUST be taken from the manually provisioned claim; nfs-backup storage MUST be + provisioned manually. 
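+ # NOTE: this template only creates the verification PVC below; the backup claim
+ # itself (e.g., backup-tfrs-dev/test/prod in the README) must already exist.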
+ required: true
+ value: backup
+- name: VERIFICATION_VOLUME_NAME
+ displayName: Verification Volume Name
+ description: The name for the verification volume, used for restoring and verifying
+ backups. When using the recommended nfs-backup storage class for backups, this
+ volume MUST be either netapp-file-standard or netapp-block-standard storage; netapp-block-standard
+ is recommended (it has far better performance).
+ required: false
+ value: backup-verification
+- name: VERIFICATION_VOLUME_SIZE
+ displayName: Verification Volume Size
+ description: The size of the persistent volume used for restoring and verifying
+ backups, e.g. 512Mi, 1Gi, 2Gi. Ensure this is sized correctly. It should be
+ large enough to contain your largest database.
+ required: true
+ value: 1Gi
+- name: VERIFICATION_VOLUME_CLASS
+ displayName: Verification Volume Class
+ description: The class of the persistent volume used for restoring and verifying
+ backups; should be one of netapp-block-standard or netapp-file-standard. netapp-block-standard
+ performs better.
+ required: true
+ value: netapp-file-standard
+- name: VERIFICATION_VOLUME_MOUNT_PATH
+ displayName: Verification Volume Mount Path
+ description: The path on which to mount the verification volume. This is used by
+ the database server to contain the database configuration and data files. For
+ Mongo, please use /var/lib/mongodb/data
+ required: true
+ value: "/var/lib/pgsql/data"
+- name: CPU_REQUEST
+ displayName: Resources CPU Request
+ description: The resources CPU request (in cores) for this deployment.
+ required: true
+ value: 100m
+- name: CPU_LIMIT
+ displayName: Resources CPU Limit
+ description: The resources CPU limit (in cores) for this deployment.
+ required: true
+ value: 150m
+- name: MEMORY_REQUEST
+ displayName: Resources Memory Request
+ description: The resources Memory request (in Mi, Gi, etc) for this deployment.
+ required: true
+ value: 100M
+- name: MEMORY_LIMIT
+ displayName: Resources Memory Limit
+ description: The resources Memory limit (in Mi, Gi, etc) for this deployment.
+ required: true
+ value: 150M
+- name: MINIO_DATA_VOLUME_NAME
+ displayName: Minio data pvc name
+ description: Minio data pvc name. 
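+ # e.g., tfrs-minio-dev, tfrs-minio-test or tfrs-minio-prod, as used in the README.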
+ required: true diff --git a/backup-container/scripts/rocket.chat.integration.js b/openshift-v4/templates/backup-container-2.0.0/scripts/rocket.chat.integration.js similarity index 100% rename from backup-container/scripts/rocket.chat.integration.js rename to openshift-v4/templates/backup-container-2.0.0/scripts/rocket.chat.integration.js diff --git a/openshift-v4/templates/celery/README.md b/openshift-v4/templates/celery/README.md new file mode 100644 index 000000000..51dbb814d --- /dev/null +++ b/openshift-v4/templates/celery/README.md @@ -0,0 +1,10 @@ +### Files included
+
+celery-bc.yaml build config
+celery-dc.yaml deployment config
+
+### Before pipeline starts
+N/A
+
+### After pipeline completes
+N/A diff --git a/openshift-v4/templates/celery/celery-bc.yaml b/openshift-v4/templates/celery/celery-bc.yaml new file mode 100644 index 000000000..66202f197 --- /dev/null +++ b/openshift-v4/templates/celery/celery-bc.yaml @@ -0,0 +1,89 @@ +---
+kind: Template
+apiVersion: template.openshift.io/v1
+metadata:
+ name: celery-bc
+ creationTimestamp:
+parameters:
+- name: NAME
+ displayName:
+ description: the module name entered when running yo bcdk:pipeline, which is tfrs
+ required: true
+- name: SUFFIX
+ displayName:
+ description: sample is -pr-0
+ required: true
+- name: VERSION
+ displayName:
+ description: image tag name for output
+ required: true
+- name: GIT_URL
+ displayName:
+ description: tfrs repo
+ required: true
+- name: RELEASE_BRANCH
+ displayName:
+ description: the release branch name
+ required: true
+objects:
+- apiVersion: image.openshift.io/v1
+ kind: ImageStream
+ metadata:
+ annotations:
+ description: Keeps track of changes in the celery image
+ labels:
+ shared: "true"
+ creationTimestamp: null
+ name: ${NAME}-celery
+ spec:
+ lookupPolicy:
+ local: false
+ status:
+ dockerImageRepository: ""
+- kind: BuildConfig
+ apiVersion: build.openshift.io/v1
+ metadata:
+ name: ${NAME}-celery${SUFFIX}
+ creationTimestamp:
+ spec:
+ triggers: []
+ runPolicy: Serial
+ source:
+ type: Dockerfile
+ dockerfile: |
+ FROM python:3.6.8-slim-stretch
+ ENV TFRS_RELEASE=${tfrs_release}
+ RUN apt-get update \
+ && apt-get install -y git \
+ && apt-get install -y supervisor
+ WORKDIR /app
+ RUN git clone https://github.com/bcgov/tfrs.git
+ WORKDIR /app/tfrs
+ RUN git checkout $TFRS_RELEASE
+ RUN pip install --upgrade pip \
+ && pip install -r backend/requirements.txt
+ RUN cp /app/tfrs/security-scan/scan-handler/celery.conf /etc/supervisor/conf.d
+ RUN chgrp -R root /var/log/supervisor
+ RUN chmod -R g+w /var/log/supervisor
+ RUN chmod -R g+w /run || :
+ RUN chmod -R g+w /app
+ CMD ["supervisord"]
+ strategy:
+ type: Docker
+ dockerStrategy:
+ noCache: true
+ env:
+ - name: tfrs_release
+ value: ${RELEASE_BRANCH}
+ forcePull: true
+ output:
+ to:
+ kind: ImageStreamTag
+ name: ${NAME}-celery:${VERSION}
+ resources: {}
+ postCommit: {}
+ nodeSelector:
+ successfulBuildsHistoryLimit: 5
+ failedBuildsHistoryLimit: 5
+ status:
+ lastVersion: 0 diff --git a/openshift-v4/templates/celery/celery-dc.yaml b/openshift-v4/templates/celery/celery-dc.yaml new file mode 100644 index 000000000..ab9a0d486 --- /dev/null +++ b/openshift-v4/templates/celery/celery-dc.yaml @@ -0,0 +1,183 @@ +---
+kind: Template
+apiVersion: template.openshift.io/v1
+metadata:
+ name: celery-dc
+ creationTimestamp:
+parameters:
+- name: NAME
+ displayName: App name
+ description: App name
+ value: tfrs
+ required: true
+- name: SUFFIX
+ displayName: suffix
+ description: such as -dev-1696, -dev, etc. 
+ required: true +- name: NAMESPACE + displayName: Environment name + description: 'Sample values: 0ab226-dev, 0ab226-test and 0ab226-prod' + required: true +- name: VERSION + displayName: null + description: image tag name for output + required: true +- name: ENV_NAME + displayName: environment name + description: such as dev, test and prod + required: true +- description: Starting amount of CPU the container can use. + displayName: CPU REQUEST + name: CPU_REQUEST + value: '100m' + required: true +- description: Maximum amount of CPU the container can use. + displayName: CPU Limit + name: CPU_LIMIT + value: '250m' + required: true +- description: Starting amount of memory the container can use. + displayName: Memory Request + name: MEMORY_REQUEST + value: 1600Mi + required: true +- description: Maximum amount of memory the container can use. + displayName: Memory Limit + name: MEMORY_LIMIT + value: 3Gi + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the celery image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-celery + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-celery${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-celery${SUFFIX} + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - celery + from: + kind: ImageStreamTag + name: ${NAME}-celery:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + name: ${NAME}-celery${SUFFIX} + template: + metadata: + creationTimestamp: + labels: + name: ${NAME}-celery${SUFFIX} + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + containers: + - name: celery + image: '' + env: + - name: RABBITMQ_VHOST + value: "/tfrs" + - name: RABBITMQ_USER + value: tfrs + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: RABBITMQ_HOST + value: ${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: RABBITMQ_PORT + value: '5672' + - name: DATABASE_SERVICE_NAME + value: patroni-master${SUFFIX} + - name: DATABASE_ENGINE + value: postgresql + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-name + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-password + - name: MINIO_ENDPOINT + value: ${NAME}-minio-${ENV_NAME}.apps.silver.devops.gov.bc.ca:443 + - name: MINIO_USE_SSL + value: 'true' + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_SECRET_KEY + - name: MINIO_BUCKET_NAME + value: tfrs + - name: EMAIL_FROM_ADDRESS + value: tfrs@gov.bc.ca + - name: EMAIL_SENDING_ENABLED + value: 'true' + - name: SMTP_SERVER_HOST + value: apps.smtp.gov.bc.ca + resources: + limits: + cpu: 250m + memory: 3Gi + requests: + cpu: 100m + memory: 1600Mi + terminationMessagePath: 
"/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/clamav/README.md b/openshift-v4/templates/clamav/README.md new file mode 100644 index 000000000..d7b9713c5 --- /dev/null +++ b/openshift-v4/templates/clamav/README.md @@ -0,0 +1,15 @@ +### Files included + +clamav-bc.yaml build config +clamav-dc.yaml deployment config + +### Build and deploy clamav + +oc process -f ./clamav-bc.yaml | oc create -f - -n 0ab226-tools +oc tag clamav:latest clamav:dev -n 0ab226-tools +oc tag clamav:latest clamav:test -n 0ab226-tools +oc tag clamav:latest clamav:prod -n 0ab226-tools + +oc process -f ./clamav-dc.yaml ENV_NAME=[dev, test or prod] | oc create -f - -n [dev, test or prod namespace] +Note: a new release add-ca-cert is being used otherwise the virus database download will fail + diff --git a/openshift-v4/templates/clamav/clamav-bc.yaml b/openshift-v4/templates/clamav/clamav-bc.yaml new file mode 100644 index 000000000..09005bc2a --- /dev/null +++ b/openshift-v4/templates/clamav/clamav-bc.yaml @@ -0,0 +1,51 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: clamav + creationTimestamp: +objects: +- kind: ImageStream + apiVersion: v1 + metadata: + name: clamav + generation: 1 + creationTimestamp: + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: BuildConfig + apiVersion: v1 + metadata: + name: clamav + creationTimestamp: + labels: + app: clamav + name: clamav + template: clamav + spec: + triggers: [] + runPolicy: Serial + source: + type: Git + git: + ref: add-ca-cert + uri: https://github.com/bcgov/docker-clamav.git + strategy: + type: Docker + dockerStrategy: + pullSecret: + name: docker-creds + forcePull: true + noCache: true + output: + to: + kind: ImageStreamTag + name: clamav:latest + resources: {} + postCommit: {} + nodeSelector: {} + status: + lastVersion: 0 diff --git a/openshift-v4/templates/clamav/clamav-dc.yaml b/openshift-v4/templates/clamav/clamav-dc.yaml new file mode 100644 index 000000000..1cead7126 --- /dev/null +++ b/openshift-v4/templates/clamav/clamav-dc.yaml @@ -0,0 +1,110 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: clamav + creationTimestamp: +parameters: +- name: ENV_NAME + required: true +objects: +- kind: Service + apiVersion: v1 + metadata: + name: tfrs-clamav-${ENV_NAME} + creationTimestamp: + labels: + name: tfrs-clamav-${ENV_NAME} + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + ports: + - name: 3310-tcp + protocol: TCP + port: 3310 + targetPort: 3310 + selector: + name: tfrs-clamav-${ENV_NAME} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: tfrs-clamav-${ENV_NAME} + generation: 1 + creationTimestamp: + labels: + name: tfrs-clamav-${ENV_NAME} + spec: + strategy: + type: Recreate + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - clamav + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: clamav:${ENV_NAME} + - type: ConfigChange + replicas: 1 + test: false + selector: + name: tfrs-clamav-${ENV_NAME} + template: + metadata: + creationTimestamp: + labels: + name: 
tfrs-clamav-${ENV_NAME} + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + containers: + - name: clamav + image: image-registry.openshift-image-registry.svc:5000/0ab226-tools/clamav:${ENV_NAME} + ports: + - containerPort: 3310 + protocol: TCP + resources: + limits: + cpu: 1000m + memory: 2Gi + requests: + cpu: 100m + memory: 500Mi + livenessProbe: + tcpSocket: + port: 3310 + initialDelaySeconds: 240 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: 3310 + initialDelaySeconds: 240 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/frontend/README.md b/openshift-v4/templates/frontend/README.md new file mode 100644 index 000000000..861b936f2 --- /dev/null +++ b/openshift-v4/templates/frontend/README.md @@ -0,0 +1,13 @@ +### Files included + +frontend-angular-app-bc.yaml base image build +frontend-bc.yaml base image +frontend-dc.yaml deployment config +frontend-dc-others.yaml create client service, route and configmap(to be mounted to frontend pods) + +### Before triggering pipeline + +oc tag e52f12-tools/nodejs:12-1-45 0ab226-tools/nodejs:12-1-45 +Make sure nginx-runtime image has been built + + diff --git a/openshift-v4/templates/frontend/frontend-angular-app-bc.yaml b/openshift-v4/templates/frontend/frontend-angular-app-bc.yaml new file mode 100644 index 000000000..04a8509fc --- /dev/null +++ b/openshift-v4/templates/frontend/frontend-angular-app-bc.yaml @@ -0,0 +1,82 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: tfrs-frontend-angular-app + creationTimestamp: +parameters: +- name: NAME + description: tfrs + value: tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: GIT_URL + displayName: + description: tfrs repo + required: true +- name: GIT_REF + displayName: + description: tfrs branch name of the pr + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the frontend angular image + labels: + shared: "true" + creationTimestamp: null + generation: 643 + name: ${NAME}-frontend-angular-app + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-frontend-angular-app${SUFFIX} + creationTimestamp: + spec: + triggers: + - type: ConfigChange + - type: ImageChange + imageChange: {} + runPolicy: Serial + source: + type: Git + git: + uri: ${GIT_URL} + ref: ${GIT_REF} + contextDir: frontend + strategy: + type: Source + sourceStrategy: + from: + kind: ImageStreamTag + name: nodejs:12-1-45 + output: + to: + kind: ImageStreamTag + name: ${NAME}-frontend-angular-app:${VERSION} + resources: + limits: + cpu: '1' + memory: 4Gi + requests: + memory: 2Gi + postCommit: {} + nodeSelector: + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + status: + lastVersion: 0 diff --git 
a/openshift-v4/templates/frontend/frontend-bc.yaml b/openshift-v4/templates/frontend/frontend-bc.yaml new file mode 100644 index 000000000..10ed42726 --- /dev/null +++ b/openshift-v4/templates/frontend/frontend-bc.yaml @@ -0,0 +1,88 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: client-bc + creationTimestamp: +parameters: +- name: NAME + description: tfrs + value: tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: GIT_URL + displayName: + description: tfrs repo + required: true +- name: GIT_REF + displayName: + description: tfrs branch name of the pr + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the frontend angular image + labels: + shared: "true" + creationTimestamp: null + generation: 643 + name: ${NAME}-frontend + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-frontend${SUFFIX} + creationTimestamp: + annotations: + description: Defines how to build the front end image + spec: + triggers: + - type: ImageChange + imageChange: {} + - type: ConfigChange + runPolicy: Serial + source: + type: Dockerfile + dockerfile: |- + FROM tfrs-frontend-angular-app:thisLineToBeReplace + COPY * /tmp/app/dist/ + CMD /usr/libexec/s2i/run + images: + - from: + kind: ImageStreamTag + name: ${NAME}-frontend-angular-app:${VERSION} + as: + paths: + - sourcePath: "/opt/app-root/src/public/." + destinationDir: tmp + strategy: + type: Docker + dockerStrategy: + from: + kind: ImageStreamTag + name: nginx-runtime:20210115 + output: + to: + kind: ImageStreamTag + name: ${NAME}-frontend:${VERSION} + resources: + limits: + cpu: '1' + memory: 4Gi + postCommit: {} + nodeSelector: + status: + lastVersion: 0 diff --git a/openshift-v4/templates/frontend/frontend-dc-others.yaml b/openshift-v4/templates/frontend/frontend-dc-others.yaml new file mode 100644 index 000000000..c069e6f2b --- /dev/null +++ b/openshift-v4/templates/frontend/frontend-dc-others.yaml @@ -0,0 +1,106 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: tfrs-frontend-dc + creationTimestamp: +parameters: +- name: NAME + description: tfrs + value: tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: KEYCLOAK_AUTHORITY + displayName: keycloak.authority + description: + required: true +- name: KEYCLOAK_CLIENT_ID + displayName: keycloak.client_id + description: Values tfrs-dev tfrs tfrs + required: true +- name: KEYCLOAK_CALLBACK_URL + displayName: keycloak.callback_url + description: +- name: KEYCLOAK_LOGOUT_URL + displayName: keycloak.post_logout_url + description: + required: true +- name: FRONTEND_HOST + displayName: Frontend route host name + description: + required: true +- name: BACKEND_HOST + displayName: Backend route host name + description: + required: true +objects: +- kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-frontend${SUFFIX} + spec: + ports: + - name: web + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + name: ${NAME}-frontend${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: v1 + metadata: + 
name: ${NAME}-frontend${SUFFIX} + creationTimestamp: + labels: + frontend: 'true' + annotations: {} + spec: + host: ${FRONTEND_HOST} + to: + kind: Service + name: ${NAME}-frontend${SUFFIX} + weight: 100 + port: + targetPort: web + tls: + termination: edge + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None +- kind: ConfigMap + apiVersion: v1 + metadata: + name: ${NAME}-keycloak-config${SUFFIX} + creationTimestamp: + data: + features.js: | + window.tfrs_config = { + "keycloak.enabled": true, + "keycloak.authority": "${KEYCLOAK_AUTHORITY}" , + "keycloak.client_id": "${KEYCLOAK_CLIENT_ID}", + "keycloak.callback_url": "${KEYCLOAK_CALLBACK_URL}", + "keycloak.post_logout_url": "${KEYCLOAK_LOGOUT_URL}", + "debug.enabled": false, + "secure_document_upload.enabled": true, + "secure_document_upload.max_file_size": 50000000, + "fuel_codes.enabled": true, + "keycloak.custom_login": true, + "credit_transfer.enabled": true, + "compliance_reporting.enabled": true, + "credit_calculation_api.enabled": true, + "compliance_reporting.starting_year": 2017, + "compliance_reporting.create_effective_date": "2019-01-01", + "exclusion_reports.create_effective_date": "2019-01-01", + "exclusion_reports.enabled": true, + "api_base": "https://${BACKEND_HOST}/api" + }; diff --git a/openshift-v4/templates/frontend/frontend-dc.yaml b/openshift-v4/templates/frontend/frontend-dc.yaml new file mode 100644 index 000000000..c1b5af625 --- /dev/null +++ b/openshift-v4/templates/frontend/frontend-dc.yaml @@ -0,0 +1,153 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: client-dc + creationTimestamp: +parameters: +- name: NAME + description: tfrs + value: tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: CPU_REQUEST + displayName: Requested CPU + description: Requested CPU + required: true +- name: CPU_LIMIT + displayName: CPU upper limit + description: CPU upper limit + required: true +- name: MEMORY_REQUEST + displayName: Requested memory + description: Requested memory + required: true +- name: MEMORY_LIMIT + displayName: Memory upper limit + description: Memory upper limit + required: true +- name: REPLICAS + displayName: replicas + description: replicas + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the frontend image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-frontend + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-frontend${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-frontend${SUFFIX} + annotations: + description: Defines how to deploy the frontend + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - client + from: + kind: ImageStreamTag + name: ${NAME}-frontend:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: ${{REPLICAS}} + test: false + selector: + name: ${NAME}-frontend${SUFFIX} + template: + metadata: + name: ${NAME}-frontend${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-frontend${SUFFIX} + spec: + volumes: + - name: keycloak-config-data${SUFFIX} + configMap: + name: 
${NAME}-keycloak-config${SUFFIX} + defaultMode: 420 + containers: + - name: client + image: '' + ports: + - containerPort: 3000 + protocol: TCP + env: + - name: RealIpFrom + - name: AdditionalRealIpFromRules + - name: IpFilterRules + - name: HTTP_BASIC + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + volumeMounts: + - name: keycloak-config-data${SUFFIX} + mountPath: /tmp/app/dist/config + livenessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 20 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/keycloak/README.md b/openshift-v4/templates/keycloak/README.md new file mode 100644 index 000000000..f2f75a3c6 --- /dev/null +++ b/openshift-v4/templates/keycloak/README.md @@ -0,0 +1,16 @@ +### Files included +* keycloak-secret.yaml includes the keycloak secrets; it is only used by .pipeline/lib/keycloak.js + +### Create Secret keycloak-secret.yaml in tools, dev, test and prod env. The values for tools and dev should be the same +oc process -f keycloak-secret.yaml \ +KEYCLOAK_SA_CLIENT_SECRET=[Clients->sa client->Credentials->secret] \ +clientId=[sa client] \ +clientSecret=[same value as KEYCLOAK_SA_CLIENT_SECRET] \ +tfrsPublic=[the key in the public client url, not the client id; on the sso console click Clients->tfrs] \ +realmId=[realmId] \ +host=[sso host name] \ +| oc create -f - -n 0ab226-xxx --dry-run=client +Note: in Keycloak there are two clients: one is the sa (service account) client and the other is the public client. Drop --dry-run=client to actually create the secret. + + + diff --git a/openshift-v4/templates/keycloak/keycloak-secret.yaml b/openshift-v4/templates/keycloak/keycloak-secret.yaml new file mode 100644 index 000000000..0681f3474 --- /dev/null +++ b/openshift-v4/templates/keycloak/keycloak-secret.yaml @@ -0,0 +1,37 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: tfrs-keycloak-secret +parameters: + - name: KEYCLOAK_SA_CLIENT_SECRET + description: the secret for the private client + required: true + - name: clientId + description: + required: true + - name: clientSecret + description: + required: true + - name: tfrsPublic + description: the key in the public client url, not the client id + required: true + - name: realmId + description: + required: true + - name: host + description: host is the sso host name, such as sso-dev.[ocp name].gov.bc.ca, sso-test.[ocp name].gov.bc.ca and sso.[ocp name].gov.bc.ca + required: true +objects: + - apiVersion: v1 + kind: Secret + metadata: + name: tfrs-keycloak + labels: {} + stringData: + KEYCLOAK_SA_CLIENT_SECRET: "${KEYCLOAK_SA_CLIENT_SECRET}" + clientId: "${clientId}" + clientSecret: "${clientSecret}" + tfrsPublic: "${tfrsPublic}" + realmId: "${realmId}" + host: "${host}" diff --git a/openshift-v4/templates/maintenance-page/README.md b/openshift-v4/templates/maintenance-page/README.md new file mode 100644 index 000000000..0098eeec4 --- /dev/null +++ 
b/openshift-v4/templates/maintenance-page/README.md @@ -0,0 +1,45 @@ +# TFRS + +## Maintenance Mode + +### Usage + +Maintenance-page pods serving static HTML are deployed to our prod, dev and test environments. To enable maintenance mode, switch the route between the tfrs and maintenance-page services. A namespace (project) for deployment must be specified. + +Expected namespaces: + +* mem-tfrs-prod +* mem-tfrs-dev +* mem-tfrs-test + +1. ##### Build Maintenance Page Image + + ``` + oc project mem-tfrs-tools + oc process -f ./maintenance-page-bc.yaml | oc create -f - + ``` + +2. ##### Deploy Maintenance Page Image + + ``` + oc project mem-tfrs-[ENV_NAME] + oc process -f ./maintenance-page-dc.yaml ENV_NAME=[ENV_NAME] | oc create -f - + ``` + +3. ##### Enable/Disable by Command Line + + Maintenance mode on. + + ``` + oc patch route dev-lowcarbonfuels-frontend -n mem-tfrs-[ENV_NAME] \ + -p '{ "spec": { "to": { "name": "maintenance-page" },"port": { "targetPort": "2015-tcp" }}}' + + ``` + + Maintenance mode off. A way to verify the switch is sketched further below. + + ``` + oc patch route dev-lowcarbonfuels-frontend -n mem-tfrs-[ENV_NAME] \ + -p '{ "spec": { "to": { "name": "client" },"port": { "targetPort": "web" }}}' + + ``` diff --git a/openshift-v4/templates/maintenance-page/create-maintenance-page.sh b/openshift-v4/templates/maintenance-page/create-maintenance-page.sh new file mode 100644 index 000000000..91ea63474 --- /dev/null +++ b/openshift-v4/templates/maintenance-page/create-maintenance-page.sh @@ -0,0 +1,16 @@ +## tools +oc process -f ./maintenance-page-bc.yaml | oc create -f - + +## dev +oc process -f ./maintenance-page-dc.yaml ENV_NAME=dev | oc create -f - + +## test +oc process -f ./maintenance-page-dc.yaml ENV_NAME=test | oc create -f - + +## prod +oc process -f ./maintenance-page-dc.yaml ENV_NAME=prod | oc create -f - + +## turn on maintenance page +oc patch route dev-lowcarbonfuels-frontend -n mem-tfrs-[ENV_NAME] -p '{ "spec": { "to": { "name": "maintenance-page" },"port": { "targetPort": "2015-tcp" }}}' +## turn off maintenance page +oc patch route dev-lowcarbonfuels-frontend -n mem-tfrs-[ENV_NAME] -p '{ "spec": { "to": { "name": "client" },"port": { "targetPort": "web" }}}' \ No newline at end of file diff --git a/openshift-v4/templates/maintenance-page/maintenance-page-bc.yaml b/openshift-v4/templates/maintenance-page/maintenance-page-bc.yaml new file mode 100644 index 000000000..c515e00bc --- /dev/null +++ b/openshift-v4/templates/maintenance-page/maintenance-page-bc.yaml @@ -0,0 +1,59 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: maintenance-page +parameters: +- name: NAME + displayName: Name + description: The suffix for all created objects + required: false + value: maintenance-page +objects: +- kind: ImageStream + apiVersion: v1 + metadata: + name: maintenance-page + creationTimestamp: + labels: + app: maintenance-page + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: BuildConfig + apiVersion: v1 + metadata: + name: maintenance-page + creationTimestamp: + labels: + app: maintenance-page + spec: + triggers: + - type: ConfigChange + - type: ImageChange + imageChange: {} + runPolicy: SerialLatestOnly + source: + type: Git + contextDir: "/maintenance" + git: + uri: https://github.com/bcgov/tfrs.git + ref: openshift-v4-migration + strategy: + type: Source + sourceStrategy: + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: s2i-caddy:prod + output: + to: + kind: ImageStreamTag + name: maintenance-page:latest + resources: {} + postCommit: {} + nodeSelector: 
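# A hedged verification sketch for the route switch described in the README
# above; the route and namespace names are the ones the README already uses:
#   oc get route dev-lowcarbonfuels-frontend -n mem-tfrs-[ENV_NAME] \
#     -o jsonpath='{.spec.to.name}{" -> "}{.spec.port.targetPort}{"\n"}'
# "maintenance-page -> 2015-tcp" means maintenance mode is on; "client -> web" means it is off.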
+ successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 diff --git a/openshift-v4/templates/maintenance-page/maintenance-page-dc.yaml b/openshift-v4/templates/maintenance-page/maintenance-page-dc.yaml new file mode 100644 index 000000000..445cfbe73 --- /dev/null +++ b/openshift-v4/templates/maintenance-page/maintenance-page-dc.yaml @@ -0,0 +1,109 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: maintenance-page-dc +parameters: +- name: ENV_NAME + displayName: Environment name + description: The environment name + required: true +objects: +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: maintenance-page + creationTimestamp: + labels: + app: maintenance-page + spec: + strategy: + type: Rolling + rollingParams: + updatePeriodSeconds: 1 + intervalSeconds: 1 + timeoutSeconds: 600 + maxUnavailable: 25% + maxSurge: 25% + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - maintenance-page + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: maintenance-page:prod + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: maintenance-page + template: + metadata: + creationTimestamp: + labels: + app: maintenance-page + annotations: + openshift.io/generated-by: OpenShiftNewApp + spec: + containers: + - name: maintenance-page + image: " " + ports: + - containerPort: 2015 + protocol: TCP + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler +- kind: Service + apiVersion: v1 + metadata: + name: maintenance-page + creationTimestamp: + labels: + app: maintenance-page + spec: + ports: + - name: 2015-tcp + protocol: TCP + port: 2015 + targetPort: 2015 + selector: + app: maintenance-page + type: ClusterIP + sessionAffinity: None +- kind: Route + apiVersion: route.openshift.io/v1 + metadata: + name: maintenance-page + creationTimestamp: + labels: + app: maintenance-page + annotations: + openshift.io/host.generated: 'true' + spec: + host: maintenance-page-${ENV_NAME}.apps.silver.devops.gov.bc.ca + to: + kind: Service + name: maintenance-page + weight: 100 + port: + targetPort: 2015-tcp + wildcardPolicy: None diff --git a/openshift-v4/templates/minio/README.md b/openshift-v4/templates/minio/README.md new file mode 100644 index 000000000..faf902dc1 --- /dev/null +++ b/openshift-v4/templates/minio/README.md @@ -0,0 +1,35 @@ +### Files included + +* minio-bc.yaml minio build config +* minio-dc.yaml minio deployment config +* minio-secret.yaml create template.minio-secret, it is NOT being used as minio creation is not part of pipeline anymore + +### build minio + +oc tag registry.access.redhat.com/rhel:7.7-481 0ab226-tools/rhel7:7.7-481 +oc tag minio:latest minio:20210111 +oc process -f ./minio-bc.yaml | oc create -f - -n 0ab226-tools + + +### One minio instance serve all PRs on Dev + +oc process -f ./minio-dc.yaml \ +NAME=tfrs ENV_NAME=dev SUFFIX=-dev OCP_NAME=apps.silver.devops PVC_SIZE=2Gi \ +| oc create -f - -n 0ab226-dev + +#### Test and Prod Minio setup + +oc process -f ./minio-dc.yaml \ +NAME=tfrs ENV_NAME=test SUFFIX=-test OCP_NAME=apps.silver.devops PVC_SIZE=2Gi \ +| oc create -f - -n 0ab226-test + +oc process -f ./minio-dc.yaml \ +NAME=tfrs ENV_NAME=prod 
SUFFIX=-prod OCP_NAME=apps.silver.devops PVC_SIZE=3Gi \ +| oc create -f - -n 0ab226-prod + +#### Minio data migration from OpenShift V3 to V4 + +On V4, mount the minio data storage on the nagios pod at /minio; the minio data then appears under /minio/tfrs. +On the V4 nagios pod, oc rsync the minio data from V3 into /minio/tfrs (see the sketch below). +On V4, verify the files through the minio web interface and make sure they can be downloaded and opened. +On V4, remove the minio mount from nagios. \ No newline at end of file diff --git a/openshift-v4/templates/minio/docker/Dockerfile b/openshift-v4/templates/minio/docker/Dockerfile new file mode 100644 index 000000000..41585e607 --- /dev/null +++ b/openshift-v4/templates/minio/docker/Dockerfile @@ -0,0 +1,33 @@ +FROM registry.access.redhat.com/rhel7/rhel + +RUN useradd -d /opt/minio -g root minio + +WORKDIR /opt/minio + +ADD entrypoint.sh . + +RUN curl -o minio https://dl.minio.io/server/minio/release/linux-amd64/minio && \ + curl -o mc https://dl.minio.io/client/mc/release/linux-amd64/mc && \ + chmod +x minio && \ + chmod +x mc && \ + mkdir config && \ + mkdir data && \ + mkdir s3 && \ + mkdir s3/config && \ + mkdir s3/data && \ + chown minio:root -R . && chmod 777 -R . + +USER minio + +ENV MINIO_ACCESS_KEY="demoaccesskey" +ENV MINIO_SECRET_KEY="mysecret" +ENV MINIO_BIN=/opt/minio/minio +ENV MINIO_DATA_DIR=/opt/minio/s3/data +ENV MINIO_CONFIG_DIR=/opt/minio/s3/config + +VOLUME $MINIO_CONFIG_DIR +VOLUME $MINIO_DATA_DIR + +EXPOSE 9000 + +ENTRYPOINT [ "./entrypoint.sh" ] diff --git a/openshift-v4/templates/minio/docker/entrypoint.sh b/openshift-v4/templates/minio/docker/entrypoint.sh new file mode 100755 index 000000000..86717f150 --- /dev/null +++ b/openshift-v4/templates/minio/docker/entrypoint.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +${MINIO_BIN} server --config-dir=${MINIO_CONFIG_DIR} $@ ${MINIO_DATA_DIR} diff --git a/openshift-v4/templates/minio/minio-bc.yaml b/openshift-v4/templates/minio/minio-bc.yaml new file mode 100644 index 000000000..ce9c3c49a --- /dev/null +++ b/openshift-v4/templates/minio/minio-bc.yaml @@ -0,0 +1,64 @@ +--- +kind: Template +apiVersion: v1 +metadata: + creationTimestamp: null + name: minio +parameters: + - name: GIT_URL + displayName: TFRS Git Repo URL + description: The URL to your GIT repo + required: true + value: https://github.com/bcgov/tfrs.git + - name: GIT_REF + displayName: Git Reference + description: The git reference or branch. 
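# A hedged sketch of the V3-to-V4 minio data copy described in the migration
# notes above; the V3 login URL, pod name, and source path are illustrative
# assumptions, not values taken from this repo:
#   oc login https://console.pathfinder.gov.bc.ca:8443 --token=<v3-token>
#   oc -n mem-tfrs-dev rsync <v3-minio-pod>:/opt/minio/s3/data/tfrs/ /minio/tfrs/
# Run this from inside the V4 nagios pod (with the minio PVC mounted at /minio),
# then log back in to the V4 cluster and spot-check the copied files.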
+ required: true + value: openshift-v4-migration +objects: + - kind: ImageStream + apiVersion: v1 + metadata: + name: minio + creationTimestamp: + labels: + shared: 'true' + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' + - apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: minio + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: minio:latest + postCommit: {} + resources: {} + runPolicy: Serial + source: + contextDir: openshift-v4/templates/minio/docker + git: + ref: ${GIT_REF} + uri: ${GIT_URL} + type: Git + strategy: + dockerStrategy: + from: + kind: ImageStreamTag + name: rhel7:7.7-481 + type: Docker + successfulBuildsHistoryLimit: 5 + triggers: + - imageChange: {} + type: ImageChange + - type: ConfigChange + status: + lastVersion: 0 diff --git a/openshift-v4/templates/minio/minio-dc.yaml b/openshift-v4/templates/minio/minio-dc.yaml new file mode 100644 index 000000000..48a37a380 --- /dev/null +++ b/openshift-v4/templates/minio/minio-dc.yaml @@ -0,0 +1,213 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: minio + annotations: + description: Minio with persistent storage. By BC Gov. + openshift.io/display-name: BC Gov Minio +parameters: + - name: NAME + value: tfrs + required: true + - name: SUFFIX + displayName: + description: sample is -pr-0 + required: true + - name: ENV_NAME + value: dev + required: true + - name: PVC_SIZE + displayName: Volume Capacity + description: Volume space available to Minio server for files, e.g. 512Mi, 2Gi. + value: 5Gi + required: true + - name: CPU_REQUEST + displayName: Requested CPU + description: Requested CPU + required: true + value: '100m' + - name: CPU_LIMIT + displayName: CPU upper limit + description: CPU upper limit + required: true + value: '200m' + - name: MEMORY_REQUEST + displayName: Requested memory + description: Requested memory + required: true + value: '200M' + - name: MEMORY_LIMIT + displayName: Memory upper limit + description: Memory upper limit + required: true + value: '500M' + - name: MINIO_ACCESS_KEY + description: Minio access key + from: "[a-zA-Z0-9]{8}" + generate: expression + required: true + - name: MINIO_SECRET_KEY + description: Minio secret key + from: "[a-zA-Z0-9]{16}" + generate: expression + required: true + - name: OCP_NAME + displayName: Openshift Name + description: Openshift Name + required: true +objects: +# only create once +# - apiVersion: v1 +# kind: Secret +# metadata: +# name: ${NAME}-minio${SUFFIX} +# stringData: +# MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY} +# MINIO_SECRET_KEY: ${MINIO_SECRET_KEY} + - kind: PersistentVolumeClaim + apiVersion: v1 + metadata: + name: ${NAME}-minio${SUFFIX} + annotations: + volume.beta.kubernetes.io/storage-class: netapp-file-standard + template.openshift.io.bcgov/create: 'true' + spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: ${PVC_SIZE} + status: {} + - kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-minio${SUFFIX} + creationTimestamp: + spec: + ports: + - name: 9000-tcp + protocol: TCP + port: 9000 + targetPort: 9000 + selector: + name: ${NAME}-minio${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} + - apiVersion: route.openshift.io/v1 + kind: Route + metadata: + creationTimestamp: null + name: ${NAME}-minio${SUFFIX} + spec: + host: ${NAME}-minio${SUFFIX}.${OCP_NAME}.gov.bc.ca + port: + targetPort: 9000-tcp + tls: + insecureEdgeTerminationPolicy: 
Redirect + termination: edge + to: + kind: Service + name: ${NAME}-minio${SUFFIX} + weight: 100 + wildcardPolicy: None + - kind: DeploymentConfig + apiVersion: v1 + metadata: + name: ${NAME}-minio${SUFFIX} + spec: + strategy: + type: Recreate + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ConfigChange + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - minio + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: minio:20210111 + replicas: 1 + test: false + selector: + name: ${NAME}-minio${SUFFIX} + template: + metadata: + creationTimestamp: + labels: + name: ${NAME}-minio${SUFFIX} + spec: + volumes: + - name: minio-data + persistentVolumeClaim: + claimName: ${NAME}-minio${SUFFIX} + containers: + - name: minio + image: + ports: + - containerPort: 9000 + protocol: TCP + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio${SUFFIX} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio${SUFFIX} + key: MINIO_SECRET_KEY + - name: MINIO_CONFIG_DIR + value: "/tmp" + - name: MINIO_DATA_DIR + value: "/data" + resources: + limits: + cpu: '${CPU_LIMIT}' + memory: '${MEMORY_LIMIT}' + requests: + cpu: '${CPU_REQUEST}' + memory: '${MEMORY_REQUEST}' + volumeMounts: + - name: minio-data + mountPath: /data + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 35 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 9000 + timeoutSeconds: 3 + readinessProbe: + exec: + command: + - /bin/sh + - '-c' + - mkdir -p /data/tfrs + failureThreshold: 3 + initialDelaySeconds: 25 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/minio/minio-secret.yaml b/openshift-v4/templates/minio/minio-secret.yaml new file mode 100644 index 000000000..bae505b28 --- /dev/null +++ b/openshift-v4/templates/minio/minio-secret.yaml @@ -0,0 +1,25 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: template.minio-secret +parameters: + - name: MINIO_ACCESS_KEY + description: Minio access key + from: "[a-zA-Z0-9]{8}" + generate: expression + required: true + - name: MINIO_SECRET_KEY + description: Minio secret key + from: "[a-zA-Z0-9]{16}" + generate: expression + required: true +objects: + - apiVersion: v1 + kind: Secret + metadata: + name: template.minio-secret + labels: {} + stringData: + MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY} + MINIO_SECRET_KEY: ${MINIO_SECRET_KEY} diff --git a/openshift-v4/templates/nagios/.kube/.empty b/openshift-v4/templates/nagios/.kube/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/openshift-v4/templates/nagios/Dockerfile b/openshift-v4/templates/nagios/Dockerfile new file mode 100644 index 000000000..ee4a87b5b --- /dev/null +++ b/openshift-v4/templates/nagios/Dockerfile @@ -0,0 +1,50 @@ +FROM image-registry.openshift-image-registry.svc:5000/0ab226-tools/nagios-base:latest +EXPOSE 8080 +RUN mkdir /var/run/apache2-supervisord \ + && chown -R nagios.nagios /var/run/apache2-supervisord \ + && mkdir /var/run/supervisord \ + && chown -R nagios.nagios /var/run/supervisord 
\ + && mkdir /docroot \ + && chown -R nagios.nagios /docroot +WORKDIR / +ADD docroot /docroot +ADD apache2 /etc/apache2 +ADD supervisord /etc +# remove the default configuration +RUN rm -fr /etc/nagios3 \ + && mkdir /etc/nagios3 +ADD nagios3 /etc/nagios3 +RUN chown -R nagios.nagios /etc/nagios3 +ARG NAGIOS_USER +ARG NAGIOS_PASSWORD +ARG ENV_NAME +RUN /etc/nagios3/cleanup-cfg.sh $ENV_NAME +RUN echo $NAGIOS_USER \ + && htpasswd -bc /etc/nagios3/htpasswd.users $NAGIOS_USER $NAGIOS_PASSWORD +ADD .kube /var/lib/nagios/.kube +RUN chown -R nagios.nagios /var/lib/nagios/.kube \ + && chgrp -R root /var/log/supervisor \ + && chmod -R g+w /var/log/supervisor \ + && chgrp -R root /var/log/apache2 \ + && chmod -R g+w /var/log/apache2 \ + && chgrp -R root /run/supervisord \ + && chmod -R g+w /run/supervisord \ + && chgrp -R root /run/apache2 \ + && chmod -R g+w /run/apache2 \ + && chgrp -R root /run/apache2-supervisord \ + && chmod -R g+w /run/apache2-supervisord \ + && chgrp -R root /run/nagios3 \ + && chmod -R g+w /run/nagios3 \ + && chgrp -R root /etc/nagios3 \ + && chmod -R g+w /etc/nagios3 \ + && chgrp -R root /var/cache/nagios3 \ + && chmod -R g+w /var/cache/nagios3 \ + && chgrp -R root /var/lib/nagios3 \ + && chmod -R g+w /var/lib/nagios3 \ + && chgrp -R root /var/log/nagios3 \ + && chmod -R g+w /var/log/nagios3 \ + && mkdir /.kube \ + && chgrp -R root /.kube \ + && chmod -R g+w /.kube \ + && chmod +w / +CMD supervisord \ No newline at end of file diff --git a/openshift-v4/templates/nagios/Dockerfile-base b/openshift-v4/templates/nagios/Dockerfile-base new file mode 100644 index 000000000..b060570eb --- /dev/null +++ b/openshift-v4/templates/nagios/Dockerfile-base @@ -0,0 +1,11 @@ +FROM debian:jessie +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install nagios3 monitoring-plugins-standard monitoring-plugins-basic supervisor vim net-tools curl git jq exim4 tzdata check-postgres python3 python3-pip libpq-dev nano -y +RUN ln -fs /usr/share/zoneinfo/Canada/Pacific /etc/localtime \ + && dpkg-reconfigure --frontend noninteractive tzdata +RUN pip3 install "pika==0.12.0" && \ + pip3 install minio requests psycopg2 +RUN curl --silent -L -o /tmp/oc.tgz https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz +WORKDIR /tmp +RUN tar xzf oc.tgz +RUN cp openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit/oc /bin diff --git a/openshift-v4/templates/nagios/README.md b/openshift-v4/templates/nagios/README.md new file mode 100644 index 000000000..466e38c89 --- /dev/null +++ b/openshift-v4/templates/nagios/README.md @@ -0,0 +1,49 @@ +### Files included + +* nagios-base-bc.yaml nagios base image build config +* nagios-bc.yaml nagios image build config +* nagios-dc.yaml nagios deployment config +* nagios-secret.yaml create nagios-secret +* Dockerfile-base: build nagios base image to be used by Dockerfile +* Dockerfile: build final nagios image + +### Build and deploy nagios + +1. Build nagios base image +oc create imagestream nagios-base -n [tools namespace] +oc process -f ./nagios-base-bc.yaml | oc create -f - -n [tools namespace] + +2. Create nagios secret +oc process -f ./nagios-secret.yaml | oc create -f - -n [test namespace] +oc process -f ./nagios-secret.yaml | oc create -f - -n [prod namespace] + +3. Build nagios image for environment +oc process -f ./nagios-bc.yaml ENV_NAME=test | oc create -f - -n [test namespace] +oc process -f ./nagios-bc.yaml ENV_NAME=prod | oc create -f - -n [prod namespace] + +4. 
Tag nagios image for environment +oc tag [test namespace]/nagios:latest [test namespace]/nagios:test +oc tag [prod namespace]/nagios:latest [prod namespace]/nagios:prod + +5. Deploy nagios for environment +oc process -f ./nagios-dc.yaml \ +ENV_NAME=test \ +KEYCLOAK_CLIENT_ID=tfrs \ +KEYCLOAK_SA_REALM=tfrs \ +KEYCLOAK_SA_CLIENT_ID=tfrs-django-sa \ +KEYCLOAK_SA_BASEURL=https://test.oidc.gov.bc.ca \ +KEYCLOAK_REALM=https://test.oidc.gov.bc.ca/auth/realms/tfrs \ +SMTP_SERVER_HOST=apps.smtp.gov.bc.ca \ +DATABASE_SERVICE_NAME=patroni-master-test \ +| oc create -f - -n 0ab226-test + +oc process -f ./nagios-dc.yaml \ +ENV_NAME=prod \ +KEYCLOAK_CLIENT_ID=tfrs \ +KEYCLOAK_SA_REALM=tfrs \ +KEYCLOAK_SA_CLIENT_ID=tfrs-django-sa \ +KEYCLOAK_SA_BASEURL=https://oidc.gov.bc.ca \ +KEYCLOAK_REALM=https://oidc.gov.bc.ca/auth/realms/tfrs \ +SMTP_SERVER_HOST=apps.smtp.gov.bc.ca \ +DATABASE_SERVICE_NAME=patroni-master-prod \ +| oc create -f - -n 0ab226-prod diff --git a/openshift-v4/templates/nagios/apache2/apache2.conf b/openshift-v4/templates/nagios/apache2/apache2.conf new file mode 100644 index 000000000..dfadcd760 --- /dev/null +++ b/openshift-v4/templates/nagios/apache2/apache2.conf @@ -0,0 +1,52 @@ +ServerName localhost +Listen 8080 +PidFile /var/run/apache2-supervisord/httpd.pid +#LockFile /var/lock/accept.lock + +HostnameLookups Off + +LogLevel info +ErrorLog "|/bin/cat" +LogFormat "%h %l %u %t \"%r\" %>s %b" common +CustomLog "|/bin/cat" common + +IncludeOptional mods-enabled/*.load +IncludeOptional mods-enabled/*.conf + +AccessFileName .htaccess + + Require all denied + + +DocumentRoot "/docroot" + + AllowOverride all + Order allow,deny + Allow from all + + +ScriptAlias /cgi-bin/nagios3 /usr/lib/cgi-bin/nagios3 +ScriptAlias /nagios3/cgi-bin /usr/lib/cgi-bin/nagios3 +Alias /nagios3/stylesheets /etc/nagios3/stylesheets +Alias /nagios3 /usr/share/nagios3/htdocs + + + Options FollowSymLinks + + DirectoryIndex index.php index.html + + AllowOverride AuthConfig + + Require all denied + + AuthName "Nagios Access" + AuthType Basic + AuthUserFile /etc/nagios3/htpasswd.users + require valid-user + + + + Options +ExecCGI + + + diff --git a/openshift-v4/templates/nagios/docroot/emptyFile.txt b/openshift-v4/templates/nagios/docroot/emptyFile.txt new file mode 100644 index 000000000..e69de29bb diff --git a/openshift-v4/templates/nagios/nagios-base-bc.yaml b/openshift-v4/templates/nagios/nagios-base-bc.yaml new file mode 100644 index 000000000..1754ec28c --- /dev/null +++ b/openshift-v4/templates/nagios/nagios-base-bc.yaml @@ -0,0 +1,38 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: nagios-base-bc +objects: +- apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: nagios-base + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: nagios-base:latest + namespace: 0ab226-tools + postCommit: {} + resources: {} + runPolicy: Serial + source: + contextDir: openshift-v4/templates/nagios + git: + ref: openshift-v4-migration + uri: https://github.com/bcgov/tfrs.git + type: Git + strategy: + dockerStrategy: + dockerfilePath: Dockerfile-base + forcePull: true + noCache: true + type: Docker + successfulBuildsHistoryLimit: 5 + triggers: [] + status: + lastVersion: 0 diff --git a/openshift-v4/templates/nagios/nagios-bc.yaml b/openshift-v4/templates/nagios/nagios-bc.yaml new file mode 100644 index 000000000..3c7654780 --- /dev/null +++ 
b/openshift-v4/templates/nagios/nagios-bc.yaml @@ -0,0 +1,65 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: nagios-bc +parameters: + - name: ENV_NAME + description: test or prod + value: test + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the nagios image + creationTimestamp: null + name: nagios + spec: + lookupPolicy: + local: false +- apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: nagios + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: nagios:latest + namespace: 0ab226-${ENV_NAME} + postCommit: {} + resources: {} + runPolicy: Serial + source: + contextDir: openshift-v4/templates/nagios + git: + ref: openshift-v4-migration + uri: https://github.com/bcgov/tfrs.git + type: Git + strategy: + dockerStrategy: + env: + - name: NAGIOS_USER + valueFrom: + secretKeyRef: + key: NAGIOS_USER + name: nagios-secret + - name: NAGIOS_PASSWORD + valueFrom: + secretKeyRef: + key: NAGIOS_PASSWORD + name: nagios-secret + - name: ENV_NAME + value: ${ENV_NAME} + forcePull: true + noCache: true + type: Docker + successfulBuildsHistoryLimit: 5 + triggers: [] + status: + lastVersion: 0 diff --git a/openshift-v4/templates/nagios/nagios-dc.yaml b/openshift-v4/templates/nagios/nagios-dc.yaml new file mode 100644 index 000000000..b694b34f8 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios-dc.yaml @@ -0,0 +1,245 @@ +--- +kind: Template +apiVersion: v1 +metadata: + name: nagios-dc + creationTimestamp: +parameters: +- name: ENV_NAME + required: true +- name: KEYCLOAK_CLIENT_ID + displayName: KEYCLOAK_CLIENT_ID + description: 'Valid values: tfrs-dev, tfrs, tfrs' + required: true +- name: KEYCLOAK_SA_REALM + displayName: KEYCLOAK_SA_REALM + description: 'Valid values: tfrs-dev, tfrs, tfrs' + required: true +- name: KEYCLOAK_SA_CLIENT_ID + displayName: KEYCLOAK_SA_CLIENT_ID + description: 'Valid values: tfrs-dev-django-sa, tfrs-django-sa, tfrs-django-sa' + required: true +- name: KEYCLOAK_SA_BASEURL + displayName: KEYCLOAK_SA_BASEURL + description: 'Valid values: https://dev.oidc.gov.bc.ca, https://test.oidc.gov.bc.ca, + https://oidc.gov.bc.ca' + required: true +- name: KEYCLOAK_REALM + displayName: KEYCLOAK_REALM + description: 'Valid values: https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev, https://test.oidc.gov.bc.ca/auth/realms/tfrs, + https://oidc.gov.bc.ca/auth/realms/tfrs' + required: true +- name: SMTP_SERVER_HOST + displayName: SMTP_SERVER_HOST + description: All environment use same email server + value: apps.smtp.gov.bc.ca + required: true +- name: DATABASE_SERVICE_NAME + displayName: database service name + description: database service name + required: true +objects: +- apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: nagios + annotations: + volume.beta.kubernetes.io/storage-class: netapp-file-standard + template.openshift.io.bcgov/create: 'true' + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +- kind: DeploymentConfig + apiVersion: v1 + metadata: + name: nagios + generation: 1 + creationTimestamp: + labels: + app: nagios + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: + requests: + cpu: 50m + memory: 150Mi + limits: + cpu: 200m + memory: 200Mi + activeDeadlineSeconds: 700 + 
triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - nagios + from: + kind: ImageStreamTag + name: nagios:${ENV_NAME} + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + app: nagios + deploymentconfig: nagios + template: + metadata: + creationTimestamp: + labels: + app: nagios + deploymentconfig: nagios + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + volumes: + - name: nagios-data + persistentVolumeClaim: + claimName: nagios + containers: + - name: nagios + image: '' + env: + - name: AMQP_HOST + value: tfrs-${ENV_NAME}-rabbitmq-cluster.0ab226-${ENV_NAME}.svc.cluster.local + - name: AMQP_PORT + value: '5672' + - name: AMQP_USER + value: tfrs + - name: AMQP_VHOST + value: "/tfrs" + - name: AMQP_PASSWORD + valueFrom: + secretKeyRef: + name: tfrs-${ENV_NAME}-rabbitmq-cluster-secret + key: tfrs_password + - name: MINIO_ENDPOINT + value: tfrs-minio-${ENV_NAME}.apps.silver.devops.gov.bc.ca:443 + - name: MINIO_USE_SSL + value: 'true' + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: tfrs-minio-${ENV_NAME} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: tfrs-minio-${ENV_NAME} + key: MINIO_SECRET_KEY + - name: KEYCLOAK_SA_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: keycloak-sa-client-secret + key: KEYCLOAK_SA_CLIENT_SECRET + - name: KEYCLOAK_CLIENT_ID + value: "${KEYCLOAK_CLIENT_ID}" + - name: KEYCLOAK_SA_REALM + value: "${KEYCLOAK_SA_REALM}" + - name: KEYCLOAK_SA_CLIENT_ID + value: "${KEYCLOAK_SA_CLIENT_ID}" + - name: KEYCLOAK_SA_BASEURL + value: "${KEYCLOAK_SA_BASEURL}" + - name: KEYCLOAK_REALM + value: "${KEYCLOAK_REALM}" + - name: SMTP_SERVER_HOST + value: "${SMTP_SERVER_HOST}" + - name: DATABASE_SERVICE_NAME + value: "${DATABASE_SERVICE_NAME}" + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: patroni-${ENV_NAME} + key: app-db-name + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: patroni-${ENV_NAME} + key: app-db-username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: patroni-${ENV_NAME} + key: app-db-password + resources: + limits: + cpu: 200m + memory: 2Gi + volumeMounts: + - name: nagios-data + mountPath: "/var/log/nagios3" + terminationMessagePath: "/dev/termination-log" + imagePullPolicy: Always + serviceAccount: nagios + serviceAccountName: nagios + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 +- kind: Service + apiVersion: v1 + metadata: + name: nagios + creationTimestamp: + labels: + app: nagios + annotations: + openshift.io/generated-by: OpenShiftWebConsole + spec: + ports: + - name: 8080-tcp + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + deploymentconfig: nagios + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: route.openshift.io/v1 + metadata: + name: nagios + creationTimestamp: + labels: + app: nagios + annotations: + openshift.io/host.generated: 'true' + spec: + host: nagios-tfrs-${ENV_NAME}.apps.silver.devops.gov.bc.ca + path: "/" + to: + kind: Service + name: nagios + weight: 100 + port: + targetPort: 8080-tcp + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + wildcardPolicy: None +- kind: ServiceAccount + apiVersion: v1 + metadata: + name: nagios + creationTimestamp: + labels: + app: nagios + 
annotations: + serviceaccounts.openshift.io/oauth-redirectreference.nagios: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"nagios"}}' diff --git a/openshift-v4/templates/nagios/nagios-secret.yaml b/openshift-v4/templates/nagios/nagios-secret.yaml new file mode 100644 index 000000000..7eacb7672 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios-secret.yaml @@ -0,0 +1,20 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: nagios-secret +parameters: + - name: NAGIOS_PASSWORD + description: nagios password + from: "[a-zA-Z0-9]{8}" + generate: expression + required: true +objects: + - apiVersion: v1 + kind: Secret + metadata: + name: nagios-secret + labels: {} + stringData: + NAGIOS_USER: nagiosadmin + NAGIOS_PASSWORD: ${NAGIOS_PASSWORD} diff --git a/openshift-v4/templates/nagios/nagios3/cgi.cfg b/openshift-v4/templates/nagios/nagios3/cgi.cfg new file mode 100644 index 000000000..1d6cc30e7 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/cgi.cfg @@ -0,0 +1,377 @@ +################################################################# +# +# CGI.CFG - Sample CGI Configuration File for Nagios +# +################################################################# + + +# MAIN CONFIGURATION FILE +# This tells the CGIs where to find your main configuration file. +# The CGIs will read the main and host config files for any other +# data they might need. + +main_config_file=/etc/nagios3/nagios.cfg + + + +# PHYSICAL HTML PATH +# This is the path where the HTML files for Nagios reside. This +# value is used to locate the logo images needed by the statusmap +# and statuswrl CGIs. + +physical_html_path=/usr/share/nagios3/htdocs + + + +# URL HTML PATH +# This is the path portion of the URL that corresponds to the +# physical location of the Nagios HTML files (as defined above). +# This value is used by the CGIs to locate the online documentation +# and graphics. If you access the Nagios pages with an URL like +# http://www.myhost.com/nagios, this value should be '/nagios' +# (without the quotes). + +url_html_path=/nagios3 + + + +# CONTEXT-SENSITIVE HELP +# This option determines whether or not a context-sensitive +# help icon will be displayed for most of the CGIs. +# Values: 0 = disables context-sensitive help +# 1 = enables context-sensitive help + +show_context_help=1 + + + +# PENDING STATES OPTION +# This option determines what states should be displayed in the web +# interface for hosts/services that have not yet been checked. +# Values: 0 = leave hosts/services that have not been check yet in their original state +# 1 = mark hosts/services that have not been checked yet as PENDING + +use_pending_states=1 + +# NAGIOS PROCESS CHECK COMMAND +# This is the full path and filename of the program used to check +# the status of the Nagios process. It is used only by the CGIs +# and is completely optional. However, if you don't use it, you'll +# see warning messages in the CGIs about the Nagios process +# not running and you won't be able to execute any commands from +# the web interface. The program should follow the same rules +# as plugins; the return codes are the same as for the plugins, +# it should have timeout protection, it should output something +# to STDIO, etc. +# +# Note: The command line for the check_nagios plugin below may +# have to be tweaked a bit, as different versions of the plugin +# use different command line arguments/syntaxes. 
+ +nagios_check_command=/usr/lib/nagios/plugins/check_nagios /var/cache/nagios3/status.dat 5 '/usr/sbin/nagios3' + + +# AUTHENTICATION USAGE +# This option controls whether or not the CGIs will use any +# authentication when displaying host and service information, as +# well as committing commands to Nagios for processing. +# +# Read the HTML documentation to learn how the authorization works! +# +# NOTE: It is a really *bad* idea to disable authorization, unless +# you plan on removing the command CGI (cmd.cgi)! Failure to do +# so will leave you wide open to kiddies messing with Nagios and +# possibly hitting you with a denial of service attack by filling up +# your drive by continuously writing to your command file! +# +# Setting this value to 0 will cause the CGIs to *not* use +# authentication (bad idea), while any other value will make them +# use the authentication functions (the default). + +use_authentication=1 + + + + +# x509 CERT AUTHENTICATION +# When enabled, this option allows you to use x509 cert (SSL) +# authentication in the CGIs. This is an advanced option and should +# not be enabled unless you know what you're doing. + +use_ssl_authentication=0 + + + + +# DEFAULT USER +# Setting this variable will define a default user name that can +# access pages without authentication. This allows people within a +# secure domain (i.e., behind a firewall) to see the current status +# without authenticating. You may want to use this to avoid basic +# authentication if you are not using a secure server since basic +# authentication transmits passwords in the clear. +# +# Important: Do not define a default username unless you are +# running a secure web server and are sure that everyone who has +# access to the CGIs has been authenticated in some manner! If you +# define this variable, anyone who has not authenticated to the web +# server will inherit all rights you assign to this user! + +#default_user_name=guest + + + +# SYSTEM/PROCESS INFORMATION ACCESS +# This option is a comma-delimited list of all usernames that +# have access to viewing the Nagios process information as +# provided by the Extended Information CGI (extinfo.cgi). By +# default, *no one* has access to this unless you choose to +# not use authorization. You may use an asterisk (*) to +# authorize any user who has authenticated to the web server. + +authorized_for_system_information=nagiosadmin + + + +# CONFIGURATION INFORMATION ACCESS +# This option is a comma-delimited list of all usernames that +# can view ALL configuration information (hosts, commands, etc). +# By default, users can only view configuration information +# for the hosts and services they are contacts for. You may use +# an asterisk (*) to authorize any user who has authenticated +# to the web server. + +authorized_for_configuration_information=nagiosadmin + + + +# SYSTEM/PROCESS COMMAND ACCESS +# This option is a comma-delimited list of all usernames that +# can issue shutdown and restart commands to Nagios via the +# command CGI (cmd.cgi). Users in this list can also change +# the program mode to active or standby. By default, *no one* +# has access to this unless you choose to not use authorization. +# You may use an asterisk (*) to authorize any user who has +# authenticated to the web server. + +authorized_for_system_commands=nagiosadmin + + + +# GLOBAL HOST/SERVICE VIEW ACCESS +# These two options are comma-delimited lists of all usernames that +# can view information for all hosts and services that are being +# monitored. 
By default, users can only view information +# for hosts or services that they are contacts for (unless you +# you choose to not use authorization). You may use an asterisk (*) +# to authorize any user who has authenticated to the web server. + + +authorized_for_all_services=nagiosadmin +authorized_for_all_hosts=nagiosadmin + + + +# GLOBAL HOST/SERVICE COMMAND ACCESS +# These two options are comma-delimited lists of all usernames that +# can issue host or service related commands via the command +# CGI (cmd.cgi) for all hosts and services that are being monitored. +# By default, users can only issue commands for hosts or services +# that they are contacts for (unless you you choose to not use +# authorization). You may use an asterisk (*) to authorize any +# user who has authenticated to the web server. + +authorized_for_all_service_commands=nagiosadmin +authorized_for_all_host_commands=nagiosadmin + + + +# READ-ONLY USERS +# A comma-delimited list of usernames that have read-only rights in +# the CGIs. This will block any service or host commands normally shown +# on the extinfo CGI pages. It will also block comments from being shown +# to read-only users. + +#authorized_for_read_only=user1,user2 + + + + +# STATUSMAP BACKGROUND IMAGE +# This option allows you to specify an image to be used as a +# background in the statusmap CGI. It is assumed that the image +# resides in the HTML images path (i.e. /usr/local/nagios/share/images). +# This path is automatically determined by appending "/images" +# to the path specified by the 'physical_html_path' directive. +# Note: The image file may be in GIF, PNG, JPEG, or GD2 format. +# However, I recommend that you convert your image to GD2 format +# (uncompressed), as this will cause less CPU load when the CGI +# generates the image. + +#statusmap_background_image=smbackground.gd2 + + + + +# STATUSMAP TRANSPARENCY INDEX COLOR +# These options set the r,g,b values of the background color used the statusmap CGI, +# so normal browsers that can't show real png transparency set the desired color as +# a background color instead (to make it look pretty). +# Defaults to white: (R,G,B) = (255,255,255). + +#color_transparency_index_r=255 +#color_transparency_index_g=255 +#color_transparency_index_b=255 + + + + +# DEFAULT STATUSMAP LAYOUT METHOD +# This option allows you to specify the default layout method +# the statusmap CGI should use for drawing hosts. If you do +# not use this option, the default is to use user-defined +# coordinates. Valid options are as follows: +# 0 = User-defined coordinates +# 1 = Depth layers +# 2 = Collapsed tree +# 3 = Balanced tree +# 4 = Circular +# 5 = Circular (Marked Up) + +default_statusmap_layout=5 + + + +# DEFAULT STATUSWRL LAYOUT METHOD +# This option allows you to specify the default layout method +# the statuswrl (VRML) CGI should use for drawing hosts. If you +# do not use this option, the default is to use user-defined +# coordinates. Valid options are as follows: +# 0 = User-defined coordinates +# 2 = Collapsed tree +# 3 = Balanced tree +# 4 = Circular + +default_statuswrl_layout=4 + + + +# STATUSWRL INCLUDE +# This option allows you to include your own objects in the +# generated VRML world. It is assumed that the file +# resides in the HTML path (i.e. /usr/local/nagios/share). + +#statuswrl_include=myworld.wrl + + + +# PING SYNTAX +# This option determines what syntax should be used when +# attempting to ping a host from the WAP interface (using +# the statuswml CGI. 
You must include the full path to +# the ping binary, along with all required options. The +# $HOSTADDRESS$ macro is substituted with the address of +# the host before the command is executed. +# Please note that the syntax for the ping binary is +# notorious for being different on virtually ever *NIX +# OS and distribution, so you may have to tweak this to +# work on your system. + +ping_syntax=/bin/ping -n -U -c 5 $HOSTADDRESS$ + + + +# REFRESH RATE +# This option allows you to specify the refresh rate in seconds +# of various CGIs (status, statusmap, extinfo, and outages). + +refresh_rate=90 + +# DEFAULT PAGE LIMIT +# This option allows you to specify the default number of results +# displayed on the status.cgi. This number can be adjusted from +# within the UI after the initial page load. Setting this to 0 +# will show all results. + +result_limit=100 + + +# ESCAPE HTML TAGS +# This option determines whether HTML tags in host and service +# status output is escaped in the web interface. If enabled, +# your plugin output will not be able to contain clickable links. + +escape_html_tags=1 + + + + +# SOUND OPTIONS +# These options allow you to specify an optional audio file +# that should be played in your browser window when there are +# problems on the network. The audio files are used only in +# the status CGI. Only the sound for the most critical problem +# will be played. Order of importance (higher to lower) is as +# follows: unreachable hosts, down hosts, critical services, +# warning services, and unknown services. If there are no +# visible problems, the sound file optionally specified by +# 'normal_sound' variable will be played. +# +# +# = +# +# Note: All audio files must be placed in the /media subdirectory +# under the HTML path (i.e. /usr/local/nagios/share/media/). + +#host_unreachable_sound=hostdown.wav +#host_down_sound=hostdown.wav +#service_critical_sound=critical.wav +#service_warning_sound=warning.wav +#service_unknown_sound=warning.wav +#normal_sound=noproblem.wav + + + +# URL TARGET FRAMES +# These options determine the target frames in which notes and +# action URLs will open. + +action_url_target=_blank +notes_url_target=_blank + + + + +# LOCK AUTHOR NAMES OPTION +# This option determines whether users can change the author name +# when submitting comments, scheduling downtime. If disabled, the +# author names will be locked into their contact name, as defined in Nagios. +# Values: 0 = allow editing author names +# 1 = lock author names (disallow editing) + +lock_author_names=1 + + + + +# SPLUNK INTEGRATION OPTIONS +# These options allow you to enable integration with Splunk +# in the web interface. If enabled, you'll be presented with +# "Splunk It" links in various places in the CGIs (log file, +# alert history, host/service detail, etc). Useful if you're +# trying to research why a particular problem occurred. 
+# For more information on Splunk, visit http://www.splunk.com/ + +# This option determines whether the Splunk integration is enabled +# Values: 0 = disable Splunk integration +# 1 = enable Splunk integration + +#enable_splunk_integration=1 + + +# This option should be the URL used to access your instance of Splunk + +#splunk_url=http://127.0.0.1:8000/ + + + diff --git a/openshift-v4/templates/nagios/nagios3/cleanup-cfg.sh b/openshift-v4/templates/nagios/nagios3/cleanup-cfg.sh new file mode 100755 index 000000000..920201a7c --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/cleanup-cfg.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +ENV_NAME=$1 + +if [ ${ENV_NAME} == "dev" ]; then + rm -f /etc/nagios3/conf.d/*-test.cfg + rm -f /etc/nagios3/conf.d/*-prod.cfg +fi +if [ ${ENV_NAME} == "test" ]; then + rm -f /etc/nagios3/conf.d/*-dev.cfg + rm -f /etc/nagios3/conf.d/*-prod.cfg +fi +if [ ${ENV_NAME} == "prod" ]; then + rm -f /etc/nagios3/conf.d/*-dev.cfg + rm -f /etc/nagios3/conf.d/*-test.cfg +fi \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/commands.cfg b/openshift-v4/templates/nagios/nagios3/commands.cfg new file mode 100644 index 000000000..5ba2b4cdc --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/commands.cfg @@ -0,0 +1,91 @@ +############################################################################### +# COMMANDS.CFG - SAMPLE COMMAND DEFINITIONS FOR NAGIOS +############################################################################### + + +################################################################################ +# NOTIFICATION COMMANDS +################################################################################ + + +# 'notify-host-by-email' command definition +define command{ + command_name notify-host-by-email + command_line python3 /etc/nagios3/commands/notify_by_email.py "** Nagios $HOSTNAME$ $NOTIFICATIONTYPE$ Host Alert: $HOSTNAME$ is $HOSTSTATE$ **" "***** Nagios *****\n\nNotification Type: $NOTIFICATIONTYPE$\nHost: $HOSTNAME$\nState: $HOSTSTATE$\nAddress: $HOSTADDRESS$\nInfo: $HOSTOUTPUT$\n\nDate/Time: $LONGDATETIME$\n" $CONTACTEMAIL$ + } + +# 'notify-service-by-email' command definition +define command { + command_name notify-service-by-email + command_line python3 /etc/nagios3/commands/notify_by_email.py "** Nagios $HOSTALIAS$ $NOTIFICATIONTYPE$ Service Alert: $HOSTALIAS$/$SERVICEDESC$ is $SERVICESTATE$ **" "***** Nagios *****\n\nNotification Type: $NOTIFICATIONTYPE$\n\nService: $SERVICEDESC$\nHost: $HOSTALIAS$\nAddress: $HOSTADDRESS$\nState: $SERVICESTATE$\n\nDate/Time: $LONGDATETIME$\n\nAdditional Info:\n\n$SERVICEOUTPUT$\n" $CONTACTEMAIL$ +} + +################################################################################ +# HOST CHECK COMMANDS +################################################################################ + +# On Debian, check-host-alive is being defined from within the +# nagios-plugins-basic package + +################################################################################ +# PERFORMANCE DATA COMMANDS +################################################################################ + + +# 'process-host-perfdata' command definition +define command{ + command_name process-host-perfdata + command_line /usr/bin/printf "%b" "$LASTHOSTCHECK$\t$HOSTNAME$\t$HOSTSTATE$\t$HOSTATTEMPT$\t$HOSTSTATETYPE$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$\n" >> /var/lib/nagios3/host-perfdata.out + } + + +# 'process-service-perfdata' command definition +define command{ + command_name 
process-service-perfdata + command_line /usr/bin/printf "%b" "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICESTATE$\t$SERVICEATTEMPT$\t$SERVICESTATETYPE$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$\n" >> /var/lib/nagios3/service-perfdata.out + } + +define command { + command_name check_replicas + command_line /etc/nagios3/commands/check_replicas.sh '$ARG1$' '$ARG2$' '$ARG3$' +} + +define command { + command_name check_diskusage + command_line /etc/nagios3/commands/check_diskusage.sh '$ARG1$' +} + +define command { + command_name check_postgresql_liveness + command_line /etc/nagios3/commands/check_postgresql_liveness.sh +} + +define command { + command_name check_host + command_line /etc/nagios3/commands/check_host.sh +} + +define command { + command_name check_rabbitmq_connection + command_line /etc/nagios3/commands/check_rabbitmq_connection.sh +} + +define command { + command_name check_minio_connection + command_line /etc/nagios3/commands/check_minio_connection.sh +} + +define command { + command_name check_keycloak_connection + command_line /etc/nagios3/commands/check_keycloak_connection.sh +} + +define command { + command_name check_email_connection + command_line /etc/nagios3/commands/check_email_connection.sh +} + +define command { + command_name check_patroni_health + command_line /etc/nagios3/commands/check_patroni_health.sh +} diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_diskusage.sh b/openshift-v4/templates/nagios/nagios3/commands/check_diskusage.sh new file mode 100755 index 000000000..25a575945 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/commands/check_diskusage.sh @@ -0,0 +1,22 @@ +PROJECT_NAME=$1 + +databasePodName=`oc get pods -n $PROJECT_NAME | grep postgresql96 | awk '{print $1}'` + +backupDiskUsagePercent=`oc exec $databasePodName -c postgresql96 -n $PROJECT_NAME -- df -k | grep "/postgresql-backup" | awk '{print $5}'` +backupDiskUsage=${backupDiskUsagePercent%?} # strip the trailing % sign +diskusageAlarm=false +if [ ${backupDiskUsage} -gt 70 ]; then + diskusageAlarm=true +fi + +databaseDiskUsagePercent=`oc exec $databasePodName -c postgresql96 -n $PROJECT_NAME -- df -k | grep "/var/lib/pgsql/data" | awk '{print $5}'` +databaseDiskUsage=${databaseDiskUsagePercent%?} # strip the trailing % sign +if [ ${databaseDiskUsage} -gt 70 ]; then + diskusageAlarm=true +fi +if [ ${diskusageAlarm} = true ]; then + echo "CRITICAL - $1 Postgresql disk usage check failed, a volume is above 70%" + exit 2 +fi +echo "OK - $1 Postgresql disk usage check passed" +exit 0 diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.py b/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.py new file mode 100644 index 000000000..ae3b9d179 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.py @@ -0,0 +1,16 @@ +import os + +import smtplib + +EMAIL = { + 'SMTP_SERVER_HOST': os.getenv('SMTP_SERVER_HOST', 'apps.smtp.gov.bc.ca'), + 'SMTP_SERVER_PORT': int(os.getenv('SMTP_SERVER_PORT', 25)) +} + +try: + with smtplib.SMTP(host=EMAIL['SMTP_SERVER_HOST'], + port=EMAIL['SMTP_SERVER_PORT']) as server: + server.noop() + print('OK - Email connection checking passed') +except Exception as error: + print('CRITICAL - Email connection checking failed') diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.sh b/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.sh new file mode 100755 index 000000000..e49e5ebd4 --- /dev/null +++ 
b/openshift-v4/templates/nagios/nagios3/commands/check_email_connection.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+emailConnectionTest=$(python3 /etc/nagios3/commands/check_email_connection.py)
+echo $emailConnectionTest
+if [[ $emailConnectionTest == OK* ]];
+then
+  exit 0
+else
+  exit 2
+fi
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_host.sh b/openshift-v4/templates/nagios/nagios3/commands/check_host.sh
new file mode 100755
index 000000000..782c37e85
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_host.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+echo "OK - This check is the same as the replica check."
+exit 0
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.py b/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.py
new file mode 100755
index 000000000..ec7ddc0c7
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.py
@@ -0,0 +1,52 @@
+import os
+import requests
+
+KEYCLOAK = {
+    'REALM': os.getenv('KEYCLOAK_REALM', 'http://localhost:8888/auth/realms/tfrs'),
+    'CLIENT_ID': os.getenv('KEYCLOAK_CLIENT_ID', 'tfrs-app'),
+    'SERVICE_ACCOUNT_REALM': os.getenv('KEYCLOAK_SA_REALM', 'tfrs'),
+    'SERVICE_ACCOUNT_CLIENT_ID': os.getenv('KEYCLOAK_SA_CLIENT_ID', 'tfrs'),
+    'SERVICE_ACCOUNT_KEYCLOAK_API_BASE': os.getenv('KEYCLOAK_SA_BASEURL', 'http://localhost:8888'),
+    'SERVICE_ACCOUNT_CLIENT_SECRET': os.getenv('KEYCLOAK_SA_CLIENT_SECRET', '')
+    }
+
+# Generate a token for the service account with the client-credentials grant;
+# the token is used below to call the Keycloak admin API.
+token_url = '{keycloak}/auth/realms/{realm}/protocol/openid-connect/token'.format(
+    keycloak=KEYCLOAK['SERVICE_ACCOUNT_KEYCLOAK_API_BASE'],
+    realm=KEYCLOAK['SERVICE_ACCOUNT_REALM'])
+
+response = requests.post(token_url,
+                         auth=(KEYCLOAK['SERVICE_ACCOUNT_CLIENT_ID'],
+                               KEYCLOAK['SERVICE_ACCOUNT_CLIENT_SECRET']),
+                         data={'grant_type': 'client_credentials'})
+
+token = response.json()['access_token']
+
+# Retrieve the list of users known to Keycloak (not to be confused with the
+# users found in the application database).
+users_url = '{keycloak}/auth/admin/realms/{realm}/users'.format(
+    keycloak=KEYCLOAK['SERVICE_ACCOUNT_KEYCLOAK_API_BASE'],
+    realm=KEYCLOAK['SERVICE_ACCOUNT_REALM'])
+
+headers = {'Authorization': 'Bearer {}'.format(token)}
+
+response = requests.get(users_url,
+                        headers=headers)
+
+# Querying the federated identities of a single user is enough to confirm
+# that the admin API is reachable.
+all_users = response.json()
+for user in all_users[:1]:
+    users_detail_url = '{keycloak}/auth/admin/realms/{realm}/users/{user_id}/federated-identity'.format(
+        keycloak=KEYCLOAK['SERVICE_ACCOUNT_KEYCLOAK_API_BASE'],
+        realm=KEYCLOAK['SERVICE_ACCOUNT_REALM'],
+        user_id=user['id'])
+
+    response = requests.get(users_detail_url,
+                            headers=headers)
+
+if response.status_code == 200:
+    print('OK - Keycloak connection checking passed')
+else:
+    print('CRITICAL - Keycloak connection checking failed')
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.sh b/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.sh
new file mode 100755
index 000000000..31b77c503
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_keycloak_connection.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+keycloakConnectionTest=$(python3 /etc/nagios3/commands/check_keycloak_connection.py)
+echo $keycloakConnectionTest
+if [[ $keycloakConnectionTest == OK* ]];
+then
+  exit 0
+else
+  exit 2
+fi
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.py b/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.py
new file mode 100644
index 000000000..95e041fc6
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.py
@@ -0,0 +1,22 @@
+import os
+from minio import Minio
+from minio.error import MinioError
+
+MINIO = {
+    'ENDPOINT': os.getenv('MINIO_ENDPOINT', None),
+    'USE_SSL': bool(os.getenv('MINIO_USE_SSL', 'False').lower() in ['true', '1']),
+    'ACCESS_KEY': os.getenv('MINIO_ACCESS_KEY', None),
+    'SECRET_KEY': os.getenv('MINIO_SECRET_KEY', None)
+}
+
+# Listing the buckets exercises authentication as well as connectivity.
+try:
+    minio = Minio(MINIO['ENDPOINT'],
+                  access_key=MINIO['ACCESS_KEY'],
+                  secret_key=MINIO['SECRET_KEY'],
+                  secure=MINIO['USE_SSL'])
+
+    _objects = minio.list_buckets()
+    print('OK - Minio connection checking passed')
+except MinioError as _error:
+    print('CRITICAL - Minio connection checking failed')
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.sh b/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.sh
new file mode 100755
index 000000000..1d975100b
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_minio_connection.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+minioConnectionTest=$(python3 /etc/nagios3/commands/check_minio_connection.py)
+echo $minioConnectionTest
+if [[ $minioConnectionTest == OK* ]];
+then
+  exit 0
+else
+  exit 2
+fi
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_patroni_health.sh b/openshift-v4/templates/nagios/nagios3/commands/check_patroni_health.sh
new file mode 100755
index 000000000..7e046e1ec
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_patroni_health.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+patroniReplicas=$1
+
+## the value must be running
+master_node_state=`curl --silent --show-error http://$DATABASE_SERVICE_NAME.$OPENSHIFT_BUILD_NAMESPACE.svc.cluster.local:8008/patroni | jq --raw-output .state`
+if [ "$master_node_state" != "running" ]; then
+  echo "CRITICAL - Patroni health checking failed"
+  exit 2
+fi
+
+if [ "${patroniReplicas}" -ge 2 ]; then
+  ## the value must be streaming
+  second_node_state=`curl --silent --show-error http://$DATABASE_SERVICE_NAME.$OPENSHIFT_BUILD_NAMESPACE.svc.cluster.local:8008/patroni | jq --raw-output .replication[0].state`
+  if [ "$second_node_state" != "streaming" ]; then
+    echo "CRITICAL - Patroni health checking failed"
+    exit 2
+  fi
+fi
+
+if [ "${patroniReplicas}" -ge 3 ]; then
+  ## the value must be streaming
+  third_node_state=`curl --silent --show-error http://$DATABASE_SERVICE_NAME.$OPENSHIFT_BUILD_NAMESPACE.svc.cluster.local:8008/patroni | jq --raw-output .replication[1].state`
+  if [ "$third_node_state" != "streaming" ]; then
+    echo "CRITICAL - Patroni health checking failed"
+    exit 2
+  fi
+fi
+
+echo "OK - Patroni health checking passed"
+exit 0
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.py b/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.py
new file mode 100644
index 000000000..e85f9533f
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python3
+import psycopg2
+import os
+
+DB = {
+    'DATABASE_SERVICE_NAME': os.getenv('DATABASE_SERVICE_NAME', None),
+    'DATABASE_USER': os.getenv('DATABASE_USER', None),
+    'DATABASE_PASSWORD': os.getenv('DATABASE_PASSWORD', None),
+    'DATABASE_NAME': os.getenv('DATABASE_NAME', None)
+}
+
+try:
+
+    conn_string = "host='{database_service_name}' dbname='{database_name}' user='{database_user}' password='{database_password}'".format(
+        database_service_name=DB['DATABASE_SERVICE_NAME'],
+        database_name=DB['DATABASE_NAME'],
+        database_user=DB['DATABASE_USER'],
+        database_password=DB['DATABASE_PASSWORD']
+    )
+
+    # Opening and immediately closing a connection is enough to prove liveness.
+    conn = psycopg2.connect(conn_string)
+    conn.close()
+
+    print('OK - Database connection checking passed')
+
+except Exception as _error:
+    print('CRITICAL - Database connection checking failed')
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.sh b/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.sh
new file mode 100755
index 000000000..5f89c9528
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_postgresql_liveness.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+dbConnectionTest=$(python3 /etc/nagios3/commands/check_postgresql_liveness.py)
+echo $dbConnectionTest
+if [[ $dbConnectionTest == OK* ]];
+then
+  exit 0
+else
+  exit 2
+fi
+
+#PROJECT_NAME=$1
+
+#databasePodName=`oc get pods -n $PROJECT_NAME | grep postgresql96 | awk '{print $1}'`
+
+#databaseLivenessAlarm=false
+#selectResult=`oc exec $databasePodName -c postgresql96 -n $PROJECT_NAME -- psql -h 127.0.0.1 -q -d tfrs -c 'SELECT 1' | grep row`
+#if [ ${selectResult} == "(1 row)" ]; then
+#  databaseLivenessAlarm=true
+#fi
+#if [ ${databaseLivenessAlarm} = true ]; then
+#  echo "CRITICAL - $1 Postgresql liveness checking failed"
+#  exit 2
+#fi
+#echo "OK - $1 Postgresql liveness checking passed successfully"
+#exit 0
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.py b/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.py
new file mode 100644
index 000000000..d67fab501
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.py
@@ -0,0 +1,30 @@
+import os
+import pika
+from pika import ConnectionParameters, PlainCredentials
+from pika.exceptions import AMQPError
+
+AMQP = {
+    'ENGINE': 'rabbitmq',
+    'VHOST': os.getenv('AMQP_VHOST', '/'),
+    'USER': os.getenv('AMQP_USER', 'guest'),
+    'PASSWORD': os.getenv('AMQP_PASSWORD', 'guest'),
+    'HOST': os.getenv('AMQP_HOST', 'localhost'),
+    'PORT': int(os.getenv('AMQP_PORT', '5672'))
+}
+
+AMQP_CONNECTION_PARAMETERS = ConnectionParameters(
+    host=AMQP['HOST'],
+    port=AMQP['PORT'],
+    virtual_host=AMQP['VHOST'],
+    credentials=PlainCredentials(AMQP['USER'], AMQP['PASSWORD'])
+)
+
+# Opening a connection and a channel proves the broker accepts work.
+try:
+    parameters = AMQP_CONNECTION_PARAMETERS
+    connection = pika.BlockingConnection(parameters)
+    connection.channel()
+    connection.close()
+    print('OK - Rabbitmq connection checking passed')
+except AMQPError as _error:
+    print('CRITICAL - Rabbitmq connection checking failed')
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.sh b/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.sh
new file mode 100755
index 000000000..4a4688b5e
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_rabbitmq_connection.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+rabbitmqConnectionTest=$(python3 /etc/nagios3/commands/check_rabbitmq_connection.py)
+echo $rabbitmqConnectionTest
+if [[ $rabbitmqConnectionTest == OK* ]];
+then
+  exit 0
+else
+  exit 2
+fi
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/commands/check_replicas.sh b/openshift-v4/templates/nagios/nagios3/commands/check_replicas.sh
new file mode 100755
index 000000000..ac3e363c0
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/check_replicas.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+PROJECT_NAME=$1
+DEPLOYMENT=$2
+MIN_REPLICAS=$3
+
+## StatefulSets (RabbitMQ, Patroni) report currentReplicas; DeploymentConfigs report availableReplicas
+if [ "${DEPLOYMENT}" == "tfrs-test-rabbitmq-cluster" ] || [ "${DEPLOYMENT}" == "tfrs-prod-rabbitmq-cluster" ] || [ "${DEPLOYMENT}" == "patroni-dev" ] || [ "${DEPLOYMENT}" == "patroni-test" ] || [ "${DEPLOYMENT}" == "patroni-prod" ]; then
+  availableReplicas=$(oc get -o json StatefulSet ${DEPLOYMENT} -n $PROJECT_NAME | jq '.status.currentReplicas')
+else
+  availableReplicas=$(oc get -o json dc $DEPLOYMENT -n $PROJECT_NAME | jq '.status.availableReplicas')
+fi
+
+if (($availableReplicas>=$MIN_REPLICAS)); then
+  echo "OK - $1:$2 has $availableReplicas replicas available"
+  exit 0
+else
+  echo "CRITICAL - $1:$2 has $availableReplicas replicas available"
+  exit 2
+fi
+
diff --git a/openshift-v4/templates/nagios/nagios3/commands/notify_by_email.py b/openshift-v4/templates/nagios/nagios3/commands/notify_by_email.py
new file mode 100644
index 000000000..2c755923e
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/commands/notify_by_email.py
@@ -0,0 +1,30 @@
+import os
+import sys
+from email.message import EmailMessage
+import smtplib
+
+# sys.argv[1] is subject
+# sys.argv[2] is email body
+# sys.argv[3] is receiver email address(es)
+
+EMAIL = {
+    'SMTP_SERVER_HOST': os.getenv('SMTP_SERVER_HOST', 'apps.smtp.gov.bc.ca'),
+    'SMTP_SERVER_PORT': int(os.getenv('SMTP_SERVER_PORT', 25))
+}
+
+emailBody = sys.argv[2].replace("\\n", "\n")
+
+msg = EmailMessage()
+msg.set_content(emailBody)
+
+msg['Subject'] = sys.argv[1]
+msg['From'] = "noreply@gov.bc.ca"
+msg['To'] = sys.argv[3]
+
+with smtplib.SMTP(host=EMAIL['SMTP_SERVER_HOST'],
+                  port=EMAIL['SMTP_SERVER_PORT']) as server:
+    try:
+        server.send_message(msg)
+        print('OK - Email sending succeeded')
+    except Exception as _error:
+        print('CRITICAL - Email sending failed')
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/contact-groups.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/contact-groups.cfg
new file mode 100644
index 000000000..a5bb5ee15
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/contact-groups.cfg
@@ -0,0 +1,5 @@
+define contactgroup{
+    contactgroup_name tfrs-devops
+    alias TFRS DevOps Group
+    members Kuan.Fan, tfrs.team
+    }
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/contacts.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/contacts.cfg
new file mode 100644
index 000000000..578661a14
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/contacts.cfg
@@ -0,0 +1,28 @@
+define contact {
+    contact_name Kuan.Fan
+    alias TFRS DevOps Kuan
+    host_notifications_enabled 1
+    service_notifications_enabled 1
+    service_notification_period 24x7
+    host_notification_period 24x7
+    service_notification_options w,u,c,r
+    host_notification_options d,u,r
+    service_notification_commands notify-service-by-email
+    host_notification_commands notify-host-by-email
+    email Kuan.Fan@gov.bc.ca
+    can_submit_commands 1
+}
+define contact {
+    contact_name tfrs.team
+    alias TFRS Team
+    host_notifications_enabled 1
+    service_notifications_enabled 1
+    service_notification_period 24x7
+    host_notification_period 24x7
+    service_notification_options w,u,c,r
+    host_notification_options d,u,r
+    service_notification_commands notify-service-by-email
+    host_notification_commands notify-host-by-email
+    email tfrs@gov.bc.ca
+    can_submit_commands 1
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-dev.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-dev.cfg
new file mode 100644
index 000000000..b9e233fc9
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-dev.cfg
@@ -0,0 +1,5 @@
+define hostgroup {
+    hostgroup_name hosts-dev
+    alias TFRS OpenShift Dev Hosts
+    members client-dev, tfrs-dev, postgresql-dev, celery-dev, clamav-dev, minio-dev, notification-server-dev, scan-coordinator-dev, scan-handler-dev, rabbitmq-dev, keycloak-dev, email-dev
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-prod.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-prod.cfg
new file mode 100644
index 000000000..c2aeb7151
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-prod.cfg
@@ -0,0 +1,5 @@
+define hostgroup {
+    hostgroup_name hosts-prod
+    alias TFRS OpenShift Prod Hosts
+    members tfrs-frontend-prod, tfrs-backend-prod, tfrs-celery-prod, tfrs-clamav-prod, tfrs-minio-prod, tfrs-notification-server-prod, tfrs-scan-coordinator-prod, tfrs-scan-handler-prod, tfrs-prod-rabbitmq-cluster, patroni-prod, keycloak-prod, email-prod
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-test.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-test.cfg
new file mode 100644
index 000000000..02df75881
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/host-groups-test.cfg
@@ -0,0 +1,5 @@
+define hostgroup {
+    hostgroup_name hosts-test
+    alias TFRS OpenShift Test Hosts
+    members tfrs-frontend-test, tfrs-backend-test, tfrs-celery-test, tfrs-clamav-test, tfrs-minio-test, tfrs-notification-server-test, tfrs-scan-coordinator-test, tfrs-scan-handler-test,
tfrs-test-rabbitmq-cluster, patroni-test, keycloak-test, email-test +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/hosts-dev.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-dev.cfg new file mode 100644 index 000000000..772352675 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-dev.cfg @@ -0,0 +1,161 @@ +define host { + host_name client-dev + address client.mem-tfrs-dev.svc + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-dev + address backend.mem-tfrs-dev.svc + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name postgresql-dev + address postgresql.mem-tfrs-dev.svc + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name celery-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name clamav-dev + address clamav.mem-tfrs-dev.svc + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name minio-dev + address minio.mem-tfrs-dev.svc + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name notification-server-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name scan-coordinator-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name scan-handler-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name rabbitmq-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name keycloak-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + 
check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name email-dev + hostgroups hosts-dev + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/hosts-prod.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-prod.cfg new file mode 100644 index 000000000..304d79102 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-prod.cfg @@ -0,0 +1,166 @@ +define host { + host_name tfrs-frontend-prod + address tfrs-frontend-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-backend-prod + address tfrs-backend-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-celery-prod + address tfrs-celery-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-clamav-prod + address tfrs-clamav-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-minio-prod + address tfrs-minio-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host{ + host_name tfrs-notification-server-prod + address tfrs-notification-server-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-scan-coordinator-prod + address tfrs-scan-coordinator-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-scan-handler-prod + address tfrs-scan-handler-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { 
+ host_name tfrs-prod-rabbitmq-cluster + address tfrs-prod-rabbitmq-cluster.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name patroni-prod + address patroni-master-prod.0ab226-prod.svc.cluster.local + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name keycloak-prod + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name email-prod + hostgroups hosts-prod + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/hosts-test.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-test.cfg new file mode 100644 index 000000000..2ab4c73c5 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/hosts-test.cfg @@ -0,0 +1,166 @@ +define host { + host_name tfrs-frontend-test + address tfrs-frontend-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-backend-test + address tfrs-backend-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-celery-test + address tfrs-celery-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-clamav-test + address tfrs-clamav-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-minio-test + address tfrs-minio-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host{ + host_name tfrs-notification-server-test + address tfrs-notification-server-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 
24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-scan-coordinator-test + address tfrs-scan-coordinator-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-scan-handler-test + address tfrs-scan-handler-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name tfrs-test-rabbitmq-cluster + address tfrs-test-rabbitmq-cluster.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name patroni-test + address patroni-master-test.0ab226-test.svc.cluster.local + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name keycloak-test + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define host { + host_name email-test + hostgroups hosts-test + check_command check_host + check_interval 60 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-other-dev.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-dev.cfg new file mode 100644 index 000000000..39a5924ba --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-dev.cfg @@ -0,0 +1,65 @@ +define service { + host_name postgresql-dev + service_description Postgresql liveness check + check_command check_postgresql_liveness + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name rabbitmq-dev + service_description Rabbitmq connection check + check_command check_rabbitmq_connection + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name minio-dev + service_description Minio connection check + check_command check_minio_connection + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name keycloak-dev + service_description Keycloak connection check + check_command check_keycloak_connection + 
check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name email-dev
+    service_description Email connection check
+    check_command check_email_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-other-prod.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-prod.cfg
new file mode 100644
index 000000000..dfb9f34d2
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-prod.cfg
@@ -0,0 +1,79 @@
+define service {
+    host_name patroni-prod
+    service_description Patroni liveness check
+    check_command check_postgresql_liveness
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name tfrs-prod-rabbitmq-cluster
+    service_description Rabbitmq connection check
+    check_command check_rabbitmq_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+# minio test script needs to be revised
+#define service {
+#    host_name tfrs-minio-prod
+#    service_description Minio connection check
+#    check_command check_minio_connection
+#    check_interval 5
+#    retry_interval 1
+#    max_check_attempts 5
+#    check_period 24x7
+#    contact_groups tfrs-devops
+#    notification_interval 0
+#    notification_period 24x7
+#    notifications_enabled 1
+#}
+define service {
+    host_name keycloak-prod
+    service_description Keycloak connection check
+    check_command check_keycloak_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name email-prod
+    service_description Email connection check
+    check_command check_email_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name patroni-prod
+    service_description Patroni health check
+    check_command check_patroni_health!3
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-other-test.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-test.cfg
new file mode 100644
index 000000000..2be0dce15
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-other-test.cfg
@@ -0,0 +1,79 @@
+define service {
+    host_name patroni-test
+    service_description Patroni liveness check
+    check_command check_postgresql_liveness
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name tfrs-test-rabbitmq-cluster
+    service_description Rabbitmq connection check
+    check_command check_rabbitmq_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+# minio test script needs to be revised
+#define service {
+#    host_name tfrs-minio-test
+#    service_description Minio connection check
+#    check_command check_minio_connection
+#    check_interval 5
+#    retry_interval 1
+#    max_check_attempts 5
+#    check_period 24x7
+#    contact_groups tfrs-devops
+#    notification_interval 0
+#    notification_period 24x7
+#    notifications_enabled 1
+#}
+define service {
+    host_name keycloak-test
+    service_description Keycloak connection check
+    check_command check_keycloak_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name email-test
+    service_description Email connection check
+    check_command check_email_connection
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name patroni-test
+    service_description Patroni health check
+    check_command check_patroni_health!2
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 0
+    notification_period 24x7
+    notifications_enabled 1
+}
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-dev.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-dev.cfg
new file mode 100644
index 000000000..d30c50f9c
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-dev.cfg
@@ -0,0 +1,130 @@
+define service {
+    host_name client-dev
+    service_description Client replica count
+    check_command check_replicas!mem-tfrs-dev!client!1
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 5
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name tfrs-dev
+    service_description TFRS replica count
+    check_command check_replicas!mem-tfrs-dev!tfrs!1
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 5
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name postgresql-dev
+    service_description Postgresql replica count
+    check_command check_replicas!mem-tfrs-dev!patroni-dev!3
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 5
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name celery-dev
+    service_description Celery replica count
+    check_command check_replicas!mem-tfrs-dev!celery!1
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 5
+    notification_period 24x7
+    notifications_enabled 1
+}
+define service {
+    host_name clamav-dev
+    service_description Clamav replica count
+    check_command check_replicas!mem-tfrs-dev!clamav!1
+    check_interval 5
+    retry_interval 1
+    max_check_attempts 5
+    check_period 24x7
+    contact_groups tfrs-devops
+    notification_interval 5
notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name minio-dev + service_description Minio replica count + check_command check_replicas!mem-tfrs-dev!minio!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 5 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name notification-server-dev + service_description Notification-server replica count + check_command check_replicas!mem-tfrs-dev!notification-server!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 5 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name scan-coordinator-dev + service_description Scan-coordinator replica count + check_command check_replicas!mem-tfrs-dev!scan-coordinator!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 5 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name scan-handler-dev + service_description Scan-handler replica count + check_command check_replicas!mem-tfrs-dev!scan-handler!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 5 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name rabbitmq-dev + service_description Rabbitmq replica count + check_command check_replicas!mem-tfrs-dev!rabbitmq!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 5 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-prod.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-prod.cfg new file mode 100644 index 000000000..97f96e5f6 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-prod.cfg @@ -0,0 +1,130 @@ +define service { + host_name tfrs-frontend-prod + service_description Frontend replica count + check_command check_replicas!0ab226-prod!tfrs-frontend-prod!2 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-backend-prod + service_description Backend replica count + check_command check_replicas!0ab226-prod!tfrs-backend-prod!2 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name patroni-prod + service_description Patroni replica count + check_command check_replicas!0ab226-prod!patroni-prod!3 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-celery-prod + service_description Celery replica count + check_command check_replicas!0ab226-prod!tfrs-celery-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name 
tfrs-clamav-prod + service_description Clamav replica count + check_command check_replicas!0ab226-prod!tfrs-clamav-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-minio-prod + service_description Minio replica count + check_command check_replicas!0ab226-prod!tfrs-minio-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-notification-server-prod + service_description Notification-server replica count + check_command check_replicas!0ab226-prod!tfrs-notification-server-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-scan-coordinator-prod + service_description Scan-coordinator replica count + check_command check_replicas!0ab226-prod!tfrs-scan-coordinator-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-scan-handler-prod + service_description Scan-handler replica count + check_command check_replicas!0ab226-prod!tfrs-scan-handler-prod!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-prod-rabbitmq-cluster + service_description Rabbitmq replica count + check_command check_replicas!0ab226-prod!tfrs-prod-rabbitmq-cluster!2 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-test.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-test.cfg new file mode 100644 index 000000000..ee8fe7e1c --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/services-replica-test.cfg @@ -0,0 +1,130 @@ +define service { + host_name tfrs-frontend-test + service_description Frontend replica count + check_command check_replicas!0ab226-test!tfrs-frontend-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-backend-test + service_description Backend replica count + check_command check_replicas!0ab226-test!tfrs-backend-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name patroni-test + service_description Patroni replica count + check_command check_replicas!0ab226-test!patroni-test!2 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name 
tfrs-celery-test + service_description Celery replica count + check_command check_replicas!0ab226-test!tfrs-celery-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-clamav-test + service_description Clamav replica count + check_command check_replicas!0ab226-test!tfrs-clamav-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-minio-test + service_description Minio replica count + check_command check_replicas!0ab226-test!tfrs-minio-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-notification-server-test + service_description Notification-server replica count + check_command check_replicas!0ab226-test!tfrs-notification-server-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-scan-coordinator-test + service_description Scan-coordinator replica count + check_command check_replicas!0ab226-test!tfrs-scan-coordinator-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-scan-handler-test + service_description Scan-handler replica count + check_command check_replicas!0ab226-test!tfrs-scan-handler-test!1 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} +define service { + host_name tfrs-test-rabbitmq-cluster + service_description Rabbitmq replica count + check_command check_replicas!0ab226-test!tfrs-test-rabbitmq-cluster!2 + check_interval 5 + retry_interval 1 + max_check_attempts 5 + check_period 24x7 + contact_groups tfrs-devops + notification_interval 0 + notification_period 24x7 + notifications_enabled 1 +} \ No newline at end of file diff --git a/openshift-v4/templates/nagios/nagios3/conf.d/timeperiods.cfg b/openshift-v4/templates/nagios/nagios3/conf.d/timeperiods.cfg new file mode 100644 index 000000000..0872cd5a5 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/conf.d/timeperiods.cfg @@ -0,0 +1,11 @@ +define timeperiod{ + timeperiod_name 24x7 + alias Always + sunday 00:00-24:00 + monday 00:00-24:00 + tuesday 00:00-24:00 + wednesday 00:00-24:00 + thursday 00:00-24:00 + friday 00:00-24:00 + saturday 00:00-24:00 +} diff --git a/openshift-v4/templates/nagios/nagios3/nagios.cfg b/openshift-v4/templates/nagios/nagios3/nagios.cfg new file mode 100644 index 000000000..2d4a7eb4d --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/nagios.cfg @@ -0,0 +1,111 @@ +log_file=/var/log/nagios3/nagios3.log +cfg_file=/etc/nagios3/commands.cfg +cfg_dir=/etc/nagios-plugins/config +cfg_dir=/etc/nagios3/conf.d +log_rotation_method=d +log_archive_path=/var/log/nagios3 +object_cache_file=/var/cache/nagios3/objects.cache 
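+# Assumption: stock Debian nagios3 layout. The precached object file below is
+# only consulted when the daemon is started with -u; regenerate it with
+# "nagios3 -pv /etc/nagios3/nagios.cfg" after changing object definitions.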
+precached_object_file=/var/lib/nagios3/objects.precache +resource_file=/etc/nagios3/resource.cfg +status_file=/var/cache/nagios3/status.dat +status_update_interval=10 +nagios_user=nagios +nagios_group=nagios +check_external_commands=0 +command_check_interval=5 +command_file=/var/lib/nagios3/rw/nagios.cmd +external_command_buffer_slots=4096 +lock_file=/var/run/nagios3/nagios3.pid +temp_file=/var/cache/nagios3/nagios.tmp +temp_path=/tmp +event_broker_options=-1 +use_syslog=0 +log_notifications=0 +log_service_retries=0 +log_host_retries=0 +log_event_handlers=1 +log_initial_states=1 +log_external_commands=1 +log_passive_checks=1 +service_inter_check_delay_method=s +max_service_check_spread=30 +service_interleave_factor=s +host_inter_check_delay_method=s +max_host_check_spread=30 +max_concurrent_checks=1 +check_result_reaper_frequency=10 +max_check_result_reaper_time=30 +check_result_path=/var/lib/nagios3/spool/checkresults +max_check_result_file_age=3600 +cached_host_check_horizon=15 +cached_service_check_horizon=15 +enable_predictive_host_dependency_checks=1 +enable_predictive_service_dependency_checks=1 +soft_state_dependencies=0 +auto_reschedule_checks=0 +auto_rescheduling_interval=30 +auto_rescheduling_window=180 +sleep_time=0.25 +service_check_timeout=60 +host_check_timeout=30 +event_handler_timeout=30 +notification_timeout=30 +ocsp_timeout=5 +perfdata_timeout=5 +retain_state_information=1 +state_retention_file=/var/lib/nagios3/retention.dat +retention_update_interval=60 +use_retained_program_state=1 +use_retained_scheduling_info=1 +retained_host_attribute_mask=0 +retained_service_attribute_mask=0 +retained_process_host_attribute_mask=0 +retained_process_service_attribute_mask=0 +retained_contact_host_attribute_mask=0 +retained_contact_service_attribute_mask=0 +interval_length=60 +check_for_updates=1 +bare_update_check=0 +use_aggressive_host_checking=0 +execute_service_checks=1 +accept_passive_service_checks=1 +execute_host_checks=1 +accept_passive_host_checks=1 +enable_notifications=1 +enable_event_handlers=1 +process_performance_data=0 +obsess_over_services=0 +obsess_over_hosts=0 +translate_passive_host_checks=0 +passive_host_checks_are_soft=0 +check_for_orphaned_services=1 +check_for_orphaned_hosts=1 +check_service_freshness=1 +service_freshness_check_interval=60 +service_check_timeout_state=c +check_host_freshness=0 +host_freshness_check_interval=60 +additional_freshness_latency=15 +enable_flap_detection=1 +low_service_flap_threshold=5.0 +high_service_flap_threshold=20.0 +low_host_flap_threshold=5.0 +high_host_flap_threshold=20.0 +date_format=iso8601 +p1_file=/usr/lib/nagios3/p1.pl +enable_embedded_perl=1 +use_embedded_perl_implicitly=1 +use_timezone=Canada/Pacific +illegal_object_name_chars=`~!$%^&*|'"<>?,()= +illegal_macro_output_chars=`~$&|'"<> +use_regexp_matching=0 +use_true_regexp_matching=0 +admin_email=kuan.fan@gov.bc.ca +admin_pager=pageroot@localhost +daemon_dumps_core=0 +use_large_installation_tweaks=0 +enable_environment_macros=1 +debug_level=32 +debug_verbosity=2 +debug_file=/var/log/nagios3/nagios3-debug.log +max_debug_file_size=10000000 diff --git a/openshift-v4/templates/nagios/nagios3/resource.cfg b/openshift-v4/templates/nagios/nagios3/resource.cfg new file mode 100644 index 000000000..3ed732bb1 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/resource.cfg @@ -0,0 +1,31 @@ +########################################################################### +# +# RESOURCE.CFG - Resource File for Nagios +# +# You can define $USERx$ macros in this file, which can in 
turn be used +# in command definitions in your host config file(s). $USERx$ macros are +# useful for storing sensitive information such as usernames, passwords, +# etc. They are also handy for specifying the path to plugins and +# event handlers - if you decide to move the plugins or event handlers to +# a different directory in the future, you can just update one or two +# $USERx$ macros, instead of modifying a lot of command definitions. +# +# The CGIs will not attempt to read the contents of resource files, so +# you can set restrictive permissions (600 or 660) on them. +# +# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$) +# +# Resource files may also be used to store configuration directives for +# external data sources like MySQL... +# +########################################################################### + +# Sets $USER1$ to be the path to the plugins +$USER1$=/usr/lib/nagios/plugins + +# Sets $USER2$ to be the path to event handlers +#$USER2$=/usr/lib/nagios/plugins/eventhandlers + +# Store some usernames and passwords (hidden from the CGIs) +#$USER3$=someuser +#$USER4$=somepassword diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/avail.css b/openshift-v4/templates/nagios/nagios3/stylesheets/avail.css new file mode 100644 index 000000000..b1ad0bfe3 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/avail.css @@ -0,0 +1,35 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.avail { } + +.data { font-size: 9pt; } + +.hostUP { background-color: #cce8cc; padding: 0 4 0 4; } +.hostDOWN { background-color: #ffdddd; padding: 0 4 0 4; } +.hostUNREACHABLE { background-color: #ffddaa; padding: 0 4 0 4; } + +.serviceOK { background-color: #cce8cc; padding: 0 4 0 4; } +.serviceWARNING { background-color: #feffc1; padding: 0 4 0 4; } +.serviceUNKNOWN { background-color: #ffddaa; padding: 0 4 0 4; } +.serviceCRITICAL { background-color: #ffdddd; padding: 0 4 0 4; } + +table.logEntries { font-size: 9pt; padding: 5 0 0 0; } +th.logEntries { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.logEntriesOdd { background-color: #e7e7e7; padding: 0 4 0 4; } +.logEntriesEven { background-color: #f4f2f2; padding: 0 4 0 4; } + +.logEntriesOK { background-color: #cce8cc; padding: 0 4 0 4; } +.logEntriesUNKNOWN { background-color: #ffddaa; padding: 0 4 0 4; } +.logEntriesWARNING { background-color: #feffc1; padding: 0 4 0 4; } +.logEntriesCRITICAL { background-color: #ffdddd; padding: 0 4 0 4; } +.logEntriesUP { background-color: #cce8cc; padding: 0 4 0 4; } +.logEntriesDOWN { background-color: #ffdddd; padding: 0 4 0 4; } +.logEntriesUNREACHABLE { background-color: #ffddaa; padding: 0 4 0 4; } +.logEntriesINDETERMINATE { background-color: #ddccff; padding: 0 4 0 4; } + +.infoMessage { font-size: 9pt; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/checksanity.css b/openshift-v4/templates/nagios/nagios3/stylesheets/checksanity.css new file mode 100644 index 000000000..49922d19b --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/checksanity.css @@ -0,0 +1,27 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to 
use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.extinfo { } + +.Title { font-size: 12pt; text-align: center; font-weight: bold; } +.SectionTitle { font-size: 12pt; text-align: center; font-weight: bold; } + +.DynamicData { font-size: 10pt; padding: 2; } +.StaticData { font-size: 10pt; padding: 2; } +.TableHeader { font-size: 10pt; background-color: #d0d0d0; font-weight: bold; } + +.Item { font-size: 10pt; background-color: #f4f2f2; font-weight: bold; } +.DataSource { font-size: 10pt; background-color: #f4f2f2; } +.Number { font-size: 10pt; background-color: #f4f2f2; } + +.Value { font-size: 10pt; background-color: #f4f2f2; font-weight: bold; } +.ValueOk { font-size: 10pt; background-color: #88d066; font-weight: bold; } +.ValueError { font-size: 10pt; background-color: #f88888; font-weight: bold; } + + + + diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/cmd.css b/openshift-v4/templates/nagios/nagios3/stylesheets/cmd.css new file mode 100644 index 000000000..f8698f736 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/cmd.css @@ -0,0 +1,14 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.cmd { } + +.cmdType { font-size: 12pt; font-weight: bold; color: #aa0000; padding-bottom: 40; } +.commandDescription { font-size: 8pt; background-color: #f4f2f2; border: 1px solid #d0d0d0; margin: 4 0 0 0; padding: 4 4 4 4; } +.descriptionTitle { font-size: 10pt; font-weight: bold; } +.infoMessage { font-size: 8pt; background-color: #efefaa; border: 1px solid #777777; margin: 40 20% 0 20%; padding: 5 5 5 5; } +.optBox { font-size: 9pt; padding: 5 5 5 5; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/common.css b/openshift-v4/templates/nagios/nagios3/stylesheets/common.css new file mode 100644 index 000000000..6d8e1aae8 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/common.css @@ -0,0 +1,370 @@ +/* exfoliation: a nagios makeover */ +/* version: 0.7 */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/ + +/* thanks to jacob.laack for nagiosneat */ +/* thanks to nagios authors for a solid base */ + +/* these are reference colors for the color scheme: + + color dark light border + OK: green 88d066 cce8cc 777777 + WARNING: yellow ffff00 feffc1 777777 + CRITICAL: red f88888 ffdddd 777777 + PENDING: grey acacac fefefe 777777 + UNKNOWN: orange ffbb55 ffddaa 777777 + UNREACHABLE: orange ffbb55 ffddaa 777777 +INDETERMINATE: purple ddccff 777777 + PROBLEMS: blue aaccff 777777 + + ACK: aaaaaa + CUSTOM: 778899 + + important: blue 99aacc 777777 +not important: blue aaccff 777777 + + table header: d0d0d0 + odd rows: e7e7e7 + even rows: f4f2f2 + + titles: 12pt + body: 10pt + table headings: 9pt + controls: 9pt + data in tables: 8pt or 9pt +*/ + +body { + margin: 0em; + margin: 0.8em 0.8em 2em 0.8em; + color: black; + background-color: white; + font-family: arial, verdana, serif; + font-weight: normal; + font-size: 10pt; +} + +table { + border: none; + margin: 0; +} + +th, td { + border: none; + padding: 0 2px 0 2px; +} + +form { + margin: 0; + padding: 0; +} + + +a img { + border: none; +} +a { + text-decoration: none; + color: #40529b; +} +a:hover { + text-decoration: underline; + color: #3f5bcd; +} +a:active { + color: #496aef; +} +a:visited { + color: #2c3763; +} + + +div.navbarlogo { + margin: 0 0 10px 0; +} +div.navsection { + margin: 5px 0 10px 0; +} +div.navsectiontitle { + font-size: 9pt; + font-weight: bold; + padding: 2px; + background-color: #efefef; + border:1px solid #dddddd; +} +div.navsectionlinks { + margin: 3px 0 0 0; +} + +ul.navsectionlinks { + margin: 0; + padding: 0; + list-style: none; +} +ul.navsectionlinks li { } +ul.navsectionlinks li a { + font-weight: bold; + font-size: 9pt; + text-decoration: none; + padding: 0 0 0 15px; + /* background: transparent url(../images/greendot.gif) no-repeat scroll 0 0; */ + +} +ul.navsectionlinks li ul { + margin: 0px; + padding: 0 0 0 30px; + list-style: none; +} +ul.navsectionlinks li ul li { } +ul.navsectionlinks li a:hover { +/* background: transparent url(../images/orangedot.gif) no-repeat scroll 0 0; */ +color: #8391cd; +} +ul.navsectionlinks li ul li a { + background: none; + padding: 0; + font-weight: normal; +} +ul.navsectionlinks li ul li a:hover { + background: none; +} +ul.navsectionlinks li ul li ul { + margin: 0px; + padding: 0 0 0 15px; + list-style: none; +} + + +.navbarsearch { + margin: 5px 0 0 0; +} +.navbarsearch fieldset { + border: none; +} + +.navbarsearch fieldset legend { + font-size: 8pt; +} +.navbarsearch input{ + font-size: 9pt; +} + + +#splashpage{ + text-align: center; +} +#mainbrandsplash{ + font-size: 12pt; + font-weight: bold; + margin: 0 0 35px 0; +} +#maincopy{ + margin: 0 0 15px 0; +} +#currentversioninfo{ + font-size: 12pt; +} +#currentversioninfo .product{ + font-size: 14pt; + font-weight: bold; +} +#currentversioninfo .version{ + font-size: 14pt; + font-weight: bold; +} +#currentversioninfo .releasedate{ + font-size: 11pt; + margin: 5px 0 0 0; +} +#currentversioninfo .checkforupdates{ + font-size: 11pt; + font-weight: bold; +} +#currentversioninfo .whatsnew{ + font-size: 11pt; + font-weight: bold; + margin: 50px 0 0 0; +} +#updateversioninfo{ + margin: 15px auto 35px auto; + width: 400px; +} +.updatechecksdisabled{ + background-color: #FF9F9F; + border: 1px solid red; + padding: 10px; +} +.updatechecksdisabled div.warningmessage{ + font-weight: bold; +} +#updateversioninfo div.submessage{ + clear: left; +} +.updateavailable{ + background-color: #9FD4FF; + border: 1px solid blue; + padding: 10px; +} 
+.updateavailable div.updatemessage{ + font-size: 12pt; + font-weight: bold; +} + +#splashpage #mainfooter{ + /*margin: 100px 0 0 0;*/ + clear: both; + font-size: 8pt; + padding-top: 35px; +} +#splashpage #mainfooter .disclaimer{ + /*width: 80%;*/ + margin: auto; +} +#splashpage #mainfooter .logos{ + margin: 15px 0 0 0; +} + + + + + + +table.infoBox { width: 100%; } +td.infoBox { font-size: 8pt; padding: 0 0 1em 0; white-space: nowrap; } +div.infoBoxTitle { font-size: 10pt; font-weight: bold; } +div.infoBoxBadProcStatus { font-size: 8pt; font-weight: bold; color: red; } + +.linkBox { font-size: 8pt; padding: 1; } +table.linkBox td { white-space: nowrap; } + +.filter { font-size: 8pt; padding: 1; } +.filterTitle { font-size: 9pt; font-weight: bold; } +.filterName { font-size: 8pt; text-align: right; font-weight: bold; } +.filterValue { font-size: 8pt; } + +.errorDescription { font-size: 10pt; text-align: center; font-weight: bold; } +.errorMessage { font-size: 10pt; text-align: center; font-weight: bold; color: red; } +.warningMessage { font-size: 10pt; text-align: center; font-weight: bold; color: red; } + +.statusTitle { text-align: center; font-weight: bold; font-size: 12pt; white-space: nowrap; } +.statusSort { font-size: 8pt; } + +table.data { padding: 0; } +th.data { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.dataOdd { font-size: 8pt; background-color: #e7e7e7; padding: 0 4 0 4; } +.dataEven { font-size: 8pt; background-color: #f4f2f2; padding: 0 4 0 4; } +.dataTitle { font-size: 12pt; text-align: center; font-weight: bold; } +.dataSubTitle { font-size: 10pt; text-align: center; font-weight: bold; } + +.optBox { font-size: 9pt; white-space: nowrap; padding: 2 0 0 0; } +.optBoxTitle { font-size: 10pt; font-weight: bold; text-align: center; } +.optBoxRequiredItem { font-size: 9pt; text-align: right; padding: 0 5 0 5; color: red; } +.optBoxItem { font-size: 9pt; text-align: right; padding: 0 5 0 5; } +.optBoxValue { font-size: 9pt; } + +.optionBoxTitle { font-size: 10pt; text-align: center; font-weight: bold; } +.optionBox { font-size: 10pt; padding: 2; } + +.navBoxTitle { font-size: 10pt; font-weight: bold; white-space: nowrap; } +.navBoxItem { font-size: 8pt; } +.navBoxDate { font-size: 8pt; white-space: nowrap; } +.navBoxFile { font-size: 8pt; text-align: center; } + +.helpfulHint { font-size: 8pt; font-style: italic; text-align: center; } + +.logEntries { font-size: 8pt; white-space: nowrap; } + +.dateTimeBreak { font-size: 9pt; font-weight: bold; } + +.reportRange { font-size: 10pt; white-space: nowrap; } +.reportDuration { font-size: 8pt; white-space: nowrap; } +.reportTime { font-size: 8pt; white-space: nowrap; text-align: right; font-style: italic; } + +.reportSelectTitle { font-size: 12pt; text-align: center; font-weight: bold; } +.reportSelectSubTitle { font-size: 9pt; text-align: right; } +.reportSelectItem { font-size: 9pt; } +.reportSelectTip { font-size: 8pt; font-style: italic; } + +.dateSelectTitle { font-size: 12pt; text-align: center; font-weight: bold; } +.dateSelectSubTitle { font-size: 9pt; text-align: right; } +.dateSelectItem { font-size: 9pt; } + +.popupText { font-size: 8pt; background-color: #eeeeaa; border: 1px solid #777777; padding: 10 10 10 10; } + +.hostImportantProblem { font-size: 8pt; background-color: #88aadd; border: 1px solid #aaaaaa; padding: 0 5 0 5; } +.hostUnimportantProblem { font-size: 8pt; background-color: #aaccff; border: 1px solid #888888; padding: 0 5 0 5; } + 
+.serviceImportantProblem { font-size: 8pt; background-color: #88aadd; border: 1px solid #aaaaaa; padding: 0 5 0 5; }
+.serviceUnimportantProblem { font-size: 8pt; background-color: #aaccff; border: 1px solid #888888; padding: 0 5 0 5; }
+
+.outageImportantProblem { font-size: 8pt; background-color: #88aadd; border: 1px solid #aaaaaa; padding: 0 5 0 5; }
+.outageUnimportantProblem { font-size: 8pt; background-color: #aaccff; border: 1px solid #888888; padding: 0 5 0 5; }
+
+
+/* Some nagios configurations have side.html rather than side.php and define */
+/* a slightly different set of nav elements. */
+.NavBarTitle {
+    font-size: 9pt;
+    font-weight: bold;
+    margin: 5px 0 10px 0;
+    padding: 2px;
+    background-color: #efefef;
+    border: 1px solid #dddddd;
+}
+
+.NavBarItem {
+    font-size: 9pt;
+    font-weight: bold;
+    list-style: none;
+    text-decoration: none;
+    margin: 0;
+    padding: 0 0 0 0;
+}
+
+.NavBarSearchItem {
+    font-size: 9pt;
+}
+
+
+
+#splashboxes {
+    /*border: 1px solid blue;*/
+    margin: auto;
+    width: 90%;
+    }
+.splashbox{
+    padding: 5px;
+    margin: 5px 5px 5px;
+    border: 1px solid #AAAAAA;
+    float: left;
+    text-align: left;
+    height: 140px;
+    }
+.splashbox h2{
+    margin: 0px;
+    font-size: 12pt;
+    }
+.splashbox ul{
+    margin: 0;
+    padding: 5px 5px 5px 15px;
+    }
+.splashbox ul li{
+    clear: both;
+    }
+#splashbox1 {
+    width: 250px;
+    }
+#splashbox2 {
+    width: 500px;
+    }
+#splashbox3 {
+    width: 250px;
+    clear: both;
+    }
+#splashbox4 {
+    width: 500px;
+    }
\ No newline at end of file
diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/config.css b/openshift-v4/templates/nagios/nagios3/stylesheets/config.css
new file mode 100644
index 000000000..558d4faa5
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/stylesheets/config.css
@@ -0,0 +1,11 @@
+/* exfoliation: a nagios makeover */
+/* Copyright 2010 Matthew Wall, all rights reserved */
+/* */
+/* Permission to use, copy, modify, and distribute this software for any */
+/* purpose with or without fee is hereby granted, provided that the above */
+/* copyright notice and this permission notice appear in all copies. */
+
+.config { }
+
+.reportSelectSubTitle { font-size: 9pt; text-align: left; }
+.reportSelectItem { font-size: 9pt; }
diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/extinfo.css b/openshift-v4/templates/nagios/nagios3/stylesheets/extinfo.css
new file mode 100644
index 000000000..4c81320b4
--- /dev/null
+++ b/openshift-v4/templates/nagios/nagios3/stylesheets/extinfo.css
@@ -0,0 +1,84 @@
+/* exfoliation: a nagios makeover */
+/* Copyright 2010 Matthew Wall, all rights reserved */
+/* */
+/* Permission to use, copy, modify, and distribute this software for any */
+/* purpose with or without fee is hereby granted, provided that the above */
+/* copyright notice and this permission notice appear in all copies.
*/ + +.extinfo { } + +.perfTypeTitle { font-size: 10pt; text-align: right; font-weight: bold; } + +.stateInfoPanel { font-size: 9pt; } +.stateStatisticsPanel { } +.stateInfoTable1 { background-color: #f4f2f2; border: 1px solid #d0d0d0; } +.stateInfoTable2 { background-color: #f4f2f2; border: 1px solid #d0d0d0; } + +.dataVar { font-size: 9pt; font-weight: bold; } +.dataVal { font-size: 9pt; } + +/* FIXME: override the defaults until php/html is fixed */ +/* .data { font-size: 10pt; font-weight: bold; } */ +div.data { font-size: 10pt; font-weight: normal; } +.dataTitle { font-size: 10pt; font-weight: bold; padding-bottom: 5; } + +.commandTitle { font-size: 10pt; text-align: center; font-weight: bold; padding-bottom: 5; } +TABLE.command { background-color: #f4f2f2; border: 1px solid #d0d0d0; } +.command { font-size: 9pt; } +.commandPanel { } +.commentPanel { } + +.commentTitle { font-size: 10pt; text-align: center; font-weight: bold; } +DIV.commentNav { font-size: 10pt; text-align: center; } +A.commentNav { font-size: 10pt; } + +TABLE.comment { font-size: 10pt; background-color: white; padding: 2; } +TH.comment { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.commentOdd { font-size: 9pt; background-color: #e7e7e7; } +.commentEven { font-size: 9pt; background-color: #f4f2f2; } +DIV.comment,A.comment { font-size: 10pt; background-color: white; text-align: center; } + +.downtimeTitle { font-size: 12pt; text-align: center; font-weight: bold; } +DIV.downtimeNav { font-size: 10pt; text-align: center; } +A.downtimeNav { font-size: 10pt; } + +TABLE.downtime { font-size: 10pt; background-color: white; padding: 2; } +TH.downtime { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.downtimeOdd { font-size: 9pt; background-color: #e7e7e7; } +.downtimeEven { font-size: 9pt; background-color: #f4f2f2; } + +.notflapping { background-color: #88d066; border: 1px solid #777777; font-weight: bold; float: left; } +.flapping { background-color: #f88888; border: 1px solid #777777; font-weight: bold; float: left; } +.notificationsENABLED { background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.notificationsDISABLED { background-color: #f88888; border: 1px solid #777777; font-weight: bold; } +.checksENABLED { background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.checksDISABLED { background-color: #f88888; border: 1px solid #777777; font-weight: bold; } +.eventhandlersENABLED { background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.eventhandlersDISABLED { background-color: #f88888; border: 1px solid #777777; font-weight: bold; } +.flapdetectionENABLED { background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.flapdetectionDISABLED { background-color: #f88888; border: 1px solid #777777; font-weight: bold; } +.downtimeACTIVE { background-color: #f88888; border: 1px solid #777777; font-weight: bold; float: left; } +.downtimeINACTIVE { background-color: #88d066; border: 1px solid #777777; font-weight: bold; float: left; } +.processOK { background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.processUNKNOWN { background-color: #ffbb55; border: 1px solid #777777; font-weight: bold; } +.processWARNING { background-color: #ffff00; border: 1px solid #777777; font-weight: bold; } +.processCRITICAL { background-color: #f88888; border: 1px solid #777777; font-weight: bold; } +.modeACTIVE { 
background-color: #88d066; border: 1px solid #777777; font-weight: bold; } +.modeSTANDBY { background-color: #ffff00; border: 1px solid #777777; font-weight: bold; } + +.hostUP { background-color: #88d066; border: 1px solid #777777; font-weight: bold; float: left; } +.hostDOWN { background-color: #f88888; border: 1px solid #777777; font-weight: bold; float: left; } +.hostUNREACHABLE { background-color: #f88888; border: 1px solid #777777; font-weight: bold; float: left; } + +.serviceOK { background-color: #88d066; border: 1px solid #777777; font-weight: bold; float: left; } +.serviceWARNING { background-color: #ffff00; border: 1px solid #777777; font-weight: bold; float: left; } +.serviceUNKNOWN { background-color: #ffbb55; border: 1px solid #777777; font-weight: bold; float: left; } +.serviceCRITICAL { background-color: #f88888; border: 1px solid #777777; font-weight: bold; float: left; } + +.queueTitle { font-size: 12pt; text-align: center; font-weight: bold; } +TABLE.queue { font-size: 9pt; padding: 0; } +TH.queue { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.queueOdd { font-size: 9pt; background-color: #e7e7e7; padding: 0 4 0 4; } +.queueEven { font-size: 9pt; background-color: #f4f2f2; padding: 0 4 0 4; } +.queueENABLED { font-size: 9pt; background-color: #88d066; border: 1px solid #777777; padding: 0 4 0 4; } +.queueDISABLED { font-size: 9pt; background-color: #f88888; border: 1px solid #777777; padding: 0 4 0 4; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/histogram.css b/openshift-v4/templates/nagios/nagios3/stylesheets/histogram.css new file mode 100644 index 000000000..9b147f6bd --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/histogram.css @@ -0,0 +1,10 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.histogram { } + +.helpfulHints { font-size: 10pt; font-style: italic; text-align: center; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/history.css b/openshift-v4/templates/nagios/nagios3/stylesheets/history.css new file mode 100644 index 000000000..ed8dba30f --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/history.css @@ -0,0 +1,8 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.history { } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/ministatus.css b/openshift-v4/templates/nagios/nagios3/stylesheets/ministatus.css new file mode 100644 index 000000000..040c8ac4a --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/ministatus.css @@ -0,0 +1,64 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/ + +.status { } + +TABLE.status { font-size: 9pt; padding: 2; } +TH.status { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +DIV.status { font-size: 10pt; text-align: center; } +.statusOdd { font-size: 9pt; background-color: #e7e7e7; } +.statusEven { font-size: 9pt; background-color: #f4f2f2; } + +.statusPENDING { font-size: 9pt; background-color: #acacac; } +.statusOK { font-size: 9pt; background-color: #88d066; } +.statusRECOVERY { font-size: 9pt; background-color: #88d066; } +.statusUNKNOWN { font-size: 9pt; background-color: #ffbb55; } +.statusWARNING { font-size: 9pt; background-color: #ffff00; } +.statusCRITICAL { font-size: 9pt; background-color: #f88888; } + +.statusHOSTPENDING { font-size: 9pt; background-color: #acacac; } +.statusHOSTUP { font-size: 9pt; background-color: #88d066; } +.statusHOSTDOWN { font-size: 9pt; background-color: #f88888; } +.statusHOSTUNREACHABLE { font-size: 9pt; background-color: #ffbb55; } + +.statusBGUNKNOWN { font-size: 9pt; background-color: #ffddaa; } +.statusBGWARNING { font-size: 9pt; background-color: #feffc1; } +.statusBGCRITICAL { font-size: 9pt; background-color: #ffdddd; } +.statusBGDOWN { font-size: 9pt; background-color: #ffdddd; } +.statusBGUNREACHABLE { font-size: 9pt; background-color: #ffbb55; } + +DIV.serviceTotals { font-size: 10pt; text-align: center; font-weight: bold; } +TABLE.serviceTotals { font-size: 10pt; font-weight: bold; } +TH.serviceTotals,A.serviceTotals { font-size: 9pt; } +TD.serviceTotals { font-size: 9pt; text-align: center; background-color: #e0e0e0; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsOK { font-size: 9pt; text-align: center; background-color: #88d066; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsWARNING { font-size: 9pt; text-align: center; background-color: #ffff00; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsUNKNOWN { font-size: 9pt; text-align: center; background-color: #ffbb55; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsCRITICAL { font-size: 9pt; text-align: center; background-color: #f88888; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsPENDING { font-size: 9pt; text-align: center; background-color: #acacac; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsPROBLEMS { font-size: 9pt; text-align: center; background-color: #aaccff; border: 1px solid #777777; padding: 2 4 2 4; } + + +DIV.hostTotals { font-size: 10pt; text-align: center; font-weight: bold; } +TABLE.hostTotals { font-size: 10pt; font-weight: bold; } +TH.hostTotals,A.hostTotals { font-size: 9pt; } +TD.hostTotals { font-size: 9pt; text-align: center; background-color: #e4e4e4; } +.hostTotalsUP { font-size: 9pt; text-align: center; background-color: #88d066; } +.hostTotalsDOWN { font-size: 9pt; text-align: center; background-color: #f88888; } +.hostTotalsUNREACHABLE { font-size: 9pt; text-align: center; background-color: #ffbb55; } +.hostTotalsPENDING { font-size: 9pt; text-align: center; background-color: #acacac; } +.hostTotalsPROBLEMS { font-size: 9pt; text-align: center; background-color: #aaccff; } + +.miniStatusPENDING { font-size: 9pt; background-color: #acacac; text-align: center; } +.miniStatusOK { font-size: 9pt; background-color: #66ffee; text-align: center; } +.miniStatusUNKNOWN { font-size: 9pt; background-color: #ffbb55; text-align: center; } +.miniStatusWARNING { font-size: 9pt; background-color: #ffff00; text-align: center; } +.miniStatusCRITICAL { font-size: 9pt; 
background-color: #f88888; text-align: center; } + +.miniStatusUP { font-size: 9pt; background-color: #66ffee; text-align: center; } +.miniStatusDOWN { font-size: 9pt; background-color: #f88888; text-align: center; } +.miniStatusUNREACHABLE { font-size: 9pt; background-color: #ffbb55; text-align: center; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/notifications.css b/openshift-v4/templates/nagios/nagios3/stylesheets/notifications.css new file mode 100644 index 000000000..113802510 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/notifications.css @@ -0,0 +1,29 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.notifications { } + +TABLE.notifications { padding: 0; margin: 0; } +TH.notifications { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +.notificationsOdd { font-size: 8pt; background-color: #e7e7e7; padding: 0 4 0 4; } +.notificationsEven { font-size: 8pt; background-color: #f4f2f2; padding: 0 4 0 4; } + +/* these are dark colors */ +.notificationsOK { background-color: #88d066; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsUNKNOWN { background-color: #ffbb55; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsWARNING { background-color: #ffff00; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsCRITICAL { background-color: #f88888; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsACKNOWLEDGEMENT { background-color: #aaaaaa; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsCUSTOM { background-color: #778899; border: 1px solid #777777; padding: 0 4 0 4; } + +/* these are dark colors */ +.notificationsHOSTUP { background-color: #88d066; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsHOSTDOWN { background-color: #f88888; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsHOSTUNREACHABLE { background-color: #ffbb55; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsHOSTACKNOWLEDGEMENT { background-color: #aaaaaa; border: 1px solid #777777; padding: 0 4 0 4; } +.notificationsHOSTCUSTOM { background-color: #778899; border: 1px solid #777777; padding: 0 4 0 4; } + diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/outages.css b/openshift-v4/templates/nagios/nagios3/stylesheets/outages.css new file mode 100644 index 000000000..10db27d43 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/outages.css @@ -0,0 +1,15 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/ + +.outages { } + +.itemTotalsTitle { font-size: 8pt; text-align: center; } + +.hostUP { background-color: #88d066; font-weight: bold; } +.hostDOWN { background-color: #f88888; font-weight: bold; } +.hostUNREACHABLE { background-color: #ffbb55; font-weight: bold; } + diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/showlog.css b/openshift-v4/templates/nagios/nagios3/stylesheets/showlog.css new file mode 100644 index 000000000..ccbd242ed --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/showlog.css @@ -0,0 +1,8 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.showlog { } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/status.css b/openshift-v4/templates/nagios/nagios3/stylesheets/status.css new file mode 100644 index 000000000..51f1e4827 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/status.css @@ -0,0 +1,88 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.status { } + +.itemTotalsTitle { font-size: 8pt; font-style: italic; clear:both;} + +table.status { font-size: 9pt; padding: 0 0 10 0; } +th.status { font-size: 9pt; text-align: left; padding: 0 3px 0 3px; border-bottom: 1px solid #777777; color: #333333; } +div.status { font-size: 10pt; text-align: center; } +.statusOdd { font-size: 8pt; background-color: #e7e7e7; line-height: 150%; padding: 0 4 0 4; } +.statusEven { font-size: 8pt; background-color: #f4f2f2; line-height: 150%; padding: 0 4 0 4; } + +.statusPENDING { font-size: 8pt; background-color: #acacac; border: 1px solid #777777; padding: 0 5 0 5; } +.statusOK { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; padding: 0 5 0 5; } +.statusRECOVERY { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; padding: 0 5 0 5; } +.statusUNKNOWN { font-size: 8pt; background-color: #ffbb55; border: 1px solid #777777; padding: 0 5 0 5; } +.statusWARNING { font-size: 8pt; background-color: #ffff00; border: 1px solid #777777; padding: 0 5 0 5; } +.statusCRITICAL { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; padding: 0 5 0 5; } + +.statusHOSTPENDING { font-size: 8pt; background-color: #acacac; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTUP { font-size: 8pt; background-color: #cce8cc; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTDOWN { font-size: 8pt; background-color: #ffdddd; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTDOWNACK { font-size: 8pt; background-color: #ffdddd; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTDOWNSCHED { font-size: 8pt; background-color: #ffdddd; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTUNREACHABLE { font-size: 8pt; background-color: #ffddaa; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTUNREACHABLEACK { font-size: 8pt; background-color: #ffddaa; line-height: 150%; padding: 0 4 0 4; } +.statusHOSTUNREACHABLESCHED { font-size: 8pt; background-color: #ffddaa; line-height: 150%; padding: 0 4 0 4; } + +.statusBGUNKNOWN { font-size: 8pt; 
background-color: #ffddaa; } +.statusBGUNKNOWNACK { font-size: 8pt; background-color: #ffddaa; } +.statusBGUNKNOWNSCHED { font-size: 8pt; background-color: #ffddaa; } +.statusBGWARNING { font-size: 8pt; background-color: #feffc1; } +.statusBGWARNINGACK { font-size: 8pt; background-color: #feffc1; } +.statusBGWARNINGSCHED { font-size: 8pt; background-color: #feffc1; } +.statusBGCRITICAL { font-size: 8pt; background-color: #ffdddd; } +.statusBGCRITICALACK { font-size: 8pt; background-color: #ffdddd; } +.statusBGCRITICALSCHED { font-size: 8pt; background-color: #ffdddd; } +.statusBGDOWN { font-size: 8pt; background-color: #ffdddd; } +.statusBGDOWNACK { font-size: 8pt; background-color: #ffdddd; } +.statusBGDOWNSCHED { font-size: 8pt; background-color: #ffdddd; } +.statusBGUNREACHABLE { font-size: 8pt; background-color: #ffddaa; } +.statusBGUNREACHABLEACK { font-size: 8pt; background-color: #ffddaa; } +.statusBGUNREACHABLESCHED { font-size: 8pt; background-color: #ffddaa; } + +div.serviceTotals { font-size: 10pt; text-align: center; font-weight: bold; } +table.serviceTotals { font-size: 10pt; font-weight: bold; } +th.serviceTotals,a.serviceTotals { font-size: 8pt; } +td.serviceTotals { font-size: 8pt; text-align: center; background-color: #e0e0e0; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsOK { font-size: 8pt; text-align: center; background-color: #88d066; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsWARNING { font-size: 8pt; text-align: center; background-color: #ffff00; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsUNKNOWN { font-size: 8pt; text-align: center; background-color: #ffbb55; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsCRITICAL { font-size: 8pt; text-align: center; background-color: #f88888; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsPENDING { font-size: 8pt; text-align: center; background-color: #acacac; border: 1px solid #777777; padding: 2 4 2 4; } +.serviceTotalsPROBLEMS { font-size: 8pt; text-align: center; background-color: #aaccff; border: 1px solid #777777; padding: 2 4 2 4; } + +div.hostTotals { font-size: 10pt; text-align: center; font-weight: bold; } +table.hostTotals { font-size: 10pt; font-weight: bold; } +th.hostTotals,a.hostTotals { font-size: 8pt; } +td.hostTotals { font-size: 8pt; text-align: center; background-color: #e0e0e0; border: 1px solid #777777; padding: 2 4 2 4; } +.hostTotalsUP { font-size: 8pt; text-align: center; background-color: #88d066; border: 1px solid #777777; padding: 2 4 2 4; } +.hostTotalsDOWN { font-size: 8pt; text-align: center; background-color: #f88888; border: 1px solid #777777; padding: 2 4 2 4; } +.hostTotalsUNREACHABLE { font-size: 8pt; text-align: center; background-color: #ffbb55; border: 1px solid #777777; padding: 2 4 2 4; } +.hostTotalsPENDING { font-size: 8pt; text-align: center; background-color: #acacac; border: 1px solid #777777; padding: 2 4 2 4; } +.hostTotalsPROBLEMS { font-size: 8pt; text-align: center; background-color: #aaccff; border: 1px solid #777777; padding: 2 4 2 4; } + +.miniStatusPENDING { font-size: 8pt; text-align: center; background-color: #acacac; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusOK { font-size: 8pt; text-align: center; background-color: #88d066; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusUNKNOWN { font-size: 8pt; text-align: center; background-color: #ffbb55; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusWARNING { font-size: 8pt; text-align: center; background-color: 
#ffff00; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusCRITICAL { font-size: 8pt; text-align: center; background-color: #f88888; border: 1px solid #777777; padding: 0 5 0 5; } + +.miniStatusUP { font-size: 8pt; text-align: center; background-color: #88d066; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusDOWN { font-size: 8pt; text-align: center; background-color: #f88888; border: 1px solid #777777; padding: 0 5 0 5; } +.miniStatusUNREACHABLE { font-size: 8pt; text-align: center; background-color: #ffbb55; border: 1px solid #777777; padding: 0 5 0 5; } + +/* page number styles, added 2/01/2012 -MG */ +#top_page_numbers { float:right;} +#result_limit { display:inline;} +.pagenumber { display: block; float:left; border: 1px solid #AAAAAA; padding: 0 2px 0 2px; margin: 1px;text-align:center; height:15px; } +a.pagenumber:hover { background-color: #EFEFEF;text-decoration:none;} +.current_page { color: #AAA; } +#inner_numbers { clear:right;} +#pagelimit,#bottom_page_numbers { font-size:8pt;} diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/statusmap.css b/openshift-v4/templates/nagios/nagios3/stylesheets/statusmap.css new file mode 100644 index 000000000..d41888f76 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/statusmap.css @@ -0,0 +1,14 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. */ + +.statusmap { } + +.imageInfo { font-size: 8pt; font-weight: bold; text-align: center; } + +.zoomTitle { font-size: 8pt; font-weight: bold; } + +.popupText { font-size: 8pt; background-color: #eeeeaa; border: 1px solid #777777; padding: 0 5 0 5; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/summary.css b/openshift-v4/templates/nagios/nagios3/stylesheets/summary.css new file mode 100644 index 000000000..f6a9f3236 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/summary.css @@ -0,0 +1,30 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/ + +.summary { } + +/* override to match filter table style */ +.optBoxItem { font-size: 8pt; font-weight: bold; } +.optBoxValue { font-size: 8pt; } + +/* override to match query info style */ +.dataSubTitle { font-size: 8pt; text-align: center; font-weight: normal; } + +/* override so we get a bit of whitespace */ +table.data { padding-top: 15; } + +.reportDataOdd { font-size: 9pt; background-color: #e7e7e7; padding: 0 4 0 4; } +.reportDataEven { font-size: 9pt; background-color: #f4f2f2; padding: 0 4 0 4; } + +.hostUP { font-size: 9pt; background-color: #88d066; border: 1px solid #777777; padding: 0 4 0 4; } +.hostDOWN { font-size: 9pt; background-color: #f88888; border: 1px solid #777777; padding: 0 4 0 4; } +.hostUNREACHABLE { font-size: 9pt; background-color: #ffbb55; border: 1px solid #777777; padding: 0 4 0 4; } + +.serviceOK { font-size: 9pt; background-color: #88d066; border: 1px solid #777777; padding: 0 4 0 4; } +.serviceWARNING { font-size: 9pt; background-color: #ffff00; border: 1px solid #777777; padding: 0 4 0 4; } +.serviceUNKNOWN { font-size: 9pt; background-color: #ffbb55; border: 1px solid #777777; padding: 0 4 0 4; } +.serviceCRITICAL { font-size: 9pt; background-color: #f88888; border: 1px solid #777777; padding: 0 4 0 4; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/tac.css b/openshift-v4/templates/nagios/nagios3/stylesheets/tac.css new file mode 100644 index 000000000..a5ed1e54a --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/tac.css @@ -0,0 +1,75 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/ + +.tac { font-size: 10pt; } + +.title { font-weight: bold; } +.titleItem { font-size: 8pt; font-weight: bold; } + +td.perfTitle { font-size: 10pt; font-weight: bold; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +.perfBox { background-color: #eeeeee; border: 1px solid #cccccc; } +.perfItem { font-size: 8pt; font-weight: bold; } +.perfValue { font-size: 8pt; } + +.healthTitle { font-weight: bold; font-size: 10pt; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +.healthBox { } +.healthItem { font-size: 10pt; font-weight: bold; } +.healthBar { background-color: grey; padding: 2 4 2 4; } + +.outageTitle { font-weight: bold; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +.outageHeader { font-weight: bold; border-bottom: 1px solid #aaaaaa; } + +.hostTitle { font-weight: bold; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +td.hostHeader { font-weight: bold; border-bottom: 1px solid #aaaaaa; } + +.serviceTitle { font-weight: bold; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +td.serviceHeader { font-weight: bold; border-bottom: 1px solid #aaaaaa; } + +.featureTitle { font-weight: bold; background-color: #d0d0d0; border: 1px solid #aaaaaa; } +td.featureHeader { font-weight: bold; border-bottom: 1px solid #aaaaaa; } + +.featureEnabled { text-align: center; background-color: #ccffcc; } +.featureDisabled { text-align: center; background-color: #ffcccc; } + +.featureEnabledFlapDetection { text-align: center; font-weight: bold; } +.featureDisabledFlapDetection { text-align: center; font-weight: bold; } +.featureItemEnabledServiceFlapDetection { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledServiceFlapDetection { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemEnabledHostFlapDetection { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledHostFlapDetection { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemServicesNotFlapping { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemServicesFlapping { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemHostsNotFlapping { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemHostsFlapping { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } + +.featureEnabledNotifications { text-align: center; font-weight: bold; } +.featureDisabledNotifications { text-align: center; font-weight: bold; } +.featureItemEnabledServiceNotifications { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledServiceNotifications { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemEnabledHostNotifications { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledHostNotifications { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } + +.featureEnabledHandlers { text-align: center; font-weight: bold; } +.featureDisabledHandlers { text-align: center; font-weight: bold; } +.featureItemEnabledServiceHandlers { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledServiceHandlers { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemEnabledHostHandlers { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledHostHandlers { 
font-size: 8pt; background-color: #f88888; } + +.featureEnabledActiveChecks { text-align: center; font-weight: bold; } +.featureDisabledActiveChecks { text-align: center; font-weight: bold; } +.featureItemEnabledActiveServiceChecks { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledActiveServiceChecks { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemEnabledActiveHostChecks { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledActiveHostChecks { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } + +.featureEnabledPassiveChecks { text-align: center; font-weight: bold; } +.featureDisabledPassiveChecks { text-align: center; font-weight: bold; } +.featureItemEnabledPassiveServiceChecks { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledPassiveServiceChecks { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } +.featureItemEnabledPassiveHostChecks { font-size: 8pt; background-color: #88d066; border: 1px solid #777777; } +.featureItemDisabledPassiveHostChecks { font-size: 8pt; background-color: #f88888; border: 1px solid #777777; } diff --git a/openshift-v4/templates/nagios/nagios3/stylesheets/trends.css b/openshift-v4/templates/nagios/nagios3/stylesheets/trends.css new file mode 100644 index 000000000..6ca9d8b37 --- /dev/null +++ b/openshift-v4/templates/nagios/nagios3/stylesheets/trends.css @@ -0,0 +1,8 @@ +/* exfoliation: a nagios makeover */ +/* Copyright 2010 Matthew Wall, all rights reserved */ +/* */ +/* Permission to use, copy, modify, and distribute this software for any */ +/* purpose with or without fee is hereby granted, provided that the above */ +/* copyright notice and this permission notice appear in all copies. 
*/
+
+.trends { }
diff --git a/openshift-v4/templates/nagios/supervisord/supervisord.conf b/openshift-v4/templates/nagios/supervisord/supervisord.conf
new file mode 100644
index 000000000..c3e298d99
--- /dev/null
+++ b/openshift-v4/templates/nagios/supervisord/supervisord.conf
@@ -0,0 +1,21 @@
+[supervisord]
+logfile = /dev/fd/1
+loglevel = info
+logfile_maxbytes = 0
+redirect_stderr=true
+nodaemon = true
+pidfile=/var/run/supervisord/supervisord.pid
+
+[program:nagios]
+logfile = /dev/fd/1
+loglevel = info
+logfile_maxbytes = 0
+redirect_stderr = true
+command = /usr/sbin/nagios3 /etc/nagios3/nagios.cfg
+
+[program:apache]
+logfile = /dev/fd/1
+loglevel = info
+logfile_maxbytes = 0
+redirect_stderr = true
+command = /usr/sbin/apachectl -DFOREGROUND
\ No newline at end of file
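Both Nagios and Apache run in the foreground under supervisord here, with all logs routed to the container's stdout via `/dev/fd/1`. A minimal local smoke test of the file, assuming a `supervisor` install on the host (the `nagios3` and `apachectl` binaries and the pidfile directory only exist inside the image, so expect the managed programs to fail to spawn locally; the point is only that the config parses and starts):

```console
supervisord -n -c openshift-v4/templates/nagios/supervisord/supervisord.conf
```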
diff --git a/openshift-v4/templates/nginx-runtime/Dockerfile b/openshift-v4/templates/nginx-runtime/Dockerfile
new file mode 100644
index 000000000..627c83015
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/Dockerfile
@@ -0,0 +1,43 @@
+# Use the official nginx image (based on debian)
+FROM nginx:stable
+
+ENV STI_SCRIPTS_PATH=/usr/libexec/s2i
+
+# Required for the HTTP Basic feature
+RUN apt-get update -y && \
+    apt-get install -y openssl ca-certificates && \
+    rm -rf /var/lib/apt/lists/*
+
+# Copy our OpenShift s2i scripts over to the default location
+COPY ./s2i/bin/ /usr/libexec/s2i/
+
+# Expose this variable to OpenShift
+LABEL io.openshift.s2i.scripts-url=image:///usr/libexec/s2i
+
+# Copy config from source to container
+COPY nginx.conf.template /tmp/
+
+# =================================================================================
+# Fix up permissions
+# ref: https://torstenwalter.de/openshift/nginx/2017/08/04/nginx-on-openshift.html
+# - S2I scripts must be executable
+# - Make sure nginx can read and write its working directories.
+# - The container dynamically configures nginx on startup
+# - The application artifacts live in /tmp
+# ---------------------------------------------------------------------------------
+RUN chmod -R g+rwx $STI_SCRIPTS_PATH
+RUN chmod g+rw /var/cache/nginx \
+    /var/run \
+    /var/log/nginx \
+    /etc/nginx/nginx.conf \
+    /tmp
+# =================================================================================
+
+# Work-around for issues with S2I builds on Windows
+WORKDIR /tmp
+
+# Nginx runs on port 8080 by default
+EXPOSE 8080
+
+# Switch to a non-root user
+USER 104
\ No newline at end of file
diff --git a/openshift-v4/templates/nginx-runtime/Readme.md b/openshift-v4/templates/nginx-runtime/Readme.md
new file mode 100644
index 000000000..185e1af0d
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/Readme.md
@@ -0,0 +1,10 @@
+### Files included
+
+nginx-runtime.yaml: nginx build template
+Dockerfile: docker build file, used by nginx-runtime.yaml
+nginx.conf.template: the nginx config template
+
+### Build the nginx image; it will be used by the frontend build
+
+oc process -f ./nginx-runtime.yaml -n 0ab226-tools | oc create -f - -n 0ab226-tools
+oc tag 0ab226-tools/nginx-runtime:latest 0ab226-tools/nginx-runtime:20210115
\ No newline at end of file
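The `chmod g+rw` fix-ups in the Dockerfile above are what let this image run under OpenShift's arbitrary non-root UIDs, which always belong to the root group. A minimal local check of that assumption, using a hypothetical tag and UID (any non-root UID with GID 0 should behave the same):

```console
# build the runtime image locally
docker build -t nginx-runtime:local openshift-v4/templates/nginx-runtime
# verify a random UID in the root group can write the nginx config
docker run --rm --user 10012:0 nginx-runtime:local touch /etc/nginx/nginx.conf
```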
diff --git a/openshift-v4/templates/nginx-runtime/nginx-runtime.yaml b/openshift-v4/templates/nginx-runtime/nginx-runtime.yaml
new file mode 100644
index 000000000..0258f36f2
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/nginx-runtime.yaml
@@ -0,0 +1,48 @@
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  creationTimestamp: null
+  name: nginx-runtime
+objects:
+- apiVersion: image.openshift.io/v1
+  kind: ImageStream
+  metadata:
+    annotations:
+      description: image stream for nginx
+    labels:
+      shared: "true"
+    creationTimestamp: null
+    name: nginx-runtime
+  spec:
+    lookupPolicy:
+      local: false
+  status:
+    dockerImageRepository: ""
+- apiVersion: build.openshift.io/v1
+  kind: BuildConfig
+  metadata:
+    creationTimestamp: null
+    labels:
+      app: nginx-runtime
+    name: nginx-runtime
+  spec:
+    nodeSelector: null
+    output:
+      to:
+        kind: ImageStreamTag
+        name: nginx-runtime:latest
+    postCommit: {}
+    resources: {}
+    runPolicy: Serial
+    source:
+      contextDir: openshift-v4/templates/nginx-runtime
+      git:
+        ref: openshift-v4-migration
+        uri: https://github.com/bcgov/tfrs.git
+      type: Git
+    strategy:
+      dockerStrategy: {}
+      type: Docker
+    triggers: []
+  status:
+    lastVersion: 0
diff --git a/openshift-v4/templates/nginx-runtime/nginx.conf.template b/openshift-v4/templates/nginx-runtime/nginx.conf.template
new file mode 100644
index 000000000..c4fbe1ff1
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/nginx.conf.template
@@ -0,0 +1,103 @@
+worker_processes auto;
+
+error_log /var/log/nginx/error.log;
+pid /var/run/nginx.pid;
+
+events {
+  worker_connections 4096;
+}
+
+http {
+  include /etc/nginx/mime.types;
+  default_type application/octet-stream;
+  server_tokens off;
+
+  # ip filtering
+  %IpFilterRules%
+
+  # logging rules
+  geo $loggable {
+    default 1;
+    %RealIpFrom% 0;
+  }
+
+  # Use a standard combined log format
+  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
+                  '$status $body_bytes_sent "$http_referer" '
+                  '"$http_user_agent" "$http_x_forwarded_for"';
+
+  access_log /var/log/nginx/access.log main if=$loggable;
+
+  sendfile on;
+  #tcp_nopush on;
+
+  keepalive_timeout 65;
+
+  #gzip on;
+
+  #real_ip module
+  set_real_ip_from %RealIpFrom%;
+  %AdditionalRealIpFromRules%
+  real_ip_recursive on;
+  real_ip_header X-Forwarded-For;
+
+  #throttle zones
+  limit_req_zone $binary_remote_addr zone=bra1:10m rate=1r/s;
+  limit_req_zone $binary_remote_addr zone=bra3:10m rate=3r/s;
+  limit_req_zone $binary_remote_addr zone=bra5:10m rate=5r/s;
+  limit_req_zone $binary_remote_addr zone=bra25:10m rate=25r/s;
+  limit_req_zone $binary_remote_addr zone=bra100:10m rate=100r/s;
+
+  #default throttle; not inherited if set in nested level
+  limit_req zone=bra5 burst=100;
+
+  # HTTP Basic rules
+  auth_basic_user_file /tmp/.htpasswd;
+
+  server {
+    listen 8080;
+    server_name localhost;
+
+    # add in most common security headers
+    add_header Content-Security-Policy "default-src * data: blob: filesystem: 'unsafe-inline' 'unsafe-eval'";
+    add_header Strict-Transport-Security "max-age=86400; includeSubDomains";
+    add_header X-Content-Type-Options "nosniff";
+    add_header X-XSS-Protection 1;
+    add_header Access-Control-Expose-Headers "Content-Disposition";
+
+    # serve our app here
+    location / {
+      root /tmp/app/dist;
+      index index.html index.htm;
+      try_files $uri $uri/ /index.html;
+      gzip on;
+      gzip_min_length 1000;
+      gzip_types *;
+
+      # Deploy-time configurable
+      %HTTP_BASIC%
+    }
+
+    # redirect server error pages to the static page /50x.html
+    error_page 500 502 503 504 /50x.html;
+    location = /50x.html {
+      root /usr/share/nginx/html;
+    }
+
+    # For status of the nginx service; OpenShift is configured to call this
+    location /nginx_status {
+      # Enable nginx stats
+      stub_status on;
+
+      # Access is left open so the platform can reach this endpoint
+      allow all;
+
+      # Uncomment to deny other requests
+      # deny all;
+
+      # No need to log this request, it's just noise
+      access_log off;
+    }
+
+  }
+}
diff --git a/openshift-v4/templates/nginx-runtime/s2i/bin/assemble b/openshift-v4/templates/nginx-runtime/s2i/bin/assemble
new file mode 100644
index 000000000..50ff38e0d
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/s2i/bin/assemble
@@ -0,0 +1,2 @@
+#!/bin/bash
+echo no assemble needed
diff --git a/openshift-v4/templates/nginx-runtime/s2i/bin/assemble-runtime b/openshift-v4/templates/nginx-runtime/s2i/bin/assemble-runtime
new file mode 100644
index 000000000..e6aba6ed5
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/s2i/bin/assemble-runtime
@@ -0,0 +1,2 @@
+#!/bin/bash
+echo no assemble-runtime needed
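The `%...%` tokens in the template above are not nginx syntax; they are placeholders that the s2i `run` script (below) substitutes from environment variables when the container starts. A minimal illustration of the same substitution, using a made-up subnet:

```console
RealIpFrom=10.97.0.0/16   # made-up value for illustration
sed "s~%RealIpFrom%~${RealIpFrom}~g" nginx.conf.template | grep set_real_ip_from
# set_real_ip_from 10.97.0.0/16;
```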
diff --git a/openshift-v4/templates/nginx-runtime/s2i/bin/run b/openshift-v4/templates/nginx-runtime/s2i/bin/run
new file mode 100644
index 000000000..1f0e36b08
--- /dev/null
+++ b/openshift-v4/templates/nginx-runtime/s2i/bin/run
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+getApiUrl (){
+  # ================================================================================
+  # Extract the API URL from the container's environment variables based on
+  # OpenShift service conventions.
+  #
+  # Assumes there is a default for API_URL
+  # --------------------------------------------------------------------------------
+  # API_URL:
+  #  - The default URL for the API endpoint.
+  #  - Used in the case API_SERVICE_NAME or one of the related service resource
+  #    variables is not defined.
+  #
+  # API_SERVICE_NAME:
+  #  - The name of the service endpoint for the API.
+  #  - For example; django
+  #
+  # API_PATH:
+  #  - The root path for the API.
+  #  - For example /api/v1/
+  # --------------------------------------------------------------------------------
+  # Examples:
+  #
+  # 1)
+  #  API_URL=https://django-devex-von-dev.pathfinder.gov.bc.ca/api/v1/
+  #  API_SERVICE_NAME=django
+  #  DJANGO_SERVICE_HOST=172.50.105.217
+  #  DJANGO_SERVICE_PORT=8080
+  #  API_PATH=/api/v1/
+  #
+  #  Results in API_URL=http://172.50.105.217:8080/api/v1/
+  #
+  # 2)
+  #  API_URL=https://django-devex-von-dev.pathfinder.gov.bc.ca/api/v1/
+  #  API_SERVICE_NAME=django
+  #  DJANGO_SERVICE_HOST=172.50.105.217
+  #  API_PATH=/api/v1/
+  #
+  #  Results in API_URL=http://172.50.105.217/api/v1/
+  #
+  # 3)
+  #  If either API_SERVICE_NAME or *_SERVICE_HOST are not defined...
+  #
+  #  API_URL=https://django-devex-von-dev.pathfinder.gov.bc.ca/api/v1/
+  #
+  #  Results in API_URL=https://django-devex-von-dev.pathfinder.gov.bc.ca/api/v1/
+  # ================================================================================
+  if [ ! -z "${API_SERVICE_NAME}" ]; then
+    _SERVICE_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${API_SERVICE_NAME/-/_})"
+    _SERVICE_HOST_NAME=${_SERVICE_NAME}_SERVICE_HOST
+    _SERVICE_PORT_NAME=${_SERVICE_NAME}_SERVICE_PORT
+
+    if [ ! -z "${!_SERVICE_HOST_NAME}" ]; then
+      if [ ! -z "${!_SERVICE_PORT_NAME}" ]; then
+        API_URL="http://${!_SERVICE_HOST_NAME}:${!_SERVICE_PORT_NAME}${API_PATH}"
+      else
+        API_URL="http://${!_SERVICE_HOST_NAME}${API_PATH}"
+      fi
+    fi
+  fi
+
+  echo ${API_URL}
+}
+
+export API_URL=$(getApiUrl)
+
+echo "---> Replacing Configuration ..."
+echo "Setting:"
+echo "RealIpFrom = ${RealIpFrom:-172.51.0.0/16}"
+echo "IpFilterRules = ${IpFilterRules}"
+echo "AdditionalRealIpFromRules = ${AdditionalRealIpFromRules}"
+echo "HTTP_BASIC = ${HTTP_BASIC}"
+echo "API_URL = ${API_URL}"
+
+sed "s~%RealIpFrom%~${RealIpFrom:-172.51.0.0/16}~g; s~%IpFilterRules%~${IpFilterRules}~g; s~%AdditionalRealIpFromRules%~${AdditionalRealIpFromRules}~g; s~%HTTP_BASIC%~${HTTP_BASIC}~g; s~%API_URL%~${API_URL}~g" /tmp/nginx.conf.template > /etc/nginx/nginx.conf
+
+if [ -n "$HTTP_BASIC_USERNAME" ] && [ -n "$HTTP_BASIC_PASSWORD" ]; then
+  echo "---> Generating .htpasswd file"
+  echo "$HTTP_BASIC_USERNAME:$(openssl passwd -crypt $HTTP_BASIC_PASSWORD)" > /tmp/.htpasswd
+fi
+
+echo "---> Starting nginx ..."
+/usr/sbin/nginx -g "daemon off;"
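`getApiUrl` assembles the `*_SERVICE_HOST` and `*_SERVICE_PORT` variable names at runtime and reads them through bash indirect expansion (`${!name}`). A self-contained sketch of that pattern, using the hypothetical values from the script's own comments:

```bash
#!/bin/bash
# Standalone sketch of the indirect-expansion trick used by getApiUrl.
API_SERVICE_NAME=django
API_PATH=/api/v1/
DJANGO_SERVICE_HOST=172.50.105.217   # hypothetical values from the comments above
DJANGO_SERVICE_PORT=8080

# django -> DJANGO, then compose the variable *names*
_SERVICE_NAME="$(tr '[:lower:]' '[:upper:]' <<< "${API_SERVICE_NAME/-/_}")"
_HOST_VAR=${_SERVICE_NAME}_SERVICE_HOST
_PORT_VAR=${_SERVICE_NAME}_SERVICE_PORT

# ${!var} dereferences the variable whose name is stored in var
echo "http://${!_HOST_VAR}:${!_PORT_VAR}${API_PATH}"
# prints: http://172.50.105.217:8080/api/v1/
```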
+/usr/sbin/nginx -g "daemon off;" diff --git a/openshift-v4/templates/notification/README.md b/openshift-v4/templates/notification/README.md new file mode 100644 index 000000000..834a3dcbd --- /dev/null +++ b/openshift-v4/templates/notification/README.md @@ -0,0 +1,13 @@ +### Files included + +notification-server-bc.json build config +notification-server-dc.json deployment config +notification-server-others-dc.json create service and route + +### Before trigging pipeline + +N/A + +### After pipeline completes + +N/A \ No newline at end of file diff --git a/openshift-v4/templates/notification/notification-server-bc.yaml b/openshift-v4/templates/notification/notification-server-bc.yaml new file mode 100644 index 000000000..90f699ec8 --- /dev/null +++ b/openshift-v4/templates/notification/notification-server-bc.yaml @@ -0,0 +1,78 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: notification-server-bc + creationTimestamp: +parameters: +- name: NAME + displayName: + description: the module name entered when run yo bcdk:pipeline, which is tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: GIT_URL + displayName: + description: tfrs repo + required: true +- name: GIT_REF + displayName: + description: tfrs ref + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the celery image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-notification-server + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-notification-server${SUFFIX} + creationTimestamp: + spec: + triggers: [] + runPolicy: Serial + source: + type: Git + git: + uri: ${GIT_URL} + ref: ${GIT_REF} + contextDir: frontend + strategy: + type: Source + sourceStrategy: + from: + kind: ImageStreamTag + name: nodejs:12-1-45 + output: + to: + kind: ImageStreamTag + name: ${NAME}-notification-server:${VERSION} + resources: + limits: + cpu: '1' + memory: 4Gi + requests: + cpu: 250m + memory: 2Gi + postCommit: {} + nodeSelector: + failedBuildsHistoryLimit: 5 + status: + lastVersion: 0 diff --git a/openshift-v4/templates/notification/notification-server-dc.yaml b/openshift-v4/templates/notification/notification-server-dc.yaml new file mode 100644 index 000000000..64279972e --- /dev/null +++ b/openshift-v4/templates/notification/notification-server-dc.yaml @@ -0,0 +1,156 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: notification-server-dc + creationTimestamp: +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev ane etc. 
+ required: true +- name: NAMESPACE + displayName: Environment name + description: 'Sample values: 0ab226-dev, 0ab226-test and 0ab226-prod' + required: true +- name: VERSION + displayName: null + description: image tag name for output + required: true +- name: KEYCLOAK_CERTS_URL + displayName: KEYCLOAK_CERTS_URL + description: 'Valid values: https://dev.oidc.gov.bc.ca/auth/realms/tfrs-dev/protocol/openid-connect/certs, + https://test.oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs, https://oidc.gov.bc.ca/auth/realms/tfrs/protocol/openid-connect/certs' + required: true +- name: CPU_REQUEST + displayName: Requested CPU + description: Requested CPU + required: true +- name: CPU_LIMIT + displayName: CPU upper limit + description: CPU upper limit + required: true +- name: MEMORY_REQUEST + displayName: Requested memory + description: Requested memory + required: true +- name: MEMORY_LIMIT + displayName: Memory upper limit + description: Memory upper limit + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the notification server image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-notification-server + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-notification-server${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-notification-server${SUFFIX} + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - notification-server + from: + kind: ImageStreamTag + name: ${NAME}-notification-server:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: 1 + test: false + selector: + name: ${NAME}-notification-server${SUFFIX} + template: + metadata: + name: ${NAME}-notification-server${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-notification-server${SUFFIX} + spec: + containers: + - name: notification-server + image: '' + ports: + - containerPort: 3000 + protocol: TCP + env: + - name: RABBITMQ_HOST + value: ${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: RABBITMQ_VHOST + value: "/tfrs" + - name: RABBITMQ_USER + value: tfrs + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: NPM_RUN + value: start:notifications + - name: KEYCLOAK_CERTS_URL + value: ${KEYCLOAK_CERTS_URL} + resources: + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + livenessProbe: + tcpSocket: + port: 3000 + initialDelaySeconds: 35 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: 3000 + initialDelaySeconds: 30 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git 
a/openshift-v4/templates/notification/notification-server-others-dc.yaml b/openshift-v4/templates/notification/notification-server-others-dc.yaml new file mode 100644 index 000000000..cb651fc59 --- /dev/null +++ b/openshift-v4/templates/notification/notification-server-others-dc.yaml @@ -0,0 +1,53 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: notification-server-others-dc + creationTimestamp: +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev, etc. + required: true +- name: FRONTEND_HOST + displayName: frontend host name + description: + required: true +objects: +- kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-notification-server${SUFFIX} + creationTimestamp: + spec: + ports: + - protocol: TCP + port: 8080 + targetPort: 3000 + selector: + name: ${NAME}-notification-server${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: route.openshift.io/v1 + metadata: + name: ${NAME}-notification-server${SUFFIX} + creationTimestamp: + spec: + host: ${FRONTEND_HOST} + path: /socket.io + to: + kind: Service + name: ${NAME}-notification-server${SUFFIX} + weight: 100 + tls: + termination: edge + wildcardPolicy: None + status: {} diff --git a/openshift-v4/templates/nsp/README.MD b/openshift-v4/templates/nsp/README.MD new file mode 100644 index 000000000..7166da3b9 --- /dev/null +++ b/openshift-v4/templates/nsp/README.MD @@ -0,0 +1,54 @@ +### Files included + +* quickstart-nsp.yaml apply this to all namespaces + +### Apply to namespaces before anything else + +API_VERSION=security.devops.gov.bc.ca/v1alpha1 +NAMESPACE_PREFIX=0ab226 + +oc process -f ./quickstart-nsp.yaml API_VERSION=security.devops.gov.bc.ca/v1alpha1 NAMESPACE_PREFIX=0ab226 ENV_NAME=tools | oc create -f - -n 0ab226-tools +oc process -f ./quickstart-nsp.yaml API_VERSION=security.devops.gov.bc.ca/v1alpha1 NAMESPACE_PREFIX=0ab226 ENV_NAME=dev | oc create -f - -n 0ab226-dev +oc process -f ./quickstart-nsp.yaml API_VERSION=security.devops.gov.bc.ca/v1alpha1 NAMESPACE_PREFIX=0ab226 ENV_NAME=test | oc create -f - -n 0ab226-test +oc process -f ./quickstart-nsp.yaml API_VERSION=security.devops.gov.bc.ca/v1alpha1 NAMESPACE_PREFIX=0ab226 ENV_NAME=prod | oc create -f - -n 0ab226-prod + +### Apply specific nsps + +Only int-cluster-k8s-api-comms is kept. +./pipeline/npm run deploy-nsps -- --pr=1696 --env=prod + +### Add roles to users; check whether the settings already exist before running the grants + +* tools project + +oc policy add-role-to-user admin system:serviceaccount:0ab226-tools:jenkins-prod --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-dev:default --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-dev:builder --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-test:default --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-test:builder --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-prod:default --namespace=0ab226-tools +oc policy add-role-to-user system:image-puller system:serviceaccount:0ab226-prod:builder --namespace=0ab226-tools
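+ +A quick way to check what already exists before running a grant (a sketch; assumes the 0ab226 namespaces above): + +oc get rolebindings -n 0ab226-tools +oc adm policy who-can get imagestreams -n 0ab226-tools + + +* dev environment + +oc policy add-role-to-user admin system:serviceaccount:0ab226-tools:jenkins-prod --namespace=0ab226-dev +oc policy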
add-role-to-user edit system:serviceaccount:0ab226-dev:nagios --namespace=0ab226-dev +oc policy add-role-to-user system:deployer system:serviceaccount:0ab226-dev:deployer --namespace=0ab226-dev +oc policy add-role-to-user system:image-builder system:serviceaccount:0ab226-dev:builder --namespace=0ab226-dev + +* test environment + +oc policy add-role-to-user admin system:serviceaccount:0ab226-tools:jenkins-prod --namespace=0ab226-test +oc policy add-role-to-user edit system:serviceaccount:0ab226-test:nagios --namespace=0ab226-test +oc policy add-role-to-user system:deployer system:serviceaccount:0ab226-test:deployer --namespace=0ab226-test +oc policy add-role-to-user system:image-builder system:serviceaccount:0ab226-test:builder --namespace=0ab226-test + +* prod environment + +oc policy add-role-to-user admin system:serviceaccount:0ab226-tools:jenkins-prod --namespace=0ab226-prod +oc policy add-role-to-user edit system:serviceaccount:0ab226-prod:nagios --namespace=0ab226-prod +oc policy add-role-to-user system:deployer system:serviceaccount:0ab226-prod:deployer --namespace=0ab226-prod +oc policy add-role-to-user system:image-builder system:serviceaccount:0ab226-prod:builder --namespace=0ab226-prod + + diff --git a/openshift-v4/templates/nsp/Untitled.drawio b/openshift-v4/templates/nsp/Untitled.drawio new file mode 100644 index 000000000..b4db87efe --- /dev/null +++ b/openshift-v4/templates/nsp/Untitled.drawio @@ -0,0 +1 @@ +5VpPc+IgHP00Hu0ESDQeW3W7h+1lu7Pt7o0xaLIlwSGocT/9kkpMELTZmSppvTjygBDe7/H4lx4ap8U9x8v4gUWE9qAXFT006UEYgkD+lsB2B/gDfwcseBLtIFADj8lfokBPoaskIrlWUDBGRbLUwRnLMjITGoY5Zxu92JxRvdUlXhADeJxhaqJPSSRi1a3Aq/GvJFnEVcvAUzkprgorII9xxDYNCE16aMwZE7t/aTEmtOSu4mVX78uR3P2LcZKJNhX6DyKd3pEf9y/z9GXtP03S2UMfDtTLiW3VYxJJAlSScRGzBcswndboHWerLCLlYz2Zqst8Y2wpQSDBP0SIrYomXgkmoVikVOXu2iwbOtoXBeVsxWfkRAcqTWC+IOJUR4M95VKqhKVE8K2syAnFIlnrL4KVaBb7cjWv8o+i9j9oRt6V0OyUZfXcNaYr1ZLJOqXSOUp2N3EiyOMSv3Z7I71L5w7ny52dzJOijIEic024IMVpOs3uVxV8JQPlhaCyhk3tLAMFxQ1TqbB3JwwahKU4F4QbtMk+C52fXHD2QsaMMi6RjGUlp/OE0gMI02SRyeRMciQfjO5KBhPpr7cqI02iiB4LiD4E3iEE4UEEfDMCviUC8FwRQEYE5FAhOCd9cAPCzxuHw6EwNOMwvGQcqmXKZQ1aksW3z6r+a+JXmbgJquSkaGZOtloA3sXYg5bGDnyXzh503dlHupyhxVcu6uyDU74y+sS+EgZaIJBlir2ssQCD7OswlmFLYwld+srQGCa/pz9v+2BkDhGn/oKA7i9o5NhfgMWAP9SGJmwpz5FLeYaGPMcWO3EpTB92TZjoSv121HYh59kDehlFj0xFd2wpF3RO0Z7JmUXkLjkbdI0z6DtxgSIRz43/DQ+QqdoCysQZHAD4LS0AIpcWUL2mNqv1O6ZoFL69obPtI4ZncwEnB/UdmNdA240EcLpUA0e2ErC6D+qMsg+82g9dz2/mMefH2kqAtkuvY5G5kEItay/LNOl0N4E6pk1oGbxX4br7u/Y3NQ1darp6cFPTlpN8p/uJzmkampxZFhdO9xOd48y8pxv7/Y6xFvgdW7NCyynqh5rZYdvbMej0FBua12NyR9UxdQ4CXZ0AmOoMLeoMz6ZO83T1+/6OzDbxu2RviN4e2xdlD5kz73XcMB7q+HxfLshk/XXga17jE0s0/Qc= \ No newline at end of file diff --git a/openshift-v4/templates/nsp/nsp-diagram.drawio b/openshift-v4/templates/nsp/nsp-diagram.drawio new file mode 100644 index 000000000..803a9685b --- /dev/null +++ b/openshift-v4/templates/nsp/nsp-diagram.drawio @@ -0,0 +1 @@
+5Vxbc5s6EP41eUwGITDwmKa5dKbt9HIuSd5kkG01GLkgJ/b59UfEAowEBl8ln9POtGhZhNjdb3ellXwBb6aL+xTNJl9ohOML24oWF/DjhW0DG/j8v5yyFBTPAivKOCWRoFWEn+QfLIiWoM5JhLMaI6M0ZmRWJ4Y0SXDIajSUpvStzjaicf2tMzTGCuFniGKV+jeJ2GRF9V2roj9gMp4UbwaWuDNFBbMgZBMU0bc1Ery9gDcppWx1NV3c4DiXXiGX1XN3LXfLgaU4YX0eGDrPX+0v7o/Rt6fr39+//Hq4gdElcMXg2LL4YhxxAYgmTdmEjmmC4tuK+iGl8yTCebcWb1U8nymdcSLgxF+YsaXQJpozykkTNo3FXbwg7HHt+inv6soVrY8L0fN7YykaGUvpC76hMU3fxwnv7gL+h99RBSFkk9F5GuJNXy8MCqVjzDbwwRVfLpm1Fwgx32M6xSxdcoYUx4iR17rpIGGB45KvUhK/EHraQmcO0KEznfIvIGSIAkS/ryieizeNUpownESqYuKYe6pcAW8TwvDPGXoXyBv3lnXxomy2cl8jssjV1C7VV5wyvNgoBnEX+gLdwvleAkcI8m3NlQnSZM2LFbSDS86G/xXThedpuraj099ba/4enNzb91VZYJbG9ETohI/9sQrLefOpUFTeqNT23jJBb8A2S3GD/3Nq1VtpZuVWtmeM0oxVmW+UxqCSjA1R+GJeLuZZtVzM05yJQT2JQBlWtgkqI55cN5n6MUDgnGWSALUkCTrlD4FRCnAUL5RQRkYk5O+myWWGU+4qzPJIruSR+FRF9+zQ1+qTwLpPAh0+6Ri2756l77GD89GaGRPRvno2LDeGllZFW1eeV5+LXgWe16XwvPUNp4SLAKcbrOD06XZfKzAs3bY1G4G7Dd516u3wGYp49BslfMxlHA/sehwvw3jRxWqk4ilJ/eUwdrcIV0l9QhxjYWrGZDuelO1AV/cETMvy0DEAMegbzsxan4N6lnoaZ8Dnkbf0VbRZ+SnUO6vYW8+nz0x6A1pbfvpw++kxC2f3Q/L4/fkVfh1a46ay7BCHaM4jTr49hKYRSRDjYuKvDkOcZTjjl1OSEHo+E5gudUuqOK7+4eaQe2ldAb+wkWKRwRFBYNekp2Cho1GGj5LPDBQzykKUXNYsyOTMxgW6MxuDFgR01Cu9swyTjubZ/TZaMyNM9tWzWRN4r9m9TVASxaYtUQ+gtETd4NqCBtcWHMu1+YrwZohbVkLMkpsTtCwJ6AoJjp69EDulWKesNgY9XYhhu8gCBQYpGg4Jm/42Cwc2NA0HWvY/7jYXPcqGX+ssY2YxbGmTyXzGZwUJQyQxLXCWpxCE4Tu+ZsMHegOAAWUD0Hez+8GnBc1lA+BLvlFOmo5cNgB6Z4lbWkTTDr3TL8D2tiHDKsflB559raO3BgzL2Gz7nNCmVXOGpR7qqnaCxoRmZiUcEEgJR3GwcC3hGLhqwlHQDi43V89y1tYbuVsWwCq8bA0Raf5qAf4X7gcduyd0jrTl4TpN0XKNYZbnJFl7auM49aIDcC3JnlY9HjafsRWghjGaolfDgOrVzwTauqsFzhmV4U+5NAT6HscwBHSedNjUdqTj6B38INjM7w+C/fg9/wROQD2N0lJg1rqs3rZjTNvGb3V9BS8YTjmizZKcWxec9kM8anJoptx8qUYNtEcddddidYbf4tGC+9NSkoOYD+fDMOVX4/xK5iwPnMmMjWdAunrf8FC1yi4/VA6hq3eJUXinDq6yyNXB13t8RWYks9VLkV2dNnG3DlUBBDddtmnBIKFJjpYRiWOJhGIyTngzxqO8hxwGXF/xtSCzPAdpRFk9bTkErgY1WF06DT/wAHxHBRbwjoYstUZaabDLOoU5drEV5tPJJzYib8fXgok6U6ud1dnajbeYxXe6gxpf3/5avqHO1Fa73jZASHhom4A24+QAKLClpewWGDTEF3C8AKPWSNVtZBt11ca+0Xc28G92yvUaVodtlzd6h4Upyf+dUPqS89PuiJKFEzzlFpardzYfxiTc6hE0jwjb4yV8CsFxmG8s7P2W9kdkv9daJN8WcG0I6xWYDgA4eX/JiQEXLJ7/Wv4Y0+Vz4v45//z2x/Unt+F3ABQxh3OeTEXlPD+6zn8fjjc5orKM28DeP6Rhzh5EEYQPtti9ple3Qa0Fbc+apNu2b6noYvXdSk1S7WggdzSod9RS3Nx20WPgNA+4dVzSBx56ZbIRGeqG5v2Q0W6vp8PM7sjou+Wqbxn+NMiAwLuS1tfgjtiAA7krxSkfCB3qm4pBt45N+cxibDsihDerX9hcsVc/VApv/wU= \ No newline at end of file diff --git a/openshift-v4/templates/nsp/nsp-env.yaml b/openshift-v4/templates/nsp/nsp-env.yaml new file mode 100644 index 000000000..9aafe95be --- /dev/null +++ b/openshift-v4/templates/nsp/nsp-env.yaml @@ -0,0 +1,402 @@ +--- +kind: Template +apiVersion: v1 +labels: + template: tfrs-nsp +metadata: + name: tfrs-nsp + creationTimestamp: +parameters: + - name: NAME + displayName: App Name + description: App Name + value: 'tfrs' + required: true + - name: ENV_NAME + displayName: Environment Name + description: Environment Name + required: true + - name: SUFFIX + displayName: + description: sample is -dev-365, -test and -prod + value: "" + required: true + - name: API_VERSION + displayName: Api version + description: Api version, either secops.pathfinder.gov.bc.ca/v1alpha1 or security.devops.gov.bc.ca/v1alpha1 + required: true +objects: + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-frontend-to-internet + spec: + description: | + allow frontend to talk to the internet. 
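+# A hedged example of applying these app-specific policies for a PR environment (values are assumptions; API_VERSION as documented above): +# oc process -f ./nsp-env.yaml NAME=tfrs ENV_NAME=dev SUFFIX=-dev-1696 API_VERSION=security.devops.gov.bc.ca/v1alpha1 | oc apply -f - -n 0ab226-dev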
+ source: + - - name=${NAME}-frontend${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-frontend-to-backend + spec: + description: | + Allow frontend to communicate with backend. + source: + - - name=${NAME}-frontend${SUFFIX} + destination: + - - name=${NAME}-backend${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-notification-server-to-internet + spec: + description: | + allow notification-server to talk to the internet. + source: + - - name=${NAME}-notification-server${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-notification-server-to-rabbitmq + spec: + description: | + allow notification-server to talk to rabbitmq. + source: + - - name=${NAME}-notification-server${SUFFIX} + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backend-to-internet + spec: + description: | + allow backend to talk to the internet. + source: + - - name=${NAME}-backend${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backend-to-minio + spec: + description: | + allow backend to talk to minio. + source: + - - name=${NAME}-backend${SUFFIX} + destination: + - - name=${NAME}-minio-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backend-to-patroni + spec: + description: | + allow backend to talk to patroni. + source: + - - name=${NAME}-backend${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backend-to-rabbitmq + spec: + description: | + allow backend to talk to rabbitmq. + source: + - - name=${NAME}-backend${SUFFIX} + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backend-to-clamav + spec: + description: | + allow backend to talk to clamav. + source: + - - name=${NAME}-backend${SUFFIX} + destination: + - - name=${NAME}-clamav-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-handler-to-rabbitmq + spec: + description: | + allow scan-handler to talk to rabbitmq. + source: + - - name=${NAME}-scan-handler${SUFFIX} + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-handler-to-patroni + spec: + description: | + allow scan-handler to talk to patroni. + source: + - - name=${NAME}-scan-handler${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-rabbitmq-to-external + spec: + description: | + allow rabbitmq to talk to external. + source: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-minio-to-external + spec: + description: | + allow minio to talk to external. 
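+# Once applied, the policies can be listed via the CRD (a sketch; assumes the NSP operator is installed in the cluster): +# oc get networksecuritypolicies -n 0ab226-dev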
+ source: + - - name=${NAME}-minio-${ENV_NAME} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-celery-to-external + spec: + description: | + allow celery to talk to external. + source: + - - name=${NAME}-celery${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-celery-to-minio + spec: + description: | + allow celery to talk to minio. + source: + - - name=${NAME}-celery${SUFFIX} + destination: + - - name=${NAME}-minio-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-celery-to-patroni + spec: + description: | + allow celery to talk to patroni. + source: + - - name=${NAME}-celery${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-celery-to-rabbitmq + spec: + description: | + allow celery to talk to rabbitmq. + source: + - - name=${NAME}-celery${SUFFIX} + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-nagios-to-external + spec: + description: | + allow nagios to talk to external. + source: + - - app=nagios + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-nagios-to-rabbitmq + spec: + description: | + allow nagios to talk to rabbitmq. + source: + - - app=nagios + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-nagios-to-minio + spec: + description: | + allow nagios to talk to minio. + source: + - - app=nagios + destination: + - - name=${NAME}-minio-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-nagios-to-patroni + spec: + description: | + allow nagios to talk to patroni. + source: + - - app=nagios + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-coordinator-to-minio + spec: + description: | + allow scan-coordinator to talk to minio. + source: + - - name=${NAME}-scan-coordinator${SUFFIX} + destination: + - - name=${NAME}-minio-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-coordinator-to-clamav + spec: + description: | + allow scan-coordinator to talk to clamav. + source: + - - name=${NAME}-scan-coordinator${SUFFIX} + destination: + - - name=${NAME}-clamav-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-coordinator-to-rabbitmq + spec: + description: | + allow scan-coordinator to talk to rabbitmq. + source: + - - name=${NAME}-scan-coordinator${SUFFIX} + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backup-container-to-patroni + spec: + description: | + allow backup-container to talk to patroni.
+ source: + - - name=patroni-backup + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-patroni-to-patroni-itself + spec: + description: | + Allow patroni pods to communicate in patroni cluster. + source: + - - cluster-name=patroni${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-backendmidlifecyclepod-to-patroni + spec: + description: | + Allow backend mid lifecycle pod to run django migrations against patroni. + source: + - - openshift.io/deployer-pod.type=hook-mid + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-schemaspy-public-to-patroni + spec: + description: | + Allow schemaspy public to communicate with patroni. + source: + - - name=${NAME}-schema-spy-public${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-schemaspy-audit-to-patroni + spec: + description: | + Allow schemaspy audit to communicate with patroni. + source: + - - name=${NAME}-schema-spy-audit${SUFFIX} + destination: + - - cluster-name=patroni${SUFFIX} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-schemaspy-public-to-internet + spec: + description: | + allow schemaspy public to talk to the internet. + source: + - - name=${NAME}-schema-spy-public${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-schemaspy-audit-to-internet + spec: + description: | + allow schemaspy audit to talk to the internet. + source: + - - name=${NAME}-schema-spy-audit${SUFFIX} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-rabbitmq-to-rabbitmq-itself + spec: + description: | + Allow rabbitmq pods to communicate in rabbitmq cluster. + source: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + destination: + - - app=${NAME}${SUFFIX}-rabbitmq-cluster + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-clamav-to-external + spec: + description: | + allow clamav to talk to external. + source: + - - name=${NAME}-clamav-${ENV_NAME} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: ${NAME}${SUFFIX}-custom-scan-coordinator-to-external + spec: + description: | + allow scan-coordinator to talk to external.
+ source: + - - name=${NAME}-scan-coordinator${SUFFIX} + destination: + - - ext:network=any \ No newline at end of file diff --git a/openshift-v4/templates/nsp/quickstart-nsp.yaml b/openshift-v4/templates/nsp/quickstart-nsp.yaml new file mode 100644 index 000000000..b2e931ff7 --- /dev/null +++ b/openshift-v4/templates/nsp/quickstart-nsp.yaml @@ -0,0 +1,55 @@ +--- +kind: Template +apiVersion: v1 +labels: + template: tfrs-generic-policy +metadata: + name: tfrs-generic-policy + creationTimestamp: +parameters: + - name: NAMESPACE_PREFIX + displayName: Namespace prefix + description: the namespace prefix (license plate), e.g. 0ab226 + required: true + - name: ENV_NAME + displayName: Environment name + description: values are tools, dev, test or prod + required: true + - name: API_VERSION + displayName: Api version + description: Api version, either secops.pathfinder.gov.bc.ca/v1alpha1 or security.devops.gov.bc.ca/v1alpha1 + required: true +objects: + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: egress-internet + spec: + description: | + allow the ${NAMESPACE_PREFIX}-${ENV_NAME} namespace to talk to the internet. + source: + - - $namespace=${NAMESPACE_PREFIX}-${ENV_NAME} + destination: + - - ext:network=any + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: intra-namespace-comms + spec: + description: | + allow the ${NAMESPACE_PREFIX}-${ENV_NAME} namespace to talk to itself + source: + - - $namespace=${NAMESPACE_PREFIX}-${ENV_NAME} + destination: + - - $namespace=${NAMESPACE_PREFIX}-${ENV_NAME} + - kind: NetworkSecurityPolicy + apiVersion: ${API_VERSION} + metadata: + name: int-cluster-k8s-api-comms + spec: + description: | + allow ${NAMESPACE_PREFIX}-${ENV_NAME} pods to talk to the k8s api + destination: + - - int:network=internal-cluster-api-endpoint + source: + - - $namespace=${NAMESPACE_PREFIX}-${ENV_NAME} \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/build.js b/openshift-v4/templates/patroni/.pipeline/build.js new file mode 100755 index 000000000..4df964f0b --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/build.js @@ -0,0 +1,5 @@ +'use strict'; +const build = require('./lib/build.js') +const phases = require('./lib/config.js') + +build({phases:phases}) \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/clean.js b/openshift-v4/templates/patroni/.pipeline/clean.js new file mode 100755 index 000000000..f88e35923 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/clean.js @@ -0,0 +1,4 @@ +'use strict'; +const build = require('./lib/clean.js') + +build() \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/deploy.js b/openshift-v4/templates/patroni/.pipeline/deploy.js new file mode 100755 index 000000000..a2d1f0da1 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/deploy.js @@ -0,0 +1,6 @@ +'use strict'; +const phases = require('./lib/config.js') +const deploy = require('./lib/deploy.js') +const options= require('pipeline-cli').Util.parseArguments() + +deploy({phases:phases, options:options}) diff --git a/openshift-v4/templates/patroni/.pipeline/lib/build.js b/openshift-v4/templates/patroni/.pipeline/lib/build.js new file mode 100755 index 000000000..59ddc3436 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/lib/build.js @@ -0,0 +1,25 @@ +'use strict'; +const {OpenShiftClientX} = require('pipeline-cli') +const path = require('path'); + +module.exports = (settings)=>{ + const phases=settings.phases
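+ // These scripts are driven by pipeline-cli's --pr argument; hedged example invocations from the .pipeline directory (the change id 0 is an assumption; see package.json scripts): + // npm run build -- --pr=0 + // npm run deploy -- --pr=0 --env=test + const oc=new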
OpenShiftClientX({'namespace':phases.build.namespace}); + const phase='build' + var objects = [] + var git_http_url = oc.git.http_url + //git_http_url = 'https://github.com/BCDevOps/platform-services.git' + + objects = objects.concat(oc.processDeploymentTemplate(oc.toFileUrl(path.resolve(__dirname, '../../openshift/build.yaml')), { + 'param':{ + 'NAME': phases[phase].name, + 'SUFFIX': phases[phase].suffix, + 'VERSION': phases[phase].tag, + 'GIT_URI': git_http_url, + 'GIT_REF': oc.git.ref + } + })) + + oc.applyRecommendedLabels(objects, phases[phase].name, phase, phases[phase].changeId, phases[phase].instance) + oc.applyAndBuild(objects) +} \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/lib/clean.js b/openshift-v4/templates/patroni/.pipeline/lib/clean.js new file mode 100755 index 000000000..0e0f74f12 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/lib/clean.js @@ -0,0 +1,22 @@ +'use strict'; +const {OpenShiftClientX} = require('pipeline-cli') +const phases = require('./config') +const options= require('pipeline-cli').Util.parseArguments() + +module.exports = (settings)=>{ + const oc=new OpenShiftClientX({'namespace':phases.build.namespace}); + const target_phase=options.env + + //console.log(`target_phase=${target_phase}`) + + for (var k in phases){ + if (phases.hasOwnProperty(k) && k != 'prod') { + const phase=phases[k] + if (k == target_phase){ + //console.log(`phase=${phase}`) + oc.raw('delete', ['all'], {selector:`app-name=${phase.name},env-id=${phase.changeId},env-name!=prod,!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, namespace:phase.namespace}) + oc.raw('delete', ['pvc,Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints'], {selector:`app-name=${phase.name},env-id=${phase.changeId},env-name!=prod,!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`, namespace:phase.namespace}) + } + } + } +} \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/lib/config.js b/openshift-v4/templates/patroni/.pipeline/lib/config.js new file mode 100644 index 000000000..020029fdf --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/lib/config.js @@ -0,0 +1,13 @@ +'use strict'; +const options= require('pipeline-cli').Util.parseArguments() +const changeId = options.pr //aka pull-request +const version = '10' +const name = 'patroni' + +const phases = { + build: {namespace:'bcgov-tools' , name: `${name}`, phase: 'build', changeId:changeId, suffix: `-build-${changeId}`, instance: `${name}-build-${changeId}`, tag:`v${version}-${changeId}`}, + test: {namespace:`bcgov`, name: `${name}`, phase: 'test' , changeId:changeId, suffix: '-test' , instance: `${name}-test` , tag:`v${version}-latest`}, + prod: {namespace:`bcgov`, name: `${name}`, phase: 'prod' , changeId:changeId, suffix: '' , instance: `${name}-prod` , tag:`v${version}-stable`} +} + +module.exports = exports = phases diff --git a/openshift-v4/templates/patroni/.pipeline/lib/deploy.js b/openshift-v4/templates/patroni/.pipeline/lib/deploy.js new file mode 100755 index 000000000..6e0131e7f --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/lib/deploy.js @@ -0,0 +1,12 @@ +'use strict'; +const {OpenShiftClientX} = require('pipeline-cli') + + +module.exports = (settings)=>{ + const phases=settings.phases + const phase=settings.options.env + const oc=new OpenShiftClientX({'namespace':phases[phase].namespace}); + + oc.tag([`${phases.build.namespace}/${phases.build.name}:${phases.build.tag}`, 
`${phases[phase].namespace}/${phases[phase].name}:${phases[phase].tag}`]) + +} \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/npmw b/openshift-v4/templates/patroni/.pipeline/npmw new file mode 100755 index 000000000..e7a80b4c2 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/npmw @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +set -e + +curl -sSL 'https://raw.githubusercontent.com/BCDevOps/pipeline-cli/v1.0/cli.sh' | bash -s "$@" \ No newline at end of file diff --git a/openshift-v4/templates/patroni/.pipeline/package.json b/openshift-v4/templates/patroni/.pipeline/package.json new file mode 100644 index 000000000..9d62bc018 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/package.json @@ -0,0 +1,27 @@ +{ + "name": "pipeline", + "version": "1.0.0", + "description": "This is a pipeline script", + "engines": { + "node": ">=8" + }, + "scripts": { + "build": "node build.js", + "clean": "node clean.js", + "deploy": "node deploy.js", + "test": "mocha", + "version": "echo \"node@$(node --version) ($(which node))\" && echo \"npm@$(npm --version) ($(which npm))\" && npm ls" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/BCDevOps/platform-services.git" + }, + "author": "", + "license": "Apache-2.0", + "dependencies": { + "pipeline-cli": "git+https://github.com/BCDevOps/pipeline-cli.git#v1.0" + }, + "devDependencies": { + "mocha": "^5.2.0" + } +} diff --git a/openshift-v4/templates/patroni/.pipeline/test/e2e.js b/openshift-v4/templates/patroni/.pipeline/test/e2e.js new file mode 100644 index 000000000..662954204 --- /dev/null +++ b/openshift-v4/templates/patroni/.pipeline/test/e2e.js @@ -0,0 +1,173 @@ +var assert = require('assert'); +const {OpenShiftClientX} = require('pipeline-cli') +const {spawnSync} = require('child_process'); +const path = require('path'); + +function randomstring(L) { + var s = ''; + var randomchar = function() { + var n = Math.floor(Math.random() * 62); + if (n < 10) return n; //0-9 + if (n < 36) return String.fromCharCode(n + 55); //A-Z + return String.fromCharCode(n + 61); //a-z + } + while (s.length < L) s += randomchar(); + return s; +} + +describe('e2e2', function() { + const namespace='a1b2c3d';//randomstring(6).toLowerCase() + const buildNamespace = `${namespace}-tools`; + const deployNamespace = `${namespace}-tools`; + const oc=new OpenShiftClientX(); + let currentNamespace="" + + before(function() { + currentNamespace=oc.raw('project', ['-q']).stdout + console.log(`currentNamespace=${currentNamespace}`) + }); + + it(`delete project`, function(done) { + this.timeout(20000) + spawnSync('oc', ['delete', `project/${buildNamespace}`], {encoding:'utf8'}) + //oc.raw('delete', [`namespace/${buildNamespace}`]) + //assert.equal([1,2,3].indexOf(4), -1); + setTimeout(function(){done()}, 5000) + }); + + it('create project', function() { + currentNamespace=oc.raw('project', ['-q']).stdout + console.log(`currentNamespace=${currentNamespace}`) + oc.raw('create', ['namespace',buildNamespace]) + oc.raw('label', [`namespace/${buildNamespace}`, 'mocha=e2e', 'name=patroni']) + }); + + it('build', function() { + this.timeout(60000) + const build = require('../lib/build.js'); + const changeId=0; + const _phase={name:'patroni', changeId:0} + const settings={ + phases:{ + build:{ + namespace: buildNamespace, + name:`${_phase.name}`, + suffix:'-build', + tag:`v10-${_phase.changeId}`, + instance: `${_phase.name}-build-${_phase.changeId}` + } + } + } + build(settings) + assert.equal([1,2,3].indexOf(4), -1);
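+ // Note: the assertion above is only a placeholder; what this test really exercises (a hedged reading, not asserted by the code) is that build(settings) renders ../../openshift/build.yaml with the phase parameters above and starts an OpenShift build via pipeline-cli.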
}); + + it('deploy', function() { + this.timeout(60000) + const _phase={name:'patroni', changeId:0} + const settings={ + phases:{ + build:{ + namespace: buildNamespace, + name:`${_phase.name}`, + suffix:'-build', + tag:`v10-${_phase.changeId}`, + instance: `${_phase.name}-build-${_phase.changeId}`, + changeId: _phase.changeId + }, + e2e:{ + namespace: deployNamespace, + name:`${_phase.name}`, + suffix:'-e2e', + tag:`v10-${_phase.changeId}`, + instance: `${_phase.name}-e2e-${_phase.changeId}`, + changeId: _phase.changeId + } + } + } + const phases = settings.phases + const phase = 'e2e' + let objects =[] + + //Switch to Build Namespace + oc.namespace(deployNamespace); + + objects = objects.concat(oc.processDeploymentTemplate(oc.toFileUrl(path.resolve(__dirname, '../../openshift/deployment-prereq.yaml')), { + 'param':{ + 'NAME': `${phases[phase].name}-pgsql`, + 'SUFFIX': phases[phase].suffix, + 'APP_DB_USERNAME': 'rhsso', + 'APP_DB_NAME': 'rhsso' + } + })) + + objects = objects.concat(oc.processDeploymentTemplate(oc.toFileUrl(path.resolve(__dirname, '../../openshift/deployment.yaml')), { + 'param':{ + 'NAME': `${phases[phase].name}-pgsql`, + 'SUFFIX': phases[phase].suffix, + 'INSTANCE': `${phases[phase].name}-pgsql${phases[phase].suffix}`, + 'IMAGE_STREAM_NAMESPACE': phases[phase].namespace, + 'OPENSHIFT_IMAGE_REGISTRY': '172.30.1.1:5000', + 'IMAGE_STREAM_TAG': `patroni:v10-${phases[phase].changeId}` + } + })) + + oc.applyRecommendedLabels(objects, phases[phase].name, phase, `${phases[phase].changeId}`, phases[phase].instance) + + objects.forEach((item)=>{ + if (item.kind == 'StatefulSet' && item.metadata.labels["app.kubernetes.io/name"] === "patroni"){ + oc.copyRecommendedLabels(item.metadata.labels, item.spec.selector.matchLabels) + oc.copyRecommendedLabels(item.metadata.labels, item.spec.template.metadata.labels) + + item.spec.template.spec.containers.forEach((container)=>{ + container.env.forEach((env)=>{ + if (env.name === "PATRONI_KUBERNETES_LABELS"){ + var labels = JSON.parse(env.value) + oc.copyRecommendedLabels(item.metadata.labels, labels) + env.value = JSON.stringify(labels) + } + }) + }) + } + }) + + oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag) + oc.applyAndDeploy(objects, phases[phase].instance) + + }); + + + after(function() { + //this.timeout(10000) + //let p1=spawnSync('bash', ['-c', `oc delete "project/${buildNamespace}"`], {encoding:'utf8'}) + //console.dir(p1.output) + /* + return new Promise( (resolve) => { + resolve(true) + }).then((result)=>{ + return new Promise((resolve)=>{ + setTimeout(function(){ + resolve(true) + }, 5000) + }) + }).then((result)=>{ + let p2=spawnSync('oc', ['delete', `namespace/${buildNamespace}`], {encoding:'utf8'}) + console.dir(p2.output) + done() + }); + */ + //.finally(done) + //setTimeout(function(){ + /* + let p1=spawnSync('oc', ['delete', `namespace/${buildNamespace}`], {encoding:'utf8'}) + console.dir(p1.output) + + console.log(`previousNamespace=${currentNamespace}`) + let p2=spawnSync('oc', ['project', currentNamespace], {encoding:'utf8'}) + console.dir(p2.output) + oc.raw('delete', [`namespace/${buildNamespace}`]) + done() + */ + //}, 5) + }) +}); \ No newline at end of file diff --git a/openshift-v4/templates/patroni/README.md b/openshift-v4/templates/patroni/README.md new file mode 100644 index 000000000..1dd213846 --- /dev/null +++ b/openshift-v4/templates/patroni/README.md @@ -0,0 +1,77 @@ +### Files included + +* build.yaml build patroni image +* deployment.yaml deploy patroni +* 
deployment-prereq.yaml creates prerequisite objects for patroni +* secret-template.yaml creates the template.patroni-patroni secret, which is used by the pipeline + +### Before triggering pipeline + +1. Create the template.patroni-patroni secret; make sure the secrets are the same as on OpenShift v3 +oc process -f ./secret-template.yaml | oc create -f - -n [environment namespace] +Note: when patroni is deployed, the username will be changed to all lowercase; see the next section on the OpenShift v4 migration for details + +2. Build the patroni image +oc process -f ./build.yaml | oc create -f - -n [tools namespace] + +3. Tag the patroni image into the environment +oc tag [tools namespace]/patroni:v10-latest [env namespace]/patroni:v10-stable + +### Database Migration from OpenShift v3 to OpenShift v4 + +1. On OpenShift v4, update the secrets in template.patroni-patroni to be the same as on OpenShift v3 + +2. For example, the OpenShift v3 tfrs db user name is userABC and the password is pwpwpwpwpw + create user "userABC" with password 'pwpwpwpwpw'; //password matches the secret + ALTER DATABASE tfrs OWNER TO "userABC"; + DROP USER userabc; //userabc is the old user on OpenShift v4 + +3. OpenShift v3 - Create a backup +log in to the patroni-backup pod and run backup.sh -1 + created backup: /backups/2020-08-28/postgresql-zeva_2020-08-28_19-06-28.sql.gz + +4. Move the above backup file from the backup container on OpenShift v3 to v4 + for example: moved to /backups/fromv3/postgresql-zeva_2020-08-28_19-06-28.sql.gz + +5. Restore the backup to the patroni database on OpenShift v4 +log in to the patroni-backup pod on OpenShift v4 and run the following command +./backup.sh -r patroni-master-prod/tfrs -f /backups/fromv3 +Note: yes, the folder name only + +6. Verify the databases on OpenShift v3 and v4 to make sure they are the same + +### Create staging patroni in order to test the operational scripts + +1. back up the prod database and rsync it to the test env, e.g. /backups/2020-10-30-prod/patroni-master-prod-tfrs_2020-10-30_12-29-48.sql.gz + +2. create the patroni-staging statefulset + oc process -f ./deployment-prereq.yaml SUFFIX=-staging ... //make sure the user passwords are the same as prod + oc process -f ./deployment.yaml \ + NAME=patroni \ + ENV_NAME=test \ + SUFFIX=-staging \ + CPU_REQUEST=200m \ + CPU_LIMIT=400m \ + MEMORY_REQUEST=250M \ + MEMORY_LIMIT=500M \ + IMAGE_REGISTRY=docker-registry.default.svc:5000 \ + IMAGE_STREAM_NAMESPACE=mem-tfrs-test \ + IMAGE_STREAM_TAG=patroni:v10-stable \ + REPLICA=1 \ + PVC_SIZE=1G \ + STORAGE_CLASS=netapp-block-standard \ + | oc create -f - -n mem-tfrs-test + +3. restore + +log in to the patroni-master-staging pod: + create user "userSRU" with password ''; //password is found in the patroni-staging secret + ALTER DATABASE tfrs OWNER TO "userSRU"; + DROP USER usersru; + +on the backup pod in the test env: +./backup.sh -r patroni-master-staging:5430/tfrs -f /backups/2020-10-30-prod/patroni-master-prod-tfrs_2020-10-30_12-29-48.sql.gz +the admin command can be obtained from the patroni-staging secret
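+ +A quick post-restore sanity check (a sketch; the user and port follow the staging setup above): + +psql -h patroni-master-staging -p 5430 -U userSRU -d tfrs -c '\dt' + +4.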
update backend dc to connect to the staging database + diff --git a/openshift-v4/templates/patroni/build.yaml b/openshift-v4/templates/patroni/build.yaml new file mode 100644 index 000000000..d04418e97 --- /dev/null +++ b/openshift-v4/templates/patroni/build.yaml @@ -0,0 +1,110 @@ +apiVersion: v1 +kind: Template +metadata: + creationTimestamp: null + name: patroni +labels: + app: ${NAME}${SUFFIX} + phase: build + app.kubernetes.io/component: database + app.kubernetes.io/name: patroni + app.kubernetes.io/managed-by: template + app.kubernetes.io/version: "10" +parameters: + - name: NAME + value: patroni + - name: SUFFIX + value: "" + description: for tfrs, use an empty value as the pipeline doesn't build patroni for a single PR + - name: VERSION + description: Output version + value: "v10-latest" + - name: GIT_URI + value: https://github.com/bcgov/tfrs.git + - name: GIT_REF + value: openshift-v4-migration + - name: POSTGRES_VERSION + value: "10" +objects: +#ImageStream is created if it doesn't already exist +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: {} + creationTimestamp: null + generation: 1 + name: postgres + spec: + lookupPolicy: + local: false + tags: + - annotations: null + from: + kind: DockerImage + name: registry.hub.docker.com/library/postgres:${POSTGRES_VERSION} + generation: 1 + importPolicy: {} + name: "${POSTGRES_VERSION}" + referencePolicy: + type: Source + status: + dockerImageRepository: "" +#- apiVersion: v1 +# generation: 0 +# kind: ImageStreamTag +# lookupPolicy: +# local: false +# metadata: +# creationTimestamp: null +# name: postgres:${POSTGRES_VERSION} +# tag: +# annotations: null +# from: +# kind: DockerImage +# name: registry.hub.docker.com/library/postgres:${POSTGRES_VERSION} +# generation: 0 +# importPolicy: {} +# name: "${POSTGRES_VERSION}" +# referencePolicy: +# type: Source +- apiVersion: v1 + kind: ImageStream + metadata: + creationTimestamp: null + name: ${NAME} + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- apiVersion: v1 + kind: BuildConfig + metadata: + creationTimestamp: null + name: ${NAME}${SUFFIX} + spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: "${NAME}:${VERSION}" + postCommit: {} + resources: {} + source: + contextDir: openshift-v4/templates/patroni/docker + git: + ref: ${GIT_REF} + uri: ${GIT_URI} + type: Git + strategy: + dockerStrategy: + from: + kind: ImageStreamTag + name: postgres:${POSTGRES_VERSION} + type: Docker + triggers: + - type: ConfigChange + - imageChange: {} + type: ImageChange + status: + lastVersion: 0 \ No newline at end of file diff --git a/openshift-v4/templates/patroni/deployment-prereq.yaml b/openshift-v4/templates/patroni/deployment-prereq.yaml new file mode 100644 index 000000000..7f8956f54 --- /dev/null +++ b/openshift-v4/templates/patroni/deployment-prereq.yaml @@ -0,0 +1,102 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + annotations: + description: |- + Patroni Postgresql database cluster, with persistent storage. + iconClass: icon-postgresql + openshift.io/display-name: Patroni Postgresql (Persistent) + openshift.io/long-description: This template deploys a patroni postgresql HA + cluster with persistent storage.
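+# A hedged render of this prerequisite template (namespace and suffix are assumptions, mirroring the staging example in the README): +# oc process -f ./deployment-prereq.yaml NAME=patroni SUFFIX=-staging | oc create -f - -n mem-tfrs-test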
+ tags: postgresql + name: patroni-pgsql-persistent +labels: + app.kubernetes.io/component: database + app.kubernetes.io/name: patroni + app.kubernetes.io/managed-by: template + app.kubernetes.io/version: "10" +objects: +# ServiceAccount and Role are created here; otherwise image pulls may fail because the ServiceAccount hasn't been created yet +- apiVersion: v1 + kind: Secret + metadata: + labels: + app: ${NAME}${SUFFIX} + cluster-name: ${NAME}${SUFFIX} + annotations: + as-copy-of: "template.${NAME}-patroni" + name: ${NAME}${SUFFIX} + stringData: + replication-username: ${PATRONI_REPLICATION_USERNAME} + replication-password: ${PATRONI_REPLICATION_PASSWORD} + superuser-username: ${PATRONI_SUPERUSER_USERNAME} + superuser-password: ${PATRONI_SUPERUSER_PASSWORD} + app-db-name: ${APP_DB_NAME} + app-db-username: ${APP_DB_USERNAME} + app-db-password: ${APP_DB_PASSWORD} +- apiVersion: v1 + kind: ServiceAccount + metadata: + labels: + cluster-name: ${NAME}${SUFFIX} + name: ${NAME}${SUFFIX} +- apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + labels: + cluster-name: ${NAME}${SUFFIX} + name: ${NAME}${SUFFIX} + rules: + - apiGroups: + - "" + resources: + - services + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - update + - watch + - delete + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - patch + - update + - create + - list + - watch + - delete + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - patch + - update + - watch +parameters: +- description: The name of the application for labelling all artifacts. + displayName: Application Name + name: NAME + value: patroni +- name: SUFFIX + required: true \ No newline at end of file diff --git a/openshift-v4/templates/patroni/deployment.yaml b/openshift-v4/templates/patroni/deployment.yaml new file mode 100644 index 000000000..73e08b66f --- /dev/null +++ b/openshift-v4/templates/patroni/deployment.yaml @@ -0,0 +1,336 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + annotations: + description: |- + Patroni Postgresql database cluster, with persistent storage. + iconClass: icon-postgresql + openshift.io/display-name: Patroni Postgresql (Persistent) + openshift.io/long-description: This template deploys a patroni postgresql HA + cluster with persistent storage. + tags: postgresql + name: patroni-pgsql-persistent +labels: + app: ${NAME}${SUFFIX} + phase: deploy + app.kubernetes.io/instance: ${NAME}${SUFFIX} + app.kubernetes.io/component: database + app.kubernetes.io/name: patroni + app.kubernetes.io/managed-by: template + app.kubernetes.io/version: "10" +objects: +# It doesn't seem to be used/needed - remove it?
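+# To inspect a running cluster, the cluster-name label set below is handy (a sketch; the suffix is an assumption): +# oc get pods -l cluster-name=patroni-staging -L role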
+#- apiVersion: v1 +# kind: Service +# metadata: +# creationTimestamp: null +# labels: +# cluster-name: ${NAME}${SUFFIX} +# name: ${NAME}${SUFFIX} +# spec: +# ports: +# - name: 'postgresql' +# port: 5432 +# protocol: TCP +# targetPort: 5432 +# sessionAffinity: None +# type: ClusterIP +# status: +# loadBalancer: {} +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + cluster-name: ${NAME}${SUFFIX} + name: ${NAME}-master${SUFFIX} + spec: + ports: + - port: 5432 + name: postgresql + protocol: TCP + targetPort: 5432 + - port: 8008 + name: health + protocol: TCP + targetPort: 8008 + selector: + cluster-name: ${NAME}${SUFFIX} + role: master + app.kubernetes.io/name: patroni + sessionAffinity: None + type: ClusterIP + status: + loadBalancer: {} +#- apiVersion: v1 +# kind: Service +# metadata: +# creationTimestamp: null +# labels: +# cluster-name: ${NAME}${SUFFIX} +# name: ${NAME}-replica${SUFFIX} +# spec: +# ports: +# - port: 5432 +## name: 'postgresql' +# protocol: TCP +# targetPort: 5432 +# selector: +# cluster-name: ${NAME}${SUFFIX} +# app.kubernetes.io/name: patroni +# role: replica +# sessionAffinity: None +# type: ClusterIP +# status: +# loadBalancer: {} +# - apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: ${NAME}${SUFFIX}-config +# - apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: ${NAME}${SUFFIX}-leader +- apiVersion: apps/v1 + kind: StatefulSet + metadata: + creationTimestamp: null + generation: 3 + labels: + cluster-name: ${NAME}${SUFFIX} + app: ${NAME}${SUFFIX} + role: patroni + env: ${ENV_NAME} + name: ${NAME}${SUFFIX} + spec: + podManagementPolicy: OrderedReady + replicas: ${{REPLICA}} + revisionHistoryLimit: 10 + selector: + matchLabels: + statefulset: ${NAME}${SUFFIX} + serviceName: ${NAME}${SUFFIX} + template: + metadata: + creationTimestamp: null + labels: + statefulset: ${NAME}${SUFFIX} + cluster-name: ${NAME}${SUFFIX} + app.kubernetes.io/name: patroni + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: statefulset + operator: In + values: + - ${NAME}${SUFFIX} + topologyKey: "kubernetes.io/hostname" + containers: + - env: + #TODO: Remove POD_IP in favor of PATRONI_KUBERNETES_POD_IP + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP +# - name: PATRONI_KUBERNETES_USE_ENDPOINTS +# value: 'true' +# - name: PATRONI_KUBERNETES_POD_IP +# valueFrom: +# fieldRef: +# apiVersion: v1 +# fieldPath: status.podIP +# - name: PATRONI_KUBERNETES_PORTS +# value: '{[{"name": "postgresql", "port": 5432}]}' + - name: PATRONI_KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: PATRONI_KUBERNETES_LABELS + value: '{"cluster-name": "${NAME}${SUFFIX}", "app.kubernetes.io/name": "patroni"}' + - name: PATRONI_SUPERUSER_USERNAME + valueFrom: + secretKeyRef: + key: superuser-username + name: ${NAME}${SUFFIX} + - name: PATRONI_SUPERUSER_PASSWORD + valueFrom: + secretKeyRef: + key: superuser-password + name: ${NAME}${SUFFIX} + - name: PATRONI_REPLICATION_USERNAME + valueFrom: + secretKeyRef: + key: replication-username + name: ${NAME}${SUFFIX} + - name: PATRONI_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + key: replication-password + name: ${NAME}${SUFFIX} + - name: APP_USER + valueFrom: + secretKeyRef: + key: app-db-username + name: ${NAME}${SUFFIX} + - name: APP_PASSWORD + valueFrom: + secretKeyRef: + key: app-db-password + name: ${NAME}${SUFFIX} + - name: APP_DATABASE + valueFrom: + 
secretKeyRef: + key: app-db-name + name: ${NAME}${SUFFIX} + - name: PATRONI_SCOPE + value: ${NAME}${SUFFIX} + - name: PATRONI_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: PATRONI_LOG_LEVEL + value: WARNING + - name: PATRONI_POSTGRESQL_DATA_DIR + value: /home/postgres/pgdata/pgroot/data + - name: PATRONI_POSTGRESQL_PGPASS + value: /tmp/pgpass + - name: PATRONI_POSTGRESQL_LISTEN + value: 0.0.0.0:5432 + - name: PATRONI_RESTAPI_LISTEN + value: 0.0.0.0:8008 +# - name: PATRONI_LOG_LEVEL +# value: DEBUG +# - name: PATRONI_LOG_DIR +# value: /tmp +# - name: PATRONI_LOG_FILE_SIZE +# value: '50000000' +# - name: PATRONI_LOG_FILE_NUM +# value: '4' + image: ${IMAGE_REGISTRY}/${IMAGE_STREAM_NAMESPACE}/${IMAGE_STREAM_TAG} + # Because we are using an image reference to a tag, we need to always pull the image; otherwise + # we end up with an outdated/out-of-sync image depending on the node where it is running + imagePullPolicy: Always + name: postgresql + ports: + - containerPort: 8008 + protocol: TCP + - containerPort: 5432 + protocol: TCP + resources: + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File +# /usr/share/scripts/patroni/health_check.sh was used before, but it doesn't output Lag in MB any more + readinessProbe: + failureThreshold: 20 + httpGet: + path: /health + port: 8008 + scheme: HTTP + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 3 + volumeMounts: + - mountPath: /home/postgres/pgdata + name: postgresql + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + serviceAccountName: ${NAME}${SUFFIX} + terminationGracePeriodSeconds: 0 + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + annotations: + volume.beta.kubernetes.io/storage-class: ${STORAGE_CLASS} + labels: + app: ${NAME}${SUFFIX} + name: postgresql + spec: + storageClassName: ${STORAGE_CLASS} + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${PVC_SIZE} +# It doesn't seem to be used/needed - remove it? +#- apiVersion: v1 +# kind: Endpoints +# metadata: +# labels: +# app: ${NAME}${SUFFIX} +# cluster-name: ${NAME}${SUFFIX} +# name: ${NAME}${SUFFIX} +# subsets: [] +- apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + cluster-name: ${NAME}${SUFFIX} + name: ${NAME}${SUFFIX} + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ${NAME}${SUFFIX} + subjects: + - kind: ServiceAccount + name: ${NAME}${SUFFIX} +parameters: +- description: The name of the application for labelling all artifacts. + displayName: Application Name + name: NAME + value: patroni +- description: The environment name + displayName: environment name + name: ENV_NAME + required: true +- name: SUFFIX + description: A suffix appended to all artifact's name (NAME) +- description: Starting amount of CPU the container can use. + displayName: CPU REQUEST + name: CPU_REQUEST + value: '250m' +- description: Maximum amount of CPU the container can use. + displayName: CPU Limit + name: CPU_LIMIT + value: '1' +- description: Starting amount of memory the container can use. + displayName: Memory Request + name: MEMORY_REQUEST + value: 512Mi +- description: Maximum amount of memory the container can use.
+ displayName: Memory Limit + name: MEMORY_LIMIT + value: 512Mi +- description: The OpenShift Namespace where the patroni and postgresql ImageStream + resides. + displayName: ImageStream Namespace + name: IMAGE_STREAM_NAMESPACE + value: "bcgov" +- name: IMAGE_STREAM_TAG + description: Patroni ImageTag + value: patroni:v10-stable +- description: The size of the persistent volume to create. + displayName: Persistent Volume Size + name: PVC_SIZE + value: 1G +- name: STORAGE_CLASS + value: netapp-block-standard +- name: IMAGE_REGISTRY + value: image-registry.openshift-image-registry.svc:5000 +- name: REPLICA + value: '2' diff --git a/openshift-v4/templates/patroni/docker/Dockerfile b/openshift-v4/templates/patroni/docker/Dockerfile new file mode 100644 index 000000000..0588f64aa --- /dev/null +++ b/openshift-v4/templates/patroni/docker/Dockerfile @@ -0,0 +1,43 @@ +FROM postgres:10 +MAINTAINER Alexander Kukushkin + +ARG PGHOME=/home/postgres + +RUN export DEBIAN_FRONTEND=noninteractive \ + && echo 'APT::Install-Recommends "0";\nAPT::Install-Suggests "0";' > /etc/apt/apt.conf.d/01norecommend \ + && apt-get update -y \ + && apt-get upgrade -y \ + && apt-cache depends patroni | sed -n -e 's/.* Depends: \(python3-.\+\)$/\1/p' \ + | grep -Ev '^python3-(sphinx|etcd|consul|kazoo|kubernetes)' \ + | xargs apt-get install -y gettext curl jq locales git python3-pip python3-wheel \ + + ## Make sure we have a en_US.UTF-8 locale available + && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 \ + + && pip3 install setuptools \ + && pip3 install 'git+https://github.com/zalando/patroni.git#egg=patroni[kubernetes]' \ + + && mkdir -p $PGHOME \ + && sed -i "s|/var/lib/postgresql.*|$PGHOME:/bin/bash|" /etc/passwd \ + + # Set permissions for OpenShift + && chmod 775 $PGHOME \ + && chmod 664 /etc/passwd \ + && mkdir -p $PGHOME/pgdata/pgroot \ + && chgrp -R 0 $PGHOME \ + && chown -R postgres $PGHOME \ + && chmod -R 775 $PGHOME \ + # Clean up + && apt-get remove -y git python3-pip python3-wheel \ + && apt-get autoremove -y \ + && apt-get clean -y \ + && rm -rf /var/lib/apt/lists/* /root/.cache + +COPY contrib/root / + +VOLUME /home/postgres/pgdata +EXPOSE 5432 8008 +ENV LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 +USER postgres +WORKDIR /home/postgres +CMD ["/bin/bash", "/usr/bin/entrypoint.sh"] \ No newline at end of file diff --git a/openshift-v4/templates/patroni/docker/contrib/root/usr/bin/entrypoint.sh b/openshift-v4/templates/patroni/docker/contrib/root/usr/bin/entrypoint.sh new file mode 100755 index 000000000..2932b1040 --- /dev/null +++ b/openshift-v4/templates/patroni/docker/contrib/root/usr/bin/entrypoint.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +if [[ $UID -ge 10000 ]]; then + GID=$(id -g) + sed -e "s/^postgres:x:[^:]*:[^:]*:/postgres:x:$UID:$GID:/" /etc/passwd > /tmp/passwd + cat /tmp/passwd > /etc/passwd + rm /tmp/passwd +fi + +# FIX -> FATAL: data directory "..." 
has group or world access +mkdir -p "$PATRONI_POSTGRESQL_DATA_DIR" +chmod 700 "$PATRONI_POSTGRESQL_DATA_DIR" + +cat > /home/postgres/patroni.yml <<__EOF__ +bootstrap: + post_bootstrap: /usr/share/scripts/patroni/post_init.sh + dcs: + postgresql: + use_pg_rewind: true + parameters: + max_connections: ${POSTGRESQL_MAX_CONNECTIONS:-100} + max_prepared_transactions: ${POSTGRESQL_MAX_PREPARED_TRANSACTIONS:-0} + max_locks_per_transaction: ${POSTGRESQL_MAX_LOCKS_PER_TRANSACTION:-64} + log_autovacuum_min_duration: 250 + log_checkpoints: on + log_lock_waits: on + log_min_duration_statement: 1000 + log_temp_files: 1000 + min_wal_size: 300MB + max_wal_size: 1GB + track_io_timing: on + idle_in_transaction_session_timeout: 20s + initdb: + - auth-host: md5 + - auth-local: trust + - encoding: UTF8 + - locale: en_US.UTF-8 + - data-checksums + pg_hba: + - host all all 0.0.0.0/0 md5 + - host replication ${PATRONI_REPLICATION_USERNAME} ${POD_IP}/16 md5 +restapi: + connect_address: '${POD_IP}:8008' +postgresql: + connect_address: '${POD_IP}:5432' + authentication: + superuser: + password: '${PATRONI_SUPERUSER_PASSWORD}' + replication: + password: '${PATRONI_REPLICATION_PASSWORD}' +__EOF__ + +unset PATRONI_SUPERUSER_PASSWORD PATRONI_REPLICATION_PASSWORD +export KUBERNETES_NAMESPACE=$PATRONI_KUBERNETES_NAMESPACE +export POD_NAME=$PATRONI_NAME + +exec /usr/bin/python3 /usr/local/bin/patroni /home/postgres/patroni.yml \ No newline at end of file diff --git a/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/health_check.sh b/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/health_check.sh new file mode 100755 index 000000000..acc485baa --- /dev/null +++ b/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/health_check.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +set -Eeu +set -o pipefail + +pg_isready -q && patronictl list --format=json | jq -e ".[] | select(.Member == \"$(hostname)\" and .State == \"running\" and .\"Lag in MB\" == 0)" diff --git a/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/post_init.sh b/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/post_init.sh new file mode 100755 index 000000000..90e35f53b --- /dev/null +++ b/openshift-v4/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/post_init.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +set -Eeu + +if [[ (! -z "$APP_USER") && (! -z "$APP_PASSWORD") && (! 
-z "$APP_DATABASE")]]; then + echo "Creating user ${APP_USER}" + psql "$1" -w -c "create user ${APP_USER} WITH LOGIN ENCRYPTED PASSWORD '${APP_PASSWORD}'" + + echo "Creating database ${APP_DATABASE}" + psql "$1" -w -c "CREATE DATABASE ${APP_DATABASE} OWNER ${APP_USER} ENCODING '${APP_DB_ENCODING:-UTF8}' LC_COLLATE = '${APP_DB_LC_COLLATE:-en_US.UTF-8}' LC_CTYPE = '${APP_DB_LC_CTYPE:-en_US.UTF-8}'" + + echo "Creating extensions" + psql -U postgres -q -d "${APP_DATABASE}" -c 'create extension if not exists hstore' + psql -U postgres -q -d "${APP_DATABASE}" -c 'create extension if not exists pg_trgm' + +else + echo "Skipping user creation" + echo "Skipping database creation" +fi \ No newline at end of file diff --git a/openshift-v4/templates/patroni/secret-template.yaml b/openshift-v4/templates/patroni/secret-template.yaml new file mode 100644 index 000000000..2f40958ec --- /dev/null +++ b/openshift-v4/templates/patroni/secret-template.yaml @@ -0,0 +1,56 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + annotations: + description: Patroni Postgresql database cluster template secret + name: patroni-pgsql-secret-template +objects: +- apiVersion: v1 + kind: Secret + metadata: + labels: + app: ${NAME} + name: template.${NAME}-patroni + stringData: + replication-username: ${PATRONI_REPLICATION_USERNAME} + replication-password: ${PATRONI_REPLICATION_PASSWORD} + superuser-username: ${PATRONI_SUPERUSER_USERNAME} + superuser-password: ${PATRONI_SUPERUSER_PASSWORD} + app-db-name: ${APP_DB_NAME} + app-db-username: ${APP_DB_USERNAME} + app-db-password: ${APP_DB_PASSWORD} +parameters: +- description: The name of the application for labelling all artifacts. + displayName: Application Name + name: NAME + value: patroni +- description: Username of the superuser account for initialization. + displayName: Superuser Username + name: PATRONI_SUPERUSER_USERNAME + value: postgres +# generate: expression +# from: super-[a-zA-Z0-9]{6} +- description: Password of the superuser account for initialization. + displayName: Superuser Passsword + name: PATRONI_SUPERUSER_PASSWORD + generate: expression + from: '[a-zA-Z0-9]{32}' +- description: Username of the replication account for initialization. + displayName: Replication Username + name: PATRONI_REPLICATION_USERNAME + value: replication +# generate: expression +# from: rep-[a-zA-Z0-9]{6} +- description: Password of the replication account for initialization. + displayName: Repication Passsword + name: PATRONI_REPLICATION_PASSWORD + generate: expression + from: '[a-zA-Z0-9]{32}' +- name: APP_DB_USERNAME + from: user[a-z]{3} + generate: expression +- name: APP_DB_NAME + value: tfrs +- name: APP_DB_PASSWORD + generate: expression + from: '[a-zA-Z0-9]{32}' diff --git a/openshift-v4/templates/rabbitmq/README.md b/openshift-v4/templates/rabbitmq/README.md new file mode 100644 index 000000000..3e715e736 --- /dev/null +++ b/openshift-v4/templates/rabbitmq/README.md @@ -0,0 +1,27 @@ +### Files included + +* rabbitmq-cluster-dc.yaml deployment config file +* rabbitmq-secret-configmap-only.yaml create secret from secret template and create configmap, it is also included in rabbitmq-cluster-dc.yaml +* rabbitmq-web-route.yaml create route to rabbitmq gui +* secret-template.yaml create template.rabbitmq-secret + +### Before triggering pipeline + +1. Create template.rabbitmq-secret +oc process -f ./secret-template.yaml | oc create -f - -n [environment namespace] + +### After pipeline completes + + +1. 
+### After pipeline completes
+
+1. Log in to one of the RabbitMQ cluster pods and run the commands below:
+
+    rabbitmqctl add_user tfrs [password]  ## password refers to secret tfrs-dev-1696-rabbitmq-cluster-secret.tfrs_password
+    rabbitmqctl add_vhost /tfrs  ## yes, it is /tfrs
+
+2. Create the web route:
+
+    oc process -f ./rabbitmq-web-route.yaml SUFFIX= | oc create -f - -n [env name]
+
+3. Log in to the RabbitMQ admin web UI, go to the Admin tab and set the users up as below:
+    * Name | Tags | Can access virtual hosts | Has password
+    * rabbitmq | administrator | No Access | ●
+    * tfrs | | /tfrs | ●
\ No newline at end of file
diff --git a/openshift-v4/templates/rabbitmq/rabbitmq-cluster-dc.yaml b/openshift-v4/templates/rabbitmq/rabbitmq-cluster-dc.yaml
new file mode 100644
index 000000000..6725feb33
--- /dev/null
+++ b/openshift-v4/templates/rabbitmq/rabbitmq-cluster-dc.yaml
@@ -0,0 +1,318 @@
+apiVersion: v1
+kind: Template
+metadata:
+  name: rabbitmq-cluster
+  annotations:
+    description: "Deploys a RabbitMQ cluster"
+    iconClass: icon-rabbitmq
+    tags: rabbitmq,mq,messaging
+parameters:
+- name: NAME
+  displayName: Application name
+  description: the module name entered when running yo bcdk:pipeline; for this project it is tfrs
+  required: true
+- name: ENV_NAME
+  displayName: Environment name
+  description: sample values are dev, test and prod
+  required: true
+- name: SUFFIX
+  displayName: Deployment suffix
+  description: a sample value is -dev-161
+  required: true
+- name: NAMESPACE
+  description: "OpenShift project (current namespace)"
+  value: 0ab226-dev
+  required: true
+- name: CLUSTER_NAME
+  description: "Name of the RabbitMQ cluster"
+  value: rabbitmq-cluster
+- name: ISTAG
+  description: "Image to deploy"
+  value: rabbitmq:3.8.3-management
+- name: SERVICE_ACCOUNT
+  description: "Name of the service account used by RabbitMQ k8s plugin"
+  value: rabbitmq-discovery
+- name: VOLUME_SIZE
+  description: "Size of the RabbitMQ data volume"
+  value: '1Gi'
+- name: CPU_REQUEST
+  displayName: Requested CPU
+  description: Requested CPU
+  required: true
+  value: '200m'
+- name: CPU_LIMIT
+  displayName: CPU upper limit
+  description: CPU upper limit
+  required: true
+  value: '1000m'
+- name: MEMORY_REQUEST
+  displayName: Requested memory
+  description: Requested memory
+  required: true
+  value: '256Mi'
+- name: MEMORY_LIMIT
+  displayName: Memory upper limit
+  description: Memory upper limit
+  required: true
+  value: '2Gi'
+- name: REPLICA
+  value: '2'
+- name: POST_START_SLEEP
+  value: '120'
+- name: STORAGE_CLASS
+  value: netapp-block-standard
+
+objects:
+
+# This service account is needed for the rabbit_peer_discovery_k8s plugin to be
+# able to discover cluster nodes
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: ${NAME}${SUFFIX}-${SERVICE_ACCOUNT}
+
+- apiVersion: v1
+  kind: RoleBinding
+  metadata:
+    name: ${NAME}${SUFFIX}-${SERVICE_ACCOUNT}-view
+  roleRef:
+    kind: Role
+    name: view
+  subjects:
+  - kind: ServiceAccount
+    name: ${NAME}${SUFFIX}-${SERVICE_ACCOUNT}
+
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    annotations:
+      as-copy-of: template.rabbitmq-secret
+    name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-secret
+  stringData:
+    username: ${RABBITMQ_USER}
+    password: ${RABBITMQ_PASS}
+    cookie: ${ERLANG_COOKIE}
+    tfrs_password: ${TFRS_PASS}
+  type: kubernetes.io/basic-auth
+
+- apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-config
+    labels:
+      app: rabbitmq-cluster
+  data:
+    rabbitmq.conf: |
+      ## Clustering
+      cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+      cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+      cluster_formation.k8s.address_type = hostname
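+      ## The hostname address type works together with the headless service
+      ## declared later in this template; each node is then addressed by the
+      ## DNS suffix configured on the next line.
+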
cluster_formation.k8s.hostname_suffix = .${NAME}${SUFFIX}-${CLUSTER_NAME}.${NAMESPACE}.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + ## queue master locator + queue_master_locator=min-masters + ## policy.json is from image customized + ## management.load_definitions = /etc/rabbitmq/policy.json + enabled_plugins: | + [rabbitmq_management,rabbitmq_peer_discovery_k8s]. + +# Load balancer +#- kind: Service +# apiVersion: v1 +# metadata: +# name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-balancer +# spec: +# type: ClusterIP +# ports: +# - name: web +# protocol: TCP +# port: 15672 +# targetPort: 15672 +# - name: amqp +# protocol: TCP +# port: 5672 +# targetPort: 5672 +# selector: +# app: ${NAME}${SUFFIX}-${CLUSTER_NAME} + +# Headless service that makes it possible to lookup individual rabbitmq nodes +- apiVersion: v1 + kind: Service + metadata: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME} + spec: + selector: + app: ${NAME}${SUFFIX}-${CLUSTER_NAME} + clusterIP: None + ports: + - name: amqp + protocol: TCP + port: 5672 + targetPort: 5672 + - name: web + protocol: TCP + port: 15672 + targetPort: 15672 + +- apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME} + spec: + serviceName: ${NAME}${SUFFIX}-${CLUSTER_NAME} + replicas: ${{REPLICA}} + podManagementPolicy: OrderedReady + selector: + matchLabels: + app: ${NAME}${SUFFIX}-${CLUSTER_NAME} + template: + metadata: + labels: + app: ${NAME}${SUFFIX}-${CLUSTER_NAME} + spec: + serviceAccountName: ${NAME}${SUFFIX}-${SERVICE_ACCOUNT} + terminationGracePeriodSeconds: 30 + containers: + - name: rabbitmq + lifecycle: + postStart: + exec: + command: + - /bin/sh + - '-c' + - |- + sleep 90 + rabbitmqctl list_users | grep guest 2> /dev/null + if [ $? 
-ne 0 ]; + then + rabbitmqctl add_user guest guest + rabbitmqctl set_user_tags guest administrator + rabbitmqctl set_policy --vhost /tfrs ha-all ".*" '{"ha-mode":"all"}' + fi + command: + - sh + args: + - -c + - cp -v /etc/rabbitmq/rabbitmq.conf ${RABBITMQ_CONFIG_FILE}.conf; exec docker-entrypoint.sh rabbitmq-server + image: ${ISTAG} + imagePullPolicy: IfNotPresent + volumeMounts: + - name: config-volume + mountPath: /etc/rabbitmq + - name: rabbitmq-storage + mountPath: /var/lib/rabbitmq + ports: + - name: web + protocol: TCP + containerPort: 15672 + - name: amqp + protocol: TCP + containerPort: 5672 + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} +# livenessProbe: +# exec: +# command: ["rabbitmqctl", "node_health_check", "--timeout", "10"] +# failureThreshold: 3 +# initialDelaySeconds: 120 +# periodSeconds: 10 +# successThreshold: 1 +# timeoutSeconds: 10 + readinessProbe: + exec: + command: ["rabbitmqctl", "node_health_check", "--timeout", "10"] + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 10 + env: + - name: RABBITMQ_DEFAULT_USER + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-secret + key: username + - name: RABBITMQ_DEFAULT_PASS + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-secret + key: password + - name: RABBITMQ_ERLANG_COOKIE + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-secret + key: cookie + - name: K8S_SERVICE_NAME + value: ${NAME}${SUFFIX}-${CLUSTER_NAME} + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_NODENAME + value: "rabbit@$(POD_NAME).${NAME}${SUFFIX}-${CLUSTER_NAME}.$(POD_NAMESPACE).svc.cluster.local" + - name: RABBITMQ_CONFIG_FILE + value: /var/lib/rabbitmq/rabbitmq + - name: RABBITMQ_DEFAULT_VHOST + value: /tfrs + volumes: + - name: config-volume + configMap: + name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + - key: enabled_plugins + path: enabled_plugins + volumeClaimTemplates: + - metadata: + name: rabbitmq-storage + spec: + storageClassName: ${STORAGE_CLASS} + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: ${VOLUME_SIZE} + +## faster http test +# livenessProbe: +# failureThreshold: 3 +# httpGet: +# httpHeaders: +# - name: Authorization +# value: Basic +# path: /api/healthchecks/node +# port: 15672 +# scheme: HTTP +# initialDelaySeconds: 35 +# periodSeconds: 10 +# successThreshold: 1 +# timeoutSeconds: 20 +# readinessProbe: +# failureThreshold: 3 +# httpGet: +# httpHeaders: +# - name: Authorization +# value: Basic +# path: /api/healthchecks/node +# port: 15672 +# scheme: HTTP +# initialDelaySeconds: 35 +# periodSeconds: 10 +# successThreshold: 1 +# timeoutSeconds: 20 \ No newline at end of file diff --git a/openshift-v4/templates/rabbitmq/rabbitmq-secret-configmap-only.yaml b/openshift-v4/templates/rabbitmq/rabbitmq-secret-configmap-only.yaml new file mode 100644 index 000000000..3070aa2ba --- /dev/null +++ b/openshift-v4/templates/rabbitmq/rabbitmq-secret-configmap-only.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Template +metadata: + name: rabbitmq-cluster + annotations: + description: "Deploys a RabbitMQ cluster" + iconClass: icon-rabbitmq + 
    tags: rabbitmq,mq,messaging
+parameters:
+- name: NAME
+  displayName: Application name
+  description: the module name entered when running yo bcdk:pipeline; for this project it is tfrs
+  required: true
+- name: ENV_NAME
+  displayName: Environment name
+  description: sample values are dev, test and prod
+  required: true
+- name: SUFFIX
+  displayName: Deployment suffix
+  description: a sample value is -dev-161
+  required: true
+- name: NAMESPACE
+  description: "OpenShift project (current namespace)"
+  value: e52f12-dev
+  required: true
+- name: CLUSTER_NAME
+  description: "Name of the RabbitMQ cluster"
+  value: rabbitmq-cluster
+objects:
+- apiVersion: v1
+  kind: Secret
+  metadata:
+    annotations:
+      as-copy-of: template.rabbitmq-secret
+    name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-secret
+  stringData:
+    username: ${RABBITMQ_USER}
+    password: ${RABBITMQ_PASS}
+    cookie: ${ERLANG_COOKIE}
+    tfrs_password: ${TFRS_PASS}
+  type: kubernetes.io/basic-auth
+
+- apiVersion: v1
+  kind: ConfigMap
+  metadata:
+    name: ${NAME}${SUFFIX}-${CLUSTER_NAME}-config
+    labels:
+      app: rabbitmq-cluster
+  data:
+    rabbitmq.conf: |
+      ## Clustering
+      cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s
+      cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
+      cluster_formation.k8s.address_type = hostname
+      cluster_formation.k8s.hostname_suffix = .${NAME}${SUFFIX}-${CLUSTER_NAME}.${NAMESPACE}.svc.cluster.local
+      cluster_formation.node_cleanup.interval = 10
+      cluster_formation.node_cleanup.only_log_warning = true
+      cluster_partition_handling = autoheal
+      ## queue master locator
+      queue_master_locator=min-masters
+      ## policy.json is from image customized
+      ## management.load_definitions = /etc/rabbitmq/policy.json
+    enabled_plugins: |
+      [rabbitmq_management,rabbitmq_peer_discovery_k8s].
diff --git a/openshift-v4/templates/rabbitmq/rabbitmq-web-route.yaml b/openshift-v4/templates/rabbitmq/rabbitmq-web-route.yaml
new file mode 100644
index 000000000..6b21e68b7
--- /dev/null
+++ b/openshift-v4/templates/rabbitmq/rabbitmq-web-route.yaml
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: Template
+metadata:
+  name: rabbitmq-web-route
+  annotations:
+    description: "Deploys a RabbitMQ web route"
+    iconClass: icon-rabbitmq
+    tags: rabbitmq,mq,messaging
+parameters:
+- name: NAME
+  displayName: the module name
+  description: the module name entered when running yo bcdk:pipeline; for this project it is tfrs
+  value: tfrs
+  required: true
+- name: SUFFIX
+  displayName: suffix
+  description: a sample value is -dev-1696
+  required: true
+- name: CLUSTER_NAME
+  displayName: rabbitmq cluster name
+  description: rabbitmq cluster name
+  value: rabbitmq-cluster
+  required: true
+- name: OCP_NAME
+  displayName: openshift name
+  description: openshift name
+  value: apps.silver.devops
+  required: true
+objects:
+- apiVersion: route.openshift.io/v1
+  kind: Route
+  metadata:
+    creationTimestamp: null
+    labels:
+      name: ${NAME}-rabbitmq-web${SUFFIX}
+    name: ${NAME}-rabbitmq-web${SUFFIX}
+  spec:
+    host: ${NAME}-rabbitmq-web${SUFFIX}.${OCP_NAME}.gov.bc.ca
+    port:
+      targetPort: web
+    tls:
+      insecureEdgeTerminationPolicy: Redirect
+      termination: edge
+    to:
+      kind: Service
+      name: ${NAME}${SUFFIX}-${CLUSTER_NAME}
+      weight: 100
+    wildcardPolicy: None
\ No newline at end of file
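For reference, a filled-in invocation of the route template above could look like the following; the suffix and namespace values are illustrative, taken from samples elsewhere in this repo:

    oc process -f ./rabbitmq-web-route.yaml NAME=tfrs SUFFIX=-dev-1696 | oc create -f - -n 0ab226-dev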
instance" + value: rabbitmq +- name: RABBITMQ_PASS + description: "Password securing the RabbitMQ instance" + from: '[a-zA-Z0-9]{16}' + generate: expression + required: true +- name: ERLANG_COOKIE + description: "Cookie used for authentication of cluster nodes" + from: '[a-zA-Z0-9]{16}' + generate: expression + required: true +- name: TFRS_PASS + description: "Password fpr tfrs user in rabbitmq" + from: '[a-zA-Z0-9]{16}' + generate: expression + required: true +objects: +- apiVersion: v1 + kind: Secret + metadata: + annotations: null + name: template.rabbitmq-secret + stringData: + username: ${RABBITMQ_USER} + password: ${RABBITMQ_PASS} + cookie: ${ERLANG_COOKIE} + tfrs_password: ${TFRS_PASS} + type: Opaque \ No newline at end of file diff --git a/openshift-v4/templates/s2i-caddy/.pipeline/pipeline-cli b/openshift-v4/templates/s2i-caddy/.pipeline/pipeline-cli new file mode 100755 index 000000000..f02ccf9fe --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/.pipeline/pipeline-cli @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +export OCP_PIPELINE_VERSION='0.0.6' +OCP_PIPELINE_CLI_URL="https://raw.githubusercontent.com/BCDevOps/ocp-cd-pipeline/v${OCP_PIPELINE_VERSION}/src/main/resources/pipeline-cli" + + +curl -sSL "${OCP_PIPELINE_CLI_URL}" | bash -s "$@" diff --git a/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble b/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble new file mode 100755 index 000000000..99063b0c7 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble @@ -0,0 +1,43 @@ +#!/bin/sh -e +# +# S2I assemble script for the 's2i-caddy' image. +# The 'assemble' script builds your application source so that it is ready to run. +# +# For more information refer to the documentation: +# https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md +# + +if [[ "$1" == "-h" ]]; then + # If the 's2i-caddy' assemble script is executed with '-h' flag, + # print the usage. + exec /usr/libexec/s2i/usage +fi + +# Restore artifacts from the previous build (if they exist). +# +if [ "$(ls /tmp/artifacts/ 2>/dev/null)" ]; then + echo "---> Restoring build artifacts..." + mv /tmp/artifacts/. ./ +fi + +echo "---> Copying repository content into place..." +echo "Contents of /tmp/src:" +find /tmp/src -print +cp -Rf /tmp/src/* /var/www/html + +# (through detective work, we discovered that additional source image content is placed in /tmp/build/inputs by s2i) +# the bit below allows additional content that originates from one or more additional source images to be added to the output image +if [ -d /tmp/build/inputs ]; then + echo "Additional image content found..." + echo "---> Copying additional image content into place..." + cp -Rf /tmp/build/inputs/* /var/www/html +else + echo "No additional image content found." +fi + +if [[ -e "/var/www/html/Caddyfile" ]]; then + echo "Replacing default Caddyfile..." + cat /var/www/html/Caddyfile > /etc/Caddyfile +else + echo "Using default Caddyfile." 
+if [[ -e "/var/www/html/Caddyfile" ]]; then
+  echo "Replacing default Caddyfile..."
+  cat /var/www/html/Caddyfile > /etc/Caddyfile
+else
+  echo "Using default Caddyfile."
+fi
diff --git a/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble-runtime b/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble-runtime
new file mode 100755
index 000000000..df5e89bde
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/.s2i/bin/assemble-runtime
@@ -0,0 +1,2 @@
+#!/bin/sh -e
+echo no assemble-runtime needed
diff --git a/openshift-v4/templates/s2i-caddy/.s2i/bin/run b/openshift-v4/templates/s2i-caddy/.s2i/bin/run
new file mode 100755
index 000000000..fe1ded581
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/.s2i/bin/run
@@ -0,0 +1,10 @@
+#!/bin/sh -e
+#
+# S2I run script for the 's2i-caddy' image.
+# The run script executes the server that runs your application.
+#
+# For more information see the documentation:
+# https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md
+#
+
+exec caddy run --config /etc/Caddyfile
diff --git a/openshift-v4/templates/s2i-caddy/.s2i/bin/save-artifacts b/openshift-v4/templates/s2i-caddy/.s2i/bin/save-artifacts
new file mode 100755
index 000000000..e7f0d7064
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/.s2i/bin/save-artifacts
@@ -0,0 +1,10 @@
+#!/bin/sh -e
+#
+# S2I save-artifacts script for the 's2i-caddy' image.
+# The save-artifacts script streams a tar archive to standard output.
+# The archive contains the files and folders you want to re-use in the next build.
+#
+# For more information see the documentation:
+# https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md
+#
+# tar cf -
diff --git a/openshift-v4/templates/s2i-caddy/.s2i/bin/usage b/openshift-v4/templates/s2i-caddy/.s2i/bin/usage
new file mode 100755
index 000000000..acaeb61e0
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/.s2i/bin/usage
@@ -0,0 +1,12 @@
+#!/bin/sh -e
+cat <<EOF
+This is the s2i-caddy builder image.
+
+To build an application image, run:
+
+s2i build <source-repository> s2i-caddy <application-image>
+
+You can then run the resulting image via:
+
+docker run <application-image>
+EOF
diff --git a/openshift-v4/templates/s2i-caddy/Caddyfile b/openshift-v4/templates/s2i-caddy/Caddyfile
new file mode 100644
index 000000000..42be5efbf
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/Caddyfile
@@ -0,0 +1,7 @@
+0.0.0.0:2015
+
+root * /var/www/html
+
+file_server
+
+log
diff --git a/openshift-v4/templates/s2i-caddy/Dockerfile b/openshift-v4/templates/s2i-caddy/Dockerfile
new file mode 100644
index 000000000..dc1b9b2d3
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/Dockerfile
@@ -0,0 +1,56 @@
+FROM alpine:3.7
+MAINTAINER shea.phillips@cloudcompass.ca
+
+RUN apk update && \
+    apk upgrade
+
+# ==================================================================================================
+# Install Caddy
+# Refs:
+# - https://github.com/ZZROTDesign/alpine-caddy
+# - https://github.com/mholt/caddy
+# --------------------------------------------------------------------------------------------------
+RUN apk update && \
+    apk --no-cache add \
+    tini \
+    git \
+    openssh-client && \
+    apk --no-cache add --virtual \
+    devs \
+    tar \
+    curl
+
+# Install Caddy Server, and All Middleware
+RUN curl -L "https://github.com/caddyserver/caddy/releases/download/v2.0.0-rc.3/caddy_2.0.0-rc.3_linux_amd64.tar.gz" \
+    | tar --no-same-owner -C /usr/bin/ -xz caddy
+
+# Remove build devs
+RUN apk del devs
+
+LABEL io.openshift.s2i.scripts-url=image:///tmp/scripts
+
+## Copy the S2I scripts into place
+COPY ./.s2i/bin/ /tmp/scripts
+
+ADD Caddyfile /etc/Caddyfile
+
+# Create the location where
we will store our content, and fiddle the permissions so we will be able to write to it. +# Also twiddle the permissions on the Caddyfile and the s2i scripts dir so we will be able to overwrite them with a user-provided one sif desired. +RUN mkdir -p /var/www/html \ + && chmod g+w /var/www/html \ + && chmod g+w /etc/Caddyfile \ + && chmod g+w /tmp/scripts \ + && mkdir /tmp/src \ + && chmod g+w /tmp/src + +# Work-around for issues with S2I builds on Windows +WORKDIR /tmp + +# Expose the port for the container to Caddy's default +EXPOSE 2015 + +USER 1001 + +ENTRYPOINT ["/sbin/tini"] + +CMD ["sh","/tmp/scripts/usage"] diff --git a/openshift-v4/templates/s2i-caddy/Jenkinsfile b/openshift-v4/templates/s2i-caddy/Jenkinsfile new file mode 100644 index 000000000..f9f838d56 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/Jenkinsfile @@ -0,0 +1,60 @@ +pipeline { + agent none + options { + disableResume() + } + environment { + BASE_VERSION = 'v1' + IMAGE_STREAM_NAME = 's2i-caddy' + SOURCE_DIR = '.' + } + stages { + stage('Build') { + agent { label 'build' } + steps { + echo "Aborting all running jobs ..." + script { + abortAllPreviousBuildInProgress(currentBuild) + } + echo "BRANCH_NAME:${env.BRANCH_NAME}\nCHANGE_ID:${env.CHANGE_ID}\nCHANGE_TARGET:${env.CHANGE_TARGET}" + echo "Building ..." + sh ".pipeline/pipeline-cli build --pr=${CHANGE_ID} --config=${env.SOURCE_DIR}/openshift/config.groovy" + } + } + stage('Tag as latest') { + agent { label 'deploy' } + input { + message "Should we continue with tagging this image as ${env.BASE_VERSION}-latest?" + ok "Yes!" + } + steps { + echo "Tagging ${env.IMAGE_STREAM_NAME}:${env.BASE_VERSION}-latest and ${env.IMAGE_STREAM_NAME}:latest" + sh "oc -n 'bcgov-tools' tag ${env.IMAGE_STREAM_NAME}:build-v${CHANGE_ID} ${env.IMAGE_STREAM_NAME}:${env.BASE_VERSION}-latest" + sh "oc -n 'bcgov' tag bcgov-tools/${env.IMAGE_STREAM_NAME}:build-v${CHANGE_ID} ${env.IMAGE_STREAM_NAME}:${env.BASE_VERSION}-latest" + sh "oc -n 'bcgov' tag bcgov-tools/${env.IMAGE_STREAM_NAME}:build-v${CHANGE_ID} ${env.IMAGE_STREAM_NAME}:latest" + } + } + stage('Tag as stable') { + agent { label 'deploy' } + input { + message "Should we continue with tagging this image as ${env.BASE_VERSION}-stable?" + ok "Yes!" + } + steps { + echo "Tagging ${env.IMAGE_STREAM_NAME}:${env.BASE_VERSION}-stable and ${env.IMAGE_STREAM_NAME}:stable" + sh "oc -n 'bcgov' tag bcgov-tools/${env.IMAGE_STREAM_NAME}:build-v${CHANGE_ID} ${env.IMAGE_STREAM_NAME}:${env.BASE_VERSION}-stable" + sh "oc -n 'bcgov' tag bcgov-tools/${env.IMAGE_STREAM_NAME}:build-v${CHANGE_ID} ${env.IMAGE_STREAM_NAME}:stable" + } + } + stage('Acceptance') { + agent { label 'deploy' } + input { + message "is this PR closed in GitHub?" + ok "Yes!" + } + steps { + echo "Please accept/close PR via github UI for cleaning temporary objects" + } + } + } +} \ No newline at end of file diff --git a/openshift-v4/templates/s2i-caddy/LICENSE b/openshift-v4/templates/s2i-caddy/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/openshift-v4/templates/s2i-caddy/Makefile b/openshift-v4/templates/s2i-caddy/Makefile
new file mode 100644
index 000000000..7a3b0eb9c
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/Makefile
@@ -0,0 +1,10 @@
+
+IMAGE_NAME = s2i-caddy
+
+build:
+	docker build -t $(IMAGE_NAME) .
+
+.PHONY: test
+test:
+	docker build -t $(IMAGE_NAME)-candidate .
+	IMAGE_NAME=$(IMAGE_NAME)-candidate test/run
diff --git a/openshift-v4/templates/s2i-caddy/README.md b/openshift-v4/templates/s2i-caddy/README.md
new file mode 100644
index 000000000..361185665
--- /dev/null
+++ b/openshift-v4/templates/s2i-caddy/README.md
@@ -0,0 +1,72 @@
+
+## Source-to-Image (s2i) for CaddyServer
+
+This repo provides the bits necessary to build an OpenShift "s2i" builder image that can subsequently be used to serve web content with CaddyServer.
+
+## Features
+
+The main capability that this enables is *very* quick and easy publishing of static web content from GitHub repos.
+
+## Usage
+
+The s2i image is available in the BC Gov OpenShift instance as `bcgov-s2i-caddy`. There is a live build configuration that can rebuild and publish the image if/as updates are required.
+
+To create a build configuration using `bcgov-s2i-caddy` that will provide an image with content from an existing GitHub repo with web content, run the following in a `-tools` project:
+
+```
+oc new-build bcgov-s2i-caddy~<source-repo-url> --name=<build-name>
+```
+
+A concrete example:
+
+```
+oc new-build bcgov-s2i-caddy~https://github.com/bcgov/pathfinder.git
+```
+
+Note that a good practice is to capture generated BuildConfigurations and store them alongside your source code (or content) in GitHub (conventionally in an `openshift` directory in the root of the repo). This can easily be done as follows:
+
+```
+oc new-build bcgov-s2i-caddy~<source-repo-url> --name=<build-name> -o json > <build-name>-bc.json
+```
+
+You would then be able to use the generated file to create your BC, and have it on hand for future reference/updates. To use the generated file:
+
+```
+oc create -f <build-name>-bc.json -n <project>
+```
+
+## Running The Image
+
+The default entrypoint runs `caddy run --config /etc/Caddyfile` (see `.s2i/bin/run`); this can be overridden. Keep in mind that
+Caddy 2.x is not compatible with Caddy 1.x commands. Please check the [documentation](https://caddyserver.com/docs/getting-started) for more info; a small sketch of a Caddy 2 site config follows this README.
+
+## Requirements
+
+You need to have access to a `-tools` project in OpenShift and have the `oc` binary locally.
+
+## Getting Help or Reporting an Issue
+
+Post in `#general` or `#help-me` in the BC Gov Pathfinder Slack team.
+
+## How to Contribute
+
+*If you are including a Code of Conduct, make sure that you have a [CODE_OF_CONDUCT.md](SAMPLE-CODE_OF_CONDUCT.md) file, and include the following text in here in the README:*
+"Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md). By participating in this project you agree to abide by its terms."
+
+## License
+
+    Copyright 2015 Province of British Columbia
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
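For illustration only, and not part of the commit above: assuming the Caddy v2 binary this image installs, a minimal Caddyfile that a content repo could ship to enable directory listings might look like the following (the port and web root mirror the image defaults):

```
:2015

root * /var/www/html
file_server browse
```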
+ diff --git a/openshift-v4/templates/s2i-caddy/openshift/config.groovy b/openshift-v4/templates/s2i-caddy/openshift/config.groovy new file mode 100644 index 000000000..5756219f7 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/openshift/config.groovy @@ -0,0 +1,50 @@ +app { + name = "${opt.'name'?:'s2i-caddy'}" + namespaces { //can't call environments :( + 'build'{ + namespace = 'bcgov-tools' + disposable = true + } + 'dev' { + namespace = 'bcgov' + disposable = true + } + 'prod' { + namespace = 'bcgov' + disposable = false + } + } + + git { + workDir = ['git', 'rev-parse', '--show-toplevel'].execute().text.trim() + uri = ['git', 'config', '--get', 'remote.origin.url'].execute().text.trim() + ref = "refs/pull/${opt.'pr'}/head" + commit = ['git', 'rev-parse', 'HEAD'].execute().text.trim() + } + + build { + env { + name = "build" + id = "pr-${opt.'pr'}" + } + suffix = "-build-${opt.'pr'}" + id = "${app.name}${app.build.suffix}" + version = "${app.build.env.name}-v${opt.'pr'}" + name = "${opt.'build-name'?:app.name}" + + namespace = app.namespaces.'build'.namespace + timeoutInSeconds = 60*20 // 20 minutes + templates = [ + [ + 'file':'openshift/s2i-caddy-bc.json', + 'params':[ + 'NAME': app.build.name, + 'SUFFIX': app.build.suffix, + 'OUTPUT_IMAGE_TAG': app.build.version, + 'GIT_REPO_URL': "${app.git.uri}", + 'GIT_REF': "${app.git.ref}" + ] + ] + ] + } +} \ No newline at end of file diff --git a/openshift-v4/templates/s2i-caddy/openshift/s2i-caddy-bc.json b/openshift-v4/templates/s2i-caddy/openshift/s2i-caddy-bc.json new file mode 100644 index 000000000..faeca83b2 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/openshift/s2i-caddy-bc.json @@ -0,0 +1,145 @@ +{ + "kind": "Template", + "apiVersion": "v1", + "metadata": { + "name": "s2i-caddy", + "creationTimestamp": null + }, + "objects": [ + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "alpine-caddy", + "creationTimestamp": null, + "labels": { + "shared": "true" + } + }, + "spec": { + "tags": [ + { + "name": "v2", + "from": { + "kind": "DockerImage", + "name": "caddy:2.0.0-rc.3-alpine" + }, + "generation": null, + "importPolicy": {} + } + ] + }, + "status": { + "dockerImageRepository": "" + } + }, + { + "kind": "ImageStream", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}", + "creationTimestamp": null, + "labels": { + "shared": "true" + } + }, + "spec": {}, + "status": { + "dockerImageRepository": "" + } + }, + { + "kind": "BuildConfig", + "apiVersion": "v1", + "metadata": { + "name": "${NAME}${SUFFIX}", + "creationTimestamp": null + }, + "spec": { + "triggers": [ + { + "type": "ConfigChange" + }, + { + "type": "ImageChange", + "imageChange": {} + } + ], + "source": { + "type": "Git", + "git": { + "uri": "${GIT_REPO_URL}", + "ref": "${GIT_REF}" + } + }, + "strategy": { + "type": "Docker", + "dockerStrategy": { + "from": { + "kind": "ImageStreamTag", + "name": "alpine-caddy:v2" + } + } + }, + "output": { + "to": { + "kind": "ImageStreamTag", + "name": "${NAME}:${OUTPUT_IMAGE_TAG}" + } + }, + "resources": { + "requests":{ + "cpu": "1", + "memory": "2Gi" + }, + "limits":{ + "cpu": "2", + "memory": "3Gi" + } + }, + "postCommit": {}, + "completionDeadlineSeconds": 1200, + "failedBuildsHistoryLimit": 2, + "successfulBuildsHistoryLimit": 5 + }, + "status": { + "lastVersion": 0 + } + } + ], + "parameters": [ + { + "name": "NAME", + "displayName": "Name", + "description": "The name assigned to all objects.", + "required": true, + "value": "s2i-caddy" + }, + { + "name": "SUFFIX", + "description": "A 
suffix appended to the name of all generated objects.", + "value": "" + }, + { + "name": "GIT_REPO_URL", + "displayName": "Git Repo URL", + "description": "The URL to your GIT repo, don't use the this default unless your just experimenting.", + "required": true, + "value": "https://github.com/BCDevOps/s2i-caddy.git" + }, + { + "name": "GIT_REF", + "displayName": "Git Reference", + "description": "The git reference or branch.", + "required": true, + "value": "master" + }, + { + "name": "OUTPUT_IMAGE_TAG", + "displayName": "Output Image Tag", + "description": "The tag given to the built image.", + "required": true, + "value": "latest" + } + ] +} diff --git a/openshift-v4/templates/s2i-caddy/test/run b/openshift-v4/templates/s2i-caddy/test/run new file mode 100755 index 000000000..ff82332a4 --- /dev/null +++ b/openshift-v4/templates/s2i-caddy/test/run @@ -0,0 +1,160 @@ +#!/bin/bash +# +# The 'run' performs a simple test that verifies the S2I image. +# The main focus here is to exercise the S2I scripts. +# +# For more information see the documentation: +# https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# +IMAGE_NAME=${IMAGE_NAME-s2i-caddy-candidate} + +# Determining system utility executables (darwin compatibility check) +READLINK_EXEC="readlink" +MKTEMP_EXEC="mktemp" +if [[ "$OSTYPE" =~ 'darwin' ]]; then + ! type -a "greadlink" &>"/dev/null" || READLINK_EXEC="greadlink" + ! type -a "gmktemp" &>"/dev/null" || MKTEMP_EXEC="gmktemp" +fi + +test_dir="$($READLINK_EXEC -zf $(dirname "${BASH_SOURCE[0]}"))" +image_dir=$($READLINK_EXEC -zf ${test_dir}/..) +scripts_url="file://${image_dir}/.s2i/bin" +cid_file=$($MKTEMP_EXEC -u --suffix=.cid) + +# Since we built the candidate image locally, we don't want S2I to attempt to pull +# it from Docker hub +s2i_args="--pull-policy=never --loglevel=2" + +# Port the image exposes service to be tested +test_port=8080 + +image_exists() { + docker inspect $1 &>/dev/null +} + +container_exists() { + image_exists $(cat $cid_file) +} + +container_ip() { + if [ ! -z "$DOCKER_HOST" ] && [[ "$OSTYPE" =~ 'darwin' ]]; then + docker-machine ip + else + docker inspect --format="{{ .NetworkSettings.IPAddress }}" $(cat $cid_file) + fi +} + +container_port() { + if [ ! -z "$DOCKER_HOST" ] && [[ "$OSTYPE" =~ 'darwin' ]]; then + docker inspect --format="{{(index .NetworkSettings.Ports \"$test_port/tcp\" 0).HostPort}}" "$(cat "${cid_file}")" + else + echo $test_port + fi +} + +run_s2i_build() { + s2i build --incremental=true ${s2i_args} file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp +} + +prepare() { + if ! image_exists ${IMAGE_NAME}; then + echo "ERROR: The image ${IMAGE_NAME} must exist before this script is executed." 
+ exit 1 + fi + # s2i build requires the application is a valid 'Git' repository + pushd ${test_dir}/test-app >/dev/null + git init + git config user.email "build@localhost" && git config user.name "builder" + git add -A && git commit -m "Sample commit" + popd >/dev/null + run_s2i_build +} + +run_test_application() { + docker run --rm --cidfile=${cid_file} -p ${test_port} ${IMAGE_NAME}-testapp +} + +cleanup() { + if [ -f $cid_file ]; then + if container_exists; then + docker stop $(cat $cid_file) + fi + fi + if image_exists ${IMAGE_NAME}-testapp; then + docker rmi ${IMAGE_NAME}-testapp + fi +} + +check_result() { + local result="$1" + if [[ "$result" != "0" ]]; then + echo "S2I image '${IMAGE_NAME}' test FAILED (exit code: ${result})" + cleanup + exit $result + fi +} + +wait_for_cid() { + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + [ -f $cid_file ] && break + echo "Waiting for container to start..." + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done +} + +test_usage() { + echo "Testing 's2i usage'..." + s2i usage ${s2i_args} ${IMAGE_NAME} &>/dev/null +} + +test_connection() { + echo "Testing HTTP connection (http://$(container_ip):$(container_port))" + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + echo "Sending GET request to http://$(container_ip):$(container_port)/" + response_code=$(curl -s -w %{http_code} -o /dev/null http://$(container_ip):$(container_port)/) + status=$? + if [ $status -eq 0 ]; then + if [ $response_code -eq 200 ]; then + result=0 + fi + break + fi + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return $result +} + +# Build the application image twice to ensure the 'save-artifacts' and +# 'restore-artifacts' scripts are working properly +prepare +run_s2i_build +check_result $? + +# Verify the 'usage' script is working properly +test_usage +check_result $? + +# Verify that the HTTP connection can be established to test application container +run_test_application & + +# Wait for the container to write its CID file +wait_for_cid + +test_connection +check_result $? 
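+
+# All checks passed; stop the test container and remove the candidate test image.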
+ +cleanup + diff --git a/openshift-v4/templates/scan-coordinator/README.md b/openshift-v4/templates/scan-coordinator/README.md new file mode 100644 index 000000000..93f599eb7 --- /dev/null +++ b/openshift-v4/templates/scan-coordinator/README.md @@ -0,0 +1,11 @@ +### Files included + +scan-coordinator-bc.yaml build config +scan-coordinator-dc.yaml deployment config + +### Before triggering pipeline + +N/A +### After pipeline completes + +N/A \ No newline at end of file diff --git a/openshift-v4/templates/scan-coordinator/scan-coordinator-bc.yaml b/openshift-v4/templates/scan-coordinator/scan-coordinator-bc.yaml new file mode 100644 index 000000000..24ef93c37 --- /dev/null +++ b/openshift-v4/templates/scan-coordinator/scan-coordinator-bc.yaml @@ -0,0 +1,74 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: scan-coordinator-bc + creationTimestamp: +parameters: +- name: NAME + displayName: + description: the module name entered when run yo bcdk:pipeline, which is tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: GIT_URL + displayName: + description: tfrs repo + required: true +- name: GIT_REF + displayName: + description: tfrs ref + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the celery image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-scan-coordinator + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-scan-coordinator${SUFFIX} + creationTimestamp: + spec: + triggers: [] + runPolicy: Serial + source: + type: Git + git: + uri: ${GIT_URL} + ref: ${GIT_REF} + contextDir: security-scan/scan-coordinator + strategy: + type: Docker + dockerStrategy: + pullSecret: + name: docker-creds + noCache: true + forcePull: true + output: + to: + kind: ImageStreamTag + name: ${NAME}-scan-coordinator:${VERSION} + resources: {} + postCommit: {} + nodeSelector: + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + status: + lastVersion: 0 diff --git a/openshift-v4/templates/scan-coordinator/scan-coordinator-dc.yaml b/openshift-v4/templates/scan-coordinator/scan-coordinator-dc.yaml new file mode 100644 index 000000000..bcd8c3903 --- /dev/null +++ b/openshift-v4/templates/scan-coordinator/scan-coordinator-dc.yaml @@ -0,0 +1,154 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: scan-coordinator-dc + creationTimestamp: +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev ane etc. 
+ required: true +- name: NAMESPACE + displayName: Environment name + description: 'Sample values: 0ab226-dev, 0ab226-test and 0ab226-prod' + required: true +- name: VERSION + displayName: null + description: image tag name for output + required: true +- name: ENV_NAME + displayName: Environment name + description: 'Valid values: dev, test and prod' + required: true +- name: CPU_REQUEST + displayName: Requested CPU + description: Requested CPU + required: true +- name: CPU_LIMIT + displayName: CPU upper limit + description: CPU upper limit + required: true +- name: MEMORY_REQUEST + displayName: Requested memory + description: Requested memory + required: true +- name: MEMORY_LIMIT + displayName: Memory upper limit + description: Memory upper limit + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the scan coordinator image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-scan-coordinator + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-scan-coordinator${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-scan-coordinator${SUFFIX} + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - scan-coordinator + from: + kind: ImageStreamTag + name: ${NAME}-scan-coordinator:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + name: ${NAME}-scan-coordinator${SUFFIX} + template: + metadata: + creationTimestamp: + labels: + name: ${NAME}-scan-coordinator${SUFFIX} + spec: + containers: + - name: scan-coordinator + image: '' + env: + - name: BYPASS_CLAMAV + value: 'false' + - name: CLAMAV_HOST + value: ${NAME}-clamav-${ENV_NAME}.${NAMESPACE}.svc.cluster.local + - name: CLAMAV_PORT + value: '3310' + - name: AMQP_HOST + value: ${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: AMQP_VHOST + value: "/tfrs" + - name: AMQP_PORT + value: '5672' + - name: AMQP_USER + value: tfrs + - name: MINIO_ENDPOINT + value: ${NAME}-minio-${ENV_NAME}.apps.silver.devops.gov.bc.ca + - name: MINIO_USE_SSL + value: 'true' + - name: AMQP_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_ACCESS_KEY + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: ${NAME}-minio-${ENV_NAME} + key: MINIO_SECRET_KEY + resources: + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/scan-handler/README.md b/openshift-v4/templates/scan-handler/README.md new file mode 100644 index 000000000..9b641295a --- /dev/null +++ b/openshift-v4/templates/scan-handler/README.md @@ -0,0 +1,12 @@ +### Files included + 
+scan-handler-bc.json build config +scan-hendler-dc.json deployment config + +### Before triggering pipeline + +N/A + +### After pipeline completes + +N/A \ No newline at end of file diff --git a/openshift-v4/templates/scan-handler/scan-handler-bc.yaml b/openshift-v4/templates/scan-handler/scan-handler-bc.yaml new file mode 100644 index 000000000..8832b7903 --- /dev/null +++ b/openshift-v4/templates/scan-handler/scan-handler-bc.yaml @@ -0,0 +1,87 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: scan-handler-bc + creationTimestamp: +parameters: +- name: NAME + displayName: + description: the module name entered when run yo bcdk:pipeline, which is tfrs + required: true +- name: SUFFIX + displayName: + description: sample is -pr-0 + required: true +- name: VERSION + displayName: + description: image tag name for output + required: true +- name: RELEASE_BRANCH + displayName: + description: the release branch name + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the celery image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-scan-handler + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- kind: BuildConfig + apiVersion: build.openshift.io/v1 + metadata: + name: ${NAME}-scan-handler${SUFFIX} + creationTimestamp: + spec: + triggers: [] + runPolicy: Serial + source: + type: Dockerfile + dockerfile: | + FROM python:3.6.8-slim-stretch + RUN apt-get update \ + && apt-get install -y git \ + && apt-get install -y supervisor + WORKDIR /app + RUN git clone https://github.com/bcgov/tfrs.git + WORKDIR /app/tfrs + RUN git checkout ${tfrs_release} + RUN pip install --upgrade pip \ + && pip install -r backend/requirements.txt + RUN cp /app/tfrs/security-scan/scan-handler/scan-handler.conf /etc/supervisor/conf.d + RUN chgrp -R root /var/log/supervisor + RUN chmod -R g+w /var/log/supervisor + RUN chmod -R g+w /run || : + RUN chmod -R g+w /app + # RUN sed -i "s/chmod=0700/chmod=0770/g" /etc/supervisor/supervisord.conf + CMD ["supervisord"] + strategy: + type: Docker + dockerStrategy: + pullSecret: + name: docker-creds + noCache: true + env: + - name: tfrs_release + value: ${RELEASE_BRANCH} + forcePull: true + output: + to: + kind: ImageStreamTag + name: ${NAME}-scan-handler:${VERSION} + resources: {} + postCommit: {} + nodeSelector: + successfulBuildsHistoryLimit: 5 + failedBuildsHistoryLimit: 5 + status: + lastVersion: 0 diff --git a/openshift-v4/templates/scan-handler/scan-handler-dc.yaml b/openshift-v4/templates/scan-handler/scan-handler-dc.yaml new file mode 100644 index 000000000..a2ce8a9c2 --- /dev/null +++ b/openshift-v4/templates/scan-handler/scan-handler-dc.yaml @@ -0,0 +1,149 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: scan-handler-dc + creationTimestamp: +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev ane etc. + required: true +- name: NAMESPACE + displayName: Environment name + description: 'Sample values: 0ab226-dev, 0ab226-test and 0ab226-prod' + required: true +- name: VERSION + displayName: null + description: image tag name for output + required: true +- description: Starting amount of CPU the container can use. 
+ displayName: CPU REQUEST + name: CPU_REQUEST + value: '100m' + required: true +- description: Maximum amount of CPU the container can use. + displayName: CPU Limit + name: CPU_LIMIT + value: '250m' + required: true +- description: Starting amount of memory the container can use. + displayName: Memory Request + name: MEMORY_REQUEST + value: 256Mi + required: true +- description: Maximum amount of memory the container can use. + displayName: Memory Limit + name: MEMORY_LIMIT + value: 512Mi + required: true +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the scan handler image + labels: + shared: "true" + creationTimestamp: null + name: ${NAME}-scan-handler + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: '' +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-scan-handler${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-scan-handler${SUFFIX} + spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 600 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - scan-handler + from: + kind: ImageStreamTag + name: ${NAME}-scan-handler:${VERSION} + lastTriggeredImage: '' + - type: ConfigChange + replicas: 1 + revisionHistoryLimit: 10 + test: false + selector: + name: ${NAME}-scan-handler${SUFFIX} + template: + metadata: + creationTimestamp: + labels: + name: ${NAME}-scan-handler${SUFFIX} + spec: + containers: + - name: scan-handler + image: '' + env: + - name: RABBITMQ_VHOST + value: "/tfrs" + - name: RABBITMQ_USER + value: tfrs + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: ${NAME}${SUFFIX}-rabbitmq-cluster-secret + key: tfrs_password + - name: RABBITMQ_HOST + value: ${NAME}${SUFFIX}-rabbitmq-cluster.${NAMESPACE}.svc.cluster.local + - name: RABBITMQ_PORT + value: '5672' + - name: DATABASE_SERVICE_NAME + value: patroni-master${SUFFIX} + - name: DATABASE_ENGINE + value: postgresql + - name: DATABASE_NAME + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-name + - name: DATABASE_USER + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-username + - name: DATABASE_PASSWORD + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-password + resources: + limits: + cpu: 250m + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: Always + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 diff --git a/openshift-v4/templates/schema-spy/README.md b/openshift-v4/templates/schema-spy/README.md new file mode 100644 index 000000000..4a39a438f --- /dev/null +++ b/openshift-v4/templates/schema-spy/README.md @@ -0,0 +1,32 @@ +### Files included + +schemaspy-bc.yaml build config +schemaspy-dc.json + +### Before triggering pipeline + +pipeline only refrest tags, so need to manually build and deploy schemaspy first + +oc process -f ./schemaspy-bc.yaml + +make sure secret patroni-[ENV_NAME] exist +make sure + +oc process -f ./schemaspy-dc.json \ +DATABASE_SERVICE_NAME=patroni-master-dev \ +ENV_NAME=dev \ +| oc create -f - -n mem-tfrs-dev + +oc process -f ./schemaspy-dc.json \ +DATABASE_SERVICE_NAME=patroni-master-test \ +ENV_NAME=test \ 
+| oc create -f - -n 0ab226-test + +oc process -f ./schemaspy-dc.yaml \ +NAME=tfrs \ +SUFFIX=-prod \ +ENV_NAME=prod \ +| oc create -f - -n 0ab226-prod + +### After pipeline completes + +N/A \ No newline at end of file diff --git a/openshift-v4/templates/schema-spy/schemaspy-bc.yaml b/openshift-v4/templates/schema-spy/schemaspy-bc.yaml new file mode 100644 index 000000000..0ef22d9b6 --- /dev/null +++ b/openshift-v4/templates/schema-spy/schemaspy-bc.yaml @@ -0,0 +1,47 @@ +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + creationTimestamp: null + name: schemaspy-bc +objects: +- apiVersion: image.openshift.io/v1 + kind: ImageStream + metadata: + annotations: + description: Keeps track of changes in the schemaspy image + labels: + shared: "true" + creationTimestamp: null + name: schema-spy + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +- apiVersion: build.openshift.io/v1 + kind: BuildConfig + metadata: + creationTimestamp: null + labels: + app: schema-spy + name: schema-spy + spec: + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: schema-spy:latest + postCommit: {} + resources: {} + runPolicy: Serial + source: + git: + uri: https://github.com/bcgov/SchemaSpy.git + type: Git + strategy: + dockerStrategy: {} + type: Docker + triggers: + - type: ConfigChange + status: + lastVersion: 0 diff --git a/openshift-v4/templates/schema-spy/schemaspy-dc.yaml b/openshift-v4/templates/schema-spy/schemaspy-dc.yaml new file mode 100644 index 000000000..91acc2db0 --- /dev/null +++ b/openshift-v4/templates/schema-spy/schemaspy-dc.yaml @@ -0,0 +1,364 @@ +--- +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: schema-spy-dc + creationTimestamp: +parameters: +- name: NAME + displayName: App name + description: App name + value: tfrs + required: true +- name: SUFFIX + displayName: suffix + description: such as -dev-1696, -dev, etc. + required: true +- name: ENV_NAME + displayName: environment name + description: such as dev, test and prod + required: true +- description: Starting amount of CPU the container can use. + displayName: CPU REQUEST + name: CPU_REQUEST_PUBLIC + value: 50m + required: true +- description: Maximum amount of CPU the container can use. + displayName: CPU Limit + name: CPU_LIMIT_PUBLIC + value: 1000m + required: true +- description: Starting amount of memory the container can use. + displayName: Memory Request + name: MEMORY_REQUEST_PUBLIC + value: 512Mi + required: true +- description: Maximum amount of memory the container can use. + displayName: Memory Limit + name: MEMORY_LIMIT_PUBLIC + value: 2Gi + required: true +- description: Starting amount of CPU the container can use. + displayName: CPU REQUEST + name: CPU_REQUEST_AUDIT + value: 50m + required: true +- description: Maximum amount of CPU the container can use. + displayName: CPU Limit + name: CPU_LIMIT_AUDIT + value: 500m + required: true +- description: Starting amount of memory the container can use. + displayName: Memory Request + name: MEMORY_REQUEST_AUDIT + value: 256Mi + required: true +- description: Maximum amount of memory the container can use. + displayName: Memory Limit + name: MEMORY_LIMIT_AUDIT + value: 1Gi + required: true +objects: +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-schema-spy-public${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-public-template + annotations: + description: Defines how to deploy the public SchemaSpy instance.
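+ # The ImageChange trigger below watches the schema-spy:prod tag in 0ab226-tools; +# the pipeline rolls out a new build by retagging it, for example: +# oc tag schema-spy:latest schema-spy:prod -n 0ab226-tools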
+ spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1800 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - schema-spy-public + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: schema-spy:prod + lastTriggeredImage: + - type: ConfigChange + replicas: 1 + test: false + selector: + name: ${NAME}-schema-spy-public${SUFFIX} + template: + metadata: + name: ${NAME}-schema-spy-public${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-schema-spy-public${SUFFIX} + spec: +# initContainers: +# - name: wait-for-database +# image: toschneck/wait-for-it:latest +# command: ['sh', '-c', './wait-for-it.sh -t 0 ${NAME}-backend${SUFFIX}.0ab226-${ENV_NAME}.svc.cluster.local:8080'] + containers: + - name: schema-spy-public + image: + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: DATABASE_SERVICE_NAME + value: patroni-master${SUFFIX} + - name: POSTGRESQL_DATABASE + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-name + - name: POSTGRESQL_USER + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-username + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-password + resources: + limits: + cpu: ${CPU_LIMIT_PUBLIC} + memory: ${MEMORY_LIMIT_PUBLIC} + requests: + cpu: ${CPU_REQUEST_PUBLIC} + memory: ${MEMORY_REQUEST_PUBLIC} + livenessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 1750 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 420 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 +- kind: DeploymentConfig + apiVersion: apps.openshift.io/v1 + metadata: + name: ${NAME}-schema-spy-audit${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-audit-template + annotations: + description: Defines how to deploy the audit SchemaSpy instance.
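+ # This second instance documents the audit schema: the DATABASE_SCHEMA env var below +# points SchemaSpy at tfrs_audit instead of the default application schema.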
+ spec: + strategy: + type: Recreate + recreateParams: + timeoutSeconds: 1800 + resources: {} + activeDeadlineSeconds: 21600 + triggers: + - type: ImageChange + imageChangeParams: + automatic: true + containerNames: + - schema-spy-audit + from: + kind: ImageStreamTag + namespace: 0ab226-tools + name: schema-spy:prod + lastTriggeredImage: + - type: ConfigChange + replicas: 1 + test: false + selector: + name: ${NAME}-schema-spy-audit${SUFFIX} + template: + metadata: + name: ${NAME}-schema-spy-audit${SUFFIX} + creationTimestamp: + labels: + name: ${NAME}-schema-spy-audit${SUFFIX} + spec: +# initContainers: +# - name: wait-for-database +# image: toschneck/wait-for-it:latest +# command: ['sh', '-c', './wait-for-it.sh -t 0 ${NAME}-backend${SUFFIX}.0ab226-${ENV_NAME}.svc.cluster.local:8080'] + containers: + - name: schema-spy-audit + image: + ports: + - containerPort: 8080 + protocol: TCP + env: + - name: DATABASE_SERVICE_NAME + value: patroni-master${SUFFIX} + - name: POSTGRESQL_DATABASE + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-name + - name: POSTGRESQL_USER + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-username + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: patroni${SUFFIX} + key: app-db-password + - name: DATABASE_SCHEMA + value: tfrs_audit + resources: + limits: + cpu: ${CPU_LIMIT_AUDIT} + memory: ${MEMORY_LIMIT_AUDIT} + requests: + cpu: ${CPU_REQUEST_AUDIT} + memory: ${MEMORY_REQUEST_AUDIT} + livenessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 90 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: "/" + port: 8080 + scheme: HTTP + initialDelaySeconds: 90 + timeoutSeconds: 3 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + terminationMessagePath: "/dev/termination-log" + terminationMessagePolicy: File + imagePullPolicy: IfNotPresent + restartPolicy: Always + terminationGracePeriodSeconds: 30 + dnsPolicy: ClusterFirst + securityContext: {} + schedulerName: default-scheduler + status: + latestVersion: 0 + observedGeneration: 0 + replicas: 0 + updatedReplicas: 0 + availableReplicas: 0 + unavailableReplicas: 0 +- kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-schema-spy-public${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-public-template + annotations: + description: Exposes and load balances the application pods + spec: + ports: + - name: 8080-tcp + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + name: ${NAME}-schema-spy-public${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Service + apiVersion: v1 + metadata: + name: ${NAME}-schema-spy-audit${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-audit-template + annotations: + description: Exposes and load balances the application pods + spec: + ports: + - name: 8080-tcp + protocol: TCP + port: 8080 + targetPort: 8080 + selector: + name: ${NAME}-schema-spy-audit${SUFFIX} + type: ClusterIP + sessionAffinity: None + status: + loadBalancer: {} +- kind: Route + apiVersion: route.openshift.io/v1 + metadata: + name: ${NAME}-schema-spy-public${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-public-template + spec: + host: ${NAME}-schema-spy-public${SUFFIX}.apps.silver.devops.gov.bc.ca + to: + kind: Service + name: ${NAME}-schema-spy-public${SUFFIX} + weight: 100 + port: + targetPort: 8080-tcp + tls: + termination: edge + insecureEdgeTerminationPolicy: Allow
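+ # Edge termination ends TLS at the router; the Allow policy also serves +# plain-HTTP requests instead of redirecting them.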
+ wildcardPolicy: None + status: +- kind: Route + apiVersion: route.openshift.io/v1 + metadata: + name: ${NAME}-schema-spy-audit${SUFFIX} + creationTimestamp: + labels: + template: schema-spy-audit-template + spec: + host: ${NAME}-schema-spy-audit${SUFFIX}.apps.silver.devops.gov.bc.ca + to: + kind: Service + name: ${NAME}-schema-spy-audit${SUFFIX} + weight: 100 + port: + targetPort: 8080-tcp + tls: + termination: edge + insecureEdgeTerminationPolicy: Allow + wildcardPolicy: None + status: + diff --git a/openshift/templates/components/document-security-scan/clamav-bc.json b/openshift/templates/components/document-security-scan/clamav-bc.json index 5420f8c62..e9e308776 100644 --- a/openshift/templates/components/document-security-scan/clamav-bc.json +++ b/openshift/templates/components/document-security-scan/clamav-bc.json @@ -41,7 +41,7 @@ "source": { "type": "Git", "git": { - "ref":"clamav-0.101.2-on-buster", + "ref":"ca-cert", "uri": "https://github.com/bcgov/docker-clamav.git" } }, diff --git a/security-scan/scan-coordinator/Dockerfile b/security-scan/scan-coordinator/Dockerfile index eb3af66eb..142818fdb 100644 --- a/security-scan/scan-coordinator/Dockerfile +++ b/security-scan/scan-coordinator/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12 +FROM golang:1.15 WORKDIR /go/src/scan-coordinator COPY . . diff --git a/security-scan/scan-coordinator/coordinator.go b/security-scan/scan-coordinator/coordinator.go index 26a19980d..c23c1cf31 100644 --- a/security-scan/scan-coordinator/coordinator.go +++ b/security-scan/scan-coordinator/coordinator.go @@ -1,6 +1,7 @@ package main import ( + "context" "net/url" "strings" "log" @@ -8,6 +9,7 @@ "github.com/streadway/amqp" "github.com/dutchcoders/go-clamd" "github.com/minio/minio-go" + "github.com/minio/minio-go/pkg/credentials" "io" ) @@ -147,15 +149,15 @@ func testClamAVConnection(conf *config) { func testMinioConnection(conf *config) { log.Printf("Verifying Minio connection") - client, err := minio.New(conf.MinioEndpoint, - conf.MinioAccessKey, - conf.MinioSecretKey, - conf.MinioSecure) + client, err := minio.New(conf.MinioEndpoint, &minio.Options{ + Creds: credentials.NewStaticV4(conf.MinioAccessKey, conf.MinioSecretKey, ""), + Secure: conf.MinioSecure, + }) if err != nil { panic(err) } - _, err = client.ListBuckets() + _, err = client.ListBuckets(context.Background()) if err != nil { panic(err) } @@ -188,16 +190,16 @@ func handleRequest(conf *config, body []byte) (response ScanResponse) { bucket := tokens[1] obj := tokens[2] - client, err := minio.New(conf.MinioEndpoint, - conf.MinioAccessKey, - conf.MinioSecretKey, - conf.MinioSecure) + client, err := minio.New(conf.MinioEndpoint, &minio.Options{ + Creds: credentials.NewStaticV4(conf.MinioAccessKey, conf.MinioSecretKey, ""), + Secure: conf.MinioSecure, + }) if err != nil { log.Print(err) return } - resp, err := client.GetObject(bucket, obj, minio.GetObjectOptions{}) + resp, err := client.GetObject(context.Background(), bucket, obj, minio.GetObjectOptions{}) if err != nil { log.Print(err) return
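+	// Note: the Options-based minio.New and the context-accepting ListBuckets/GetObject +	// calls follow the v7-style minio-go API. Whether the un-suffixed import paths above +	// resolve to that API depends on how the build fetches the library (not verified here); +	// a module-mode build would likely need the github.com/minio/minio-go/v7 paths.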