Merge pull request #2161 from AlexsLemonade/revert-2157-revert-2151-dev
Revert "Revert "[DEPLOY] Fix for microbes and GSE75083""
kurtwheeler authored Feb 25, 2020
2 parents c51ca1c + 66748ef commit 4d7f834
Showing 119 changed files with 447,901 additions and 29,549 deletions.
1,660 changes: 1,660 additions & 0 deletions .circleci/codecov.sh

Large diffs are not rendered by default.

19 changes: 11 additions & 8 deletions .circleci/config.yml
@@ -29,7 +29,6 @@ jobs:

# Install Nomad
- run: sudo ./scripts/install_nomad.sh

# Start Nomad and register jobs.
- run:
    command: sudo -E ./scripts/run_nomad.sh -e test
Expand All @@ -43,19 +42,21 @@ jobs:
# Running these in the same job as the common tests is good
# because their dockerfiles are very similar so a lot of the
# build time is saved by only building those layers once.
- run: sudo chown -R circleci:circleci workers/test_volume/
- run:
    command: .circleci/filter_tests.sh -t downloaders
    no_output_timeout: 1h

# Run Foreman Tests
- run: mkdir -p test_volume && chmod -R a+rw test_volume
- run: mkdir -p test_volume && chmod -R a+rw test_volume && sudo chown -R circleci:circleci test_volume

# The foreman includes the end-to-end tests, but some of these
# require docker images which are not built in this
# workflow. Therefore we exclude salmon, affymetrix, and
# transcriptome and let those end-to-end tests get run in the
# workflows that include building those images.
- run: ./foreman/run_tests.sh --exclude-tag=salmon --exclude-tag=transcriptome --exclude-tag=affymetrix
- run: .circleci/upload_test_coverage.sh foreman

# Run NO_OP tests
- run: sudo chown -R circleci:circleci workers/test_volume/
@@ -90,7 +91,9 @@ jobs:
- run: ./scripts/update_models.sh

# Run Common Tests.
- run: mkdir -p test_volume && chmod -R a+rw test_volume && sudo chown -R circleci:circleci test_volume
- run: ./common/run_tests.sh
- run: .circleci/upload_test_coverage.sh common

- run: ./scripts/prepare_image.sh -i smasher -s workers -d localhost:5000

@@ -155,15 +158,16 @@ jobs:
- run: ./scripts/rebuild_es_index.sh

# Run API Tests.
- run: mkdir -p test_volume && chmod -R a+rw test_volume && sudo chown -R circleci:circleci test_volume
- run: ./api/run_tests.sh
- run: .circleci/upload_test_coverage.sh api

- run:
    command: .circleci/filter_tests.sh -t salmon
    no_output_timeout: 1h

# Install Nomad
- run: sudo ./scripts/install_nomad.sh

# Start Nomad and register jobs.
- run:
    command: sudo -E ./scripts/run_nomad.sh -e test
@@ -178,9 +182,9 @@ jobs:
- run: docker push localhost:5000/dr_salmon

# Containers run as a different user so we need to give them permission to the test directory.
- run: mkdir -p test_volume && chmod -R a+rw test_volume

- run: mkdir -p test_volume && chmod -R a+rw test_volume && sudo chown -R circleci:circleci test_volume
- run: ./foreman/run_tests.sh --tag=salmon --tag=transcriptome
- run: .circleci/upload_test_coverage.sh foreman

tx_illumina_tests:
working_directory: ~/refinebio
@@ -262,7 +266,6 @@ jobs:

# Install Nomad
- run: sudo ./scripts/install_nomad.sh

# Start Nomad and register jobs.
- run:
    command: sudo -E ./scripts/run_nomad.sh -e test
@@ -279,12 +282,12 @@ jobs:
- run: docker push localhost:5000/dr_affymetrix

# Containers run as a different user so we need to give them permission to the test directory.
- run: mkdir -p test_volume && chmod -R a+rw test_volume

- run: mkdir -p test_volume && chmod -R a+rw test_volume && sudo chown -R circleci:circleci test_volume
- run:
    command: ./foreman/run_tests.sh --tag=affymetrix
    # This takes more than 10 minutes, but not much more.
    no_output_timeout: 20m
- run: .circleci/upload_test_coverage.sh foreman

deploy:
  machine: true
2 changes: 2 additions & 0 deletions .circleci/filter_tests.sh
@@ -9,3 +9,5 @@ else
echo "Running all tests..";
./workers/run_tests.sh "$@"
fi

./.circleci/upload_test_coverage.sh workers
39 changes: 39 additions & 0 deletions .circleci/upload_test_coverage.sh
@@ -0,0 +1,39 @@
#!/bin/bash

# Script to upload code coverage reports to Codecov from CircleCI

project=$1
if [[ $project == "" ]]
then
    echo "No project specified"
    exit 1
fi

if [[ $project == "workers" ]]
then
    # the workers project uses its own test_volume directory
    test_volume="workers/test_volume"
else
    test_volume="test_volume"
fi

coverage_file="${test_volume}/coverage.xml"

if [[ ! -f $coverage_file ]]
then
    echo "Coverage file wasn't found. Were the tests run first?"
    exit 0 # exit this script, but don't fail the tests because of this.
fi

output_file="${test_volume}/${project}_coverage.xml"

# In the test coverage report, all file paths are relative to each project
# folder, but they need to be relative to the repo's root directory. That's
# why we prepend the project folder name to each file path in coverage.xml.
sed "s/filename=\"/filename=\"$project\//g" $coverage_file > $output_file

# codecov.sh is located at https://codecov.io/bash
# we downloaded it for convenience
./.circleci/codecov.sh -f "$output_file" -Z -F $project

rm -f $coverage_file $output_file
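
The sed line above rewrites each project-relative path in coverage.xml to be repo-root-relative before handing the file to codecov.sh (-f picks the file, -Z fails the step if the upload errors, -F attaches the project name as a flag). A minimal Python sketch of the same rewrite, using a hypothetical coverage.xml fragment rather than real repo output:

```python
import re

# Hypothetical fragment of a coverage.xml produced inside the "common" project.
coverage_xml = '<class filename="data_refinery_common/utils.py" line-rate="0.9"/>'
project = "common"

# Mirrors the script's sed: prepend the project folder to every filename attribute.
rewritten = re.sub(r'filename="', 'filename="{}/'.format(project), coverage_xml)

print(rewritten)
# <class filename="common/data_refinery_common/utils.py" line-rate="0.9"/>
```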
1 change: 1 addition & 0 deletions .gitignore
@@ -29,6 +29,7 @@ infrastructure/.terraform/
# We download a lot of files into the test_volume directory when
# running tests, which we don't need to track.
workers/test_volume/*
!workers/test_volume/test_download_files
# However for a couple tests we do store the data in the repo and need to track them.
!workers/test_volume/raw/TEST/TRANSCRIPTOME_INDEX/aegilops_tauschii_short.gtf.gz
!workers/test_volume/raw/TEST/NO_OP/test.txt
2 changes: 1 addition & 1 deletion README.md
@@ -1,4 +1,4 @@
# Refine.bio [![Build Status](https://circleci.com/gh/AlexsLemonade/refinebio/tree/dev.svg?&style=shield)](https://circleci.com/gh/AlexsLemonade/refinebio/)
# Refine.bio [![Build Status](https://circleci.com/gh/AlexsLemonade/refinebio/tree/dev.svg?&style=shield)](https://circleci.com/gh/AlexsLemonade/refinebio/) [![codecov](https://codecov.io/gh/AlexsLemonade/refinebio/branch/master/graph/badge.svg)](https://codecov.io/gh/AlexsLemonade/refinebio)

<!-- This section needs to be drastically improved -->
Refine.bio harmonizes petabytes of publicly available biological data into
1 change: 0 additions & 1 deletion api/data_refinery_api/views.py
@@ -536,7 +536,6 @@ def perform_update(self, serializer):
requests.post(
    settings.ENGAGEMENTBOT_WEBHOOK,
    json={
        "channel": "ccdl-general",  # Move to robots when we get sick of these
        "username": "EngagementBot",
        "icon_emoji": ":halal:",
        "attachments": [
2 changes: 1 addition & 1 deletion api/requirements.in
@@ -1,5 +1,5 @@
coverage
django==2.2.9
django==2.2.10
psycopg2-binary
boto3
requests>=2.20.0
2 changes: 1 addition & 1 deletion api/requirements.txt
@@ -18,7 +18,7 @@ django-elasticsearch-dsl==6.4.2
django-filter==2.0.0
django-hstore==1.4.2 # via djangorestframework-hstore
django-nine==0.2.2 # via django-elasticsearch-dsl-drf
django==2.2.9
django==2.2.10
djangorestframework-hstore==1.3
djangorestframework==3.9.4
docutils==0.14 # via botocore
8 changes: 8 additions & 0 deletions api/run_tests.sh
@@ -20,6 +20,13 @@ if ! [ "$(docker ps --filter name=drdb -q)" ]; then
    exit 1
fi

project_root=$(pwd) # "cd .." called above
volume_directory="$project_root/test_volume"
if [ ! -d "$volume_directory" ]; then
    mkdir "$volume_directory"
    chmod -R a+rwX "$volume_directory"
fi

./scripts/prepare_image.sh -i api_local -s api

. ./scripts/common.sh
@@ -32,4 +39,5 @@ docker run \
  --add-host=nomad:"$HOST_IP" \
  --add-host=elasticsearch:"$ES_HOST_IP" \
  --env-file api/environments/test \
  --volume "$volume_directory":/home/user/data_store \
  -it ccdlstaging/dr_api_local bash -c "$(run_tests_with_coverage "$@")"
10 changes: 10 additions & 0 deletions common/data_refinery_common/constants.py
@@ -0,0 +1,10 @@
from data_refinery_common.utils import get_env_variable

LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# We store what salmon outputs as its version, therefore for
# comparisons or defaults we shouldn't just store the version string,
# we need something with the pattern: 'salmon X.X.X'
CURRENT_SALMON_VERSION = "salmon " + get_env_variable("SALMON_VERSION", "0.13.1")
CHUNK_SIZE = 1024 * 256 # chunk_size is in bytes
# Let this fail if SYSTEM_VERSION is unset.
SYSTEM_VERSION = get_env_variable("SYSTEM_VERSION")
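
Since CURRENT_SALMON_VERSION mirrors the exact string salmon reports about itself, version checks can be a plain string comparison. A hedged sketch of that idea (the stored value below is hypothetical, not taken from the repo):

```python
CURRENT_SALMON_VERSION = "salmon 0.13.1"

# A version string previously recorded from salmon's own output.
stored_version = "salmon 0.9.1"  # hypothetical stored value

# Both sides follow the same 'salmon X.X.X' pattern, so a direct
# equality check is enough to spot results from an older salmon.
if stored_version != CURRENT_SALMON_VERSION:
    print("Result was produced by an older salmon; consider reprocessing.")
```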
68 changes: 43 additions & 25 deletions common/data_refinery_common/models/__init__.py
@@ -1,37 +1,55 @@
from data_refinery_common.models.command_progress import ( # noqa
    CdfCorrectedAccession,
    SurveyedAccession,
)
from data_refinery_common.models.jobs import ( # noqa
    DownloaderJob,
    ProcessorJob,
    SurveyJob,
    SurveyJobKeyValue,
)
from data_refinery_common.models.models import ( # noqa
    APIToken,
    CompendiumResult,
    CompendiumResultOrganismAssociation,
    ComputationalResult,
    ComputationalResultAnnotation,
    ComputedFile,
    Dataset,
    DownloaderJobOriginalFileAssociation,
    Experiment,
    ExperimentAnnotation,
    ExperimentOrganismAssociation,
    ExperimentResultAssociation,
    ExperimentSampleAssociation,
    Organism,
    OrganismIndex,
    OriginalFile,
    OriginalFileSampleAssociation,
    Pipeline,
    Processor,
    ProcessorJobDatasetAssociation,
    ProcessorJobOriginalFileAssociation,
    Sample,
    SampleAnnotation,
    SampleComputedFileAssociation,
    SampleResultAssociation,
)
from data_refinery_common.models.api_token import APIToken # noqa
from data_refinery_common.models.associations.compendium_result_organism_association import ( # noqa
    CompendiumResultOrganismAssociation,
)
from data_refinery_common.models.associations.downloaderjob_originalfile_association import ( # noqa
    DownloaderJobOriginalFileAssociation,
)
from data_refinery_common.models.associations.experiment_organism_association import ( # noqa
    ExperimentOrganismAssociation,
)
from data_refinery_common.models.associations.experiment_result_association import ( # noqa
    ExperimentResultAssociation,
)
from data_refinery_common.models.associations.experiment_sample_association import ( # noqa
    ExperimentSampleAssociation,
)
from data_refinery_common.models.associations.original_file_sample_association import ( # noqa
    OriginalFileSampleAssociation,
)
from data_refinery_common.models.associations.processorjob_dataset_association import ( # noqa
    ProcessorJobDatasetAssociation,
)
from data_refinery_common.models.associations.processorjob_originalfile_association import ( # noqa
    ProcessorJobOriginalFileAssociation,
)
from data_refinery_common.models.associations.sample_computed_file_association import ( # noqa
    SampleComputedFileAssociation,
)
from data_refinery_common.models.associations.sample_result_association import ( # noqa
    SampleResultAssociation,
)
from data_refinery_common.models.command_progress import ( # noqa
    CdfCorrectedAccession,
    SurveyedAccession,
)
from data_refinery_common.models.compendium_result import CompendiumResult # noqa
from data_refinery_common.models.computational_result import ComputationalResult # noqa
from data_refinery_common.models.computational_result_annotation import ( # noqa
    ComputationalResultAnnotation,
)
from data_refinery_common.models.computed_file import ComputedFile # noqa
from data_refinery_common.models.dataset import Dataset # noqa
from data_refinery_common.models.experiment import Experiment # noqa
from data_refinery_common.models.experiment_annotation import ExperimentAnnotation # noqa
from data_refinery_common.models.jobs.downloader_job import DownloaderJob # noqa
from data_refinery_common.models.jobs.processor_job import ProcessorJob # noqa
from data_refinery_common.models.jobs.survey_job import SurveyJob # noqa
from data_refinery_common.models.jobs.survey_job_key_value import SurveyJobKeyValue # noqa
from data_refinery_common.models.organism import Organism # noqa
from data_refinery_common.models.organism_index import OrganismIndex # noqa
from data_refinery_common.models.original_file import OriginalFile # noqa
from data_refinery_common.models.pipeline import Pipeline # noqa
from data_refinery_common.models.processor import Processor # noqa
from data_refinery_common.models.sample import Sample # noqa
from data_refinery_common.models.sample_annotation import SampleAnnotation # noqa
32 changes: 32 additions & 0 deletions common/data_refinery_common/models/api_token.py
@@ -0,0 +1,32 @@
import uuid

from django.conf import settings
from django.db import models
from django.utils import timezone


class APIToken(models.Model):
    """ Required for starting a smash job """

    # ID
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Activation
    is_activated = models.BooleanField(default=False)

    # Common Properties
    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(APIToken, self).save(*args, **kwargs)

    @property
    def terms_and_conditions(self):
        """ """
        return settings.TERMS_AND_CONDITIONS
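
A minimal sketch of how this model might be used, assuming a configured Django environment; none of these calls appear in the diff itself:

```python
from data_refinery_common.models import APIToken

# save() stamps created_at on first save and refreshes last_modified every time.
token = APIToken()
token.save()

# Activation would typically happen once a user accepts the terms.
token.is_activated = True
token.save()

print(token.id)                    # UUID primary key assigned at creation
print(token.terms_and_conditions)  # proxies settings.TERMS_AND_CONDITIONS
```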
Empty file.
16 changes: 16 additions & 0 deletions common/data_refinery_common/models/associations/compendium_result_organism_association.py
@@ -0,0 +1,16 @@
from django.db import models

from data_refinery_common.models.compendium_result import CompendiumResult
from data_refinery_common.models.organism import Organism


class CompendiumResultOrganismAssociation(models.Model):

    compendium_result = models.ForeignKey(
        CompendiumResult, blank=False, null=False, on_delete=models.CASCADE
    )
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "compendium_result_organism_associations"
        unique_together = ("compendium_result", "organism")
18 changes: 18 additions & 0 deletions common/data_refinery_common/models/associations/downloaderjob_originalfile_association.py
@@ -0,0 +1,18 @@
from django.db import models

from data_refinery_common.models.jobs.downloader_job import DownloaderJob
from data_refinery_common.models.original_file import OriginalFile


class DownloaderJobOriginalFileAssociation(models.Model):

    downloader_job = models.ForeignKey(
        DownloaderJob, blank=False, null=False, on_delete=models.CASCADE
    )
    original_file = models.ForeignKey(
        OriginalFile, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "downloaderjob_originalfile_associations"
        unique_together = ("downloader_job", "original_file")
14 changes: 14 additions & 0 deletions common/data_refinery_common/models/associations/experiment_organism_association.py
@@ -0,0 +1,14 @@
from django.db import models

from data_refinery_common.models.experiment import Experiment
from data_refinery_common.models.organism import Organism


class ExperimentOrganismAssociation(models.Model):

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    organism = models.ForeignKey(Organism, blank=False, null=False, on_delete=models.CASCADE)

    class Meta:
        db_table = "experiment_organism_associations"
        unique_together = ("experiment", "organism")
16 changes: 16 additions & 0 deletions common/data_refinery_common/models/associations/experiment_result_association.py
@@ -0,0 +1,16 @@
from django.db import models

from data_refinery_common.models.computational_result import ComputationalResult
from data_refinery_common.models.experiment import Experiment


class ExperimentResultAssociation(models.Model):

    experiment = models.ForeignKey(Experiment, blank=False, null=False, on_delete=models.CASCADE)
    result = models.ForeignKey(
        ComputationalResult, blank=False, null=False, on_delete=models.CASCADE
    )

    class Meta:
        db_table = "experiment_result_associations"
        unique_together = ("result", "experiment")