Merge pull request #2351 from AlexsLemonade/BEW111/2204-api-improvements
#2204 API improvements
BEW111 authored Jun 30, 2020
2 parents 87cecae + 1f5c982 commit 1bb4fe3
Showing 4 changed files with 312 additions and 103 deletions.
225 changes: 135 additions & 90 deletions api/data_refinery_api/serializers.py
@@ -61,13 +61,11 @@ class Meta:
 
 
 class OrganismIndexSerializer(serializers.ModelSerializer):
-
     organism_name = serializers.StringRelatedField(source="organism", read_only=True)
     download_url = serializers.SerializerMethodField()
 
     class Meta:
         model = OrganismIndex
-
         fields = (
             "id",
             "assembly_name",
@@ -88,6 +86,71 @@ def get_download_url(self, obj):
         return None
 
 
+##
+# Jobs
+##
+
+
+class SurveyJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = SurveyJob
+        fields = (
+            "id",
+            "source_type",
+            "success",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
+class DownloaderJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = DownloaderJob
+        fields = (
+            "id",
+            "downloader_task",
+            "num_retries",
+            "retried",
+            "was_recreated",
+            "worker_id",
+            "worker_version",
+            "nomad_job_id",
+            "failure_reason",
+            "success",
+            "original_files",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
+class ProcessorJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = ProcessorJob
+        fields = (
+            "id",
+            "pipeline_applied",
+            "num_retries",
+            "retried",
+            "worker_id",
+            "ram_amount",
+            "volume_index",
+            "worker_version",
+            "failure_reason",
+            "nomad_job_id",
+            "success",
+            "original_files",
+            "datasets",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
 ##
 # Results
 ##
@@ -105,12 +168,6 @@ class Meta:
         )
 
 
-class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = ComputationalResultAnnotation
-        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
-
-
 class ComputedFileSerializer(serializers.ModelSerializer):
     class Meta:
         model = ComputedFile
@@ -128,22 +185,10 @@ class Meta:
         )
 
 
-class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
+class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
     class Meta:
-        model = ComputedFile
-        fields = (
-            "id",
-            "filename",
-            "size_in_bytes",
-            "is_smashable",
-            "is_qc",
-            "sha1",
-            "s3_bucket",
-            "s3_key",
-            "download_url",
-            "created_at",
-            "last_modified",
-        )
+        model = ComputationalResultAnnotation
+        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
 
 
 class ComputationalResultSerializer(serializers.ModelSerializer):
@@ -171,10 +216,70 @@ class Meta:
         )
 
 
+class DetailedComputedFileSerializer(ComputedFileSerializer):
+    result = ComputationalResultSerializer(many=False, read_only=False)
+    samples = DetailedExperimentSampleSerializer(many=True)
+    compendia_organism_name = serializers.CharField(
+        source="compendia_organism__name", read_only=True
+    )
+
+    class Meta:
+        model = ComputedFile
+        fields = (
+            "id",
+            "filename",
+            "samples",
+            "size_in_bytes",
+            "is_qn_target",
+            "is_smashable",
+            "is_qc",
+            "is_compendia",
+            "quant_sf_only",
+            "compendia_version",
+            "compendia_organism_name",
+            "sha1",
+            "s3_bucket",
+            "s3_key",
+            "s3_url",
+            "download_url",
+            "created_at",
+            "last_modified",
+            "result",
+        )
+
+
+class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = ComputedFile
+        fields = (
+            "id",
+            "filename",
+            "size_in_bytes",
+            "is_smashable",
+            "is_qc",
+            "sha1",
+            "s3_bucket",
+            "s3_key",
+            "download_url",
+            "created_at",
+            "last_modified",
+        )
+
+
+class DetailedComputationalResultSerializer(ComputationalResultSerializer):
+    processor = ProcessorSerializer(many=False)
+    organism_index = OrganismIndexSerializer(many=False)
+
+
+class ComputationalResultWithUrlSerializer(ComputationalResultSerializer):
+    files = ComputedFileWithUrlSerializer(many=True, source="computedfile_set")
+
+
+class DetailedComputationalResultWithUrlSerializer(ComputationalResultWithUrlSerializer):
+    processor = ProcessorSerializer(many=False)
+    organism_index = OrganismIndexSerializer(many=False)
+
+
 class ComputationalResultNoFilesSerializer(serializers.ModelSerializer):
     annotations = ComputationalResultAnnotationSerializer(
         many=True, source="computationalresultannotation_set"
@@ -263,7 +368,7 @@ class Meta:
        }
 
 
-class OriginalFileListSerializer(serializers.ModelSerializer):
+class OriginalFileSerializer(serializers.ModelSerializer):
     class Meta:
         model = OriginalFile
         fields = (
@@ -272,7 +377,6 @@ class Meta:
             "samples",
             "size_in_bytes",
             "sha1",
-            "samples",
             "processor_jobs",
             "downloader_jobs",
             "source_url",
@@ -284,6 +388,12 @@ class Meta:
         )
 
 
+class DetailedOriginalFileSerializer(OriginalFileSerializer):
+    samples = DetailedExperimentSampleSerializer(many=True)
+    processor_jobs = ProcessorJobSerializer(many=True)
+    downloader_jobs = DownloaderJobSerializer(many=True)
+
+
 ##
 # Samples
 ##
@@ -523,71 +633,6 @@ class Meta:
         )
 
 
-##
-# Jobs
-##
-
-
-class SurveyJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = SurveyJob
-        fields = (
-            "id",
-            "source_type",
-            "success",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
-class DownloaderJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = DownloaderJob
-        fields = (
-            "id",
-            "downloader_task",
-            "num_retries",
-            "retried",
-            "was_recreated",
-            "worker_id",
-            "worker_version",
-            "nomad_job_id",
-            "failure_reason",
-            "success",
-            "original_files",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
-class ProcessorJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = ProcessorJob
-        fields = (
-            "id",
-            "pipeline_applied",
-            "num_retries",
-            "retried",
-            "worker_id",
-            "ram_amount",
-            "volume_index",
-            "worker_version",
-            "failure_reason",
-            "nomad_job_id",
-            "success",
-            "original_files",
-            "datasets",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
 ##
 # Datasets
 ##
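Note on the reorganization above: moving the Jobs serializers ahead of the Results section is what lets DetailedOriginalFileSerializer nest ProcessorJobSerializer and DownloaderJobSerializer, since Python evaluates class bodies top to bottom and the nested serializers must already be defined. A minimal sketch of how a view might use the new list/detail split (the viewset below is illustrative, not part of this commit; the import paths are assumptions):

from rest_framework import viewsets

from data_refinery_api.serializers import (
    DetailedOriginalFileSerializer,
    OriginalFileSerializer,
)
from data_refinery_common.models import OriginalFile


class OriginalFileViewSet(viewsets.ReadOnlyModelViewSet):
    """Hypothetical read-only viewset; only the serializers come from this diff."""

    queryset = OriginalFile.objects.all()

    def get_serializer_class(self):
        # Flat serializer for list responses; the detail route nests
        # full sample, processor-job, and downloader-job objects.
        if self.action == "retrieve":
            return DetailedOriginalFileSerializer
        return OriginalFileSerializer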
32 changes: 26 additions & 6 deletions api/data_refinery_api/tests.py
@@ -202,9 +202,6 @@ def test_all_endpoints(self):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(response["X-Source-Revision"], get_env_variable("SYSTEM_VERSION"))
 
-        response = self.client.get(reverse("experiments", kwargs={"version": API_VERSION}))
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-
         response = self.client.get(reverse("samples", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
@@ -220,10 +217,12 @@ def test_all_endpoints(self):
         )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("samples", kwargs={"version": API_VERSION}))
+        response = self.client.get(reverse("organisms", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("organisms", kwargs={"version": API_VERSION}))
+        response = self.client.get(
+            reverse("organisms", kwargs={"version": API_VERSION}) + "HOMO_SAPIENS/"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("platforms", kwargs={"version": API_VERSION}))
@@ -238,16 +237,26 @@ def test_all_endpoints(self):
         response = self.client.get(reverse("downloader_jobs", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("downloader_jobs", kwargs={"version": API_VERSION}) + "1/"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("processor_jobs", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("processor_jobs", kwargs={"version": API_VERSION}) + "1/"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("results", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("results", kwargs={"version": API_VERSION}))
+        response = self.client.get(reverse("results", kwargs={"version": API_VERSION}) + "1/")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("schema_redoc", kwargs={"version": API_VERSION}))
@@ -261,6 +270,17 @@ def test_all_endpoints(self):
         )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("transcriptome_indices", kwargs={"version": API_VERSION})
+            + "?organism__name=DANIO_RERIO"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+        response = self.client.get(
+            reverse("transcriptome_indices", kwargs={"version": API_VERSION}) + "?result_id=1"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("create_dataset", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
 
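The added assertions exercise the new detail routes (downloader_jobs/1/, processor_jobs/1/, results/1/) and query-string filtering on transcriptome_indices. From a client's perspective, the new surface looks roughly like this (the base URL and the IDs are assumptions for illustration, not taken from this commit):

import requests

BASE = "https://api.refine.bio/v1"  # assumed deployment; adjust to your host

# New detail route for a single processor job.
job = requests.get(f"{BASE}/processor_jobs/1/").json()

# transcriptome_indices now accepts filters such as organism name or result id.
indices = requests.get(
    f"{BASE}/transcriptome_indices/",
    params={"organism__name": "DANIO_RERIO"},
).json()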
