diff --git a/api/data_refinery_api/serializers.py b/api/data_refinery_api/serializers.py
index 326b0facb..e497530dd 100644
--- a/api/data_refinery_api/serializers.py
+++ b/api/data_refinery_api/serializers.py
@@ -61,13 +61,11 @@ class Meta:
 
 
 class OrganismIndexSerializer(serializers.ModelSerializer):
-    organism_name = serializers.StringRelatedField(source="organism", read_only=True)
     download_url = serializers.SerializerMethodField()
 
     class Meta:
         model = OrganismIndex
-
         fields = (
             "id",
             "assembly_name",
@@ -88,6 +86,71 @@ def get_download_url(self, obj):
         return None
 
 
+##
+# Jobs
+##
+
+
+class SurveyJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = SurveyJob
+        fields = (
+            "id",
+            "source_type",
+            "success",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
+class DownloaderJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = DownloaderJob
+        fields = (
+            "id",
+            "downloader_task",
+            "num_retries",
+            "retried",
+            "was_recreated",
+            "worker_id",
+            "worker_version",
+            "nomad_job_id",
+            "failure_reason",
+            "success",
+            "original_files",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
+class ProcessorJobSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = ProcessorJob
+        fields = (
+            "id",
+            "pipeline_applied",
+            "num_retries",
+            "retried",
+            "worker_id",
+            "ram_amount",
+            "volume_index",
+            "worker_version",
+            "failure_reason",
+            "nomad_job_id",
+            "success",
+            "original_files",
+            "datasets",
+            "start_time",
+            "end_time",
+            "created_at",
+            "last_modified",
+        )
+
+
 ##
 # Results
 ##
@@ -105,12 +168,6 @@ class Meta:
         )
 
 
-class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = ComputationalResultAnnotation
-        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
-
-
 class ComputedFileSerializer(serializers.ModelSerializer):
     class Meta:
         model = ComputedFile
@@ -128,22 +185,10 @@ class Meta:
         )
 
 
-class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
+class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
     class Meta:
-        model = ComputedFile
-        fields = (
-            "id",
-            "filename",
-            "size_in_bytes",
-            "is_smashable",
-            "is_qc",
-            "sha1",
-            "s3_bucket",
-            "s3_key",
-            "download_url",
-            "created_at",
-            "last_modified",
-        )
+        model = ComputationalResultAnnotation
+        fields = ("id", "data", "is_ccdl", "created_at", "last_modified")
 
 
 class ComputationalResultSerializer(serializers.ModelSerializer):
@@ -171,10 +216,70 @@ class Meta:
         )
 
 
+class DetailedComputedFileSerializer(ComputedFileSerializer):
+    result = ComputationalResultSerializer(many=False, read_only=False)
+    samples = DetailedExperimentSampleSerializer(many=True)
+    compendia_organism_name = serializers.CharField(
+        source="compendia_organism__name", read_only=True
+    )
+
+    class Meta:
+        model = ComputedFile
+        fields = (
+            "id",
+            "filename",
+            "samples",
+            "size_in_bytes",
+            "is_qn_target",
+            "is_smashable",
+            "is_qc",
+            "is_compendia",
+            "quant_sf_only",
+            "compendia_version",
+            "compendia_organism_name",
+            "sha1",
+            "s3_bucket",
+            "s3_key",
+            "s3_url",
+            "download_url",
+            "created_at",
+            "last_modified",
+            "result",
+        )
+
+
+class ComputedFileWithUrlSerializer(serializers.ModelSerializer):
+    class Meta:
+        model = ComputedFile
+        fields = (
+            "id",
+            "filename",
+            "size_in_bytes",
+            "is_smashable",
+            "is_qc",
+            "sha1",
+            "s3_bucket",
+            "s3_key",
+            "download_url",
+            "created_at",
+            "last_modified",
+        )
+
+
+class DetailedComputationalResultSerializer(ComputationalResultSerializer):
+    processor = ProcessorSerializer(many=False)
+    organism_index = OrganismIndexSerializer(many=False)
+
+
 class ComputationalResultWithUrlSerializer(ComputationalResultSerializer):
     files = ComputedFileWithUrlSerializer(many=True, source="computedfile_set")
 
 
+class DetailedComputationalResultWithUrlSerializer(ComputationalResultWithUrlSerializer):
+    processor = ProcessorSerializer(many=False)
+    organism_index = OrganismIndexSerializer(many=False)
+
+
 class ComputationalResultNoFilesSerializer(serializers.ModelSerializer):
     annotations = ComputationalResultAnnotationSerializer(
         many=True, source="computationalresultannotation_set"
     )
@@ -263,7 +368,7 @@ class Meta:
         }
 
 
-class OriginalFileListSerializer(serializers.ModelSerializer):
+class OriginalFileSerializer(serializers.ModelSerializer):
     class Meta:
         model = OriginalFile
         fields = (
@@ -272,7 +377,6 @@ class Meta:
             "samples",
             "size_in_bytes",
             "sha1",
-            "samples",
             "processor_jobs",
             "downloader_jobs",
             "source_url",
             "is_archive",
             "has_raw",
             "created_at",
             "last_modified",
@@ -284,6 +388,12 @@ class Meta:
         )
 
 
+class DetailedOriginalFileSerializer(OriginalFileSerializer):
+    samples = DetailedExperimentSampleSerializer(many=True)
+    processor_jobs = ProcessorJobSerializer(many=True)
+    downloader_jobs = DownloaderJobSerializer(many=True)
+
+
 ##
 # Samples
 ##
@@ -523,71 +633,6 @@ class Meta:
         )
 
 
-##
-# Jobs
-##
-
-
-class SurveyJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = SurveyJob
-        fields = (
-            "id",
-            "source_type",
-            "success",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
-class DownloaderJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = DownloaderJob
-        fields = (
-            "id",
-            "downloader_task",
-            "num_retries",
-            "retried",
-            "was_recreated",
-            "worker_id",
-            "worker_version",
-            "nomad_job_id",
-            "failure_reason",
-            "success",
-            "original_files",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
-class ProcessorJobSerializer(serializers.ModelSerializer):
-    class Meta:
-        model = ProcessorJob
-        fields = (
-            "id",
-            "pipeline_applied",
-            "num_retries",
-            "retried",
-            "worker_id",
-            "ram_amount",
-            "volume_index",
-            "worker_version",
-            "failure_reason",
-            "nomad_job_id",
-            "success",
-            "original_files",
-            "datasets",
-            "start_time",
-            "end_time",
-            "created_at",
-            "last_modified",
-        )
-
-
 ##
 # Datasets
 ##
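The Detailed* serializers above build payloads by composition: each subclasses a base serializer and overrides its relation fields, so a detail response inlines the related processor, organism index, samples, and jobs instead of returning bare primary keys. A minimal sketch of the effect, assuming a Django shell against a populated database (field values illustrative):

from data_refinery_api.serializers import DetailedComputationalResultSerializer
from data_refinery_common.models import ComputationalResult

# Serialize one result; the nested serializers expand related objects inline.
result = ComputationalResult.public_objects.first()
data = DetailedComputationalResultSerializer(result).data
# data["processor"] and data["organism_index"] come back as nested dicts,
# e.g. {"id": 1, "name": ..., ...}, rather than integer primary keys.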
diff --git a/api/data_refinery_api/tests.py b/api/data_refinery_api/tests.py
index 7f3961da7..faf8e63bd 100644
--- a/api/data_refinery_api/tests.py
+++ b/api/data_refinery_api/tests.py
@@ -202,9 +202,6 @@ def test_all_endpoints(self):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(response["X-Source-Revision"], get_env_variable("SYSTEM_VERSION"))
 
-        response = self.client.get(reverse("experiments", kwargs={"version": API_VERSION}))
-        self.assertEqual(response.status_code, status.HTTP_200_OK)
-
         response = self.client.get(reverse("samples", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
@@ -220,10 +217,12 @@ def test_all_endpoints(self):
         )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("samples", kwargs={"version": API_VERSION}))
+        response = self.client.get(reverse("organisms", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("organisms", kwargs={"version": API_VERSION}))
+        response = self.client.get(
+            reverse("organisms", kwargs={"version": API_VERSION}) + "HOMO_SAPIENS/"
+        )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("platforms", kwargs={"version": API_VERSION}))
@@ -238,16 +237,26 @@ def test_all_endpoints(self):
         response = self.client.get(reverse("downloader_jobs", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("downloader_jobs", kwargs={"version": API_VERSION}) + "1/"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("processor_jobs", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("processor_jobs", kwargs={"version": API_VERSION}) + "1/"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("stats", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("results", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
-        response = self.client.get(reverse("results", kwargs={"version": API_VERSION}))
+        response = self.client.get(reverse("results", kwargs={"version": API_VERSION}) + "1/")
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
         response = self.client.get(reverse("schema_redoc", kwargs={"version": API_VERSION}))
@@ -261,6 +270,17 @@ def test_all_endpoints(self):
         )
         self.assertEqual(response.status_code, status.HTTP_200_OK)
 
+        response = self.client.get(
+            reverse("transcriptome_indices", kwargs={"version": API_VERSION})
+            + "?organism__name=DANIO_RERIO"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+        response = self.client.get(
+            reverse("transcriptome_indices", kwargs={"version": API_VERSION}) + "?result_id=1"
+        )
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+
         response = self.client.get(reverse("create_dataset", kwargs={"version": API_VERSION}))
         self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
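A note on how these tests address the new detail routes: the job detail patterns in urls.py reuse their list route's name, so the tests build detail URLs by appending an ID to the reversed list URL, while routes that receive a unique detail name can be reversed directly. A sketch of both styles, assuming the test module's API_VERSION constant:

from django.urls import reverse

API_VERSION = "v1"  # assumed; the test module defines its own constant

# Appending to the reversed list route, as the tests above do:
url = reverse("downloader_jobs", kwargs={"version": API_VERSION}) + "1/"

# Reversing a uniquely named detail route directly:
url = reverse("results_detail", kwargs={"version": API_VERSION, "id": 1})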
response = self.client.get(reverse("platforms", kwargs={"version": API_VERSION})) @@ -238,16 +237,26 @@ def test_all_endpoints(self): response = self.client.get(reverse("downloader_jobs", kwargs={"version": API_VERSION})) self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get( + reverse("downloader_jobs", kwargs={"version": API_VERSION}) + "1/" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get(reverse("processor_jobs", kwargs={"version": API_VERSION})) self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get( + reverse("processor_jobs", kwargs={"version": API_VERSION}) + "1/" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get(reverse("stats", kwargs={"version": API_VERSION})) self.assertEqual(response.status_code, status.HTTP_200_OK) response = self.client.get(reverse("results", kwargs={"version": API_VERSION})) self.assertEqual(response.status_code, status.HTTP_200_OK) - response = self.client.get(reverse("results", kwargs={"version": API_VERSION})) + response = self.client.get(reverse("results", kwargs={"version": API_VERSION}) + "1/") self.assertEqual(response.status_code, status.HTTP_200_OK) response = self.client.get(reverse("schema_redoc", kwargs={"version": API_VERSION})) @@ -261,6 +270,17 @@ def test_all_endpoints(self): ) self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get( + reverse("transcriptome_indices", kwargs={"version": API_VERSION}) + + "?organism__name=DANIO_RERIO" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + response = self.client.get( + reverse("transcriptome_indices", kwargs={"version": API_VERSION}) + "?result_id=1" + ) + self.assertEqual(response.status_code, status.HTTP_200_OK) + response = self.client.get(reverse("create_dataset", kwargs={"version": API_VERSION})) self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED) diff --git a/api/data_refinery_api/urls.py b/api/data_refinery_api/urls.py index cb70a6fa2..1ea93d0e3 100644 --- a/api/data_refinery_api/urls.py +++ b/api/data_refinery_api/urls.py @@ -13,11 +13,14 @@ APITokenView, CompendiumResultDetails, CompendiumResultList, + ComputationalResultsDetail, ComputationalResultsList, + ComputedFilesDetail, ComputedFilesList, CreateApiTokenView, CreateDatasetView, DatasetView, + DownloaderJobDetail, DownloaderJobList, ExperimentDetail, ExperimentDocumentView, @@ -25,9 +28,13 @@ FailedDownloaderJobStats, FailedProcessorJobStats, InstitutionList, + OrganismDetail, OrganismList, + OriginalFileDetail, OriginalFileList, PlatformList, + ProcessorDetail, + ProcessorJobDetail, ProcessorJobList, ProcessorList, QNTargetsAvailable, @@ -35,6 +42,7 @@ SampleDetail, SampleList, Stats, + SurveyJobDetail, SurveyJobList, TranscriptomeIndexDetail, TranscriptomeIndexList, @@ -98,9 +106,18 @@ class AccessUser: name="samples_detail", ), url(r"^organisms/$", OrganismList.as_view(), name="organisms"), + url( + r"^organisms/(?P.+)/$", OrganismDetail.as_view(), name="organisms_detail", + ), url(r"^platforms/$", PlatformList.as_view(), name="platforms"), + # platform detail url(r"^institutions/$", InstitutionList.as_view(), name="institutions"), url(r"^processors/$", ProcessorList.as_view(), name="processors"), + url( + r"^processors/(?P[0-9a-f-]+)/$", + ProcessorDetail.as_view(), + name="processors_details", + ), # Deliverables url(r"^dataset/$", CreateDatasetView.as_view(), name="create_dataset"), 
url(r"^dataset/(?P[0-9a-f-]+)/$", DatasetView.as_view(), name="dataset"), @@ -108,8 +125,23 @@ class AccessUser: url(r"^token/(?P[0-9a-f-]+)/$", APITokenView.as_view(), name="token_id"), # Jobs url(r"^jobs/survey/$", SurveyJobList.as_view(), name="survey_jobs"), + url( + r"^jobs/survey/(?P[0-9a-f-]+)/$", + SurveyJobDetail.as_view(), + name="survey_jobs", + ), url(r"^jobs/downloader/$", DownloaderJobList.as_view(), name="downloader_jobs"), + url( + r"^jobs/downloader/(?P[0-9a-f-]+)/$", + DownloaderJobDetail.as_view(), + name="downloader_jobs", + ), url(r"^jobs/processor/$", ProcessorJobList.as_view(), name="processor_jobs"), + url( + r"^jobs/processor/(?P[0-9a-f-]+)/$", + ProcessorJobDetail.as_view(), + name="processor_jobs", + ), # Dashboard Driver url(r"^stats/$", Stats.as_view(), name="stats"), url( @@ -148,10 +180,25 @@ class AccessUser: ), # Computed Files url(r"^computed_files/$", ComputedFilesList.as_view(), name="computed_files"), + url( + r"^computed_files/(?P[0-9a-f-]+)/$", + ComputedFilesDetail.as_view(), + name="computed_files_detail", + ), url(r"^original_files/$", OriginalFileList.as_view(), name="original_files"), + url( + r"^original_files/(?P[0-9a-f-]+)/$", + OriginalFileDetail.as_view(), + name="original_files_detail", + ), url( r"^computational_results/$", ComputationalResultsList.as_view(), name="results" ), + url( + r"^computational_results/(?P[0-9a-f-]+)/$", + ComputationalResultsDetail.as_view(), + name="results_detail", + ), # Compendia url(r"^compendia/$", CompendiumResultList.as_view(), name="compendium_results"), url( diff --git a/api/data_refinery_api/views.py b/api/data_refinery_api/views.py index 197c8cbfd..8cb3850b1 100644 --- a/api/data_refinery_api/views.py +++ b/api/data_refinery_api/views.py @@ -73,14 +73,18 @@ ComputedFileListSerializer, CreateDatasetSerializer, DatasetSerializer, + DetailedComputationalResultSerializer, + DetailedComputationalResultWithUrlSerializer, + DetailedComputedFileSerializer, DetailedExperimentSerializer, + DetailedOriginalFileSerializer, DetailedSampleSerializer, DownloaderJobSerializer, ExperimentSerializer, InstitutionSerializer, OrganismIndexSerializer, OrganismSerializer, - OriginalFileListSerializer, + OriginalFileSerializer, PlatformSerializer, ProcessorJobSerializer, ProcessorSerializer, @@ -874,6 +878,14 @@ class ProcessorList(generics.ListAPIView): serializer_class = ProcessorSerializer +class ProcessorDetail(generics.RetrieveAPIView): + """ Retrieves a processor by its ID """ + + lookup_field = "id" + queryset = Processor.objects.all() + serializer_class = ProcessorSerializer + + ## # Results ## @@ -889,6 +901,8 @@ class ComputationalResultsList(generics.ListAPIView): """ queryset = ComputationalResult.public_objects.all() + filter_backends = (DjangoFilterBackend,) + filterset_fields = ["processor__id"] def get_serializer_class(self): token_id = self.request.META.get("HTTP_API_KEY", None) @@ -906,6 +920,24 @@ def filter_queryset(self, queryset): return queryset.filter(**filter_dict) +class ComputationalResultsDetail(generics.RetrieveAPIView): + """ + Retrieves a computational result by its ID + """ + + lookup_field = "id" + queryset = ComputationalResult.public_objects.all() + + def get_serializer_class(self): + token_id = self.request.META.get("HTTP_API_KEY", None) + + try: + token = APIToken.objects.get(id=token_id, is_activated=True) + return DetailedComputationalResultWithUrlSerializer + except Exception: # General APIToken.DoesNotExist or django.core.exceptions.ValidationError + return 
@@ -919,6 +951,16 @@ class OrganismList(generics.ListAPIView):
     serializer_class = OrganismSerializer
 
 
+class OrganismDetail(generics.RetrieveAPIView):
+    """
+    Retrieves an organism by its name
+    """
+
+    lookup_field = "name"
+    queryset = Organism.objects.all()
+    serializer_class = OrganismSerializer
+
+
 class PlatformList(generics.ListAPIView):
     """
     Unpaginated list of all the available "platform" information
@@ -969,6 +1011,15 @@ class SurveyJobList(generics.ListAPIView):
     ordering = ("-id",)
 
 
+class SurveyJobDetail(generics.RetrieveAPIView):
+    """ Retrieves a SurveyJob by ID """
+
+    lookup_field = "id"
+    model = SurveyJob
+    queryset = SurveyJob.objects.all()
+    serializer_class = SurveyJobSerializer
+
+
 @method_decorator(
     name="get",
     decorator=swagger_auto_schema(
@@ -1022,6 +1073,15 @@ def get_queryset(self):
         return queryset
 
 
+class DownloaderJobDetail(generics.RetrieveAPIView):
+    """ Retrieves a DownloaderJob by ID """
+
+    lookup_field = "id"
+    model = DownloaderJob
+    queryset = DownloaderJob.objects.all()
+    serializer_class = DownloaderJobSerializer
+
+
 @method_decorator(
     name="get",
     decorator=swagger_auto_schema(
@@ -1075,6 +1135,15 @@ def get_queryset(self):
         return queryset
 
 
+class ProcessorJobDetail(generics.RetrieveAPIView):
+    """ Retrieves a ProcessorJob by ID """
+
+    lookup_field = "id"
+    model = ProcessorJob
+    queryset = ProcessorJob.objects.all()
+    serializer_class = ProcessorJobSerializer
+
+
 ###
 # Statistics
 ###
@@ -1455,6 +1524,8 @@ def _get_intervals(cls, objects, range_param, field="last_modified"):
 ###
 # Transcriptome Indices
 ###
+
+
 @method_decorator(
     name="get",
     decorator=swagger_auto_schema(
@@ -1497,7 +1568,7 @@ class TranscriptomeIndexList(generics.ListAPIView):
         DjangoFilterBackend,
         filters.OrderingFilter,
     )
-    filterset_fields = ["salmon_version", "index_type"]
+    filterset_fields = ["salmon_version", "index_type", "result_id", "organism__name"]
     ordering_fields = ("created_at", "salmon_version")
     ordering = ("-created_at",)
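The two new filter fields let clients resolve a transcriptome index from either direction: by the organism it indexes or by the computational result that produced it. A sketch, with the list path assumed from the route name and the values hypothetical:

import requests

BASE = "https://api.refine.bio/v1"  # assumed host and version prefix

# Mirrors the two new test cases in tests.py:
requests.get(f"{BASE}/transcriptome_indices/", params={"organism__name": "DANIO_RERIO"})
requests.get(f"{BASE}/transcriptome_indices/", params={"result_id": 1})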
@@ -1573,7 +1644,12 @@ class CompendiumResultList(generics.ListAPIView):
         DjangoFilterBackend,
         filters.OrderingFilter,
     )
-    filterset_fields = ["primary_organism__name", "compendium_version", "quant_sf_only"]
+    filterset_fields = [
+        "primary_organism__name",
+        "compendium_version",
+        "quant_sf_only",
+        "result__id",
+    ]
     ordering_fields = ("primary_organism__name", "compendium_version", "id")
     ordering = ("primary_organism__name",)
 
@@ -1704,7 +1780,7 @@ class ComputedFilesList(generics.ListAPIView):
         DjangoFilterBackend,
         filters.OrderingFilter,
     )
-    filterset_fields = (
+    filterset_fields = {
         "id",
         "samples",
         "is_qn_target",
         "is_smashable",
         "is_qc",
         "is_compendia",
         "quant_sf_only",
         "svd_algorithm",
         "compendia_version",
         "created_at",
         "last_modified",
-    )
+        "result__id",
+    }
     ordering_fields = (
         "id",
         "created_at",
@@ -1738,6 +1815,16 @@ def get_serializer_context(self):
         return serializer_context
 
 
+class ComputedFilesDetail(generics.RetrieveAPIView):
+    """
+    Retrieves a computed file by its ID
+    """
+
+    lookup_field = "id"
+    queryset = ComputedFile.objects.all()
+    serializer_class = DetailedComputedFileSerializer
+
+
 class OriginalFileList(generics.ListAPIView):
     """
     original_files_list
@@ -1747,12 +1834,12 @@ class OriginalFileList(generics.ListAPIView):
     """
 
     queryset = OriginalFile.objects.all()
-    serializer_class = OriginalFileListSerializer
+    serializer_class = OriginalFileSerializer
     filter_backends = (
         DjangoFilterBackend,
         filters.OrderingFilter,
     )
-    filterset_fields = OriginalFileListSerializer.Meta.fields
+    filterset_fields = OriginalFileSerializer.Meta.fields
     ordering_fields = (
         "id",
         "created_at",
@@ -1761,6 +1848,16 @@ class OriginalFileList(generics.ListAPIView):
     ordering = ("-id",)
 
 
+class OriginalFileDetail(generics.RetrieveAPIView):
+    """
+    Retrieves an Original File by its ID
+    """
+
+    lookup_field = "id"
+    queryset = OriginalFile.objects.all()
+    serializer_class = DetailedOriginalFileSerializer
+
+
 # error handlers
 def handle404error(request, exception):
     message = "The requested resource was not found on this server."
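One design note: OriginalFileList derives filterset_fields from OriginalFileSerializer.Meta.fields, so the serializer rename keeps the filterable fields and the serialized fields in lockstep automatically. A sketch of the resulting query surface, using DRF's test client (paths and values hypothetical):

from rest_framework.test import APIClient

client = APIClient()
# Any serialized field doubles as a filter parameter on the list route:
client.get("/v1/original_files/", {"is_archive": True, "has_raw": True})
# The new detail route returns the DetailedOriginalFileSerializer payload,
# with samples, processor jobs, and downloader jobs nested inline:
client.get("/v1/original_files/1/")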