diff --git a/spatialprofilingtoolbox/apiserver/app/main.py b/spatialprofilingtoolbox/apiserver/app/main.py
index 98cbf340..99f7a1dd 100644
--- a/spatialprofilingtoolbox/apiserver/app/main.py
+++ b/spatialprofilingtoolbox/apiserver/app/main.py
@@ -214,6 +214,19 @@ def _get_anonymous_phenotype_counts_fast(
     return counts
 
 
+@app.get("/phenotype-counts/")
+async def get_phenotype_counts_nonblocking(
+    positive_marker: ValidChannelListPositives,
+    negative_marker: ValidChannelListNegatives,
+    study: ValidStudy,
+) -> PhenotypeCounts:
+    """Computes the number of cells satisfying the given positive and negative criteria, in the
+    context of a given study. Non-blocking, has a "pending" flag in the response.
+    """
+    counts = get_phenotype_counts(positive_marker, negative_marker, study, 0, blocking=False)
+    return counts
+
+
 @app.get("/request-spatial-metrics-computation/")
 async def request_spatial_metrics_computation(
     study: ValidStudy,
@@ -333,6 +346,7 @@ def get_phenotype_counts_cached(
     study: str,
     number_cells: int,
     selected: tuple[int, ...],
+    blocking: bool = True,
 ) -> PhenotypeCounts:
     counts = OnDemandRequester.get_counts_by_specimen(
         positives,
@@ -340,6 +354,7 @@
         negatives,
         study,
         number_cells,
         set(selected) if selected is not None else None,
+        blocking=blocking,
     )
     return counts
@@ -350,6 +365,7 @@
     study: ValidStudy,
     number_cells: int,
     cells_selected: set[int] | None = None,
+    blocking: bool = True,
 ) -> PhenotypeCounts:
     """For each specimen, return the fraction of selected/all cells expressing the phenotype."""
     positive_markers = [m for m in positive_marker if m != '']
@@ -360,6 +376,7 @@
         study,
         number_cells,
         tuple(sorted(list(cells_selected))) if cells_selected is not None else (),
+        blocking=blocking,
     )
     return counts
 
diff --git a/spatialprofilingtoolbox/db/exchange_data_formats/metrics.py b/spatialprofilingtoolbox/db/exchange_data_formats/metrics.py
index 13eb2b2a..62c039ed 100644
--- a/spatialprofilingtoolbox/db/exchange_data_formats/metrics.py
+++ b/spatialprofilingtoolbox/db/exchange_data_formats/metrics.py
@@ -60,6 +60,7 @@ class PhenotypeCounts(BaseModel):
     counts: tuple[PhenotypeCount, ...]
     phenotype: CompositePhenotype
     number_cells_in_study: int
+    is_pending: bool
 
 
 class UnivariateMetricsComputationResult(BaseModel):
diff --git a/spatialprofilingtoolbox/ondemand/request_scheduling.py b/spatialprofilingtoolbox/ondemand/request_scheduling.py
index 67adc022..8b09aa0b 100644
--- a/spatialprofilingtoolbox/ondemand/request_scheduling.py
+++ b/spatialprofilingtoolbox/ondemand/request_scheduling.py
@@ -96,13 +96,14 @@ def get_counts_by_specimen(
         study_name: str,
         number_cells: int,
         cells_selected: set[int],
+        blocking: bool = True,
     ) -> PhenotypeCounts:
         phenotype = PhenotypeCriteria(
             positive_markers=tuple(filter(_nonempty, positives)),
             negative_markers=tuple(filter(_nonempty, negatives)),
         )
         selected = tuple(sorted(list(cells_selected))) if cells_selected is not None else ()
-        feature1, counts, counts_all = OnDemandRequester._counts(study_name, phenotype, selected)
+        feature1, counts, counts_all, pending = OnDemandRequester._counts(study_name, phenotype, selected, blocking)
         combined_keys = sorted(list(set(counts.values.keys()).intersection(counts_all.values.keys())))
         missing_numerator = set(counts.values.keys()).difference(combined_keys)
         if len(missing_numerator) > 0:
@@ -127,12 +128,17 @@
                 criteria=phenotype,
             ),
             number_cells_in_study=number_cells,
+            is_pending=pending,
         )
 
     @classmethod
     def _counts(
-        cls, study_name: str, phenotype: PhenotypeCriteria, selected: tuple[int, ...],
-    ) -> tuple[str, Metrics1D, Metrics1D]:
+        cls,
+        study_name: str,
+        phenotype: PhenotypeCriteria,
+        selected: tuple[int, ...],
+        blocking: bool,
+    ) -> tuple[str, Metrics1D, Metrics1D, bool]:
         get = CountsProvider.get_metrics_or_schedule
 
         def get_results1() -> tuple[Metrics1D, str]:
@@ -153,15 +159,17 @@ def get_results2() -> tuple[Metrics1D, str]:
 
         with DBConnection() as connection:
             connection._set_autocommit(True)
-            cls._wait_for_wrappedup(connection, get_results1, study_name)
+            if blocking:
+                cls._wait_for_wrappedup(connection, get_results1, study_name)
             counts, feature1 = get_results1()
 
         with DBConnection() as connection:
             connection._set_autocommit(True)
-            cls._wait_for_wrappedup(connection, get_results2, study_name)
+            if blocking:
+                cls._wait_for_wrappedup(connection, get_results2, study_name)
             counts_all, _ = get_results2()
 
-        return (feature1, counts, counts_all)
+        return (feature1, counts, counts_all, counts.is_pending or counts_all.is_pending)
 
     @classmethod
     def _wait_for_wrappedup(
diff --git a/test/apiserver/module_tests/test_counts_query_delegation_edge_cases.py b/test/apiserver/module_tests/test_counts_query_delegation_edge_cases.py
index 8eee4e96..8cc74402 100644
--- a/test/apiserver/module_tests/test_counts_query_delegation_edge_cases.py
+++ b/test/apiserver/module_tests/test_counts_query_delegation_edge_cases.py
@@ -6,29 +6,43 @@
 STUDY_NAME = quote('Melanoma intralesional IL2')
 POSITIVE_MARKERS = ['CD3', 'CD4', 'CD8']
 NEGATIVE_MARKERS: list[str] = ['']
-ENDPOINT = 'anonymous-phenotype-counts-fast'
+ENDPOINTS = ('anonymous-phenotype-counts-fast', 'phenotype-counts')
 HOST = 'spt-apiserver-testing'
 PORT = 8080
 
 def main():
     cases = [
-        (HOST, PORT, ENDPOINT, STUDY_NAME, POSITIVE_MARKERS, NEGATIVE_MARKERS, 7),
-        (HOST, PORT, ENDPOINT, STUDY_NAME, NEGATIVE_MARKERS, POSITIVE_MARKERS, 352),
+        (HOST, PORT, ENDPOINTS[0], STUDY_NAME, POSITIVE_MARKERS, NEGATIVE_MARKERS, 7),
+        (HOST, PORT, ENDPOINTS[0], STUDY_NAME, NEGATIVE_MARKERS, POSITIVE_MARKERS, 352),
+        (HOST, PORT, ENDPOINTS[1], STUDY_NAME, POSITIVE_MARKERS, NEGATIVE_MARKERS, 7),
+        (HOST, PORT, ENDPOINTS[1], STUDY_NAME, NEGATIVE_MARKERS, POSITIVE_MARKERS, 352),
     ]
-
     for host, port, endpoint, study_name, positive_markers, negative_markers, expected in cases:
         clause1 = '&'.join([f'positive_marker={m}' for m in positive_markers])
         clause2 = '&'.join([f'negative_marker={m}' for m in negative_markers])
         url = f'http://{host}:{port}/{endpoint}/?study={study_name}&'\
             f'{clause1}&'\
             f'{clause2}'
-        result = subprocess.run(
-            ['curl', '-s', url],
-            capture_output=True,
-            encoding='UTF-8',
-            check=True,
-        ).stdout
-        response = json.loads(result)
+
+        if endpoint == ENDPOINTS[0]:
+            result = subprocess.run(
+                ['curl', '-s', url],
+                capture_output=True,
+                encoding='UTF-8',
+                check=True,
+            ).stdout
+            response = json.loads(result)
+        else:
+            while True:
+                result = subprocess.run(
+                    ['curl', '-s', url],
+                    capture_output=True,
+                    encoding='UTF-8',
+                    check=True,
+                ).stdout
+                response = json.loads(result)
+                if not response['is_pending']:
+                    break
         phenotype_total = sum(
             phenotype_count['count'] for phenotype_count in response['counts']
         )
diff --git a/test/ondemand/module_tests/expected_counts_structured1.json b/test/ondemand/module_tests/expected_counts_structured1.json
index 5ba0724a..b8db5441 100644
--- a/test/ondemand/module_tests/expected_counts_structured1.json
+++ b/test/ondemand/module_tests/expected_counts_structured1.json
@@ -49,5 +49,6 @@
             ]
         }
     },
-    "number_cells_in_study": 0
+    "number_cells_in_study": 0,
+    "is_pending": false
 }
\ No newline at end of file
diff --git a/test/ondemand/module_tests/expected_counts_structured2.json b/test/ondemand/module_tests/expected_counts_structured2.json
index a3c0fcb3..4f0cc592 100644
--- a/test/ondemand/module_tests/expected_counts_structured2.json
+++ b/test/ondemand/module_tests/expected_counts_structured2.json
@@ -33,5 +33,6 @@
             ]
         }
     },
-    "number_cells_in_study": 0
+    "number_cells_in_study": 0,
+    "is_pending": false
 }
\ No newline at end of file