Skip to content

Commit

Permalink
Ensure DB is ready before backend writes to it
Browse files Browse the repository at this point in the history
  • Loading branch information
hooveran committed Jan 16, 2024
1 parent 2995fd5 commit 847cc3a
Show file tree
Hide file tree
Showing 4 changed files with 35 additions and 23 deletions.
2 changes: 1 addition & 1 deletion backend/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -17,4 +17,4 @@ EXPOSE 8001

ENV PYTHONPATH=/opt

ENTRYPOINT ["bash", "-c", "echo $RUN_TESTS && chmod +x ./backend/initialize_db/test_start.sh && ./backend/initialize_db/test_start.sh"]
ENTRYPOINT ["bash", "-c", "echo $RUN_TESTS && chmod +x ./backend/initialize_db/quick_start.sh && ./backend/initialize_db/quick_start.sh"]
25 changes: 15 additions & 10 deletions backend/initialize_db/ChEMBL_CYP3A4_hERG_start.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@
# For SQL security reasons, to enable creation of alternative schema names other than 'public', you must uncomment the "CREATE SCHEMA" line in matcher/backend/mmpdb/mmpdblib/schema.py
postgres_schema="public"

structures=./initialize_db/ChEMBL_CYP3A4_hERG_structures.smi
fragments=./initialize_db/ChEMBL_CYP3A4_hERG_structures.fragments
properties=./initialize_db/ChEMBL_CYP3A4_hERG_props.txt
metadata=./initialize_db/ChEMBL_CYP3A4_hERG_metadata.csv
example_queries=./initialize_db/example_queries.json
INITIALIZE_DIR=./backend/initialize_db
MMPDB_DIR=./backend/mmpdb
structures=$INITIALIZE_DIR/ChEMBL_CYP3A4_hERG_structures.smi
fragments=$INITIALIZE_DIR/ChEMBL_CYP3A4_hERG_structures.fragments
properties=$INITIALIZE_DIR/ChEMBL_CYP3A4_hERG_props.txt
metadata=$INITIALIZE_DIR/ChEMBL_CYP3A4_hERG_metadata.csv
example_queries=$INITIALIZE_DIR/example_queries.json

COMPLETION_FILE=./mmpdb_build_complete
FAILURE_FILE=./mmpdb_build_failed
Expand All @@ -22,25 +24,28 @@ else
# Populate mmpdb database from scratch
{
# Standard mmpdb command for generating fragments, except we defined new 'matcher_alpha' fragmentation criteria
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py fragment "${structures}" -o "${fragments}" --cut-smarts 'matcher_alpha' && \
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py fragment "${structures}" -o "${fragments}" --cut-smarts 'matcher_alpha' && \
# Standard mmpdb command for identifying MMPs and loading data to DB, except we introduced postgres support, and extended the data model
# The db connection string takes the form of 'schema$postgres', with the rest of the connection parameters being set as environment variables in the docker-compose.yml file
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py index "${fragments}" -o "$postgres_schema\$postgres" && \

# Hereafter we are writing to the DB, make sure the DB is ready
./scripts/wait-for-it.sh database:5432 -t 0
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py index "${fragments}" -o "$postgres_schema\$postgres" && \
# Standard mmpdb command for loading property data to DB, except we introduced postgres support and ability to add property metadata
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py loadprops -p "${properties}" --metadata "${metadata}" "$postgres_schema\$postgres" && \
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py loadprops -p "${properties}" --metadata "${metadata}" "$postgres_schema\$postgres" && \

# Here we load JSON representations of query input states, to a table in the database
# Each input query will have a unique integer snapshot_id assigned, starting from 1 and going up to n, the total number of example query inputs that we write to DB
# The query can be run by calling a frontend API endpoint using this snapshot_id as an argument, for example to run the first query, http://localhost:8000/snap/1
# We embed these example queries in the "Run Example Query" page at http://localhost:8000/examples
# To generate the required JSON for example_queries.json, print out the JSON that is passed to backend_api.snap_write upon clicking the "Copy Shareable Link" button in Matcher frontend,
# then either delete the 'query_id' key/value, or change the 'query_id' value to 'NULL'
conda run --no-capture-output -n matcher-api python ./initialize_db/load_example_queries.py "${example_queries}" $postgres_schema && \
conda run --no-capture-output -n matcher-api python ./backend/load_example_queries.py "${example_queries}" $postgres_schema && \
touch "${COMPLETION_FILE}"
} || {
touch "${FAILURE_FILE}"
exit 1
}
fi

conda run --no-capture-output -n matcher-api uvicorn backend_api:app --host 0.0.0.0 --port 8001
conda run --no-capture-output -n matcher-api uvicorn backend.backend_api:app --host 0.0.0.0 --port 8001
25 changes: 15 additions & 10 deletions backend/initialize_db/quick_start.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@
# For SQL security reasons, to enable creation of alternative schema names other than 'public', you must uncomment the "CREATE SCHEMA" line in matcher/backend/mmpdb/mmpdblib/schema.py
postgres_schema="public"

structures=./initialize_db/quick_structures.smi
fragments=./initialize_db/quick_structures.fragments
properties=./initialize_db/quick_props.csv
metadata=./initialize_db/quick_metadata.csv
example_queries=./initialize_db/example_queries.json
INITIALIZE_DIR=./backend/initialize_db
MMPDB_DIR=./backend/mmpdb
structures=$INITIALIZE_DIR/quick_structures.smi
fragments=$INITIALIZE_DIR/quick_structures.fragments
properties=$INITIALIZE_DIR/quick_props.csv
metadata=$INITIALIZE_DIR/quick_metadata.csv
example_queries=$INITIALIZE_DIR/example_queries.json

COMPLETION_FILE=./mmpdb_build_complete
FAILURE_FILE=./mmpdb_build_failed
Expand All @@ -22,25 +24,28 @@ else
# Populate mmpdb database from scratch
{
# Standard mmpdb command for generating fragments, except we defined new 'matcher_alpha' fragmentation criteria
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py fragment "${structures}" -o "${fragments}" --cut-smarts 'matcher_alpha' && \
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py fragment "${structures}" -o "${fragments}" --cut-smarts 'matcher_alpha' && \
# Standard mmpdb command for identifying MMPs and loading data to DB, except we introduced postgres support, and extended the data model
# The db connection string takes the form of 'schema$postgres', with the rest of the connection parameters being set as environment variables in the docker-compose.yml file
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py index "${fragments}" -o "$postgres_schema\$postgres" && \

# Hereafter we are writing to the DB, make sure the DB is ready
./scripts/wait-for-it.sh database:5432 -t 0
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py index "${fragments}" -o "$postgres_schema\$postgres" && \
# Standard mmpdb command for loading property data to DB, except we introduced postgres support and ability to add property metadata
conda run --no-capture-output -n matcher-api python ./mmpdb/mmpdb.py loadprops -p "${properties}" --metadata "${metadata}" "$postgres_schema\$postgres" && \
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py loadprops -p "${properties}" --metadata "${metadata}" "$postgres_schema\$postgres" && \

# Here we load JSON representations of query input states, to a table in the database
# Each input query will have a unique integer snapshot_id assigned, starting from 1 and going up to n, the total number of example query inputs that we write to DB
# The query can be run by calling a frontend API endpoint using this snapshot_id as an argument, for example to run the first query, http://localhost:8000/snap/1
# We embed these example queries in the "Run Example Query" page at http://localhost:8000/examples
# To generate the required JSON for example_queries.json, print out the JSON that is passed to backend_api.snap_write upon clicking the "Copy Shareable Link" button in Matcher frontend,
# then either delete the 'query_id' key/value, or change the 'query_id' value to 'NULL'
conda run --no-capture-output -n matcher-api python ./initialize_db/load_example_queries.py "${example_queries}" $postgres_schema && \
conda run --no-capture-output -n matcher-api python ./backend/load_example_queries.py "${example_queries}" $postgres_schema && \
touch "${COMPLETION_FILE}"
} || {
touch "${FAILURE_FILE}"
exit 1
}
fi

conda run --no-capture-output -n matcher-api uvicorn backend_api:app --host 0.0.0.0 --port 8001
conda run --no-capture-output -n matcher-api uvicorn backend.backend_api:app --host 0.0.0.0 --port 8001
6 changes: 4 additions & 2 deletions backend/initialize_db/test_start.sh
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,9 @@ else
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py fragment "${structures}" -o "${fragments}" --cut-smarts 'matcher_alpha' && \
# Standard mmpdb command for identifying MMPs and loading data to DB, except we introduced postgres support, and extended the data model
# The db connection string takes the form of 'schema$postgres', with the rest of the connection parameters being set as environment variables in the docker-compose.yml file

# Hereafter we are writing to the DB, make sure the DB is ready
./scripts/wait-for-it.sh database:5432 -t 0
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py index "${fragments}" -o "$postgres_schema\$postgres" && \
# Standard mmpdb command for loading property data to DB, except we introduced postgres support and ability to add property metadata
conda run --no-capture-output -n matcher-api python $MMPDB_DIR/mmpdb.py loadprops -p "${properties}" --metadata "${metadata}" "$postgres_schema\$postgres" && \
Expand All @@ -51,8 +54,7 @@ if [[ "$RUN_TESTS" == "true" ]]; then

conda run --no-capture-output -n matcher-api pytest -s -v ./backend/tests/unit_tests || exit 1

# Ensure backend, frontend, database are ready before running tests across containers
./scripts/wait-for-it.sh database:5432 -t 0
# Ensure backend, frontend are ready before running tests across containers
./scripts/wait-for-it.sh backend:8001 -t 0
./scripts/wait-for-it.sh frontend:8000 -t 0
conda run --no-capture-output -n matcher-api pytest -s -v ./backend/tests/integration_tests || exit 1
Expand Down

0 comments on commit 847cc3a

Please sign in to comment.