Add oauth toggling to Make command
markbotterill committed May 28, 2024
1 parent 912acac commit b7f2ab8
Showing 3 changed files with 233 additions and 1 deletion.
5 changes: 5 additions & 0 deletions Makefile
@@ -36,4 +36,9 @@ send-slack-metrics:
 	docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/send_slack_report.py
 
 send-hubgpt-eval:
+	cd /home/ec2-user/danswer/deployment/docker_compose && \
+	docker compose -p danswer-stack -f docker-compose.analytics.yml up -d --build
+	sleep 150
+	docker exec danswer-stack-background-1 python /app/scripts/hubgpt_eval_automation.py
+	docker compose -p danswer-stack -f docker-compose.prod.yml up -d --build
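The new target swaps the running stack onto docker-compose.analytics.yml (which defaults AUTH_TYPE to google_oauth), waits 150 seconds for the containers to come up, runs the eval script inside the background container, then restores the prod stack. A minimal usage sketch from the EC2 host, assuming the Makefile sits at the repo root and .env.analytics is populated (the manual override line is an assumption about how AUTH_TYPE is honored downstream):

    # run the eval against the oauth-enabled analytics stack
    make send-hubgpt-eval

    # or toggle oauth off by hand for one run
    cd /home/ec2-user/danswer/deployment/docker_compose
    AUTH_TYPE=disabled docker compose -p danswer-stack -f docker-compose.analytics.yml up -d --build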

3 changes: 2 additions & 1 deletion backend/scripts/hubgpt_eval_automation.py
@@ -97,4 +97,5 @@ def upload_to_slack(filename, channel_id):
 data.to_csv(CSV_PATH, index = False)
 print("Complete")
 CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID")
-upload_to_slack(CSV_PATH, CHANNEL_ID)
+# upload_to_slack(CSV_PATH, CHANNEL_ID)
+print("Bing bong")
226 changes: 226 additions & 0 deletions deployment/docker_compose/docker-compose.analytics.yml
@@ -0,0 +1,226 @@
version: '3'
services:
  api_server:
    image: danswer/danswer-backend:latest
    build:
      context: ../../backend
      dockerfile: Dockerfile
    command: >
      /bin/sh -c "alembic upgrade head && echo \"Starting Danswer Api Server\" && uvicorn danswer.main:app --host 0.0.0.0 --port 8080"
    depends_on:
      - relational_db
      - index
    restart: always
    env_file:
      - .env.analytics
    environment:
      - AUTH_TYPE=${AUTH_TYPE:-google_oauth}
      - POSTGRES_HOST=relational_db
      - VESPA_HOST=index
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
    volumes:
      - local_dynamic_storage:/home/storage
      - file_connector_tmp_storage:/home/file_connector_storage
      - model_cache_nltk:/root/nltk_data/
      - model_cache_huggingface:/root/.cache/huggingface/
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"


  background:
    image: danswer/danswer-backend:latest
    build:
      context: ../../backend
      dockerfile: Dockerfile
    command: /usr/bin/supervisord
    depends_on:
      - relational_db
      - index
    restart: always
    env_file:
      - .env.analytics
    environment:
      - AUTH_TYPE=${AUTH_TYPE:-google_oauth}
      - POSTGRES_HOST=relational_db
      - VESPA_HOST=index
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
      - INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
    volumes:
      - local_dynamic_storage:/home/storage
      - file_connector_tmp_storage:/home/file_connector_storage
      - model_cache_nltk:/root/nltk_data/
      - model_cache_huggingface:/root/.cache/huggingface/
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"


  web_server:
    image: danswer/danswer-web-server:latest
    build:
      context: ../../web
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_DISABLE_STREAMING=${NEXT_PUBLIC_DISABLE_STREAMING:-false}
    depends_on:
      - api_server
    restart: always
    env_file:
      - .env.analytics
    environment:
      - INTERNAL_URL=http://api_server:8080
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  relational_db:
    image: postgres:15.2-alpine
    ports:
      - "5432:5432"
    restart: always
    # POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file
    env_file:
      - .env.analytics
    volumes:
      - db_volume:/var/lib/postgresql/data
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"


  inference_model_server:
    image: danswer/danswer-model-server:latest
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
    volumes:
      - model_cache_torch:/root/.cache/torch/
      - model_cache_huggingface:/root/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"


  indexing_model_server:
    image: danswer/danswer-model-server:latest
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      - INDEXING_ONLY=True
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
    volumes:
      - model_cache_torch:/root/.cache/torch/
      - model_cache_huggingface:/root/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"


  # This container name cannot have an underscore in it due to Vespa expectations of the URL
  index:
    image: vespaengine/vespa:8.277.17
    restart: always
    ports:
      - "19071:19071"
      - "8081:8081"
    volumes:
      - vespa_volume:/opt/vespa/var
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  caddy:
    image: caddy:2-alpine
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ../data/caddy/Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    env_file:
      - .env.analytics
    depends_on:
      - api_server
      - web_server
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  # Run with --profile model-server to bring up the danswer-model-server container
  model_server:
    image: danswer/danswer-model-server:latest
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    profiles:
      - "model-server"
    command: uvicorn model_server.main:app --host 0.0.0.0 --port 9000
    restart: always
    environment:
      - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
      - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
    volumes:
      - model_cache_torch:/root/.cache/torch/
      - model_cache_huggingface:/root/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

volumes:
  local_dynamic_storage:
  file_connector_tmp_storage:  # used to store files uploaded by the user temporarily while we are indexing them
  db_volume:
  vespa_volume:
  model_cache_torch:
  model_cache_nltk:
  model_cache_huggingface:
  caddy_data:
  caddy_config:
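Every service loads .env.analytics via env_file, so toggling oauth for an eval run reduces to editing one file. An illustrative sketch of .env.analytics, with all values as placeholders and only variables referenced by this compose file or the eval script shown (the real file may contain more):

    AUTH_TYPE=google_oauth        # set to disabled to turn oauth off
    POSTGRES_USER=postgres
    POSTGRES_PASSWORD=<password>
    DISABLE_MODEL_SERVER=false
    LOG_LEVEL=info
    METRICS_CHANNEL_ID=<slack-channel-id>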
