diff --git a/.env.example b/.env.example index b1a81d5751..67f6e30b0e 100644 --- a/.env.example +++ b/.env.example @@ -27,7 +27,8 @@ S3_ENDPOINT=${S3_ENDPOINT:-"http://s3:9000"} S3_ACCESS_KEY=${S3_ACCESS_KEY:-"fmtm"} S3_SECRET_KEY=${S3_SECRET_KEY:-"somelongpassword"} S3_BUCKET_NAME=${S3_BUCKET_NAME:-"fmtm-data"} -S3_BACKUP_BUCKET_NAME=${S3_BUCKET_NAME:-"fmtm-db-backups"} +S3_BACKUP_BUCKET_NAME=${S3_BACKUP_BUCKET_NAME:-"fmtm-db-backups"} +S3_ODK_BUCKET_NAME=${S3_ODK_BUCKET_NAME:-"fmtm-odk-media"} S3_DOWNLOAD_ROOT=${S3_DOWNLOAD_ROOT} S3_SKIP_BUCKET_INIT=${S3_SKIP_BUCKET_INIT} diff --git a/docker-compose.development.yml b/docker-compose.development.yml index 467f95f8d0..b213a57b43 100644 --- a/docker-compose.development.yml +++ b/docker-compose.development.yml @@ -54,7 +54,7 @@ services: central-ui: condition: service_completed_successfully s3: - condition: service_started + condition: service_healthy certbot: condition: service_completed_successfully ui: @@ -134,6 +134,8 @@ services: depends_on: central-db: condition: service_healthy + s3: + condition: service_healthy environment: - DOMAIN=${FMTM_ODK_DOMAIN:-odk.${FMTM_DOMAIN}} - SSL_TYPE=upstream @@ -156,6 +158,10 @@ services: - SENTRY_ORG_SUBDOMAIN=${SENTRY_ORG_SUBDOMAIN:-o130137} - SENTRY_KEY=${SENTRY_KEY:-3cf75f54983e473da6bd07daddf0d2ee} - SENTRY_PROJECT=${SENTRY_PROJECT:-1298632} + - S3_SERVER=${S3_ENDPOINT} + - S3_BUCKET_NAME=${S3_ODK_BUCKET_NAME:-"fmtm-odk-media"} + - S3_ACCESS_KEY=${S3_ACCESS_KEY} + - S3_SECRET_KEY=${S3_SECRET_KEY} networks: - fmtm-net restart: "unless-stopped" diff --git a/docker-compose.main.yml b/docker-compose.main.yml index da8683b784..016e60eea8 100644 --- a/docker-compose.main.yml +++ b/docker-compose.main.yml @@ -48,7 +48,7 @@ services: certbot: condition: service_completed_successfully s3: - condition: service_started + condition: service_healthy ui: condition: service_completed_successfully electric: diff --git a/docker-compose.yml b/docker-compose.yml index 2085cf21ca..d8e7876f81 
100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -52,7 +52,7 @@ services: condition: service_completed_successfully required: false s3: - condition: service_started + condition: service_healthy electric: condition: service_started volumes: @@ -131,7 +131,7 @@ services: - ./src/frontend:/app - /app/node_modules/ environment: - - VITE_API_URL=${API_URL:-http://api.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050}} + - VITE_API_URL=http://api.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050} ports: - "7051:7051" networks: @@ -154,7 +154,7 @@ services: - /app/.svelte-kit/ # - ../ui:/app/node_modules/@hotosm/ui:ro environment: - - VITE_API_URL=${API_URL:-http://api.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050}} + - VITE_API_URL=http://api.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050} - VITE_SYNC_URL=http://sync.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050} networks: - fmtm-net @@ -170,6 +170,8 @@ services: depends_on: central-db: condition: service_healthy + s3: + condition: service_healthy environment: - DOMAIN=${CENTRAL_DOMAIN_OVERRIDE:-odk.${FMTM_DOMAIN}:${FMTM_DEV_PORT:-7050}} - SSL_TYPE=upstream @@ -192,6 +194,11 @@ services: - SENTRY_ORG_SUBDOMAIN=${SENTRY_ORG_SUBDOMAIN:-o130137} - SENTRY_KEY=${SENTRY_KEY:-3cf75f54983e473da6bd07daddf0d2ee} - SENTRY_PROJECT=${SENTRY_PROJECT:-1298632} + # Note S3_ENDPOINT is hardcoded here for when we use tunnel config + - S3_SERVER="http://s3:9000" + - S3_BUCKET_NAME=${S3_ODK_BUCKET_NAME:-"fmtm-odk-media"} + - S3_ACCESS_KEY=${S3_ACCESS_KEY} + - S3_SECRET_KEY=${S3_SECRET_KEY} # ports: # - "8383:8383" networks: @@ -320,7 +327,8 @@ services: - .env # Hardcode some vars for dev, as not necessarily present in the .env file environment: - - S3_ENDPOINT=${S3_ENDPOINT:-"http://s3:9000"} + # Note S3_ENDPOINT is hardcoded here for when we use tunnel config + - S3_ENDPOINT="http://s3:9000" - S3_BACKUP_BUCKET_NAME=${S3_BACKUP_BUCKET_NAME:-"fmtm-db-backups"} networks: - fmtm-net diff --git a/odkcentral/api/Dockerfile b/odkcentral/api/Dockerfile index 813a1316ac..d26422a867 100644 
--- a/odkcentral/api/Dockerfile +++ b/odkcentral/api/Dockerfile @@ -19,11 +19,15 @@ # to init an admin user at startup ARG ODK_CENTRAL_TAG +ARG MINIO_TAG=${MINIO_TAG:-RELEASE.2024-10-13T13-34-11Z} +FROM docker.io/minio/minio:${MINIO_TAG} AS minio FROM ghcr.io/getodk/central-service:${ODK_CENTRAL_TAG} -COPY init-user-and-start.sh / -RUN chmod +x /init-user-and-start.sh -ENTRYPOINT ["/init-user-and-start.sh"] +# Copy minio mc client to create S3 buckets +COPY --from=minio /usr/bin/mc /usr/local/bin/ +COPY container-entrypoint.sh / +RUN chmod +x /container-entrypoint.sh +ENTRYPOINT ["/container-entrypoint.sh"] HEALTHCHECK --start-period=10s --interval=5s --retries=10 \ CMD nc -z localhost 8383 || exit 1 diff --git a/odkcentral/api/container-entrypoint.sh b/odkcentral/api/container-entrypoint.sh new file mode 100644 index 0000000000..5d44ff2250 --- /dev/null +++ b/odkcentral/api/container-entrypoint.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +set -eo pipefail + +check_all_s3_vars_present() { + if [ -z "${S3_SERVER}" ]; then + echo "Environment variable S3_SERVER is not set." + exit 1 + fi + if [ -z "${S3_ACCESS_KEY}" ]; then + echo "Environment variable S3_ACCESS_KEY is not set." + exit 1 + fi + if [ -z "${S3_SECRET_KEY}" ]; then + echo "Environment variable S3_SECRET_KEY is not set." + exit 1 + fi + if [ -z "${S3_BUCKET_NAME}" ]; then + echo "Environment variable S3_BUCKET_NAME is not set." 
+ exit 1 + fi + + # Strip any extra unrequired "quotes" + export S3_SERVER="${S3_SERVER//\"/}" + export S3_ACCESS_KEY="${S3_ACCESS_KEY//\"/}" + export S3_SECRET_KEY="${S3_SECRET_KEY//\"/}" + export S3_BUCKET_NAME="${S3_BUCKET_NAME//\"/}" +} + +# Check env vars + strip extra quotes on vars +check_all_s3_vars_present + +# Wait for database to be available +wait-for-it "${CENTRAL_DB_HOST:-central-db}:5432" + +### Init, generate config, migrate db ### +echo "Stripping pm2 exec command from start-odk.sh script (last 2 lines)" +head -n -2 ./start-odk.sh > ./init-odk-db.sh +chmod +x ./init-odk-db.sh + +echo "Running ODKCentral start script to init environment and migrate DB" +echo "The server will not start on this run" +./init-odk-db.sh + +### Create admin user ### +echo "Creating test user ${SYSADMIN_EMAIL} with password ***${SYSADMIN_PASSWD: -3}" +echo "${SYSADMIN_PASSWD}" | odk-cmd --email "${SYSADMIN_EMAIL}" user-create || true + +echo "Elevating user to admin" +odk-cmd --email "${SYSADMIN_EMAIL}" user-promote || true + +### Create S3 bucket for submission photo storage ### +echo "Creating S3 bucket ${S3_BUCKET_NAME} to store submission media" +mc alias set s3 "$S3_SERVER" "$S3_ACCESS_KEY" "$S3_SECRET_KEY" +mc mb "s3/${S3_BUCKET_NAME}" --ignore-existing +# Prevent anonymous access (pre-signed URL download only) +mc anonymous set none "s3/${S3_BUCKET_NAME}" + +### Run server (hardcode WORKER_COUNT=1 for dev) ### +export WORKER_COUNT=1 +echo "Starting server." 
+exec npx pm2-runtime ./pm2.config.js diff --git a/odkcentral/api/init-user-and-start.sh b/odkcentral/api/init-user-and-start.sh deleted file mode 100644 index bb421ed25d..0000000000 --- a/odkcentral/api/init-user-and-start.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -set -eo pipefail - -# Wait for database to be available -wait-for-it "${CENTRAL_DB_HOST:-central-db}:5432" - -### Init, generate config, migrate db ### -echo "Stripping pm2 exec command from start-odk.sh script (last 2 lines)" -head -n -2 ./start-odk.sh > ./init-odk-db.sh -chmod +x ./init-odk-db.sh - -echo "Running ODKCentral start script to init environment and migrate DB" -echo "The server will not start on this run" -./init-odk-db.sh - -### Create admin user ### -echo "Creating test user ${SYSADMIN_EMAIL} with password ***${SYSADMIN_PASSWD: -3}" -echo "${SYSADMIN_PASSWD}" | odk-cmd --email "${SYSADMIN_EMAIL}" user-create || true - -echo "Elevating user to admin" -odk-cmd --email "${SYSADMIN_EMAIL}" user-promote || true - -### Run server (hardcode WORKER_COUNT=1 for dev) ### -export WORKER_COUNT=1 -echo "Starting server." -exec npx pm2-runtime ./pm2.config.js diff --git a/src/backend/app-entrypoint.sh b/src/backend/app-entrypoint.sh index eeab2ecb52..b9cccd5b58 100644 --- a/src/backend/app-entrypoint.sh +++ b/src/backend/app-entrypoint.sh @@ -19,23 +19,6 @@ wait_for_db() { exit 1 # Exit with an error code } -wait_for_s3() { - max_retries=30 - retry_interval=5 - - for ((i = 0; i < max_retries; i++)); do - if curl --silent -I "${S3_ENDPOINT:-http://s3:9000}" >/dev/null; then - echo "S3 is available." - return 0 # S3 is available, exit successfully - fi - echo "S3 is not yet available. Retrying in ${retry_interval} seconds..." - sleep ${retry_interval} - done - - echo "Timed out waiting for S3 to become available." 
- exit 1 # Exit with an error code -} - create_s3_buckets() { echo "Running init_s3_buckets.py script main function" python /opt/app/s3.py @@ -43,7 +26,6 @@ create_s3_buckets() { # Start wait in background with tmp log files wait_for_db & -wait_for_s3 & # Wait until checks complete wait diff --git a/src/backend/migrate-entrypoint.sh b/src/backend/migrate-entrypoint.sh index e2f9a27a53..842ce8bf6d 100644 --- a/src/backend/migrate-entrypoint.sh +++ b/src/backend/migrate-entrypoint.sh @@ -171,9 +171,7 @@ backup_db() { BUCKET_NAME=${S3_BACKUP_BUCKET_NAME} echo "Uploading to S3 bucket ${BUCKET_NAME}" - # Sed required to strip additional "quotes" mc alias set s3 "${S3_ENDPOINT}" "${S3_ACCESS_KEY}" "${S3_SECRET_KEY}" - mc mb "s3/${BUCKET_NAME}" --ignore-existing mc anonymous set download "s3/${BUCKET_NAME}" mc cp "${db_backup_file}" "s3/${BUCKET_NAME}/pre-migrate/"