diff --git a/.github/actions/test-setup-sentry-devservices/action.yml b/.github/actions/test-setup-sentry-devservices/action.yml
new file mode 100644
index 00000000000000..0995881e85bbae
--- /dev/null
+++ b/.github/actions/test-setup-sentry-devservices/action.yml
@@ -0,0 +1,119 @@
+# NOTE: Do not rely on `make` commands here as this action is used across different repos
+# where the Makefile will not be available
+name: 'Sentry Setup'
+description: 'Sets up a Sentry test environment'
+inputs:
+  workdir:
+    description: 'Directory where the sentry source is located'
+    required: false
+    default: '.'
+  python-version:
+    description: 'Python version to set up the venv with'
+    required: false
+    # assumed default; keep in sync with the Python version pinned in this repo
+    default: '3.12'
+
+outputs:
+  yarn-cache-dir:
+    description: 'Path to yarn cache'
+    value: ${{ steps.config.outputs.yarn-cache-dir }}
+  matrix-instance-number:
+    description: 'The matrix instance number (starting at 1)'
+    value: ${{ steps.config.outputs.matrix-instance-number }}
+  matrix-instance-total:
+    description: 'Reexport of MATRIX_INSTANCE_TOTAL.'
+    value: ${{ steps.config.outputs.matrix-instance-total }}
+
+runs:
+  using: 'composite'
+  steps:
+    - name: Setup default environment variables
+      # the default for "bash" is:
+      # bash --noprofile --norc -eo pipefail {0}
+      shell: bash --noprofile --norc -eo pipefail -ux {0}
+      env:
+        MATRIX_INSTANCE: ${{ matrix.instance }}
+        # XXX: We should be using something like len(strategy.matrix.instance) (not possible atm)
+        # If you have other things like python-version: [foo, bar, baz] then the sharding logic
+        # isn't right because job-total will be 3x larger and you'd never run 2/3 of the tests.
+        # MATRIX_INSTANCE_TOTAL: ${{ strategy.job-total }}
+      run: |
+        echo "PIP_DISABLE_PIP_VERSION_CHECK=on" >> $GITHUB_ENV
+        echo "PIP_INDEX_URL=https://pypi.devinfra.sentry.io/simple" >> $GITHUB_ENV
+        echo "SENTRY_SKIP_BACKEND_VALIDATION=1" >> $GITHUB_ENV
+
+        ### node configuration ###
+        echo "NODE_ENV=development" >> $GITHUB_ENV
+
+        ### pytest configuration ###
+        echo "PY_COLORS=1" >> "$GITHUB_ENV"
+        echo "PYTEST_ADDOPTS=--reruns=5 --durations=10 --fail-slow=60s" >> $GITHUB_ENV
+        echo "COVERAGE_CORE=sysmon" >> "$GITHUB_ENV"
+
+        ### pytest-sentry configuration ###
+        if [ "$GITHUB_REPOSITORY" = "getsentry/sentry" ]; then
+          echo "PYTEST_SENTRY_DSN=https://6fd5cfea2d4d46b182ad214ac7810508@sentry.io/2423079" >> $GITHUB_ENV
+          echo "PYTEST_SENTRY_TRACES_SAMPLE_RATE=0" >> $GITHUB_ENV
+
+          # This records failures on master to sentry in order to detect flaky tests, as it's
+          # expected that people have failing tests on their PRs
+          if [ "$GITHUB_REF" = "refs/heads/master" ]; then
+            echo "PYTEST_SENTRY_ALWAYS_REPORT=1" >> $GITHUB_ENV
+          fi
+        fi
+
+        # Configure a different release version, otherwise it defaults to the
+        # commit sha which will conflict with our actual prod releases. This is a
+        # confusing experience because it looks like these are "empty" releases
+        # because no commits are attached and associates the release with our
+        # javascript + sentry projects.
+        echo "SENTRY_RELEASE=ci@$GITHUB_SHA" >> $GITHUB_ENV
+
+        # this handles pytest test sharding
+        if [ "$MATRIX_INSTANCE" ]; then
+          if ! [ "${MATRIX_INSTANCE_TOTAL:-}" ]; then
+            echo "MATRIX_INSTANCE_TOTAL is required."
+            exit 1
+          fi
+          echo "TEST_GROUP=$MATRIX_INSTANCE" >> $GITHUB_ENV
+          echo "TOTAL_TEST_GROUPS=$MATRIX_INSTANCE_TOTAL" >> $GITHUB_ENV
+        fi
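The `TEST_GROUP` / `TOTAL_TEST_GROUPS` pair exported above is the whole sharding contract: each matrix job claims one shard of the collected tests. The actual selection lives in sentry's pytest configuration; the following is only a sketch of what a round-robin split implies, with made-up test names:

```bash
# Illustration only: TEST_GROUP is a zero-based shard index and
# TOTAL_TEST_GROUPS is the shard count. Tests are claimed by index modulo
# the group count.
TEST_GROUP=2
TOTAL_TEST_GROUPS=5

i=0
for test_name in test_a test_b test_c test_d test_e test_f test_g; do
  if [ $((i % TOTAL_TEST_GROUPS)) -eq "$TEST_GROUP" ]; then
    echo "shard $TEST_GROUP would run: $test_name"
  fi
  i=$((i + 1))
done
```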
+    - uses: getsentry/action-setup-venv@a133e6fd5fa6abd3f590a1c106abda344f5df69f # v2.1.0
+      with:
+        python-version: ${{ inputs.python-version }}
+        cache-dependency-path: ${{ inputs.workdir }}/requirements-dev-frozen.txt
+        install-cmd: cd ${{ inputs.workdir }} && python3 -m tools.hack_pip && pip install -r requirements-dev-frozen.txt
+
+    - name: Set up outputs
+      id: config
+      env:
+        MATRIX_INSTANCE: ${{ matrix.instance }}
+      shell: bash --noprofile --norc -eo pipefail -ux {0}
+      run: |
+        echo "yarn-cache-dir=$(yarn cache dir)" >> "$GITHUB_OUTPUT"
+        echo "matrix-instance-number=$(($MATRIX_INSTANCE+1))" >> "$GITHUB_OUTPUT"
+        echo "matrix-instance-total=$((${MATRIX_INSTANCE_TOTAL:-}))" >> "$GITHUB_OUTPUT"
+
+    - name: Install python dependencies
+      shell: bash --noprofile --norc -eo pipefail -ux {0}
+      env:
+        # This is necessary when other repositories (e.g. relay) want to take advantage of this workflow
+        # without needing to fork it. The path needed is the one where setup.py is located
+        WORKDIR: ${{ inputs.workdir }}
+      run: |
+        cd "$WORKDIR"
+        # We need to install editable otherwise things like check migration will fail.
+        python3 -m tools.fast_editable --path .
+
+    - name: Start devservices
+      shell: bash --noprofile --norc -eo pipefail -ux {0}
+      env:
+        WORKDIR: ${{ inputs.workdir }}
+        ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1'
+      run: |
+        sentry init
+
+        # have tests listen on the docker gateway ip so loopback can occur
+        echo "DJANGO_LIVE_TEST_SERVER_ADDRESS=$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')" >> "$GITHUB_ENV"
+
+        docker ps -a
+
+        # This is necessary when other repositories (e.g. relay) want to take advantage of this workflow
+        # without needing to fork it. The path needed is the one where tools are located
+        cd "$WORKDIR"
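The `DJANGO_LIVE_TEST_SERVER_ADDRESS` trick above deserves a note: binding the live test server to the default bridge network's gateway lets processes inside containers loop back to the server running on the host runner. A sketch of what that inspect call yields (172.17.0.1 is the usual Docker default, not guaranteed):

```bash
# Query the default bridge network's gateway, i.e. the host address as seen
# from containers attached to that network.
gateway="$(docker network inspect bridge --format='{{(index .IPAM.Config 0).Gateway}}')"
echo "DJANGO_LIVE_TEST_SERVER_ADDRESS=$gateway"  # usually 172.17.0.1
```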
diff --git a/.github/workflows/test_docker_compose_acceptance.yml b/.github/workflows/test_docker_compose_acceptance.yml
new file mode 100644
index 00000000000000..726e945978bf32
--- /dev/null
+++ b/.github/workflows/test_docker_compose_acceptance.yml
@@ -0,0 +1,138 @@
+# Also note that this name *MUST* match the filename because GHA
+# only provides the workflow name (https://docs.github.com/en/free-pro-team@latest/actions/reference/environment-variables#default-environment-variables)
+# and GH APIs only support querying by workflow *FILENAME* (https://developer.github.com/v3/actions/workflows/#get-a-workflow)
+name: docker-compose-acceptance
+on:
+  schedule:
+    - cron: '30,0 * * * *'
+
+# Cancel in progress workflows on pull_requests.
+# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+# hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359
+env:
+  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3
+  NODE_OPTIONS: '--max-old-space-size=4096'
+
+jobs:
+  docker-compose-acceptance:
+    name: docker-compose-acceptance
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    permissions:
+      contents: read
+      id-token: write
+    strategy:
+      # This prevents one failing job from cancelling the rest of the matrix, reducing resource
+      # usage and the risk that one of many re-runs would turn red again (read: intermittent tests)
+      fail-fast: false
+      matrix:
+        # XXX: When updating this, make sure you also update MATRIX_INSTANCE_TOTAL.
+        instance: [0, 1, 2, 3, 4]
+        pg-version: ['14']
+    env:
+      # XXX: MATRIX_INSTANCE_TOTAL must be hardcoded to the length of strategy.matrix.instance.
+      MATRIX_INSTANCE_TOTAL: 5
+      TEST_GROUP_STRATEGY: roundrobin
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+        name: Checkout sentry
+
+      - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
+        id: setup-node
+        with:
+          node-version-file: '.volta.json'
+
+      - name: Step configurations
+        id: config
+        run: |
+          echo "webpack-path=.webpack_cache" >> "$GITHUB_OUTPUT"
+          echo "WEBPACK_CACHE_PATH=.webpack_cache" >> "$GITHUB_ENV"
+
+      - name: webpack cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0
+        with:
+          path: ${{ steps.config.outputs.webpack-path }}
+          key: ${{ runner.os }}-v2-webpack-cache-${{ hashFiles('webpack.config.ts') }}
+
+      - name: node_modules cache
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0
+        id: nodemodulescache
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node-modules-${{ hashFiles('yarn.lock', 'api-docs/yarn.lock', '.volta.json') }}
+
+      - name: Install Javascript Dependencies
+        if: steps.nodemodulescache.outputs.cache-hit != 'true'
+        run: yarn install --frozen-lockfile
+
+      - name: webpack
+        env:
+          # this is fine to not have for forks, it shouldn't fail
+          SENTRY_WEBPACK_WEBHOOK_SECRET: ${{ secrets.SENTRY_WEBPACK_WEBHOOK_SECRET }}
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+          # should set value either as `true` or `false`
+          CODECOV_ENABLE_BA: true
+          GH_COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
+        run: |
+          yarn build-acceptance
+
+      - name: Build chartcuterie configuration module
+        run: |
+          make build-chartcuterie-config
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: copy chartcuterie config to devservices chartcuterie directory
+        run: |
+          ls config/chartcuterie
+          cp -r config/chartcuterie devservices
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse chartcuterie
+
+      - name: Run acceptance tests (#${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }})
+        run: make run-acceptance
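The `Bring up devservices` step starts the containers detached and the acceptance run begins immediately, relying on the in-container healthchecks plus pytest retries. When reproducing this locally it can help to block until the named containers report healthy; a sketch, assuming the container names from devservices/docker-compose-testing.yml:

```bash
docker network create sentry 2>/dev/null || true
docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse chartcuterie

# Poll the Docker healthcheck status of each container that defines one
# (snuba is excluded because its service has no healthcheck).
for c in sentry_redis sentry_postgres sentry_clickhouse; do
  until [ "$(docker inspect -f '{{.State.Health.Status}}' "$c")" = "healthy" ]; do
    echo "waiting for $c..."
    sleep 2
  done
done
```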
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.acceptance.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+          matrix_instance_number: ${{ steps.setup.outputs.matrix-instance-number }}
+
+      # This job runs when FE or BE changes happen; however, we only upload coverage data for
+      # BE changes, since FE coverage conflicts with codecov's carry-forward functionality.
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        uses: ./.github/actions/artifacts
+        if: ${{ always() }}
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+  docker-compose-acceptance-required-checks:
+    # this is a required check so we need this job to always run and report a status.
+    if: always()
+    name: Acceptance
+    needs: [docker-compose-acceptance]
+    runs-on: ubuntu-22.04
+    timeout-minutes: 3
+    steps:
+      - name: Check for failures
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: |
+          echo "One of the dependent jobs has failed. You may need to re-run it." && exit 1
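The required-checks job above is a fan-in: branch protection points at this single job, and it fails if any dependency failed or was cancelled. A shell approximation of the `contains(needs.*.result, ...)` expression it relies on, with hypothetical result values:

```bash
# Each entry mimics one needs.<job>.result value: success, failure,
# cancelled, or skipped.
results="success success failure"

case " $results " in
  *" failure "* | *" cancelled "*)
    echo "One of the dependent jobs has failed. You may need to re-run it."
    exit 1
    ;;
esac
echo "all dependent jobs passed"
```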
diff --git a/.github/workflows/test_docker_compose_backend.yml b/.github/workflows/test_docker_compose_backend.yml
new file mode 100644
index 00000000000000..7faf08e29ee23c
--- /dev/null
+++ b/.github/workflows/test_docker_compose_backend.yml
@@ -0,0 +1,259 @@
+name: test-docker-compose-backend
+
+on:
+  schedule:
+    - cron: '30,0 * * * *'
+
+# Cancel in progress workflows on pull_requests.
+# https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+# hack for https://github.com/actions/cache/issues/810#issuecomment-1222550359
+env:
+  SEGMENT_DOWNLOAD_TIMEOUT_MINS: 3
+
+jobs:
+  docker-compose-api-docs:
+    name: api docs test
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4
+        id: setup-node
+        with:
+          node-version-file: '.volta.json'
+
+      - name: Setup sentry python env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse
+
+      - name: Run API docs tests
+        # install ts-node for ts build scripts to execute properly without potentially installing
+        # conflicting deps when running scripts locally
+        # see: https://github.com/getsentry/sentry/pull/32328/files
+        run: |
+          yarn add ts-node && make test-api-docs
+
+  docker-compose-backend-test:
+    name: backend test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 60
+    permissions:
+      contents: read
+      id-token: write
+    strategy:
+      # This prevents one failing job from cancelling the rest of the matrix, reducing resource
+      # usage and the risk that one of many re-runs would turn red again (read: intermittent tests)
+      fail-fast: false
+      matrix:
+        # XXX: When updating this, make sure you also update MATRIX_INSTANCE_TOTAL.
+        instance: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        pg-version: ['14']
+
+    env:
+      # XXX: `MATRIX_INSTANCE_TOTAL` must be hardcoded to the length of `strategy.matrix.instance`.
+      # If this increases, make sure to also increase `flags.backend.after_n_builds` in `codecov.yml`.
+      MATRIX_INSTANCE_TOTAL: 11
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          echo "BIGTABLE_EMULATOR_HOST=127.0.0.1:8086" >> $GITHUB_ENV
+          docker compose -f devservices/docker-compose-testing.yml up -d
+
+      - name: Run backend test (${{ steps.setup.outputs.matrix-instance-number }} of ${{ steps.setup.outputs.matrix-instance-total }})
+        run: |
+          make test-python-ci
+
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+          matrix_instance_number: ${{ steps.setup.outputs.matrix-instance-number }}
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
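Because shard assignment is driven entirely by environment variables, a single matrix instance can be reproduced locally against the same compose stack. A sketch, assuming the backend shards use the same round-robin strategy that the acceptance workflow sets explicitly (the backend workflow leaves TEST_GROUP_STRATEGY unset):

```bash
# Run shard 0 of 11 the way docker-compose-backend-test instance 0 would.
export TEST_GROUP=0
export TOTAL_TEST_GROUPS=11
export TEST_GROUP_STRATEGY=roundrobin  # assumption: same strategy as acceptance
make test-python-ci
```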
+
+  docker-compose-backend-migration-tests:
+    name: backend migration tests
+    runs-on: ubuntu-22.04
+    timeout-minutes: 30
+    strategy:
+      matrix:
+        pg-version: ['14']
+
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres snuba clickhouse
+
+      - name: run tests
+        run: |
+          PYTEST_ADDOPTS="$PYTEST_ADDOPTS -m migrations --migrations --reruns 0" make test-python-ci
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+  docker-compose-cli:
+    name: cli test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 10
+    strategy:
+      matrix:
+        pg-version: ['14']
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Run test
+        run: |
+          make test-cli
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+  docker-compose-migration:
+    name: check migration
+    runs-on: ubuntu-22.04
+    strategy:
+      matrix:
+        pg-version: ['14']
+
+    steps:
+      - name: Checkout sentry
+        uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Migration & lockfile checks
+        env:
+          SENTRY_LOG_LEVEL: ERROR
+          PGPASSWORD: postgres
+        run: |
+          ./.github/workflows/scripts/migration-check.sh
+
+  docker-compose-monolith-dbs:
+    name: monolith-dbs test
+    runs-on: ubuntu-22.04
+    timeout-minutes: 20
+    permissions:
+      contents: read
+      id-token: write
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+
+      - name: Setup sentry env
+        uses: ./.github/actions/test-setup-sentry-devservices
+        id: setup
+
+      - name: Bring up devservices
+        run: |
+          docker network create sentry
+          docker compose -f devservices/docker-compose-testing.yml up -d redis postgres
+
+      - name: Run test
+        run: |
+          make test-monolith-dbs
+
+      - name: Collect test data
+        uses: ./.github/actions/collect-test-data
+        if: ${{ !cancelled() }}
+        with:
+          artifact_path: .artifacts/pytest.monolith-dbs.json
+          gcs_bucket: ${{ secrets.COLLECT_TEST_DATA_GCS_BUCKET }}
+          gcp_project_id: ${{ secrets.COLLECT_TEST_DATA_GCP_PROJECT_ID }}
+          workload_identity_provider: ${{ secrets.SENTRY_GCP_DEV_WORKLOAD_IDENTITY_POOL }}
+          service_account_email: ${{ secrets.COLLECT_TEST_DATA_SERVICE_ACCOUNT_EMAIL }}
+
+      # Upload coverage data even if running the tests step fails since
+      # it reduces large coverage fluctuations
+      - name: Handle artifacts
+        if: ${{ always() }}
+        uses: ./.github/actions/artifacts
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          commit_sha: ${{ github.event.pull_request.head.sha }}
+
+  # This check runs once all dependent jobs have passed
+  # It symbolizes that all required Backend checks have successfully passed (or skipped)
+  # This step is the only required backend check
+  docker-compose-backend-required-check:
+    needs:
+      [
+        docker-compose-api-docs,
+        docker-compose-backend-test,
+        docker-compose-backend-migration-tests,
+        docker-compose-cli,
+        docker-compose-migration,
+        docker-compose-monolith-dbs,
+      ]
+    name: Backend
+    # This is necessary since a failed/skipped dependent job would cause this job to be skipped
+    if: always()
+    runs-on: ubuntu-22.04
+    steps:
+      # If any jobs we depend on fail, we will fail since this is a required check
+      # NOTE: A timeout is considered a failure
+      - name: Check for failures
+        if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled')
+        run: |
+          echo "One of the dependent jobs has failed. You may need to re-run it."
&& exit 1 diff --git a/devservices/clickhouse/config.xml b/devservices/clickhouse/config.xml new file mode 100644 index 00000000000000..327d60661b29da --- /dev/null +++ b/devservices/clickhouse/config.xml @@ -0,0 +1,6 @@ + + 0.3 + + 1 + + diff --git a/devservices/docker-compose-testing.yml b/devservices/docker-compose-testing.yml new file mode 100644 index 00000000000000..aa0ddafe656bbc --- /dev/null +++ b/devservices/docker-compose-testing.yml @@ -0,0 +1,282 @@ +x-restart-policy: &restart_policy + restart: unless-stopped +x-depends_on-healthy: &depends_on-healthy + condition: service_healthy +x-depends_on-default: &depends_on-default + condition: service_started +x-healthcheck-defaults: &healthcheck_defaults + interval: 30s + timeout: 1m30s + retries: 10 + start_period: 10s +services: + redis: + <<: *restart_policy + container_name: sentry_redis + image: ghcr.io/getsentry/image-mirror-library-redis:5.0-alpine + healthcheck: + <<: *healthcheck_defaults + test: redis-cli ping + command: + [ + 'redis-server', + '--appendonly', + 'yes', + '--save', + '60', + '20', + '--auto-aof-rewrite-percentage', + '100', + '--auto-aof-rewrite-min-size', + '64mb', + ] + volumes: + - 'sentry-redis:/data' + ports: + - '6379:6379' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + postgres: + <<: *restart_policy + container_name: sentry_postgres + # Using the same postgres version as Sentry dev for consistency purposes + image: 'ghcr.io/getsentry/image-mirror-library-postgres:14-alpine' + healthcheck: + <<: *healthcheck_defaults + # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided + test: ['CMD-SHELL', 'pg_isready -U ${POSTGRES_USER:-postgres}'] + 'command': + [ + 'postgres', + '-c', + 'wal_level=logical', + '-c', + 'max_replication_slots=1', + '-c', + 'max_wal_senders=1', + ] + environment: + POSTGRES_HOST_AUTH_METHOD: 'trust' + POSTGRES_DB: 'sentry' + volumes: + - 'sentry-postgres:/var/lib/postgresql/data' + ports: + - '5432:5432' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + kafka: + <<: *restart_policy + image: 'ghcr.io/getsentry/image-mirror-confluentinc-cp-kafka:7.5.0' + container_name: sentry_kafka + environment: + # https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example + KAFKA_PROCESS_ROLES: 'broker,controller' + KAFKA_CONTROLLER_QUORUM_VOTERS: '1@127.0.0.1:29093' + KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER' + KAFKA_NODE_ID: '1' + CLUSTER_ID: 'MkU3OEVBNTcwNTJENDM2Qk' + KAFKA_LISTENERS: 'PLAINTEXT://0.0.0.0:29092,INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092,CONTROLLER://0.0.0.0:29093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://127.0.0.1:29092,INTERNAL://kafka:9093,EXTERNAL://127.0.0.1:9092' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'PLAINTEXT:PLAINTEXT,INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT' + KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT' + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1' + KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: '1' + KAFKA_LOG_RETENTION_HOURS: '24' + KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust + KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too + volumes: + - 'sentry-kafka:/var/lib/kafka/data' + - 'sentry-kafka-log:/var/lib/kafka/log' + healthcheck: + <<: *healthcheck_defaults + test: ['CMD-SHELL', 'nc -z localhost 9092'] + interval: 10s + timeout: 10s + retries: 30 + ports: + - '9092:9092' + - '9093:9093' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + 
clickhouse: + <<: *restart_policy + container_name: sentry_clickhouse + image: 'ghcr.io/getsentry/image-mirror-altinity-clickhouse-server:23.3.19.33.altinitystable' + ulimits: + nofile: + soft: 262144 + hard: 262144 + volumes: + - 'sentry-clickhouse:/var/lib/clickhouse' + - 'sentry-clickhouse-log:/var/log/clickhouse-server' + - type: bind + read_only: true + source: ./clickhouse/config.xml + target: /etc/clickhouse-server/config.d/sentry.xml + healthcheck: + test: [ + 'CMD-SHELL', + # Manually override any http_proxy envvar that might be set, because + # this wget does not support no_proxy. See: + # https://github.com/getsentry/self-hosted/issues/1537 + "http_proxy='' wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", + ] + interval: 10s + timeout: 10s + retries: 30 + ports: + - '8123:8123' + - '9000:9000' + - '9009:9009' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + symbolicator: + <<: *restart_policy + container_name: sentry_symbolicator + image: 'us-central1-docker.pkg.dev/sentryio/symbolicator/image:nightly' + volumes: + - 'sentry-symbolicator:/data' + - type: bind + read_only: true + source: ./symbolicator + target: /etc/symbolicator + command: run -c /etc/symbolicator/config.yml + ports: + - '3021:3021' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + vroom: + <<: *restart_policy + container_name: sentry_vroom + image: 'us-central1-docker.pkg.dev/sentryio/vroom/vroom:latest' + environment: + SENTRY_KAFKA_BROKERS_PROFILING: 'sentry_kafka:9092' + SENTRY_KAFKA_BROKERS_OCCURRENCES: 'sentry_kafka:9092' + SENTRY_BUCKET_PROFILES: file://localhost//var/lib/sentry-profiles + SENTRY_SNUBA_HOST: 'http://snuba-api:1218' + volumes: + - sentry-vroom:/var/lib/sentry-profiles + depends_on: + kafka: + <<: *depends_on-healthy + ports: + - '8085:8085' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + snuba: + <<: *restart_policy + container_name: sentry_snuba + image: ghcr.io/getsentry/snuba:latest + ports: + - '1218:1218' + - '1219:1219' + networks: + - sentry + command: ['devserver'] + environment: + PYTHONUNBUFFERED: '1' + SNUBA_SETTINGS: docker + DEBUG: '1' + CLICKHOUSE_HOST: 'clickhouse' + CLICKHOUSE_PORT: '9000' + CLICKHOUSE_HTTP_PORT: '8123' + DEFAULT_BROKERS: 'kafka:9093' + REDIS_HOST: 'redis' + REDIS_PORT: '6379' + REDIS_DB: '1' + ENABLE_SENTRY_METRICS_DEV: '${ENABLE_SENTRY_METRICS_DEV:-}' + ENABLE_PROFILES_CONSUMER: '${ENABLE_PROFILES_CONSUMER:-}' + ENABLE_SPANS_CONSUMER: '${ENABLE_SPANS_CONSUMER:-}' + ENABLE_ISSUE_OCCURRENCE_CONSUMER: '${ENABLE_ISSUE_OCCURRENCE_CONSUMER:-}' + ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1' + ENABLE_GROUP_ATTRIBUTES_CONSUMER: '${ENABLE_GROUP_ATTRIBUTES_CONSUMER:-}' + platform: linux/amd64 + depends_on: + - kafka + - redis + - clickhouse + extra_hosts: + host.docker.internal: host-gateway + bigtable: + <<: *restart_policy + container_name: sentry_bigtable + image: 'us.gcr.io/sentryio/cbtemulator:23c02d92c7a1747068eb1fc57dddbad23907d614' + ports: + - '8086:8086' + networks: + - sentry + extra_hosts: + host.docker.internal: host-gateway + redis-cluster: + <<: *restart_policy + container_name: sentry_redis-cluster + image: ghcr.io/getsentry/docker-redis-cluster:7.0.10 + ports: + - '7000:7000' + - '7001:7001' + - '7002:7002' + - '7003:7003' + - '7004:7004' + - '7005:7005' + networks: + - sentry + volumes: + - sentry-redis-cluster:/redis-data + environment: + - IP=0.0.0.0 + chartcuterie: + <<: *restart_policy + container_name: sentry_chartcuterie + image: 
'us-central1-docker.pkg.dev/sentryio/chartcuterie/image:latest'
+    environment:
+      CHARTCUTERIE_CONFIG: /etc/chartcuterie/config.js
+      # compose environment values must be strings, so quote the boolean
+      CHARTCUTERIE_CONFIG_POLLING: 'true'
+    volumes:
+      - ./chartcuterie:/etc/chartcuterie
+    ports:
+      - '7901:9090'
+    networks:
+      - sentry
+    extra_hosts:
+      host.docker.internal: host-gateway
+    healthcheck:
+      <<: *healthcheck_defaults
+      # The healthcheck runs inside the container, so invoke python3 directly
+      # (no docker exec).
+      test:
+        [
+          'CMD-SHELL',
+          'python3 -c "import urllib.request; urllib.request.urlopen(\"http://127.0.0.1:9090/api/chartcuterie/healthcheck/live\", timeout=5)"',
+        ]
+
+volumes:
+  # These store application data that should persist across restarts.
+  sentry-data:
+  sentry-postgres:
+  sentry-redis:
+  sentry-redis-cluster:
+  sentry-kafka:
+  sentry-clickhouse:
+  sentry-symbolicator:
+  # This volume stores profiles and should be persisted.
+  # Not being external will still persist data across restarts.
+  # It won't persist if someone does a docker compose down -v.
+  sentry-vroom:
+  sentry-kafka-log:
+  sentry-clickhouse-log:
+
+networks:
+  sentry:
+    name: sentry
+    external: true
diff --git a/devservices/symbolicator/config.yml b/devservices/symbolicator/config.yml
new file mode 100644
index 00000000000000..290d752a6dd04c
--- /dev/null
+++ b/devservices/symbolicator/config.yml
@@ -0,0 +1,11 @@
+bind: '0.0.0.0:3021'
+logging:
+  level: 'debug'
+  format: 'pretty'
+  enable_backtraces: true
+
+# Explicitly disable caches, as they are not something we want in tests. In
+# development this may be less ideal. Perhaps we should one day do the same
+# thing we do with relay (one container per test/session), although that will
+# be slow.
+cache_dir: null
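Taken together, the compose file plus the external `sentry` network are all a local reproduction needs. A sketch of bringing the whole testing stack up by hand and spot-checking it with the same probe the clickhouse healthcheck uses:

```bash
# The `sentry` network is declared external, so create it first.
docker network create sentry 2>/dev/null || true
docker compose -f devservices/docker-compose-testing.yml up -d

# Same probe as the clickhouse healthcheck, run from the host this time.
wget -nv -t1 --spider 'http://localhost:8123/' && echo "clickhouse is up"

docker compose -f devservices/docker-compose-testing.yml ps
```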