chore(repo): fix local docker dev

This commit is contained in:
Aaron Reisman
2025-08-25 16:51:14 -07:00
parent 4a3c172992
commit 9aa92da633
22 changed files with 1778 additions and 717 deletions

View File

@@ -139,7 +139,8 @@ jobs:
branch_build_push_admin:
name: Build-Push Admin Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Admin Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -152,7 +153,9 @@ jobs:
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_admin }}
build-context: .
dockerfile-path: ./apps/admin/Dockerfile.admin
dockerfile-path: ./Dockerfile.node
build-args: |
APP_SCOPE=admin
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
@@ -161,7 +164,8 @@ jobs:
branch_build_push_web:
name: Build-Push Web Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Web Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -174,7 +178,9 @@ jobs:
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_web }}
build-context: .
dockerfile-path: ./apps/web/Dockerfile.web
dockerfile-path: ./Dockerfile.node
build-args: |
APP_SCOPE=web
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
@@ -183,7 +189,8 @@ jobs:
branch_build_push_space:
name: Build-Push Space Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Space Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -196,7 +203,9 @@ jobs:
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_space }}
build-context: .
dockerfile-path: ./apps/space/Dockerfile.space
dockerfile-path: ./Dockerfile.node
build-args: |
APP_SCOPE=space
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
@@ -205,7 +214,8 @@ jobs:
branch_build_push_live:
name: Build-Push Live Collaboration Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Live Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -227,7 +237,8 @@ jobs:
branch_build_push_api:
name: Build-Push API Server Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Backend Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -239,8 +250,8 @@ jobs:
dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }}
docker-image-owner: makeplane
docker-image-name: ${{ needs.branch_build_setup.outputs.dh_img_backend }}
build-context: ./apps/api
dockerfile-path: ./apps/api/Dockerfile.api
build-context: .
dockerfile-path: ./Dockerfile.api
buildx-driver: ${{ needs.branch_build_setup.outputs.gh_buildx_driver }}
buildx-version: ${{ needs.branch_build_setup.outputs.gh_buildx_version }}
buildx-platforms: ${{ needs.branch_build_setup.outputs.gh_buildx_platforms }}
@@ -249,7 +260,8 @@ jobs:
branch_build_push_proxy:
name: Build-Push Proxy Docker Image
runs-on: ubuntu-22.04
needs: [branch_build_setup]
needs:
- branch_build_setup
steps:
- name: Proxy Build and Push
uses: makeplane/actions/build-push@v1.0.0
@@ -368,15 +380,13 @@ jobs:
name: Build Release
runs-on: ubuntu-22.04
needs:
[
branch_build_setup,
branch_build_push_admin,
branch_build_push_web,
branch_build_push_space,
branch_build_push_live,
branch_build_push_api,
branch_build_push_proxy,
]
- branch_build_setup
- branch_build_push_admin
- branch_build_push_web
- branch_build_push_space
- branch_build_push_live
- branch_build_push_api
- branch_build_push_proxy
env:
REL_VERSION: ${{ needs.branch_build_setup.outputs.release_version }}
steps:

121
.github/workflows/docker-smoke-aio.yml vendored Normal file
View File

@@ -0,0 +1,121 @@
# CI: build the all-in-one (AIO) Docker image and run a smoke test against it.
name: Docker AIO build and smoke test

# Trigger on manual dispatch, and on PRs/pushes to "preview" that touch any
# app source, shared packages, or a Docker build input.
on:
  workflow_dispatch:
  pull_request:
    branches:
      - "preview"
    paths:
      - "apps/web/**"
      - "apps/space/**"
      - "apps/admin/**"
      - "apps/live/**"
      - "packages/**"
      - "turbo.json"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
      - "Dockerfile.node"
      - "Dockerfile.api"
      - "Dockerfile.aio"
      - "docker-bake.hcl"
      - ".github/workflows/docker-smoke-aio.yml"
  push:
    branches:
      - "preview"
    paths:
      - "apps/web/**"
      - "apps/space/**"
      - "apps/admin/**"
      - "apps/live/**"
      - "packages/**"
      - "turbo.json"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
      - "Dockerfile.node"
      - "Dockerfile.api"
      - "Dockerfile.aio"
      - "docker-bake.hcl"
      - ".github/workflows/docker-smoke-aio.yml"

# Serialize runs per ref; queued runs are kept rather than cancelled.
concurrency:
  group: aio-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

jobs:
  # Gate job: decide whether the AIO image needs a rebuild for this change set.
  determine-aio:
    name: Determine if AIO needed
    runs-on: ubuntu-latest
    outputs:
      aio_needed: ${{ steps.build-flag.outputs.aio_needed }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history so paths-filter can diff against the base ref.
          fetch-depth: 0
      - name: Detect changed paths
        id: changes
        uses: dorny/paths-filter@v3
        with:
          filters: |
            web:
              - 'apps/web/**'
            space:
              - 'apps/space/**'
            admin:
              - 'apps/admin/**'
            live:
              - 'apps/live/**'
            common:
              - 'packages/**'
              - 'turbo.json'
              - 'pnpm-lock.yaml'
              - 'pnpm-workspace.yaml'
              - 'Dockerfile.node'
              - 'Dockerfile.api'
              - 'Dockerfile.aio'
              - 'docker-bake.hcl'
              - '.github/workflows/docker-smoke-aio.yml'
      # paths-filter outputs are the literal strings 'true'/'false'; any app
      # or shared-input change means the AIO image must be rebuilt.
      - name: Compute AIO flag
        id: build-flag
        uses: actions/github-script@v7
        with:
          script: |
            const anyCommon = '${{ steps.changes.outputs.common }}' === 'true';
            const changedWeb = '${{ steps.changes.outputs.web }}' === 'true';
            const changedSpace = '${{ steps.changes.outputs.space }}' === 'true';
            const changedAdmin = '${{ steps.changes.outputs.admin }}' === 'true';
            const changedLive = '${{ steps.changes.outputs.live }}' === 'true';
            const aioNeeded = anyCommon || changedWeb || changedSpace || changedAdmin || changedLive;
            core.setOutput('aio_needed', String(aioNeeded));

  # Build the AIO image with buildx bake (loaded into the local daemon) and
  # run the repo's smoke script against it.
  aio_smoke:
    name: Build and smoke test AIO
    runs-on: ubuntu-latest
    needs: determine-aio
    # Skip the expensive build entirely when nothing relevant changed.
    if: ${{ needs.determine-aio.outputs.aio_needed == 'true' }}
    timeout-minutes: 30
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Show Docker version
        run: |
          docker version
          docker info
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      # --load makes the baked image available to the local daemon for the
      # smoke script (bake otherwise keeps it in the builder cache only).
      - name: Build AIO image with bake (load into local daemon)
        run: |
          docker buildx bake -f "./docker-bake.hcl" --load aio
      # NOTE(review): assumes scripts/smoke-aio.sh exists in the repo — the
      # script itself is not visible in this change.
      - name: Run AIO smoke script
        run: |
          chmod +x "scripts/smoke-aio.sh"
          "scripts/smoke-aio.sh"

171
.github/workflows/docker-smoke.yml vendored Normal file
View File

@@ -0,0 +1,171 @@
# CI: build a Docker image for each changed Next.js app and smoke test it
# by probing its HTTP endpoint.
name: Docker build and smoke test for apps

# Trigger on manual dispatch, and on PRs/pushes to "preview" that touch any
# app source, shared packages, or a Docker build input.
on:
  workflow_dispatch:
  pull_request:
    branches:
      - "preview"
    paths:
      - "apps/web/**"
      - "apps/space/**"
      - "apps/admin/**"
      - "apps/live/**"
      - "packages/**"
      - "turbo.json"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
      - "Dockerfile.node"
      - "Dockerfile.api"
      - "Dockerfile.aio"
      - "docker-bake.hcl"
      - ".github/workflows/docker-smoke.yml"
  push:
    branches:
      - "preview"
    paths:
      - "apps/web/**"
      - "apps/space/**"
      - "apps/admin/**"
      - "apps/live/**"
      - "packages/**"
      - "turbo.json"
      - "pnpm-lock.yaml"
      - "pnpm-workspace.yaml"
      - "Dockerfile.node"
      - "Dockerfile.api"
      - "Dockerfile.aio"
      - "docker-bake.hcl"
      - ".github/workflows/docker-smoke.yml"

jobs:
  # Compute a build matrix containing only the apps affected by this change.
  determine-matrix:
    name: Determine matrix
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.build-matrix.outputs.matrix }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history so paths-filter can diff against the base ref.
          fetch-depth: 0
      - name: Detect changed paths
        id: changes
        uses: dorny/paths-filter@v3
        with:
          filters: |
            web:
              - 'apps/web/**'
            space:
              - 'apps/space/**'
            admin:
              - 'apps/admin/**'
            live:
              - 'apps/live/**'
            common:
              - 'packages/**'
              - 'turbo.json'
              - 'pnpm-lock.yaml'
              - 'pnpm-workspace.yaml'
              - 'Dockerfile.node'
              - 'Dockerfile.api'
              - 'Dockerfile.aio'
              - 'docker-bake.hcl'
              - '.github/workflows/docker-smoke.yml'
      # Any "common" change rebuilds every app; otherwise only the changed
      # apps are included. Falls back to web so the matrix is never empty.
      # Matrix entry fields: name, bake_target, image (tag), container (name),
      # host_port, path (probe URL path), env_flags (extra docker run flags).
      - name: Build matrix
        id: build-matrix
        uses: actions/github-script@v7
        with:
          script: |
            const include = [];
            const anyCommon = '${{ steps.changes.outputs.common }}' === 'true';
            const changed = {
              web: '${{ steps.changes.outputs.web }}' === 'true',
              space: '${{ steps.changes.outputs.space }}' === 'true',
              admin: '${{ steps.changes.outputs.admin }}' === 'true',
              live: '${{ steps.changes.outputs.live }}' === 'true',
            };
            const add = (name, bake_target, image, container, port, path, env_flags = "") =>
              include.push({ name, bake_target, image, container, host_port: port, path, env_flags });
            const buildAll = anyCommon || changed.web || changed.space || changed.admin || changed.live;
            if (buildAll || changed.web) add('web', 'web', 'plane-web:ci-smoke', 'plane-web-ci', 3001, '/');
            if (buildAll || changed.space) add('space', 'space', 'plane-space:ci-smoke', 'plane-space-ci', 3002, '/spaces');
            if (buildAll || changed.admin) add('admin', 'admin', 'plane-admin:ci-smoke', 'plane-admin-ci', 3003, '/god-mode');
            if (buildAll || changed.live) add('live', 'live', 'plane-live:ci-smoke', 'plane-live-ci', 3005, '/live/health', '-e NODE_ENV=production -e LIVE_BASE_PATH=/live');
            if (include.length === 0) {
              // Default to web to keep job non-empty
              add('web', 'web', 'plane-web:ci-smoke', 'plane-web-ci', 3001, '/');
            }
            core.setOutput('matrix', JSON.stringify({ include }));

  # For each matrix entry: build the image, run the container, and poll its
  # HTTP endpoint until it returns 200 (or fail with logs).
  smoke:
    name: Build and smoke test ${{ matrix.name }}
    runs-on: ubuntu-latest
    needs: determine-matrix
    timeout-minutes: 25
    strategy:
      # Let every app finish so one failure does not mask the others.
      fail-fast: false
      matrix: ${{ fromJSON(needs.determine-matrix.outputs.matrix) }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Prepare build environment
        run: echo "Using docker build (no buildx bake)"
      - name: Show Docker version
        run: |
          docker version
          docker info
      # "live" has its own Dockerfile; web/space/admin share Dockerfile.node
      # and are selected via the APP_SCOPE build arg.
      - name: Build image (${{ matrix.name }})
        shell: bash
        run: |
          set -euo pipefail
          name="${{ matrix.name }}"
          tag="${{ matrix.image }}"
          if [ "$name" = "live" ]; then
            docker build -f "apps/live/Dockerfile.live" -t "$tag" "."
          else
            docker build -f "Dockerfile.node" --target runtime --build-arg APP_SCOPE="$name" -t "$tag" "."
          fi
      # Containers listen on 3000; each app maps to a distinct host port so
      # matrix jobs cannot collide.
      - name: Run container (${{ matrix.name }})
        run: |
          docker run -d --name ${{ matrix.container }} -p ${{ matrix.host_port }}:3000 ${{ matrix.env_flags }} ${{ matrix.image }}
          docker ps -a
      # Poll for up to ~2 minutes (60 tries x 2s); dump container logs on failure.
      - name: Smoke test HTTP endpoint (${{ matrix.name }})
        shell: bash
        run: |
          set -euo pipefail
          URL="http://localhost:${{ matrix.host_port }}${{ matrix.path }}"
          echo "Probing $URL ..."
          for i in {1..60}; do
            STATUS="$(curl -sS -o /dev/null -w "%{http_code}" -L "${URL}" || true)"
            if [ "${STATUS}" = "200" ]; then
              echo "Success: HTTP ${STATUS} from ${URL}"
              exit 0
            fi
            echo "Attempt ${i}: HTTP ${STATUS} (waiting 2s)"
            sleep 2
          done
          echo "Failed to get HTTP 200 from ${URL}"
          echo "::group::Container logs (${{ matrix.container }})"
          docker logs ${{ matrix.container }} || true
          echo "::endgroup::"
          exit 1
      - name: Cleanup container (${{ matrix.name }})
        if: always()
        run: |
          docker rm -f ${{ matrix.container }} || true

120
Dockerfile.aio Normal file
View File

@@ -0,0 +1,120 @@
# syntax=docker/dockerfile:1.7
#
# All-in-one assembler image that composes the Plane runtime from per-app
# images and the community supervisor/Caddy configuration.
#
# Build requirements:
#   Build the component images first. Each one is supplied to this build as a
#   named build context (the stages below COPY --from these contexts):
#     --build-context web_ctx=target:web
#     --build-context space_ctx=target:space
#     --build-context admin_ctx=target:admin
#     --build-context live_ctx=target:live
#     --build-context api_ctx=target:api
#     --build-context proxy_ctx=target:proxy
#
# Example:
#   docker build \
#     -f Dockerfile.aio \
#     --build-context web_ctx=docker-image://plane-web:latest \
#     --build-context space_ctx=docker-image://plane-space:latest \
#     --build-context admin_ctx=docker-image://plane-admin:latest \
#     --build-context live_ctx=docker-image://plane-live:latest \
#     --build-context api_ctx=docker-image://plane-api:latest \
#     --build-context proxy_ctx=docker-image://plane-proxy:latest \
#     -t plane-aio:latest .
#
# Run:
#   docker run --rm -it -p 80:80 plane-aio:latest
#
# Provide required env vars; see deployments/aio/community/README.md.
# ------------------------------------------------------------------------------
# Version arguments shared by the stages below
# ------------------------------------------------------------------------------
ARG NODE_VERSION=22-alpine
ARG PY_VERSION=3.12.10-alpine

# ------------------------------------------------------------------------------
# Source stage: Node image used only as a donor of the Node runtime files
# ------------------------------------------------------------------------------
FROM node:${NODE_VERSION} AS node

# ------------------------------------------------------------------------------
# Final runner: Python as base (Supervisor + API deps live here)
# ------------------------------------------------------------------------------
FROM python:${PY_VERSION} AS runner

WORKDIR /app

# Base system libs for API/Caddy/Node runtime
RUN apk add --no-cache \
    libpq \
    libxslt \
    xmlsec \
    nss-tools \
    bash \
    curl \
    ca-certificates \
    openssl

# Install supervisor (the process manager that runs all services in this image)
RUN pip install --no-cache-dir supervisor && mkdir -p /etc/supervisor/conf.d

# Copy Node runtime into the AIO container for Next standalone apps
COPY --from=node /usr/lib /usr/lib
COPY --from=node /usr/local/lib /usr/local/lib
COPY --from=node /usr/local/include /usr/local/include
COPY --from=node /usr/local/bin /usr/local/bin

# Copy Next.js standalone app artifacts (namespaced under /app/<app>)
COPY --from=web_ctx /app /app/web
COPY --from=space_ctx /app /app/space
COPY --from=admin_ctx /app /app/admin
COPY --from=live_ctx /app /app/live

# Clean potential Next caches (optional)
RUN rm -rf /app/web/apps/web/.next/cache || true \
    && rm -rf /app/space/apps/space/.next/cache || true \
    && rm -rf /app/admin/apps/admin/.next/cache || true

# Copy Python backend code and installed packages/binaries
COPY --from=api_ctx /code /app/backend
# Match CPython version path (PY_VERSION=3.12.x-alpine). NOTE(review): this
# path is hard-coded to 3.12 — it must be updated if PY_VERSION moves off
# the 3.12 series.
COPY --from=api_ctx /usr/local/lib/python3.12/site-packages/ /usr/local/lib/python3.12/site-packages/
COPY --from=api_ctx /usr/local/bin/ /usr/local/bin/

# Caddy binary from proxy image
COPY --from=proxy_ctx /usr/bin/caddy /usr/bin/caddy

# Community supervisor config and startup script
COPY deployments/aio/community/start.sh /app/start.sh
COPY deployments/aio/community/supervisor.conf /etc/supervisor/conf.d/supervisor.conf

# Provide a default plane.env from variables template; start.sh will update it
COPY deployments/aio/community/variables.env /app/plane.env

# Prepare Caddy configuration:
#   - Start from the repository Caddyfile.ce
#   - Rewrite compose service-name upstreams to the localhost ports that
#     supervisor manages inside this container
#   - Remove lines referencing plane-minio:9000 (that upstream has no
#     local equivalent here)
RUN mkdir -p /app/proxy
COPY apps/proxy/Caddyfile.ce /app/proxy/Caddyfile
RUN set -eux; \
    sed -i 's|web:3000|localhost:3001|g' /app/proxy/Caddyfile; \
    sed -i 's|space:3000|localhost:3002|g' /app/proxy/Caddyfile; \
    sed -i 's|admin:3000|localhost:3003|g' /app/proxy/Caddyfile; \
    sed -i 's|api:8000|localhost:3004|g' /app/proxy/Caddyfile; \
    sed -i 's|live:3000|localhost:3005|g' /app/proxy/Caddyfile; \
    sed -i '/plane-minio:9000/d' /app/proxy/Caddyfile

# Folders and permissions
RUN mkdir -p /app/logs/access /app/logs/error /app/data \
    && chmod +x /app/start.sh

VOLUME ["/app/data", "/app/logs"]

EXPOSE 80 443

CMD ["/app/start.sh"]

151
Dockerfile.api Normal file
View File

@@ -0,0 +1,151 @@
# syntax=docker/dockerfile:1.7
#
# Unified multi-stage Dockerfile for the Python API
# Targets:
#   - dev:     local development with hot reload and full build toolchain
#   - runtime: production runtime using prebuilt wheels
#
# Example builds (from repo root):
#   docker build -f Dockerfile.api --target dev -t plane-api:dev .
#   docker build -f Dockerfile.api --target runtime -t plane-api:latest .

ARG PY_VERSION=python:3.12.10-alpine

# -----------------------------------------------------------------------------
# base: common runtime base (no build toolchain)
# -----------------------------------------------------------------------------
FROM ${PY_VERSION} AS base

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    INSTANCE_CHANGELOG_URL=https://sites.plane.so/pages/691ef037bcfe416a902e48cb55f59891/

# Runtime libraries required by the app
RUN apk add --no-cache \
    libpq \
    libxslt \
    xmlsec \
    ca-certificates \
    openssl

WORKDIR /code

# -----------------------------------------------------------------------------
# builder: build Python wheels for all dependencies
# -----------------------------------------------------------------------------
FROM ${PY_VERSION} AS builder

# Full build toolchain to compile deps to wheels
RUN apk add --no-cache \
    bash~=5.2 \
    g++ \
    gcc \
    cargo \
    git \
    make \
    postgresql-dev \
    libc-dev \
    linux-headers \
    libffi-dev \
    libxml2-dev \
    libxslt-dev \
    openssl-dev \
    xmlsec-dev

WORKDIR /w

# Copy requirements (relative to repo root)
COPY apps/api/requirements.txt /w/requirements.txt
COPY apps/api/requirements /w/requirements

# Build wheels into /wheels to reuse in runtime
RUN --mount=type=cache,target=/root/.cache/pip \
    pip wheel -r /w/requirements.txt --wheel-dir /wheels

# -----------------------------------------------------------------------------
# dev: local development image (bind mount source into /code)
# -----------------------------------------------------------------------------
FROM ${PY_VERSION} AS dev

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    INSTANCE_CHANGELOG_URL=https://sites.plane.so/pages/691ef037bcfe416a902e48cb55f59891/

# Match prior dev environment: runtime + build deps + node
RUN apk add --no-cache \
    libpq \
    libxslt \
    xmlsec \
    nodejs-current \
    bash~=5.2 \
    g++ \
    gcc \
    cargo \
    git \
    make \
    postgresql-dev \
    libc-dev \
    linux-headers \
    libffi-dev

WORKDIR /code

# Copy and install local dev requirements
COPY apps/api/requirements.txt ./requirements.txt
COPY apps/api/requirements ./requirements
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -r requirements/local.txt --no-cache-dir

# Bring in the API source
COPY apps/api/ ./

# Permissions similar to existing Dockerfiles
RUN mkdir -p /code/plane/logs \
    && chmod -R +x /code/bin \
    && chmod -R 777 /code

EXPOSE 8000

CMD ["./bin/docker-entrypoint-api-local.sh"]

# -----------------------------------------------------------------------------
# runtime: production image using wheels from builder
# -----------------------------------------------------------------------------
FROM base AS runtime

# Bash needed for entrypoint scripts
RUN apk add --no-cache bash~=5.2

WORKDIR /code

COPY apps/api/requirements.txt ./requirements.txt
COPY apps/api/requirements ./requirements
# Install from the builder's wheels. Bind-mounting /wheels from the builder
# stage (rather than COPYing it in) keeps the wheel files out of the final
# image layers; --no-index guarantees a fully offline, reproducible install.
RUN --mount=type=bind,from=builder,source=/wheels,target=/wheels \
    pip install --no-cache-dir --no-index --find-links=/wheels -r requirements.txt

# Copy only what is required to run
COPY apps/api/manage.py ./manage.py
COPY apps/api/plane ./plane
COPY apps/api/templates ./templates
COPY apps/api/package.json ./package.json
COPY apps/api/bin ./bin

# Create unprivileged user and set secure permissions
RUN addgroup -S plane && adduser -S -G plane -h /code -s /sbin/nologin plane \
    && chmod +x ./bin/* \
    && mkdir -p /code/plane/logs \
    && chown -R plane:plane /code \
    && chmod -R 755 /code \
    && chmod 775 /code/plane/logs

ENV GUNICORN_WORKERS=3 \
    PORT=8000

USER plane

EXPOSE 8000

# Default: API server; override command for worker/beat/migrator as needed
CMD ["./bin/docker-entrypoint-api.sh"]

138
Dockerfile.node Normal file
View File

@@ -0,0 +1,138 @@
# syntax=docker/dockerfile:1.7
#
# Unified multi-stage Dockerfile for Next.js apps in this monorepo.
# Supports:
#   - target=dev     (hot-reload, expects bind-mounted repo)
#   - target=runtime (production image using Next.js standalone output)
#
# Usage examples:
#   - Build dev image for web:  docker build --target dev --build-arg APP_SCOPE=web -t plane-web:dev .
#   - Run dev (with compose):   mount the repo into /repo and override command/ports as needed
#   - Build prod image for web: docker build --target runtime --build-arg APP_SCOPE=web -t plane-web:latest .
#
# APP_SCOPE must be one of: web, space, admin
ARG NODE_VERSION=22-alpine
ARG TURBO_VERSION=2.5.6

# -----------------------------------------------------------------------------
# Base: Node + pnpm (corepack)
# -----------------------------------------------------------------------------
FROM node:${NODE_VERSION} AS base
ENV PNPM_HOME=/pnpm
ENV PATH=$PNPM_HOME:$PATH
RUN corepack enable && apk add --no-cache libc6-compat

# -----------------------------------------------------------------------------
# builder: prune workspace using turbo for the selected app scope
# -----------------------------------------------------------------------------
FROM base AS builder
# Re-declare global ARGs: values declared before the first FROM are not in
# scope inside a stage unless repeated here. Without ARG TURBO_VERSION the
# install below would expand to "turbo@" (empty version).
ARG APP_SCOPE
ARG TURBO_VERSION
WORKDIR /repo

# Full context is required for turbo prune
COPY . .
RUN pnpm add -g turbo@${TURBO_VERSION}
RUN turbo prune --scope=${APP_SCOPE} --docker

# -----------------------------------------------------------------------------
# installer: install deps (offline) and build the selected scope
# -----------------------------------------------------------------------------
FROM base AS installer
WORKDIR /repo

# Seed minimal files for reproducible installs and good caching
COPY .gitignore .gitignore
COPY --from=builder /repo/out/json/ .
COPY --from=builder /repo/out/pnpm-lock.yaml ./pnpm-lock.yaml

# Fetch dependencies into a cached store layer
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store \
    pnpm fetch --store-dir=/pnpm/store

# Bring in only the pruned workspace for fast installs/builds
COPY --from=builder /repo/out/full/ .
COPY turbo.json turbo.json

# Offline, frozen lockfile install from cached store
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store \
    pnpm install --offline --frozen-lockfile --store-dir=/pnpm/store

# Build-time environment (safe to pass through; only NEXT_PUBLIC_* are embedded)
# Keep parity with existing app Dockerfiles
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1

# Build only the selected scope
ARG APP_SCOPE
RUN pnpm turbo run build --filter=${APP_SCOPE}

# -----------------------------------------------------------------------------
# dev: for local development with hot reload (bind-mount the repo to /repo)
# -----------------------------------------------------------------------------
FROM base AS dev
# Re-declare so ${TURBO_VERSION} is available in this stage (see builder note).
ARG TURBO_VERSION
WORKDIR /repo

# Helpful global tool for selective builds/dev
RUN pnpm add -g turbo@${TURBO_VERSION}

ENV NODE_ENV=development \
    NEXT_TELEMETRY_DISABLED=1 \
    TURBO_TELEMETRY_DISABLED=1

# Select which app to run in dev (web|space|admin)
ARG APP_SCOPE
ENV APP_SCOPE=${APP_SCOPE}

EXPOSE 3000

# Expect the source to be bind-mounted; install deps and start dev server
CMD ["sh", "-lc", "pnpm install && pnpm dev --filter=${APP_SCOPE}"]

# -----------------------------------------------------------------------------
# runtime: minimal Next.js standalone runner for the selected scope
# -----------------------------------------------------------------------------
FROM node:${NODE_VERSION} AS runtime
WORKDIR /app

ENV NODE_ENV=production \
    NEXT_TELEMETRY_DISABLED=1 \
    TURBO_TELEMETRY_DISABLED=1 \
    HOSTNAME=0.0.0.0 \
    PORT=3000

# Drop privileges
RUN addgroup -S -g 1001 nodejs \
    && adduser -S -u 1001 -G nodejs -h /home/nextjs -D nextjs \
    && mkdir -p /home/nextjs \
    && chown -R nextjs:nodejs /home/nextjs
USER nextjs

# Copy only the built output for the selected scope
ARG APP_SCOPE
ENV APP_SCOPE=${APP_SCOPE}

# Next.js standalone layout
COPY --from=installer /repo/apps/${APP_SCOPE}/.next/standalone ./
COPY --from=installer /repo/apps/${APP_SCOPE}/.next/static ./apps/${APP_SCOPE}/.next/static
COPY --from=installer /repo/apps/${APP_SCOPE}/public ./apps/${APP_SCOPE}/public

EXPOSE 3000

# Fail fast with a clear message if the standalone server for APP_SCOPE is
# missing, rather than crashing with an opaque node error.
CMD ["sh", "-lc", "set -e; : \"${APP_SCOPE:?APP_SCOPE env is required}\"; SERVER=\"apps/${APP_SCOPE}/server.js\"; if [ ! -f \"$SERVER\" ]; then echo \"Error: $SERVER not found.\"; ls -la \"apps/${APP_SCOPE}\" || true; echo \"Known apps:\"; ls -1 apps || true; exit 1; fi; exec node \"$SERVER\""]

View File

@@ -1,103 +0,0 @@
# syntax=docker/dockerfile:1.7
FROM node:22-alpine AS base
# Setup pnpm package manager with corepack and configure global bin directory for caching
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
# *****************************************************************************
# STAGE 1: Build the project
# *****************************************************************************
FROM base AS builder
RUN apk add --no-cache libc6-compat
WORKDIR /app
ARG TURBO_VERSION=2.5.6
RUN corepack enable pnpm && pnpm add -g turbo@${TURBO_VERSION}
COPY . .
RUN turbo prune --scope=admin --docker
# *****************************************************************************
# STAGE 2: Install dependencies & build the project
# *****************************************************************************
FROM base AS installer
RUN apk add --no-cache libc6-compat
WORKDIR /app
COPY .gitignore .gitignore
COPY --from=builder /app/out/json/ .
COPY --from=builder /app/out/pnpm-lock.yaml ./pnpm-lock.yaml
RUN corepack enable pnpm
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm fetch --store-dir=/pnpm/store
COPY --from=builder /app/out/full/ .
COPY turbo.json turbo.json
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm install --offline --frozen-lockfile --store-dir=/pnpm/store
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
RUN pnpm turbo run build --filter=admin
# *****************************************************************************
# STAGE 3: Copy the project and start it
# *****************************************************************************
FROM base AS runner
WORKDIR /app
# Don't run production as root
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
USER nextjs
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=installer /app/apps/admin/.next/standalone ./
COPY --from=installer /app/apps/admin/.next/static ./apps/admin/.next/static
COPY --from=installer /app/apps/admin/public ./apps/admin/public
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
EXPOSE 3000
CMD ["node", "apps/admin/server.js"]

View File

@@ -1,17 +0,0 @@
FROM node:22-alpine
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
COPY . .
RUN corepack enable pnpm && pnpm add -g turbo
RUN pnpm install
ENV NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
EXPOSE 3000
VOLUME [ "/app/node_modules", "/app/admin/node_modules" ]
CMD ["pnpm", "dev", "--filter=admin"]

View File

@@ -1,58 +0,0 @@
FROM python:3.12.10-alpine
# set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
ENV INSTANCE_CHANGELOG_URL=https://sites.plane.so/pages/691ef037bcfe416a902e48cb55f59891/
# Update system packages for security
RUN apk update && apk upgrade
WORKDIR /code
RUN apk add --no-cache --upgrade \
"libpq" \
"libxslt" \
"xmlsec" \
"ca-certificates" \
"openssl"
COPY requirements.txt ./
COPY requirements ./requirements
RUN apk add --no-cache libffi-dev
RUN apk add --no-cache --virtual .build-deps \
"bash~=5.2" \
"g++" \
"gcc" \
"cargo" \
"git" \
"make" \
"postgresql-dev" \
"libc-dev" \
"linux-headers" \
&& \
pip install -r requirements.txt --compile --no-cache-dir \
&& \
apk del .build-deps \
&& \
rm -rf /var/cache/apk/*
# Add in Django deps and generate Django's static files
COPY manage.py manage.py
COPY plane plane/
COPY templates templates/
COPY package.json package.json
RUN apk --no-cache add "bash~=5.2"
COPY ./bin ./bin/
RUN mkdir -p /code/plane/logs
RUN chmod +x ./bin/*
RUN chmod -R 777 /code
# Expose container port and run entry point script
EXPOSE 8000
CMD ["./bin/docker-entrypoint-api.sh"]

View File

@@ -1,46 +0,0 @@
FROM python:3.12.5-alpine AS backend
# set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PIP_DISABLE_PIP_VERSION_CHECK=1
ENV INSTANCE_CHANGELOG_URL https://sites.plane.so/pages/691ef037bcfe416a902e48cb55f59891/
RUN apk --no-cache add \
"bash~=5.2" \
"libpq" \
"libxslt" \
"nodejs-current" \
"xmlsec" \
"libffi-dev" \
"bash~=5.2" \
"g++" \
"gcc" \
"cargo" \
"git" \
"make" \
"postgresql-dev" \
"libc-dev" \
"linux-headers"
WORKDIR /code
COPY requirements.txt ./requirements.txt
ADD requirements ./requirements
# Install the local development settings
RUN pip install -r requirements/local.txt --compile --no-cache-dir
COPY . .
RUN mkdir -p /code/plane/logs
RUN chmod -R +x /code/bin
RUN chmod -R 777 /code
# Expose container port and run entry point script
EXPOSE 8000
CMD [ "./bin/docker-entrypoint-api-local.sh" ]

View File

@@ -1,19 +0,0 @@
FROM node:22-alpine
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
COPY . .
RUN corepack enable pnpm && pnpm add -g turbo
RUN pnpm install
EXPOSE 3002
ENV NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
VOLUME [ "/app/node_modules", "/app/apps/space/node_modules"]
CMD ["pnpm", "dev", "--filter=space"]

View File

@@ -1,103 +0,0 @@
# syntax=docker/dockerfile:1.7
FROM node:22-alpine AS base
# Setup pnpm package manager with corepack and configure global bin directory for caching
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
# *****************************************************************************
# STAGE 1: Build the project
# *****************************************************************************
FROM base AS builder
RUN apk add --no-cache libc6-compat
WORKDIR /app
ARG TURBO_VERSION=2.5.6
RUN corepack enable pnpm && pnpm add -g turbo@${TURBO_VERSION}
COPY . .
RUN turbo prune --scope=space --docker
# *****************************************************************************
# STAGE 2: Install dependencies & build the project
# *****************************************************************************
FROM base AS installer
RUN apk add --no-cache libc6-compat
WORKDIR /app
COPY .gitignore .gitignore
COPY --from=builder /app/out/json/ .
COPY --from=builder /app/out/pnpm-lock.yaml ./pnpm-lock.yaml
RUN corepack enable pnpm
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm fetch --store-dir=/pnpm/store
COPY --from=builder /app/out/full/ .
COPY turbo.json turbo.json
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm install --offline --frozen-lockfile --store-dir=/pnpm/store
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
RUN pnpm turbo run build --filter=space
# *****************************************************************************
# STAGE 3: Copy the project and start it
# *****************************************************************************
FROM base AS runner
WORKDIR /app
# Don't run production as root
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
USER nextjs
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=installer /app/apps/space/.next/standalone ./
COPY --from=installer /app/apps/space/.next/static ./apps/space/.next/static
COPY --from=installer /app/apps/space/public ./apps/space/public
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
EXPOSE 3000
CMD ["node", "apps/space/server.js"]

View File

@@ -1,13 +0,0 @@
FROM node:22-alpine
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
COPY . .
RUN corepack enable pnpm && pnpm add -g turbo
RUN pnpm install
EXPOSE 3000
VOLUME [ "/app/node_modules", "/app/web/node_modules" ]
CMD ["pnpm", "dev", "--filter=web"]

View File

@@ -1,120 +0,0 @@
# syntax=docker/dockerfile:1.7
FROM node:22-alpine AS base
# Setup pnpm package manager with corepack and configure global bin directory for caching
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable
# *****************************************************************************
# STAGE 1: Build the project
# *****************************************************************************
FROM base AS builder
RUN apk add --no-cache libc6-compat
# Set working directory
WORKDIR /app
ARG TURBO_VERSION=2.5.6
RUN corepack enable pnpm && pnpm add -g turbo@${TURBO_VERSION}
COPY . .
RUN turbo prune --scope=web --docker
# *****************************************************************************
# STAGE 2: Install dependencies & build the project
# *****************************************************************************
# Add lockfile and package.json's of isolated subworkspace
FROM base AS installer
RUN apk add --no-cache libc6-compat
WORKDIR /app
# First install the dependencies (as they change less often)
COPY .gitignore .gitignore
COPY --from=builder /app/out/json/ .
COPY --from=builder /app/out/pnpm-lock.yaml ./pnpm-lock.yaml
RUN corepack enable pnpm
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm fetch --store-dir=/pnpm/store
# Build the project
COPY --from=builder /app/out/full/ .
COPY turbo.json turbo.json
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm install --offline --frozen-lockfile --store-dir=/pnpm/store
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_LIVE_BASE_URL=""
ENV NEXT_PUBLIC_LIVE_BASE_URL=$NEXT_PUBLIC_LIVE_BASE_URL
ARG NEXT_PUBLIC_LIVE_BASE_PATH="/live"
ENV NEXT_PUBLIC_LIVE_BASE_PATH=$NEXT_PUBLIC_LIVE_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
RUN pnpm turbo run build --filter=web
# *****************************************************************************
# STAGE 3: Copy the project and start it
# *****************************************************************************
FROM base AS runner
WORKDIR /app
# Don't run production as root
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
USER nextjs
# Automatically leverage output traces to reduce image size
# https://nextjs.org/docs/advanced-features/output-file-tracing
COPY --from=installer /app/apps/web/.next/standalone ./
COPY --from=installer /app/apps/web/.next/static ./apps/web/.next/static
COPY --from=installer /app/apps/web/public ./apps/web/public
ARG NEXT_PUBLIC_API_BASE_URL=""
ENV NEXT_PUBLIC_API_BASE_URL=$NEXT_PUBLIC_API_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_URL=""
ENV NEXT_PUBLIC_ADMIN_BASE_URL=$NEXT_PUBLIC_ADMIN_BASE_URL
ARG NEXT_PUBLIC_ADMIN_BASE_PATH="/god-mode"
ENV NEXT_PUBLIC_ADMIN_BASE_PATH=$NEXT_PUBLIC_ADMIN_BASE_PATH
ARG NEXT_PUBLIC_LIVE_BASE_URL=""
ENV NEXT_PUBLIC_LIVE_BASE_URL=$NEXT_PUBLIC_LIVE_BASE_URL
ARG NEXT_PUBLIC_LIVE_BASE_PATH="/live"
ENV NEXT_PUBLIC_LIVE_BASE_PATH=$NEXT_PUBLIC_LIVE_BASE_PATH
ARG NEXT_PUBLIC_SPACE_BASE_URL=""
ENV NEXT_PUBLIC_SPACE_BASE_URL=$NEXT_PUBLIC_SPACE_BASE_URL
ARG NEXT_PUBLIC_SPACE_BASE_PATH="/spaces"
ENV NEXT_PUBLIC_SPACE_BASE_PATH=$NEXT_PUBLIC_SPACE_BASE_PATH
ARG NEXT_PUBLIC_WEB_BASE_URL=""
ENV NEXT_PUBLIC_WEB_BASE_URL=$NEXT_PUBLIC_WEB_BASE_URL
ENV NEXT_TELEMETRY_DISABLED=1
ENV TURBO_TELEMETRY_DISABLED=1
EXPOSE 3000
CMD ["node", "apps/web/server.js"]

View File

@@ -97,7 +97,7 @@ priority=20
[program:live]
directory=/app/live
command=sh -c "node live/server.js"
command=sh -c "node /app/live/apps/live/dist/server.js"
autostart=true
autorestart=true
stdout_logfile=/app/logs/access/live.log
@@ -106,7 +106,7 @@ stderr_logfile=/app/logs/error/live.err.log
# stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=50MB
stderr_logfile_backups=5
environment=PORT=3005,HOSTNAME=0.0.0.0
environment=PORT=3005,HOSTNAME=0.0.0.0,LIVE_BASE_PATH=/live
priority=20
@@ -121,4 +121,4 @@ stderr_logfile=/app/logs/error/proxy.err.log
# stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=50MB
stderr_logfile_backups=5
priority=20
priority=20

View File

@@ -3,19 +3,25 @@ services:
image: ${DOCKERHUB_USER:-local}/plane-frontend:${APP_RELEASE:-latest}
build:
context: ../../
dockerfile: apps/web/Dockerfile.web
dockerfile: Dockerfile.node
args:
APP_SCOPE: web
space:
image: ${DOCKERHUB_USER:-local}/plane-space:${APP_RELEASE:-latest}
build:
context: ../../
dockerfile: apps/space/Dockerfile.space
dockerfile: Dockerfile.node
args:
APP_SCOPE: space
admin:
image: ${DOCKERHUB_USER:-local}/plane-admin:${APP_RELEASE:-latest}
build:
context: ../../
dockerfile: apps/admin/Dockerfile.admin
dockerfile: Dockerfile.node
args:
APP_SCOPE: admin
live:
image: ${DOCKERHUB_USER:-local}/plane-live:${APP_RELEASE:-latest}
@@ -26,7 +32,7 @@ services:
api:
image: ${DOCKERHUB_USER:-local}/plane-backend:${APP_RELEASE:-latest}
build:
context: ../../apps/api
context: ../../
dockerfile: Dockerfile.api
proxy:

132
docker-bake.hcl Normal file
View File

@@ -0,0 +1,132 @@
# docker-bake.hcl
#
# Build all runtime images for Plane services and the AIO assembler image.
# Uses unified Dockerfiles under plane/ for Node (Next.js) apps and API.
#
# Usage examples:
# # Build all individual runtime images (no push)
# docker buildx bake
#
# # Build everything including AIO
# docker buildx bake all
#
# # Build and push with a custom tag and registry/prefix (per-target --set):
# docker buildx bake all --set "*.tags=myrepo/plane-web:1.2.3" --push
#
# # Or override the TAG / IMAGE_PREFIX variables via the environment
# TAG=1.2.3 IMAGE_PREFIX=myrepo/ docker buildx bake all --push
#
# Notes:
# - The "AIO" image composes from the individual images built here.
# - "live" currently uses its app-specific Dockerfile (non-Next.js runtime).
group "default" {
targets = ["web", "space", "admin", "live", "api", "proxy"]
}
group "all" {
targets = ["web", "space", "admin", "live", "api", "proxy", "aio"]
}
group "frontend" {
targets = ["web", "space", "admin"]
}
variable "TAG" {
# Global tag to apply to images
default = "latest"
}
variable "IMAGE_PREFIX" {
# Optional prefix/registry for image tags, e.g. "ghcr.io/makeplane/" or "yourrepo/"
# Leave empty to tag locally (e.g. "plane-web:latest")
default = ""
}
variable "PLATFORMS" {
# List of platforms (e.g., ["linux/amd64", "linux/arm64"])
default = ["linux/amd64"]
}
# Common cache configuration for faster CI builds
target "with-cache" {
# NOTE(review): type=gha cache requires the GitHub Actions runtime
# (ACTIONS_RUNTIME_TOKEN / cache URL); outside CI the cache export may be
# skipped or warn — confirm these targets are intended for CI-only caching.
cache-from = ["type=gha"]
cache-to = ["type=gha,mode=max"]
}
# Common base for Next.js apps using the unified Dockerfile
target "common-node" {
inherits = ["with-cache"]
dockerfile = "Dockerfile.node"
context = "."
# A lone "${PLATFORMS}" interpolation preserves the list type in HCL.
platforms = "${PLATFORMS}"
}
# Frontend apps (Next.js standalone runtime)
target "web" {
inherits = ["common-node"]
target = "runtime"
args = { APP_SCOPE = "web" }
tags = ["${IMAGE_PREFIX}plane-web:${TAG}"]
}
target "space" {
inherits = ["common-node"]
target = "runtime"
args = { APP_SCOPE = "space" }
tags = ["${IMAGE_PREFIX}plane-space:${TAG}"]
}
target "admin" {
inherits = ["common-node"]
target = "runtime"
args = { APP_SCOPE = "admin" }
tags = ["${IMAGE_PREFIX}plane-admin:${TAG}"]
}
# Live app (Node service; not Next.js standalone)
# Keeps its dedicated Dockerfile to match current build/run layout
target "live" {
inherits = ["with-cache"]
dockerfile = "apps/live/Dockerfile.live"
context = "."
platforms = "${PLATFORMS}"
tags = ["${IMAGE_PREFIX}plane-live:${TAG}"]
}
# Python API (unified Dockerfile)
target "api" {
inherits = ["with-cache"]
dockerfile = "Dockerfile.api"
context = "."
target = "runtime"
platforms = "${PLATFORMS}"
tags = ["${IMAGE_PREFIX}plane-api:${TAG}"]
}
# Proxy (Caddy with plugins)
target "proxy" {
inherits = ["with-cache"]
dockerfile = "Dockerfile.ce"
context = "apps/proxy"
platforms = "${PLATFORMS}"
tags = ["${IMAGE_PREFIX}plane-proxy:${TAG}"]
}
# All-in-one assembler image
# Composes from previously built runtime images; override args if you use different tags.
target "aio" {
inherits = ["with-cache"]
dockerfile = "Dockerfile.aio"
context = "."
platforms = "${PLATFORMS}"
contexts = {
web_ctx = "target:web"
space_ctx = "target:space"
admin_ctx = "target:admin"
live_ctx = "target:live"
api_ctx = "target:api"
proxy_ctx = "target:proxy"
}
tags = ["${IMAGE_PREFIX}plane-aio:${TAG}"]
}

View File

@@ -1,9 +1,134 @@
# Local development stack with hot reload for web/space/admin/live and Django API.
# Uses unified dev targets in plane/Dockerfile.node and plane/Dockerfile.api.
# Infra services (Postgres, Redis, RabbitMQ, MinIO) are included.
services:
# Django API in dev mode (bind mounts for instant reload)
api:
build:
context: .
dockerfile: Dockerfile.api
target: dev
restart: unless-stopped
working_dir: /code
command: ./bin/docker-entrypoint-api-local.sh
env_file:
- ./apps/api/.env
environment:
USE_MINIO: "1"
AWS_S3_ENDPOINT_URL: http://plane-minio:9000
MINIO_ENDPOINT_URL: http://plane-minio:9000
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-minioadmin}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
AWS_REGION: ${AWS_REGION:-us-east-1}
WEB_URL: ${WEB_URL:-http://localhost:3001}
DJANGO_DEBUG_TOOLBAR: ${DJANGO_DEBUG_TOOLBAR:-0}
volumes:
- ./apps/api:/code
depends_on:
- plane-db
- plane-redis
- plane-mq
- plane-minio
ports:
- "8000:8000"
worker:
build:
context: .
dockerfile: Dockerfile.api
target: dev
restart: unless-stopped
working_dir: /code
command: ./bin/docker-entrypoint-worker.sh
env_file:
- ./apps/api/.env
environment:
USE_MINIO: "1"
AWS_S3_ENDPOINT_URL: http://plane-minio:9000
MINIO_ENDPOINT_URL: http://plane-minio:9000
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-minioadmin}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
AWS_REGION: ${AWS_REGION:-us-east-1}
volumes:
- ./apps/api:/code
depends_on:
- api
- plane-db
- plane-redis
- plane-mq
beat-worker:
build:
context: .
dockerfile: Dockerfile.api
target: dev
restart: unless-stopped
working_dir: /code
command: ./bin/docker-entrypoint-beat.sh
env_file:
- ./apps/api/.env
environment:
USE_MINIO: "1"
AWS_S3_ENDPOINT_URL: http://plane-minio:9000
MINIO_ENDPOINT_URL: http://plane-minio:9000
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-minioadmin}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
AWS_REGION: ${AWS_REGION:-us-east-1}
volumes:
- ./apps/api:/code
depends_on:
- api
- plane-db
- plane-redis
- plane-mq
migrator:
build:
context: .
dockerfile: Dockerfile.api
target: dev
restart: "no"
working_dir: /code
command: ./bin/docker-entrypoint-migrator.sh --settings=plane.settings.local
env_file:
- ./apps/api/.env
environment:
USE_MINIO: "1"
AWS_S3_ENDPOINT_URL: http://plane-minio:9000
MINIO_ENDPOINT_URL: http://plane-minio:9000
AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID:-minioadmin}
AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
AWS_S3_BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
AWS_REGION: ${AWS_REGION:-us-east-1}
volumes:
- ./apps/api:/code
depends_on:
- plane-db
- plane-redis
- plane-minio
# Infra
plane-db:
image: postgres:15.7-alpine
restart: unless-stopped
command: "postgres -c 'max_connections=1000'"
environment:
POSTGRES_USER: ${POSTGRES_USER:-plane}
POSTGRES_DB: ${POSTGRES_DB:-plane}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-plane}
PGDATA: /var/lib/postgresql/data
volumes:
- pgdata:/var/lib/postgresql/data
ports:
- "5432:5432"
plane-redis:
image: valkey/valkey:7.2.5-alpine
restart: unless-stopped
networks:
- dev_env
volumes:
- redisdata:/data
ports:
@@ -12,218 +137,43 @@ services:
plane-mq:
image: rabbitmq:3.13.6-management-alpine
restart: unless-stopped
networks:
- dev_env
environment:
RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER:-plane}
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASSWORD:-plane}
RABBITMQ_DEFAULT_VHOST: ${RABBITMQ_VHOST:-plane}
volumes:
- rabbitmq_data:/var/lib/rabbitmq
env_file:
- .env
environment:
RABBITMQ_DEFAULT_USER: ${RABBITMQ_USER}
RABBITMQ_DEFAULT_PASS: ${RABBITMQ_PASSWORD}
RABBITMQ_DEFAULT_VHOST: ${RABBITMQ_VHOST}
ports:
- "15672:15672"
plane-minio:
image: minio/minio
restart: unless-stopped
networks:
- dev_env
# Bootstrap bucket on first run; suitable for local smoke/dev only
entrypoint: >
/bin/sh -c "
mkdir -p /export/${AWS_S3_BUCKET_NAME} &&
minio server /export --console-address ':9090' &
sleep 5 &&
mc alias set myminio http://localhost:9000 ${AWS_ACCESS_KEY_ID} ${AWS_SECRET_ACCESS_KEY} &&
mc mb myminio/${AWS_S3_BUCKET_NAME} -p || true
&& tail -f /dev/null
set -e
mkdir -p /export/${AWS_S3_BUCKET_NAME:-uploads};
minio server /export --console-address ':9090' & pid=$!;
sleep 5;
mc alias set myminio http://plane-minio:9000 ${AWS_ACCESS_KEY_ID:-minioadmin} ${AWS_SECRET_ACCESS_KEY:-minioadmin};
mc mb myminio/${AWS_S3_BUCKET_NAME:-uploads} -p || true;
wait $pid
"
environment:
MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID:-minioadmin}
MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY:-minioadmin}
volumes:
- uploads:/export
env_file:
- .env
environment:
MINIO_ROOT_USER: ${AWS_ACCESS_KEY_ID}
MINIO_ROOT_PASSWORD: ${AWS_SECRET_ACCESS_KEY}
ports:
- "9000:9000"
- "9090:9090"
plane-db:
image: postgres:15.7-alpine
restart: unless-stopped
networks:
- dev_env
command: postgres -c 'max_connections=1000'
volumes:
- pgdata:/var/lib/postgresql/data
env_file:
- .env
environment:
PGDATA: /var/lib/postgresql/data
ports:
- "5432:5432"
# web:
# build:
# context: .
# dockerfile: ./web/Dockerfile.dev
# restart: unless-stopped
# networks:
# - dev_env
# volumes:
# - ./web:/app/web
# env_file:
# - ./web/.env
# depends_on:
# - api
# - worker
# space:
# build:
# context: .
# dockerfile: ./space/Dockerfile.dev
# restart: unless-stopped
# networks:
# - dev_env
# volumes:
# - ./space:/app/space
# depends_on:
# - api
# - worker
# - web
# admin:
# build:
# context: .
# dockerfile: ./admin/Dockerfile.dev
# restart: unless-stopped
# networks:
# - dev_env
# volumes:
# - ./admin:/app/admin
# depends_on:
# - api
# - worker
# - web
# live:
# build:
# context: .
# dockerfile: ./live/Dockerfile.dev
# restart: unless-stopped
# networks:
# - dev_env
# volumes:
# - ./live:/app/live
# depends_on:
# - api
# - worker
# - web
api:
build:
context: ./apps/api
dockerfile: Dockerfile.dev
args:
DOCKER_BUILDKIT: 1
restart: unless-stopped
networks:
- dev_env
volumes:
- ./apps/api:/code
command: ./bin/docker-entrypoint-api-local.sh
env_file:
- ./apps/api/.env
depends_on:
- plane-db
- plane-redis
- plane-mq
ports:
- "8000:8000"
worker:
build:
context: ./apps/api
dockerfile: Dockerfile.dev
args:
DOCKER_BUILDKIT: 1
restart: unless-stopped
networks:
- dev_env
volumes:
- ./apps/api:/code
command: ./bin/docker-entrypoint-worker.sh
env_file:
- ./apps/api/.env
depends_on:
- api
- plane-db
- plane-redis
beat-worker:
build:
context: ./apps/api
dockerfile: Dockerfile.dev
args:
DOCKER_BUILDKIT: 1
restart: unless-stopped
networks:
- dev_env
volumes:
- ./apps/api:/code
command: ./bin/docker-entrypoint-beat.sh
env_file:
- ./apps/api/.env
depends_on:
- api
- plane-db
- plane-redis
migrator:
build:
context: ./apps/api
dockerfile: Dockerfile.dev
args:
DOCKER_BUILDKIT: 1
restart: "no"
networks:
- dev_env
volumes:
- ./apps/api:/code
command: ./bin/docker-entrypoint-migrator.sh --settings=plane.settings.local
env_file:
- ./apps/api/.env
depends_on:
- plane-db
- plane-redis
# proxy:
# build:
# context: ./apps/proxy
# dockerfile: Dockerfile.ce
# restart: unless-stopped
# networks:
# - dev_env
# ports:
# - ${LISTEN_HTTP_PORT}:80
# - ${LISTEN_HTTPS_PORT}:443
# env_file:
# - .env
# environment:
# FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880}
# BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
# depends_on:
# - api
# - web
# - space
# - admin
volumes:
redisdata:
uploads:
pgdata:
rabbitmq_data:
# Node/pnpm caches for speedy dev
networks:
dev_env:
driver: bridge
# Infra data
pgdata:
redisdata:
rabbitmq_data:
uploads:

View File

@@ -3,9 +3,10 @@ services:
container_name: web
build:
context: .
dockerfile: ./apps/web/Dockerfile.web
dockerfile: ./Dockerfile.node
args:
DOCKER_BUILDKIT: 1
APP_SCOPE: web
restart: always
depends_on:
- api
@@ -14,9 +15,10 @@ services:
container_name: admin
build:
context: .
dockerfile: ./apps/admin/Dockerfile.admin
dockerfile: ./Dockerfile.node
args:
DOCKER_BUILDKIT: 1
APP_SCOPE: admin
restart: always
depends_on:
- api
@@ -26,9 +28,10 @@ services:
container_name: space
build:
context: .
dockerfile: ./apps/space/Dockerfile.space
dockerfile: ./Dockerfile.node
args:
DOCKER_BUILDKIT: 1
APP_SCOPE: space
restart: always
depends_on:
- api
@@ -37,8 +40,8 @@ services:
api:
container_name: api
build:
context: ./apps/api
dockerfile: Dockerfile.api
context: .
dockerfile: ./Dockerfile.api
args:
DOCKER_BUILDKIT: 1
restart: always
@@ -52,8 +55,8 @@ services:
worker:
container_name: bgworker
build:
context: ./apps/api
dockerfile: Dockerfile.api
context: .
dockerfile: ./Dockerfile.api
args:
DOCKER_BUILDKIT: 1
restart: always
@@ -68,8 +71,8 @@ services:
beat-worker:
container_name: beatworker
build:
context: ./apps/api
dockerfile: Dockerfile.api
context: .
dockerfile: ./Dockerfile.api
args:
DOCKER_BUILDKIT: 1
restart: always
@@ -84,8 +87,8 @@ services:
migrator:
container_name: plane-migrator
build:
context: ./apps/api
dockerfile: Dockerfile.api
context: .
dockerfile: ./Dockerfile.api
args:
DOCKER_BUILDKIT: 1
restart: no

14
mise.toml Normal file
View File

@@ -0,0 +1,14 @@
# Repo-wide tool versions managed by mise
# See: https://mise.jdx.dev
[tools]
# Node LTS "Jod" to match .nvmrc and package.json engines
node = "22.18.0"
# Python version to match Dockerfile.api
python = "3.12.10"
# Global CLI utilities installed via pipx
# This provides the `ruff` command used by CI and local checks
pipx = "latest"
"pipx:ruff" = "0.9.7"

320
scripts/smoke-aio.sh Executable file
View File

@@ -0,0 +1,320 @@
#!/usr/bin/env bash
# plane/scripts/smoke-aio.sh
# Smoke test for the all-in-one (AIO) image. It:
# - Runs the plane-aio image with minimal required environment
# - Waits for Caddy to come up
# - Probes proxied endpoints:
# / (web)
# /spaces (space)
# /god-mode (admin)
# /live/health (live)
# - Prints container logs on failure and exits non-zero
#
# Usage examples:
# ./scripts/smoke-aio.sh
# ./scripts/smoke-aio.sh --image yourrepo/plane-aio:latest --port 18080
# ./scripts/smoke-aio.sh --attempts 90 --sleep 1 --keep
# ./scripts/smoke-aio.sh --docker-flags "--network host"
#
# Dependencies: bash, docker, curl
set -euo pipefail
# -------------------------------
# Defaults
# -------------------------------
IMAGE_DEFAULT="plane-aio:latest"
HOST_DEFAULT="127.0.0.1"
PORT_DEFAULT=8080
ATTEMPTS_DEFAULT=60
SLEEP_DEFAULT=2
KEEP_DEFAULT=0
PULL_DEFAULT=0
DOCKER_FLAGS_DEFAULT=""
# Required env for AIO (values are placeholders for smoke only)
DOMAIN_NAME_DEFAULT="localhost"
DATABASE_URL_DEFAULT="postgresql://plane:plane@127.0.0.1:15432/plane"
REDIS_URL_DEFAULT="redis://127.0.0.1:16379"
AMQP_URL_DEFAULT="amqp://plane:plane@127.0.0.1:15673/plane"
AWS_REGION_DEFAULT="us-east-1"
AWS_ACCESS_KEY_ID_DEFAULT="smoke"
AWS_SECRET_ACCESS_KEY_DEFAULT="smoke"
AWS_S3_BUCKET_NAME_DEFAULT="smoke-bucket"
AWS_S3_ENDPOINT_URL_DEFAULT="http://127.0.0.1:19000"
SITE_ADDRESS_DEFAULT=":80"
FILE_SIZE_LIMIT_DEFAULT="5242880"
# -------------------------------
# State
# -------------------------------
IMAGE="$IMAGE_DEFAULT"
HOST="$HOST_DEFAULT"
PORT="$PORT_DEFAULT"
ATTEMPTS="$ATTEMPTS_DEFAULT"
SLEEP_SECS="$SLEEP_DEFAULT"
KEEP="$KEEP_DEFAULT"
PULL="$PULL_DEFAULT"
DOCKER_FLAGS="$DOCKER_FLAGS_DEFAULT"
# AIO env values (overridable via flags)
DOMAIN_NAME="$DOMAIN_NAME_DEFAULT"
DATABASE_URL="$DATABASE_URL_DEFAULT"
REDIS_URL="$REDIS_URL_DEFAULT"
AMQP_URL="$AMQP_URL_DEFAULT"
AWS_REGION="$AWS_REGION_DEFAULT"
AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID_DEFAULT"
AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY_DEFAULT"
AWS_S3_BUCKET_NAME="$AWS_S3_BUCKET_NAME_DEFAULT"
AWS_S3_ENDPOINT_URL="$AWS_S3_ENDPOINT_URL_DEFAULT"
SITE_ADDRESS="$SITE_ADDRESS_DEFAULT"
FILE_SIZE_LIMIT="$FILE_SIZE_LIMIT_DEFAULT"
TMP_DIR="$(mktemp -d -t plane-aio-smoke.XXXXXX)"
CONTAINER="plane-aio-smoke-$$"
# Endpoints to probe via Caddy
# Endpoint selection flags (1 = test, 0 = skip)
RUN_WEB=1
RUN_SPACE=1
RUN_ADMIN=1
RUN_LIVE=1
# Build PATHS dynamically later based on RUN_* flags
declare -a PATHS=()
declare -A NAMES=( ["/"]="web" ["/spaces"]="space" ["/god-mode"]="admin" ["/live/health"]="live" )
FAILURES=0
# -------------------------------
# Helpers
# -------------------------------
usage() {
# Print CLI help for this smoke script to stdout.
# The heredoc body is user-facing output — keep its text verbatim.
cat <<EOF
Usage: $(basename "$0") [options]
Options:
--image <name:tag> AIO image to run (default: ${IMAGE_DEFAULT})
--host <ip/host> Hostname/IP to probe (default: ${HOST_DEFAULT})
--port <port> Host port to map container 80 to (default: ${PORT_DEFAULT})
--attempts <n> Attempts before failure (default: ${ATTEMPTS_DEFAULT})
--sleep <seconds> Sleep between attempts (default: ${SLEEP_DEFAULT})
--keep Do not remove the container after the test
--pull docker pull the image before run
--docker-flags "<flags>" Extra flags for docker run (e.g., --network)
# Override required env vars for AIO start script:
--domain-name <val> (default: ${DOMAIN_NAME_DEFAULT})
--database-url <val> (default: ${DATABASE_URL_DEFAULT})
--redis-url <val> (default: ${REDIS_URL_DEFAULT})
--amqp-url <val> (default: ${AMQP_URL_DEFAULT})
--aws-region <val> (default: ${AWS_REGION_DEFAULT})
--aws-access-key-id <val> (default: ${AWS_ACCESS_KEY_ID_DEFAULT})
--aws-secret-access-key <val> (default: ${AWS_SECRET_ACCESS_KEY_DEFAULT})
--aws-s3-bucket-name <val> (default: ${AWS_S3_BUCKET_NAME_DEFAULT})
--aws-s3-endpoint-url <val> (default: ${AWS_S3_ENDPOINT_URL_DEFAULT})
--site-address <val> (default: ${SITE_ADDRESS_DEFAULT})
--file-size-limit <bytes> (default: ${FILE_SIZE_LIMIT_DEFAULT})
-h, --help Show help and exit
Examples:
$(basename "$0")
$(basename "$0") --image yourrepo/plane-aio:latest --port 18080 --attempts 90 --sleep 1
EOF
}
exists_cmd() { command -v "$1" >/dev/null 2>&1; }  # true if "$1" is on PATH
log() { printf "%s\n" "$*"; }  # plain line to stdout
log_ok() { printf "\033[32m%s\033[0m\n" "$*"; }  # green: success
log_warn() { printf "\033[33m%s\033[0m\n" "$*"; }  # yellow: warning
log_err() { printf "\033[31m%s\033[0m\n" "$*"; }  # red: error
cleanup() {
# Trap handler for EXIT/INT/TERM: remove the smoke container (unless --keep
# was requested), always delete the temp dir, then re-raise the original
# exit status so the caller sees the real result.
local code=$?
if [[ $KEEP -eq 0 ]]; then
docker rm -f "$CONTAINER" >/dev/null 2>&1 || true
else
log "Keeping container: $CONTAINER"
fi
rm -rf "$TMP_DIR" >/dev/null 2>&1 || true
exit $code
}
is_port_in_use() {
  # Report success (0) when something already listens on host:port, 1 otherwise.
  # Detection order: ss -> lsof -> netstat -> a 1-second curl probe that treats
  # any HTTP response as "in use".
  # NOTE(review): the "\>" word boundary is a GNU grep extension — confirm on BSD/macOS.
  local probe_host="$1" probe_port="$2"
  if exists_cmd ss; then
    ss -ltn 2>/dev/null | awk '{print $4}' | grep -q ":${probe_port}\\>" && return 0
    return 1
  fi
  if exists_cmd lsof; then
    lsof -iTCP:"$probe_port" -sTCP:LISTEN >/dev/null 2>&1 && return 0
    return 1
  fi
  if exists_cmd netstat; then
    netstat -ltn 2>/dev/null | awk '{print $4}' | grep -q ":${probe_port}\\>" && return 0
    return 1
  fi
  local resp
  resp="$(curl -sS -m 1 -o /dev/null -w "%{http_code}" "http://${probe_host}:${probe_port}/" || true)"
  [[ "$resp" != "000" ]] && return 0
  return 1
}
# -------------------------------
# Arg parsing
# -------------------------------
while [[ $# -gt 0 ]]; do
case "$1" in
--image) IMAGE="$2"; shift 2;;
--host) HOST="$2"; shift 2;;
--port) PORT="$2"; shift 2;;
--attempts) ATTEMPTS="$2"; shift 2;;
--sleep) SLEEP_SECS="$2"; shift 2;;
--keep) KEEP=1; shift;;
--pull) PULL=1; shift;;
--docker-flags) DOCKER_FLAGS="$2"; shift 2;;
--domain-name) DOMAIN_NAME="$2"; shift 2;;
--database-url) DATABASE_URL="$2"; shift 2;;
--redis-url) REDIS_URL="$2"; shift 2;;
--amqp-url) AMQP_URL="$2"; shift 2;;
--aws-region) AWS_REGION="$2"; shift 2;;
--aws-access-key-id) AWS_ACCESS_KEY_ID="$2"; shift 2;;
--aws-secret-access-key) AWS_SECRET_ACCESS_KEY="$2"; shift 2;;
--aws-s3-bucket-name) AWS_S3_BUCKET_NAME="$2"; shift 2;;
--aws-s3-endpoint-url) AWS_S3_ENDPOINT_URL="$2"; shift 2;;
--site-address) SITE_ADDRESS="$2"; shift 2;;
--file-size-limit) FILE_SIZE_LIMIT="$2"; shift 2;;
--skip-web) RUN_WEB=0; shift;;
--skip-space) RUN_SPACE=0; shift;;
--skip-admin) RUN_ADMIN=0; shift;;
--skip-live) RUN_LIVE=0; shift;;
-h|--help) usage; exit 0;;
*) log_err "Unknown arg: $1"; usage; exit 1;;
esac
done
trap cleanup EXIT INT TERM
# -------------------------------
# Pre-flight
# -------------------------------
exists_cmd docker || { log_err "docker is required"; exit 1; }
exists_cmd curl || { log_err "curl is required"; exit 1; }
log "AIO smoke starting..."
log " Image: ${IMAGE}"
log " Host: ${HOST}"
log " Port: ${PORT}"
log " Attempts: ${ATTEMPTS}"
log " Sleep (s): ${SLEEP_SECS}"
log " Keep: ${KEEP}"
log " Pull: ${PULL}"
log " Docker flags: ${DOCKER_FLAGS:-<none>}"
# Parse DOCKER_FLAGS string into an array for safe docker invocation
if [[ -n "${DOCKER_FLAGS:-}" ]]; then
read -r -a DOCKER_FLAGS_ARRAY <<< "$DOCKER_FLAGS"
else
DOCKER_FLAGS_ARRAY=()
fi
if is_port_in_use "$HOST" "$PORT"; then
log_err "Port ${HOST}:${PORT} appears to be in use. Use --port to override."
exit 1
fi
if [[ $PULL -eq 1 ]]; then
log "Pulling ${IMAGE} ..."
docker pull "$IMAGE" >/dev/null
fi
# -------------------------------
# Run container
# -------------------------------
log "Starting AIO container: ${CONTAINER}"
if ! docker run -d --name "$CONTAINER" \
-p "${PORT}:80" \
-e DOMAIN_NAME="$DOMAIN_NAME" \
-e DATABASE_URL="$DATABASE_URL" \
-e REDIS_URL="$REDIS_URL" \
-e AMQP_URL="$AMQP_URL" \
-e AWS_REGION="$AWS_REGION" \
-e AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \
-e AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \
-e AWS_S3_BUCKET_NAME="$AWS_S3_BUCKET_NAME" \
-e AWS_S3_ENDPOINT_URL="$AWS_S3_ENDPOINT_URL" \
-e SITE_ADDRESS="$SITE_ADDRESS" \
-e FILE_SIZE_LIMIT="$FILE_SIZE_LIMIT" \
"${DOCKER_FLAGS_ARRAY[@]}" \
"$IMAGE" >"$TMP_DIR/run.out" 2>"$TMP_DIR/run.err"
then
log_err "Failed to start AIO container"
cat "$TMP_DIR/run.err" 1>&2 || true
exit 1
fi
# -------------------------------
# Select endpoints to probe
# -------------------------------
PATHS=()
[[ $RUN_WEB -eq 1 ]] && PATHS+=("/")
[[ $RUN_SPACE -eq 1 ]] && PATHS+=("/spaces")
[[ $RUN_ADMIN -eq 1 ]] && PATHS+=("/god-mode")
[[ $RUN_LIVE -eq 1 ]] && PATHS+=("/live/health")
# -------------------------------
# Probe endpoints
# -------------------------------
# For each selected path, poll until HTTP 200 or the attempt budget runs out.
# Dumps container logs once after a few failed attempts, and aborts the
# endpoint early when the container itself has exited.
# Fix: an early container exit previously incremented FAILURES inside the
# loop AND again in the post-loop check, double-counting one failure; the
# endpoint_failed flag ensures each endpoint is counted at most once.
for path in "${PATHS[@]}"; do
name="${NAMES[$path]}"
url="http://${HOST}:${PORT}${path}"
i=1
status=""
early_logs_printed=0
endpoint_failed=0
log "Probing ${name}: ${url}"
while [[ $i -le $ATTEMPTS ]]; do
errfile="$TMP_DIR/${name}.curl.err"
status="$(curl --connect-timeout 5 --max-time 10 -sS -o /dev/null -w "%{http_code}" -L "$url" 2>"$errfile" || true)"
if [[ "$status" == "200" ]]; then
log_ok "Success: ${name} responded 200 at ${url}"
break
fi
# Print early container logs after a few failures
if [[ $i -eq 5 && $early_logs_printed -eq 0 ]]; then
log "----- Early container logs (${CONTAINER}) -----"
docker logs "$CONTAINER" || true
log "-----------------------------------------------"
early_logs_printed=1
fi
# Detect container exit
state="$(docker ps -a --filter "name=${CONTAINER}" --format '{{.Status}}' || true)"
if [[ -n "$state" ]] && echo "$state" | grep -qi "^exited"; then
log_err "Container exited early: ${state}"
log "----- Container logs (${CONTAINER}) -----"
docker logs "$CONTAINER" || true
log "-----------------------------------------"
FAILURES=$((FAILURES+1))
endpoint_failed=1
break
fi
log "Waiting ${name} (attempt ${i}/${ATTEMPTS}) -> HTTP ${status}, retrying in ${SLEEP_SECS}s"
sleep "$SLEEP_SECS"
i=$((i+1))
done
# Count the timeout failure only when it was not already counted above.
if [[ "$status" != "200" && $endpoint_failed -eq 0 ]]; then
log_err "Failure: ${name} did not return 200 at ${url}"
FAILURES=$((FAILURES+1))
fi
done
if [[ $FAILURES -gt 0 ]]; then
log_err "AIO smoke finished with ${FAILURES} failure(s)"
exit 1
fi
log_ok "AIO smoke finished successfully"
exit 0

404
scripts/smoke.sh Executable file
View File

@@ -0,0 +1,404 @@
#!/usr/bin/env bash
# plane/scripts/smoke.sh
# Portable smoke test for Plane frontend images (web, space, admin, live).
# - Runs each image in a container bound to a host port
# - Probes an HTTP path until it returns 200 OK (or times out)
# - Prints container logs on failure and exits non-zero
#
# Examples:
#   ./scripts/smoke.sh
#   ./scripts/smoke.sh --web plane-web:latest --space plane-space:latest
#   ./scripts/smoke.sh --image-prefix local/ --attempts 90 --sleep 1
#   ./scripts/smoke.sh --parallel --keep
#
# Dependencies: bash, docker, curl
set -euo pipefail
# -------------------------------
# Defaults
# -------------------------------
# Image refs; overridable via --web/--space/--admin/--live or --image-prefix.
WEB_IMAGE_DEFAULT="plane-web:latest"
SPACE_IMAGE_DEFAULT="plane-space:latest"
ADMIN_IMAGE_DEFAULT="plane-admin:latest"
LIVE_IMAGE_DEFAULT="plane-live:latest"
# Host ports; each container's internal port 3000 is published to one of these
# (see the "-p ${port}:3000" mapping in run_and_probe).
WEB_PORT_DEFAULT=3001
SPACE_PORT_DEFAULT=3002
ADMIN_PORT_DEFAULT=3003
LIVE_PORT_DEFAULT=3005
# HTTP paths probed until they answer 200.
WEB_PATH_DEFAULT="/"
SPACE_PATH_DEFAULT="/spaces"
ADMIN_PATH_DEFAULT="/god-mode"
LIVE_PATH_DEFAULT="/live/health"
ATTEMPTS_DEFAULT=60  # max probe attempts per service
SLEEP_DEFAULT=2      # seconds between attempts
KEEP_DEFAULT=0 # 1 = keep containers after run
PULL_DEFAULT=0 # 1 = docker pull before run
PARALLEL_DEFAULT=0 # 1 = run tests in parallel
HOST_DEFAULT="127.0.0.1"
DOCKER_FLAGS_DEFAULT=""
# Live specific env (can be overridden via --live-env)
LIVE_ENV_DEFAULT="-e NODE_ENV=production -e LIVE_BASE_PATH=/live"
# -------------------------------
# State (mutated by the arg-parsing loop below)
# -------------------------------
WEB_IMAGE="$WEB_IMAGE_DEFAULT"
SPACE_IMAGE="$SPACE_IMAGE_DEFAULT"
ADMIN_IMAGE="$ADMIN_IMAGE_DEFAULT"
LIVE_IMAGE="$LIVE_IMAGE_DEFAULT"
WEB_PORT="$WEB_PORT_DEFAULT"
SPACE_PORT="$SPACE_PORT_DEFAULT"
ADMIN_PORT="$ADMIN_PORT_DEFAULT"
LIVE_PORT="$LIVE_PORT_DEFAULT"
WEB_PATH="$WEB_PATH_DEFAULT"
SPACE_PATH="$SPACE_PATH_DEFAULT"
ADMIN_PATH="$ADMIN_PATH_DEFAULT"
LIVE_PATH="$LIVE_PATH_DEFAULT"
ATTEMPTS="$ATTEMPTS_DEFAULT"
SLEEP_SECS="$SLEEP_DEFAULT"
KEEP="$KEEP_DEFAULT"
PULL="$PULL_DEFAULT"
PARALLEL="$PARALLEL_DEFAULT"
HOST="$HOST_DEFAULT"
DOCKER_FLAGS="$DOCKER_FLAGS_DEFAULT"
LIVE_ENV="$LIVE_ENV_DEFAULT"
# Per-service enable flags (cleared by --skip-*).
RUN_WEB=1
RUN_SPACE=1
RUN_ADMIN=1
RUN_LIVE=1
# Scratch dir for container stderr, curl errors and per-service status files;
# removed by cleanup() on exit.
TMP_DIR="$(mktemp -d -t plane-smoke.XXXXXX)"
# Started container names are appended here so cleanup() can remove them.
CONTAINERS_FILE="$TMP_DIR/containers.txt"
: > "$CONTAINERS_FILE"
# NOTE(review): CONTAINERS appears unused in this script (tracking goes
# through CONTAINERS_FILE) — candidate for removal; verify no external use.
CONTAINERS=()
PIDS=()
FAILURES=0
# -------------------------------
# Helpers
# -------------------------------
# Print CLI usage/help text to stdout.
# NOTE: the heredoc body is program output — do not add comments inside it,
# they would be printed verbatim to the user.
usage() {
  cat <<EOF
Usage: $(basename "$0") [options]
Options:
  --web <image[:tag]> Image for web (default: ${WEB_IMAGE_DEFAULT})
  --space <image[:tag]> Image for space (default: ${SPACE_IMAGE_DEFAULT})
  --admin <image[:tag]> Image for admin (default: ${ADMIN_IMAGE_DEFAULT})
  --live <image[:tag]> Image for live (default: ${LIVE_IMAGE_DEFAULT})
  --skip-web Skip web
  --skip-space Skip space
  --skip-admin Skip admin
  --skip-live Skip live
  --web-port <port> Host port for web (default: ${WEB_PORT_DEFAULT})
  --space-port <port> Host port for space (default: ${SPACE_PORT_DEFAULT})
  --admin-port <port> Host port for admin (default: ${ADMIN_PORT_DEFAULT})
  --live-port <port> Host port for live (default: ${LIVE_PORT_DEFAULT})
  --web-path <path> Path to probe for web (default: ${WEB_PATH_DEFAULT})
  --space-path <path> Path to probe for space (default: ${SPACE_PATH_DEFAULT})
  --admin-path <path> Path to probe for admin (default: ${ADMIN_PATH_DEFAULT})
  --live-path <path> Path to probe for live (default: ${LIVE_PATH_DEFAULT})
  --host <ip/host> Hostname/IP to probe (default: ${HOST_DEFAULT})
  --attempts <n> Attempts before failure (default: ${ATTEMPTS_DEFAULT})
  --sleep <seconds> Sleep between attempts (default: ${SLEEP_DEFAULT})
  --pull docker pull each image before run
  --keep Do not remove containers after tests
  --parallel Run tests in parallel
  --docker-flags "<flags>" Extra flags passed to docker run (e.g. --network)
  --image-prefix <prefix> Prefix for all images (e.g. "local/"), overrides individual images
  --live-env "<flags>" Override env flags for 'live' container (default: ${LIVE_ENV_DEFAULT})
  -h, --help Show this help and exit
Examples:
  $(basename "$0")
  $(basename "$0") --web plane-web:latest --space plane-space:latest
  $(basename "$0") --image-prefix ghcr.io/yourorg/ --attempts 90 --sleep 1
EOF
}
# True (exit 0) when the given command name resolves on PATH; quiet otherwise.
exists_cmd() {
  if command -v "$1" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
# Best-effort check whether something is already listening on host:port.
# Tool preference order: ss, then lsof, then netstat; if none exist, fall
# back to an HTTP probe (any response code other than 000 implies a listener).
# Returns 0 when the port appears in use, 1 when it appears free.
is_port_in_use() {
  local probe_host="$1"
  local probe_port="$2"
  if exists_cmd ss; then
    if ss -ltn 2>/dev/null | awk '{print $4}' | grep -q ":${probe_port}\\>"; then
      return 0
    fi
  elif exists_cmd lsof; then
    if lsof -iTCP:"$probe_port" -sTCP:LISTEN >/dev/null 2>&1; then
      return 0
    fi
  elif exists_cmd netstat; then
    if netstat -ltn 2>/dev/null | awk '{print $4}' | grep -q ":${probe_port}\\>"; then
      return 0
    fi
  else
    # No socket-inspection tools available: probe over HTTP instead.
    local code
    code="$(curl -sS -m 1 -o /dev/null -w "%{http_code}" "http://${probe_host}:${probe_port}/" || true)"
    if [[ "$code" != "000" ]]; then
      return 0
    fi
  fi
  return 1
}
# EXIT/INT/TERM trap handler: force-remove every recorded smoke container
# (unless --keep was given), delete the scratch dir, and re-raise the
# original exit code so the script's status is preserved.
cleanup() {
  local exit_code=$?
  if [[ $KEEP -eq 0 && -f "$CONTAINERS_FILE" ]]; then
    # De-duplicate names; removal is best-effort (container may be gone).
    sort -u "$CONTAINERS_FILE" | while IFS= read -r cname; do
      [[ -n "$cname" ]] || continue
      docker rm -f "$cname" >/dev/null 2>&1 || true
    done
  fi
  rm -rf "$TMP_DIR" >/dev/null 2>&1 || true
  exit $exit_code
}
# Line printers: plain, and green/yellow/red via ANSI SGR escape sequences.
log()      { printf '%s\n' "$*"; }
log_ok()   { printf '\033[32m%s\033[0m\n' "$*"; }
log_warn() { printf '\033[33m%s\033[0m\n' "$*"; }
log_err()  { printf '\033[31m%s\033[0m\n' "$*"; }
# Start one service container and poll its HTTP endpoint until 200 or timeout.
#   $1 name      logical service name (web|space|admin|live)
#   $2 image     docker image ref; a ":ci-smoke" tag falls back to ":latest"
#   $3 port      host port to publish (container always listens on 3000)
#   $4 path      HTTP path to probe
#   $5 envflags  extra "docker run" env flags (word-split on purpose)
# Returns 0 on a 200 response; 1 on start failure, early exit, or timeout.
run_and_probe() {
  local name="$1"
  local image="$2"
  local port="$3"
  local path="$4"
  local envflags="$5"
  local container="${name}-smoke-$$"
  if [[ $PULL -eq 1 ]]; then
    log "Pulling ${image} ..."
    docker pull "$image" >/dev/null
  fi
  # Pre-flight: ensure host port is not already in use
  if is_port_in_use "$HOST" "$port"; then
    log_err "Port ${HOST}:${port} appears to be in use; aborting ${name} smoke"
    return 1
  fi
  log "Starting ${name}: image=${image} port=${port} path=${path}"
  # BUG FIX: record the name BEFORE starting. "docker run" can create the
  # container and still fail (e.g. a port-bind race); recording only after a
  # successful start leaked such containers past cleanup().
  printf "%s\n" "$container" >>"$CONTAINERS_FILE"
  # $envflags / $DOCKER_FLAGS are intentionally unquoted so they word-split
  # into separate docker arguments.
  if ! docker run -d --name "$container" -p "${port}:3000" $envflags $DOCKER_FLAGS "$image" >/dev/null 2>"$TMP_DIR/${container}.err"; then
    # If the image tag is :ci-smoke and it fails, try falling back to :latest
    base="${image%:*}"
    tag="${image##*:}"
    if [[ "$tag" == "$image" ]]; then
      tag=""  # image had no explicit tag
    fi
    if [[ "$tag" == "ci-smoke" ]]; then
      alt_image="${base}:latest"
      log_warn "Failed to start ${name} with ${image}, retrying with ${alt_image} ..."
      # BUG FIX: free the name first — if the failed attempt created a (dead)
      # container, re-running with the same --name hits a name conflict.
      docker rm -f "$container" >/dev/null 2>&1 || true
      if ! docker run -d --name "$container" -p "${port}:3000" $envflags $DOCKER_FLAGS "$alt_image" >/dev/null 2>"$TMP_DIR/${container}.err"; then
        log_err "Failed to start container ${container}"
        cat "$TMP_DIR/${container}.err" 1>&2 || true
        return 1
      else
        image="$alt_image"
      fi
    else
      log_err "Failed to start container ${container}"
      cat "$TMP_DIR/${container}.err" 1>&2 || true
      return 1
    fi
  fi
  local url="http://${HOST}:${port}${path}"
  local i=1
  local status=""
  local conn_reset_count=0
  local logs_printed=0
  while [[ $i -le $ATTEMPTS ]]; do
    local errfile="$TMP_DIR/${container}.curl.err"
    status="$(curl -sS -o /dev/null -w "%{http_code}" -L "$url" 2>"$errfile" || true)"
    if [[ "$status" == "200" ]]; then
      log_ok "Success: ${name} responded 200 at ${url}"
      return 0
    fi
    # If repeated connection issues, show early container logs once
    if grep -qiE "connection reset|failed to connect|connection refused" "$errfile" 2>/dev/null; then
      conn_reset_count=$((conn_reset_count+1))
      if [[ $conn_reset_count -ge 3 && $logs_printed -eq 0 ]]; then
        log "----- ${name} early logs (${container}) -----"
        docker logs "$container" || true
        log "---------------------------------------------"
        logs_printed=1
      fi
    fi
    # Detect if container exited early — no point retrying a dead container.
    local state
    state="$(docker ps -a --filter "name=${container}" --format '{{.Status}}' || true)"
    if [[ -n "$state" ]] && echo "$state" | grep -qi "^exited"; then
      log_err "${name} container exited early: ${state}"
      log "----- ${name} container logs (${container}) -----"
      docker logs "$container" || true
      log "-----------------------------------------------"
      return 1
    fi
    log "Waiting ${name} (attempt ${i}/${ATTEMPTS}) -> HTTP ${status}, retrying in ${SLEEP_SECS}s"
    sleep "$SLEEP_SECS"
    i=$((i+1))
  done
  log_err "Failure: ${name} did not return 200 at ${url} after ${ATTEMPTS} attempts"
  log "----- ${name} container logs (${container}) -----"
  docker logs "$container" || true
  log "-----------------------------------------------"
  return 1
}
# -------------------------------
# Parse args
# -------------------------------
# Install the cleanup trap BEFORE parsing: TMP_DIR already exists, and the
# parse loop can exit early (--help / unknown arg) — without the trap those
# exits leaked the temp dir.
trap cleanup EXIT INT TERM
while [[ $# -gt 0 ]]; do
  case "$1" in
    --web) WEB_IMAGE="$2"; shift 2;;
    --space) SPACE_IMAGE="$2"; shift 2;;
    --admin) ADMIN_IMAGE="$2"; shift 2;;
    --live) LIVE_IMAGE="$2"; shift 2;;
    --skip-web) RUN_WEB=0; shift;;
    --skip-space) RUN_SPACE=0; shift;;
    --skip-admin) RUN_ADMIN=0; shift;;
    --skip-live) RUN_LIVE=0; shift;;
    --web-port) WEB_PORT="$2"; shift 2;;
    --space-port) SPACE_PORT="$2"; shift 2;;
    --admin-port) ADMIN_PORT="$2"; shift 2;;
    --live-port) LIVE_PORT="$2"; shift 2;;
    --web-path) WEB_PATH="$2"; shift 2;;
    --space-path) SPACE_PATH="$2"; shift 2;;
    --admin-path) ADMIN_PATH="$2"; shift 2;;
    --live-path) LIVE_PATH="$2"; shift 2;;
    --host) HOST="$2"; shift 2;;
    --attempts) ATTEMPTS="$2"; shift 2;;
    --sleep) SLEEP_SECS="$2"; shift 2;;
    --pull) PULL=1; shift;;
    --keep) KEEP=1; shift;;
    --parallel) PARALLEL=1; shift;;
    --docker-flags) DOCKER_FLAGS="$2"; shift 2;;
    --live-env) LIVE_ENV="$2"; shift 2;;
    --image-prefix)
      # BUG FIX: "local" is only legal inside a function; at top level bash
      # reports "local: can only be used in a function" and, under
      # "set -euo pipefail", aborts the whole script whenever --image-prefix
      # is passed. Use a plain variable instead.
      prefix="$2"
      WEB_IMAGE="${prefix}plane-web:latest"
      SPACE_IMAGE="${prefix}plane-space:latest"
      ADMIN_IMAGE="${prefix}plane-admin:latest"
      LIVE_IMAGE="${prefix}plane-live:latest"
      shift 2
      ;;
    -h|--help) usage; exit 0;;
    *)
      log_err "Unknown argument: $1"
      usage
      exit 1
      ;;
  esac
done
# -------------------------------
# Pre-flight
# -------------------------------
# Hard requirements: docker to run the images, curl to probe them.
exists_cmd docker || { log_err "docker is required"; exit 1; }
exists_cmd curl || { log_err "curl is required"; exit 1; }
# Echo the effective configuration so CI logs show what was actually run.
log "Smoke test starting..."
log " Host: ${HOST}"
log " Attempts: ${ATTEMPTS}"
log " Sleep (s): ${SLEEP_SECS}"
log " Keep: ${KEEP}"
log " Pull: ${PULL}"
log " Parallel: ${PARALLEL}"
log " Docker flags: ${DOCKER_FLAGS:-<none>}"
# -------------------------------
# Run tests
# -------------------------------
# Wrapper used for background (--parallel) runs: reports OK/FAIL through a
# status file ($6) rather than an exit code, which is easier to collect
# after "wait" than per-job exit statuses.
service_job() {
  local name="$1" image="$2" port="$3" path="$4" envflags="$5" out="$6"
  local verdict="FAIL"
  if run_and_probe "$name" "$image" "$port" "$path" "$envflags"; then
    verdict="OK"
  fi
  printf '%s\n' "$verdict" > "$out"
}
# Run the selected services either concurrently (status collected via files)
# or sequentially (status collected via return codes), then summarize.
if [[ $PARALLEL -eq 1 ]]; then
  if [[ $RUN_WEB -eq 1 ]]; then
    service_job "web" "$WEB_IMAGE" "$WEB_PORT" "$WEB_PATH" "" "$TMP_DIR/web.status" &
    PIDS+=($!)
  fi
  if [[ $RUN_SPACE -eq 1 ]]; then
    service_job "space" "$SPACE_IMAGE" "$SPACE_PORT" "$SPACE_PATH" "" "$TMP_DIR/space.status" &
    PIDS+=($!)
  fi
  if [[ $RUN_ADMIN -eq 1 ]]; then
    service_job "admin" "$ADMIN_IMAGE" "$ADMIN_PORT" "$ADMIN_PATH" "" "$TMP_DIR/admin.status" &
    PIDS+=($!)
  fi
  if [[ $RUN_LIVE -eq 1 ]]; then
    service_job "live" "$LIVE_IMAGE" "$LIVE_PORT" "$LIVE_PATH" "$LIVE_ENV" "$TMP_DIR/live.status" &
    PIDS+=($!)
  fi
  # BUG FIX: guard the expansion — under "set -u", "${PIDS[@]}" on an empty
  # array is an "unbound variable" error on bash < 4.4 (e.g. macOS's bash
  # 3.2), aborting the script when every service is skipped.
  if [[ ${#PIDS[@]} -gt 0 ]]; then
    for pid in "${PIDS[@]}"; do
      wait "$pid" || true
    done
  fi
  # Read back each enabled service's verdict; missing file counts as FAIL.
  for svc in web space admin live; do
    [[ $svc == "web" && $RUN_WEB -eq 0 ]] && continue
    [[ $svc == "space" && $RUN_SPACE -eq 0 ]] && continue
    [[ $svc == "admin" && $RUN_ADMIN -eq 0 ]] && continue
    [[ $svc == "live" && $RUN_LIVE -eq 0 ]] && continue
    status_file="$TMP_DIR/${svc}.status"
    if [[ -f "$status_file" && "$(cat "$status_file")" == "OK" ]]; then
      log_ok "${svc}: OK"
    else
      log_err "${svc}: FAIL"
      FAILURES=$((FAILURES+1))
    fi
  done
else
  if [[ $RUN_WEB -eq 1 ]]; then
    if ! run_and_probe "web" "$WEB_IMAGE" "$WEB_PORT" "$WEB_PATH" ""; then FAILURES=$((FAILURES+1)); fi
  fi
  if [[ $RUN_SPACE -eq 1 ]]; then
    if ! run_and_probe "space" "$SPACE_IMAGE" "$SPACE_PORT" "$SPACE_PATH" ""; then FAILURES=$((FAILURES+1)); fi
  fi
  if [[ $RUN_ADMIN -eq 1 ]]; then
    if ! run_and_probe "admin" "$ADMIN_IMAGE" "$ADMIN_PORT" "$ADMIN_PATH" ""; then FAILURES=$((FAILURES+1)); fi
  fi
  if [[ $RUN_LIVE -eq 1 ]]; then
    if ! run_and_probe "live" "$LIVE_IMAGE" "$LIVE_PORT" "$LIVE_PATH" "$LIVE_ENV"; then FAILURES=$((FAILURES+1)); fi
  fi
fi
if [[ $FAILURES -gt 0 ]]; then
  log_err "Smoke test finished with ${FAILURES} failure(s)"
  exit 1
fi
log_ok "Smoke test finished successfully"
exit 0