mirror of
https://github.com/bahdotsh/wrkflw.git
synced 2025-12-29 16:36:38 +01:00
Compare commits
34 Commits
fix/runs-o
...
wrkflw-exe
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5051f71b8b | ||
|
|
64b980d254 | ||
|
|
2d809388a2 | ||
|
|
03af6cb7c1 | ||
|
|
ae52779e11 | ||
|
|
fe7be3e1ae | ||
|
|
30f405ccb9 | ||
|
|
1d56d86ba5 | ||
|
|
f1ca411281 | ||
|
|
797e31e3d3 | ||
|
|
4e66f65de7 | ||
|
|
335886ac70 | ||
|
|
8005cbb7ee | ||
|
|
5b216f59e6 | ||
|
|
7a17d26589 | ||
|
|
6efad9ce96 | ||
|
|
064f7259d7 | ||
|
|
db1d4bcf48 | ||
|
|
250a88ba94 | ||
|
|
cd56ce8506 | ||
|
|
8fc6dcaa6c | ||
|
|
3f7bd30cca | ||
|
|
960f7486a2 | ||
|
|
cb936cd1af | ||
|
|
625b8111f1 | ||
|
|
b2b6e9e08d | ||
|
|
86660ae573 | ||
|
|
886c415fa7 | ||
|
|
460357d9fe | ||
|
|
096ccfa180 | ||
|
|
8765537cfa | ||
|
|
ac708902ef | ||
|
|
d1268d55cf | ||
|
|
a146d94c35 |
25
.github/workflows/release.yml
vendored
25
.github/workflows/release.yml
vendored
@@ -42,7 +42,30 @@ jobs:
|
||||
cargo install git-cliff --force
|
||||
|
||||
- name: Generate Changelog
|
||||
run: git-cliff --latest --output CHANGELOG.md
|
||||
run: |
|
||||
# Debug: Show current state
|
||||
echo "Current ref: ${{ github.ref_name }}"
|
||||
echo "Input version: ${{ github.event.inputs.version }}"
|
||||
echo "All tags:"
|
||||
git tag --sort=-version:refname | head -10
|
||||
|
||||
# Generate changelog from the current tag to the previous version tag
|
||||
CURRENT_TAG="${{ github.event.inputs.version || github.ref_name }}"
|
||||
PREVIOUS_TAG=$(git tag --sort=-version:refname | grep "^v" | head -2 | tail -1)
|
||||
|
||||
echo "Current tag: $CURRENT_TAG"
|
||||
echo "Previous tag: $PREVIOUS_TAG"
|
||||
|
||||
if [ -n "$PREVIOUS_TAG" ] && [ "$PREVIOUS_TAG" != "$CURRENT_TAG" ]; then
|
||||
echo "Generating changelog for range: $PREVIOUS_TAG..$CURRENT_TAG"
|
||||
git-cliff --tag "$CURRENT_TAG" "$PREVIOUS_TAG..$CURRENT_TAG" --output CHANGELOG.md
|
||||
else
|
||||
echo "Generating latest changelog for tag: $CURRENT_TAG"
|
||||
git-cliff --tag "$CURRENT_TAG" --latest --output CHANGELOG.md
|
||||
fi
|
||||
|
||||
echo "Generated changelog:"
|
||||
cat CHANGELOG.md
|
||||
|
||||
- name: Create Release
|
||||
id: create_release
|
||||
|
||||
1426
Cargo.lock
generated
1426
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -5,7 +5,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.6.0"
|
||||
version = "0.7.1"
|
||||
edition = "2021"
|
||||
description = "A GitHub Actions workflow validator and executor"
|
||||
documentation = "https://github.com/bahdotsh/wrkflw"
|
||||
@@ -44,7 +44,7 @@ rayon = "1.7.0"
|
||||
num_cpus = "1.16.0"
|
||||
regex = "1.10"
|
||||
lazy_static = "1.4"
|
||||
reqwest = { version = "0.11", features = ["json"] }
|
||||
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "json"] }
|
||||
libc = "0.2"
|
||||
nix = { version = "0.27.1", features = ["fs"] }
|
||||
urlencoding = "2.1.3"
|
||||
|
||||
23
README.md
23
README.md
@@ -111,6 +111,12 @@ wrkflw validate path/to/workflow.yml
|
||||
# Validate workflows in a specific directory
|
||||
wrkflw validate path/to/workflows
|
||||
|
||||
# Validate multiple files and/or directories (GitHub and GitLab are auto-detected)
|
||||
wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows
|
||||
|
||||
# Force GitLab parsing for all provided paths
|
||||
wrkflw validate --gitlab .gitlab-ci.yml other.gitlab-ci.yml
|
||||
|
||||
# Validate with verbose output
|
||||
wrkflw validate --verbose path/to/workflow.yml
|
||||
|
||||
@@ -437,19 +443,28 @@ jobs:
|
||||
### Runtime Mode Differences
|
||||
- **Docker Mode**: Provides the closest match to GitHub's environment, including support for Docker container actions, service containers, and Linux-based jobs. Some advanced container configurations may still require manual setup.
|
||||
- **Podman Mode**: Similar to Docker mode but uses Podman for container execution. Offers rootless container support and enhanced security. Fully compatible with Docker-based workflows.
|
||||
- **Emulation Mode**: Runs workflows using the local system tools. Limitations:
|
||||
- **🔒 Secure Emulation Mode**: Runs workflows on the local system with comprehensive sandboxing for security. **Recommended for local development**:
|
||||
- Command validation and filtering (blocks dangerous commands like `rm -rf /`, `sudo`, etc.)
|
||||
- Resource limits (CPU, memory, execution time)
|
||||
- Filesystem access controls
|
||||
- Process monitoring and limits
|
||||
- Safe for running untrusted workflows locally
|
||||
- **⚠️ Emulation Mode (Legacy)**: Runs workflows using local system tools without sandboxing. **Not recommended - use Secure Emulation instead**:
|
||||
- Only supports local and JavaScript actions (no Docker container actions)
|
||||
- No support for service containers
|
||||
- No caching support
|
||||
- **No security protections - can execute harmful commands**
|
||||
- Some actions may require adaptation to work locally
|
||||
- Special action handling is more limited
|
||||
|
||||
### Best Practices
|
||||
- Test workflows in both Docker and emulation modes to ensure compatibility
|
||||
- **Use Secure Emulation mode for local development** - provides safety without container overhead
|
||||
- Test workflows in multiple runtime modes to ensure compatibility
|
||||
- **Use Docker/Podman mode for production** - provides maximum isolation and reproducibility
|
||||
- Keep matrix builds reasonably sized for better performance
|
||||
- Use environment variables instead of GitHub secrets when possible
|
||||
- Consider using local actions for complex custom functionality
|
||||
- Test network-dependent actions carefully in both modes
|
||||
- **Review security warnings** - pay attention to blocked commands in secure emulation mode
|
||||
- **Start with secure mode** - only fall back to legacy emulation if necessary
|
||||
|
||||
## Roadmap
|
||||
|
||||
|
||||
12
clippy-test.yml
Normal file
12
clippy-test.yml
Normal file
@@ -0,0 +1,12 @@
|
||||
name: Clippy Test
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Test secrets after clippy fixes
|
||||
env:
|
||||
TEST_VAR: ${{ secrets.TEST_SECRET }}
|
||||
run: |
|
||||
echo "Secret length: ${#TEST_VAR}"
|
||||
@@ -12,9 +12,9 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
colored.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
|
||||
@@ -21,26 +21,9 @@ pub fn evaluate_workflow_file(path: &Path, verbose: bool) -> Result<ValidationRe
|
||||
return Ok(result);
|
||||
}
|
||||
|
||||
// Check if name exists
|
||||
if workflow.get("name").is_none() {
|
||||
// Check if this might be a reusable workflow caller before reporting missing name
|
||||
let has_reusable_workflow_job = if let Some(Value::Mapping(jobs)) = workflow.get("jobs") {
|
||||
jobs.values().any(|job| {
|
||||
if let Some(job_config) = job.as_mapping() {
|
||||
job_config.contains_key(Value::String("uses".to_string()))
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
// Only report missing name if it's not a workflow with reusable workflow jobs
|
||||
if !has_reusable_workflow_job {
|
||||
result.add_issue("Workflow is missing a name".to_string());
|
||||
}
|
||||
}
|
||||
// Note: The 'name' field is optional per GitHub Actions specification.
|
||||
// When omitted, GitHub displays the workflow file path relative to the repository root.
|
||||
// We do not validate name presence as it's not required by the schema.
|
||||
|
||||
// Check if jobs section exists
|
||||
match workflow.get("jobs") {
|
||||
|
||||
@@ -12,12 +12,13 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.6.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.7.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
wrkflw-secrets = { path = "../secrets", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
async-trait.workspace = true
|
||||
@@ -26,6 +27,7 @@ chrono.workspace = true
|
||||
dirs.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
ignore = "0.4"
|
||||
lazy_static.workspace = true
|
||||
num_cpus.workspace = true
|
||||
once_cell.workspace = true
|
||||
|
||||
@@ -840,6 +840,14 @@ impl DockerRuntime {
|
||||
working_dir: &Path,
|
||||
volumes: &[(&Path, &Path)],
|
||||
) -> Result<ContainerOutput, ContainerError> {
|
||||
// First, try to pull the image if it's not available locally
|
||||
if let Err(e) = self.pull_image_inner(image).await {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"Failed to pull image {}: {}. Attempting to continue with existing image.",
|
||||
image, e
|
||||
));
|
||||
}
|
||||
|
||||
// Collect environment variables
|
||||
let mut env: Vec<String> = env_vars
|
||||
.iter()
|
||||
|
||||
@@ -9,6 +9,8 @@ use std::path::Path;
|
||||
use std::process::Command;
|
||||
use thiserror::Error;
|
||||
|
||||
use ignore::{gitignore::GitignoreBuilder, Match};
|
||||
|
||||
use crate::dependency;
|
||||
use crate::docker;
|
||||
use crate::environment;
|
||||
@@ -20,6 +22,7 @@ use wrkflw_parser::gitlab::{self, parse_pipeline};
|
||||
use wrkflw_parser::workflow::{self, parse_workflow, ActionInfo, Job, WorkflowDefinition};
|
||||
use wrkflw_runtime::container::ContainerRuntime;
|
||||
use wrkflw_runtime::emulation;
|
||||
use wrkflw_secrets::{SecretConfig, SecretManager, SecretMasker, SecretSubstitution};
|
||||
|
||||
#[allow(unused_variables, unused_assignments)]
|
||||
/// Execute a GitHub Actions workflow file locally
|
||||
@@ -98,6 +101,7 @@ async fn execute_github_workflow(
|
||||
"WRKFLW_RUNTIME_MODE".to_string(),
|
||||
match config.runtime_type {
|
||||
RuntimeType::Emulation => "emulation".to_string(),
|
||||
RuntimeType::SecureEmulation => "secure_emulation".to_string(),
|
||||
RuntimeType::Docker => "docker".to_string(),
|
||||
RuntimeType::Podman => "podman".to_string(),
|
||||
},
|
||||
@@ -114,7 +118,27 @@ async fn execute_github_workflow(
|
||||
ExecutionError::Execution(format!("Failed to setup GitHub env files: {}", e))
|
||||
})?;
|
||||
|
||||
// 5. Execute jobs according to the plan
|
||||
// 5. Initialize secrets management
|
||||
let secret_manager = if let Some(secrets_config) = &config.secrets_config {
|
||||
Some(
|
||||
SecretManager::new(secrets_config.clone())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ExecutionError::Execution(format!("Failed to initialize secret manager: {}", e))
|
||||
})?,
|
||||
)
|
||||
} else {
|
||||
Some(SecretManager::default().await.map_err(|e| {
|
||||
ExecutionError::Execution(format!(
|
||||
"Failed to initialize default secret manager: {}",
|
||||
e
|
||||
))
|
||||
})?)
|
||||
};
|
||||
|
||||
let secret_masker = SecretMasker::new();
|
||||
|
||||
// 6. Execute jobs according to the plan
|
||||
let mut results = Vec::new();
|
||||
let mut has_failures = false;
|
||||
let mut failure_details = String::new();
|
||||
@@ -127,6 +151,8 @@ async fn execute_github_workflow(
|
||||
runtime.as_ref(),
|
||||
&env_context,
|
||||
config.verbose,
|
||||
secret_manager.as_ref(),
|
||||
Some(&secret_masker),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -198,6 +224,7 @@ async fn execute_gitlab_pipeline(
|
||||
"WRKFLW_RUNTIME_MODE".to_string(),
|
||||
match config.runtime_type {
|
||||
RuntimeType::Emulation => "emulation".to_string(),
|
||||
RuntimeType::SecureEmulation => "secure_emulation".to_string(),
|
||||
RuntimeType::Docker => "docker".to_string(),
|
||||
RuntimeType::Podman => "podman".to_string(),
|
||||
},
|
||||
@@ -208,7 +235,27 @@ async fn execute_gitlab_pipeline(
|
||||
ExecutionError::Execution(format!("Failed to setup environment files: {}", e))
|
||||
})?;
|
||||
|
||||
// 6. Execute jobs according to the plan
|
||||
// 6. Initialize secrets management
|
||||
let secret_manager = if let Some(secrets_config) = &config.secrets_config {
|
||||
Some(
|
||||
SecretManager::new(secrets_config.clone())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
ExecutionError::Execution(format!("Failed to initialize secret manager: {}", e))
|
||||
})?,
|
||||
)
|
||||
} else {
|
||||
Some(SecretManager::default().await.map_err(|e| {
|
||||
ExecutionError::Execution(format!(
|
||||
"Failed to initialize default secret manager: {}",
|
||||
e
|
||||
))
|
||||
})?)
|
||||
};
|
||||
|
||||
let secret_masker = SecretMasker::new();
|
||||
|
||||
// 7. Execute jobs according to the plan
|
||||
let mut results = Vec::new();
|
||||
let mut has_failures = false;
|
||||
let mut failure_details = String::new();
|
||||
@@ -221,6 +268,8 @@ async fn execute_gitlab_pipeline(
|
||||
runtime.as_ref(),
|
||||
&env_context,
|
||||
config.verbose,
|
||||
secret_manager.as_ref(),
|
||||
Some(&secret_masker),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -400,6 +449,9 @@ fn initialize_runtime(
|
||||
}
|
||||
}
|
||||
RuntimeType::Emulation => Ok(Box::new(emulation::EmulationRuntime::new())),
|
||||
RuntimeType::SecureEmulation => Ok(Box::new(
|
||||
wrkflw_runtime::secure_emulation::SecureEmulationRuntime::new(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,6 +460,7 @@ pub enum RuntimeType {
|
||||
Docker,
|
||||
Podman,
|
||||
Emulation,
|
||||
SecureEmulation,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -415,6 +468,7 @@ pub struct ExecutionConfig {
|
||||
pub runtime_type: RuntimeType,
|
||||
pub verbose: bool,
|
||||
pub preserve_containers_on_failure: bool,
|
||||
pub secrets_config: Option<SecretConfig>,
|
||||
}
|
||||
|
||||
pub struct ExecutionResult {
|
||||
@@ -516,13 +570,68 @@ async fn prepare_action(
|
||||
} else {
|
||||
// It's a JavaScript or composite action
|
||||
// For simplicity, we'll use node to run it (this would need more work for full support)
|
||||
return Ok("node:16-buster-slim".to_string());
|
||||
return Ok("node:20-slim".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// GitHub action: use standard runner image
|
||||
// In a real implementation, you'd need to clone the repo at the specified version
|
||||
Ok("node:16-buster-slim".to_string())
|
||||
// GitHub action: determine appropriate image based on action type
|
||||
let image = determine_action_image(&action.repository);
|
||||
Ok(image)
|
||||
}
|
||||
|
||||
/// Determine the appropriate Docker image for a GitHub action
|
||||
fn determine_action_image(repository: &str) -> String {
|
||||
// Handle specific well-known actions
|
||||
match repository {
|
||||
// PHP setup actions
|
||||
repo if repo.starts_with("shivammathur/setup-php") => {
|
||||
"composer:latest".to_string() // Use composer image which includes PHP and composer
|
||||
}
|
||||
|
||||
// Python setup actions
|
||||
repo if repo.starts_with("actions/setup-python") => "python:3.11-slim".to_string(),
|
||||
|
||||
// Node.js setup actions
|
||||
repo if repo.starts_with("actions/setup-node") => "node:20-slim".to_string(),
|
||||
|
||||
// Java setup actions
|
||||
repo if repo.starts_with("actions/setup-java") => "eclipse-temurin:17-jdk".to_string(),
|
||||
|
||||
// Go setup actions
|
||||
repo if repo.starts_with("actions/setup-go") => "golang:1.21-slim".to_string(),
|
||||
|
||||
// .NET setup actions
|
||||
repo if repo.starts_with("actions/setup-dotnet") => {
|
||||
"mcr.microsoft.com/dotnet/sdk:7.0".to_string()
|
||||
}
|
||||
|
||||
// Rust setup actions
|
||||
repo if repo.starts_with("actions-rs/toolchain")
|
||||
|| repo.starts_with("dtolnay/rust-toolchain") =>
|
||||
{
|
||||
"rust:latest".to_string()
|
||||
}
|
||||
|
||||
// Docker/container actions
|
||||
repo if repo.starts_with("docker/") => "docker:latest".to_string(),
|
||||
|
||||
// AWS actions
|
||||
repo if repo.starts_with("aws-actions/") => "amazon/aws-cli:latest".to_string(),
|
||||
|
||||
// Default to Node.js for most GitHub actions (checkout, upload-artifact, etc.)
|
||||
_ => {
|
||||
// Check if it's a common core GitHub action that should use a more complete environment
|
||||
if repository.starts_with("actions/checkout")
|
||||
|| repository.starts_with("actions/upload-artifact")
|
||||
|| repository.starts_with("actions/download-artifact")
|
||||
|| repository.starts_with("actions/cache")
|
||||
{
|
||||
"catthehacker/ubuntu:act-latest".to_string() // Use act runner image for core actions
|
||||
} else {
|
||||
"node:20-slim".to_string() // Default for other actions
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn execute_job_batch(
|
||||
@@ -531,11 +640,21 @@ async fn execute_job_batch(
|
||||
runtime: &dyn ContainerRuntime,
|
||||
env_context: &HashMap<String, String>,
|
||||
verbose: bool,
|
||||
secret_manager: Option<&SecretManager>,
|
||||
secret_masker: Option<&SecretMasker>,
|
||||
) -> Result<Vec<JobResult>, ExecutionError> {
|
||||
// Execute jobs in parallel
|
||||
let futures = jobs
|
||||
.iter()
|
||||
.map(|job_name| execute_job_with_matrix(job_name, workflow, runtime, env_context, verbose));
|
||||
let futures = jobs.iter().map(|job_name| {
|
||||
execute_job_with_matrix(
|
||||
job_name,
|
||||
workflow,
|
||||
runtime,
|
||||
env_context,
|
||||
verbose,
|
||||
secret_manager,
|
||||
secret_masker,
|
||||
)
|
||||
});
|
||||
|
||||
let result_arrays = future::join_all(futures).await;
|
||||
|
||||
@@ -558,6 +677,8 @@ struct JobExecutionContext<'a> {
|
||||
runtime: &'a dyn ContainerRuntime,
|
||||
env_context: &'a HashMap<String, String>,
|
||||
verbose: bool,
|
||||
secret_manager: Option<&'a SecretManager>,
|
||||
secret_masker: Option<&'a SecretMasker>,
|
||||
}
|
||||
|
||||
/// Execute a job, expanding matrix if present
|
||||
@@ -567,6 +688,8 @@ async fn execute_job_with_matrix(
|
||||
runtime: &dyn ContainerRuntime,
|
||||
env_context: &HashMap<String, String>,
|
||||
verbose: bool,
|
||||
secret_manager: Option<&SecretManager>,
|
||||
secret_masker: Option<&SecretMasker>,
|
||||
) -> Result<Vec<JobResult>, ExecutionError> {
|
||||
// Get the job definition
|
||||
let job = workflow.jobs.get(job_name).ok_or_else(|| {
|
||||
@@ -629,6 +752,8 @@ async fn execute_job_with_matrix(
|
||||
runtime,
|
||||
env_context,
|
||||
verbose,
|
||||
secret_manager,
|
||||
secret_masker,
|
||||
})
|
||||
.await
|
||||
} else {
|
||||
@@ -639,6 +764,8 @@ async fn execute_job_with_matrix(
|
||||
runtime,
|
||||
env_context,
|
||||
verbose,
|
||||
secret_manager,
|
||||
secret_masker,
|
||||
};
|
||||
let result = execute_job(ctx).await?;
|
||||
Ok(vec![result])
|
||||
@@ -679,13 +806,6 @@ async fn execute_job(ctx: JobExecutionContext<'_>) -> Result<JobResult, Executio
|
||||
ExecutionError::Execution(format!("Failed to get current directory: {}", e))
|
||||
})?;
|
||||
|
||||
// Copy project files to the job workspace directory
|
||||
wrkflw_logging::info(&format!(
|
||||
"Copying project files to job workspace: {}",
|
||||
job_dir.path().display()
|
||||
));
|
||||
copy_directory_contents(¤t_dir, job_dir.path())?;
|
||||
|
||||
wrkflw_logging::info(&format!("Executing job: {}", ctx.job_name));
|
||||
|
||||
let mut job_success = true;
|
||||
@@ -705,6 +825,8 @@ async fn execute_job(ctx: JobExecutionContext<'_>) -> Result<JobResult, Executio
|
||||
runner_image: &runner_image_value,
|
||||
verbose: ctx.verbose,
|
||||
matrix_combination: &None,
|
||||
secret_manager: ctx.secret_manager,
|
||||
secret_masker: ctx.secret_masker,
|
||||
})
|
||||
.await;
|
||||
|
||||
@@ -774,6 +896,10 @@ struct MatrixExecutionContext<'a> {
|
||||
runtime: &'a dyn ContainerRuntime,
|
||||
env_context: &'a HashMap<String, String>,
|
||||
verbose: bool,
|
||||
#[allow(dead_code)] // Planned for future implementation
|
||||
secret_manager: Option<&'a SecretManager>,
|
||||
#[allow(dead_code)] // Planned for future implementation
|
||||
secret_masker: Option<&'a SecretMasker>,
|
||||
}
|
||||
|
||||
/// Execute a set of matrix combinations
|
||||
@@ -879,13 +1005,6 @@ async fn execute_matrix_job(
|
||||
ExecutionError::Execution(format!("Failed to get current directory: {}", e))
|
||||
})?;
|
||||
|
||||
// Copy project files to the job workspace directory
|
||||
wrkflw_logging::info(&format!(
|
||||
"Copying project files to job workspace: {}",
|
||||
job_dir.path().display()
|
||||
));
|
||||
copy_directory_contents(¤t_dir, job_dir.path())?;
|
||||
|
||||
let job_success = if job_template.steps.is_empty() {
|
||||
wrkflw_logging::warning(&format!("Job '{}' has no steps", matrix_job_name));
|
||||
true
|
||||
@@ -905,6 +1024,8 @@ async fn execute_matrix_job(
|
||||
runner_image: &runner_image_value,
|
||||
verbose,
|
||||
matrix_combination: &Some(combination.values.clone()),
|
||||
secret_manager: None, // Matrix execution context doesn't have secrets yet
|
||||
secret_masker: None,
|
||||
})
|
||||
.await
|
||||
{
|
||||
@@ -974,6 +1095,9 @@ struct StepExecutionContext<'a> {
|
||||
verbose: bool,
|
||||
#[allow(dead_code)]
|
||||
matrix_combination: &'a Option<HashMap<String, Value>>,
|
||||
secret_manager: Option<&'a SecretManager>,
|
||||
#[allow(dead_code)] // Planned for future implementation
|
||||
secret_masker: Option<&'a SecretMasker>,
|
||||
}
|
||||
|
||||
async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, ExecutionError> {
|
||||
@@ -990,9 +1114,24 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
// Prepare step environment
|
||||
let mut step_env = ctx.job_env.clone();
|
||||
|
||||
// Add step-level environment variables
|
||||
// Add step-level environment variables (with secret substitution)
|
||||
for (key, value) in &ctx.step.env {
|
||||
step_env.insert(key.clone(), value.clone());
|
||||
let resolved_value = if let Some(secret_manager) = ctx.secret_manager {
|
||||
let mut substitution = SecretSubstitution::new(secret_manager);
|
||||
match substitution.substitute(value).await {
|
||||
Ok(resolved) => resolved,
|
||||
Err(e) => {
|
||||
wrkflw_logging::error(&format!(
|
||||
"Failed to resolve secrets in environment variable {}: {}",
|
||||
key, e
|
||||
));
|
||||
value.clone()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
value.clone()
|
||||
};
|
||||
step_env.insert(key.clone(), resolved_value);
|
||||
}
|
||||
|
||||
// Execute the step based on its type
|
||||
@@ -1021,28 +1160,13 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
detailed_output
|
||||
.push_str(&format!(" - Destination: {}\n", ctx.working_dir.display()));
|
||||
|
||||
// Add list of top-level files/directories that were copied (limit to 10)
|
||||
detailed_output.push_str("\nTop-level files/directories copied:\n");
|
||||
// Add a summary count instead of listing all files
|
||||
if let Ok(entries) = std::fs::read_dir(¤t_dir) {
|
||||
for (i, entry) in entries.take(10).enumerate() {
|
||||
if let Ok(entry) = entry {
|
||||
let file_type = if entry.path().is_dir() {
|
||||
"directory"
|
||||
} else {
|
||||
"file"
|
||||
};
|
||||
detailed_output.push_str(&format!(
|
||||
" - {} ({})\n",
|
||||
entry.file_name().to_string_lossy(),
|
||||
file_type
|
||||
));
|
||||
}
|
||||
|
||||
if i >= 9 {
|
||||
detailed_output.push_str(" - ... (more items not shown)\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
let entry_count = entries.count();
|
||||
detailed_output.push_str(&format!(
|
||||
"\nCopied {} top-level items to workspace\n",
|
||||
entry_count
|
||||
));
|
||||
}
|
||||
|
||||
detailed_output
|
||||
@@ -1085,13 +1209,15 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
let mut owned_strings: Vec<String> = Vec::new(); // Keep strings alive until after we use cmd
|
||||
|
||||
// Special handling for Rust actions
|
||||
if uses.starts_with("actions-rs/") {
|
||||
if uses.starts_with("actions-rs/") || uses.starts_with("dtolnay/rust-toolchain") {
|
||||
wrkflw_logging::info(
|
||||
"🔄 Detected Rust action - using system Rust installation",
|
||||
);
|
||||
|
||||
// For toolchain action, verify Rust is installed
|
||||
if uses.starts_with("actions-rs/toolchain@") {
|
||||
if uses.starts_with("actions-rs/toolchain@")
|
||||
|| uses.starts_with("dtolnay/rust-toolchain@")
|
||||
{
|
||||
let rustc_version = Command::new("rustc")
|
||||
.arg("--version")
|
||||
.output()
|
||||
@@ -1417,7 +1543,7 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
let output = ctx
|
||||
.runtime
|
||||
.run_container(
|
||||
ctx.runner_image,
|
||||
&image,
|
||||
&cmd.to_vec(),
|
||||
&env_vars,
|
||||
container_workspace,
|
||||
@@ -1527,11 +1653,29 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
let mut status = StepStatus::Success;
|
||||
let mut error_details = None;
|
||||
|
||||
// Check if this is a cargo command
|
||||
let is_cargo_cmd = run.trim().starts_with("cargo");
|
||||
// Perform secret substitution if secret manager is available
|
||||
let resolved_run = if let Some(secret_manager) = ctx.secret_manager {
|
||||
let mut substitution = SecretSubstitution::new(secret_manager);
|
||||
match substitution.substitute(run).await {
|
||||
Ok(resolved) => resolved,
|
||||
Err(e) => {
|
||||
return Ok(StepResult {
|
||||
name: step_name,
|
||||
status: StepStatus::Failure,
|
||||
output: format!("Secret substitution failed: {}", e),
|
||||
});
|
||||
}
|
||||
}
|
||||
} else {
|
||||
run.clone()
|
||||
};
|
||||
|
||||
// Convert command string to array of string slices
|
||||
let cmd_parts: Vec<&str> = run.split_whitespace().collect();
|
||||
// Check if this is a cargo command
|
||||
let is_cargo_cmd = resolved_run.trim().starts_with("cargo");
|
||||
|
||||
// For complex shell commands, use bash to execute them properly
|
||||
// This handles quotes, pipes, redirections, and command substitutions correctly
|
||||
let cmd_parts = vec!["bash", "-c", &resolved_run];
|
||||
|
||||
// Convert environment variables to the required format
|
||||
let env_vars: Vec<(&str, &str)> = step_env
|
||||
@@ -1630,7 +1774,60 @@ async fn execute_step(ctx: StepExecutionContext<'_>) -> Result<StepResult, Execu
|
||||
Ok(step_result)
|
||||
}
|
||||
|
||||
/// Create a gitignore matcher for the given directory
|
||||
fn create_gitignore_matcher(
|
||||
dir: &Path,
|
||||
) -> Result<Option<ignore::gitignore::Gitignore>, ExecutionError> {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
|
||||
// Try to add .gitignore file if it exists
|
||||
let gitignore_path = dir.join(".gitignore");
|
||||
if gitignore_path.exists() {
|
||||
builder.add(&gitignore_path);
|
||||
}
|
||||
|
||||
// Add some common ignore patterns as fallback
|
||||
builder.add_line(None, "target/").map_err(|e| {
|
||||
ExecutionError::Execution(format!("Failed to add default ignore pattern: {}", e))
|
||||
})?;
|
||||
builder.add_line(None, ".git/").map_err(|e| {
|
||||
ExecutionError::Execution(format!("Failed to add default ignore pattern: {}", e))
|
||||
})?;
|
||||
|
||||
match builder.build() {
|
||||
Ok(gitignore) => Ok(Some(gitignore)),
|
||||
Err(e) => {
|
||||
wrkflw_logging::warning(&format!("Failed to build gitignore matcher: {}", e));
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn copy_directory_contents(from: &Path, to: &Path) -> Result<(), ExecutionError> {
|
||||
copy_directory_contents_with_gitignore(from, to, None)
|
||||
}
|
||||
|
||||
fn copy_directory_contents_with_gitignore(
|
||||
from: &Path,
|
||||
to: &Path,
|
||||
gitignore: Option<&ignore::gitignore::Gitignore>,
|
||||
) -> Result<(), ExecutionError> {
|
||||
// If no gitignore provided, try to create one for the root directory
|
||||
let root_gitignore;
|
||||
let gitignore = if gitignore.is_none() {
|
||||
root_gitignore = create_gitignore_matcher(from)?;
|
||||
root_gitignore.as_ref()
|
||||
} else {
|
||||
gitignore
|
||||
};
|
||||
|
||||
// Log summary of the copy operation
|
||||
wrkflw_logging::debug(&format!(
|
||||
"Copying directory contents from {} to {}",
|
||||
from.display(),
|
||||
to.display()
|
||||
));
|
||||
|
||||
for entry in std::fs::read_dir(from)
|
||||
.map_err(|e| ExecutionError::Execution(format!("Failed to read directory: {}", e)))?
|
||||
{
|
||||
@@ -1638,7 +1835,23 @@ fn copy_directory_contents(from: &Path, to: &Path) -> Result<(), ExecutionError>
|
||||
entry.map_err(|e| ExecutionError::Execution(format!("Failed to read entry: {}", e)))?;
|
||||
let path = entry.path();
|
||||
|
||||
// Skip hidden files/dirs and target directory for efficiency
|
||||
// Check if the file should be ignored according to .gitignore
|
||||
if let Some(gitignore) = gitignore {
|
||||
let relative_path = path.strip_prefix(from).unwrap_or(&path);
|
||||
match gitignore.matched(relative_path, path.is_dir()) {
|
||||
Match::Ignore(_) => {
|
||||
wrkflw_logging::debug(&format!("Skipping ignored file/directory: {path:?}"));
|
||||
continue;
|
||||
}
|
||||
Match::Whitelist(_) | Match::None => {
|
||||
// File is not ignored or explicitly whitelisted
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Log individual files only in trace mode (removed verbose per-file logging)
|
||||
|
||||
// Additional basic filtering for hidden files (but allow .gitignore and .github)
|
||||
let file_name = match path.file_name() {
|
||||
Some(name) => name.to_string_lossy(),
|
||||
None => {
|
||||
@@ -1648,7 +1861,13 @@ fn copy_directory_contents(from: &Path, to: &Path) -> Result<(), ExecutionError>
|
||||
)));
|
||||
}
|
||||
};
|
||||
if file_name.starts_with(".") || file_name == "target" {
|
||||
|
||||
// Skip most hidden files but allow important ones
|
||||
if file_name.starts_with(".")
|
||||
&& file_name != ".gitignore"
|
||||
&& file_name != ".github"
|
||||
&& !file_name.starts_with(".env")
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1666,8 +1885,8 @@ fn copy_directory_contents(from: &Path, to: &Path) -> Result<(), ExecutionError>
|
||||
std::fs::create_dir_all(&dest_path)
|
||||
.map_err(|e| ExecutionError::Execution(format!("Failed to create dir: {}", e)))?;
|
||||
|
||||
// Recursively copy subdirectories
|
||||
copy_directory_contents(&path, &dest_path)?;
|
||||
// Recursively copy subdirectories with the same gitignore
|
||||
copy_directory_contents_with_gitignore(&path, &dest_path, gitignore)?;
|
||||
} else {
|
||||
std::fs::copy(&path, &dest_path)
|
||||
.map_err(|e| ExecutionError::Execution(format!("Failed to copy file: {}", e)))?;
|
||||
@@ -1680,11 +1899,11 @@ fn copy_directory_contents(from: &Path, to: &Path) -> Result<(), ExecutionError>
|
||||
fn get_runner_image(runs_on: &str) -> String {
|
||||
// Map GitHub runners to Docker images
|
||||
match runs_on.trim() {
|
||||
// ubuntu runners - micro images (minimal size)
|
||||
"ubuntu-latest" => "node:16-buster-slim",
|
||||
"ubuntu-22.04" => "node:16-bullseye-slim",
|
||||
"ubuntu-20.04" => "node:16-buster-slim",
|
||||
"ubuntu-18.04" => "node:16-buster-slim",
|
||||
// ubuntu runners - using Ubuntu base images for better compatibility
|
||||
"ubuntu-latest" => "ubuntu:latest",
|
||||
"ubuntu-22.04" => "ubuntu:22.04",
|
||||
"ubuntu-20.04" => "ubuntu:20.04",
|
||||
"ubuntu-18.04" => "ubuntu:18.04",
|
||||
|
||||
// ubuntu runners - medium images (with more tools)
|
||||
"ubuntu-latest-medium" => "catthehacker/ubuntu:act-latest",
|
||||
@@ -1905,8 +2124,16 @@ async fn execute_reusable_workflow_job(
|
||||
let mut all_results = Vec::new();
|
||||
let mut any_failed = false;
|
||||
for batch in plan {
|
||||
let results =
|
||||
execute_job_batch(&batch, &called, ctx.runtime, &child_env, ctx.verbose).await?;
|
||||
let results = execute_job_batch(
|
||||
&batch,
|
||||
&called,
|
||||
ctx.runtime,
|
||||
&child_env,
|
||||
ctx.verbose,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
for r in &results {
|
||||
if r.status == JobStatus::Failure {
|
||||
any_failed = true;
|
||||
@@ -2102,6 +2329,8 @@ async fn execute_composite_action(
|
||||
runner_image,
|
||||
verbose,
|
||||
matrix_combination: &None,
|
||||
secret_manager: None, // Composite actions don't have secrets yet
|
||||
secret_masker: None,
|
||||
}))
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies from workspace
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
chrono.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
indexmap.workspace = true
|
||||
|
||||
@@ -14,4 +14,4 @@ categories.workspace = true
|
||||
serde.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
jsonschema.workspace = true
|
||||
|
||||
@@ -260,7 +260,7 @@ test_job:
|
||||
fs::write(&file, content).unwrap();
|
||||
|
||||
// Parse the pipeline
|
||||
let pipeline = parse_pipeline(&file.path()).unwrap();
|
||||
let pipeline = parse_pipeline(file.path()).unwrap();
|
||||
|
||||
// Validate basic structure
|
||||
assert_eq!(pipeline.stages.as_ref().unwrap().len(), 2);
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
async-trait.workspace = true
|
||||
@@ -23,5 +23,8 @@ serde_yaml.workspace = true
|
||||
tempfile = "3.9"
|
||||
tokio.workspace = true
|
||||
futures = "0.3"
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
ignore = "0.4"
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
which = "4.4"
|
||||
regex = "1.10"
|
||||
thiserror = "1.0"
|
||||
|
||||
258
crates/runtime/README_SECURITY.md
Normal file
258
crates/runtime/README_SECURITY.md
Normal file
@@ -0,0 +1,258 @@
|
||||
# Security Features in wrkflw Runtime
|
||||
|
||||
This document describes the security features implemented in the wrkflw runtime, particularly the sandboxing capabilities for emulation mode.
|
||||
|
||||
## Overview
|
||||
|
||||
The wrkflw runtime provides multiple execution modes with varying levels of security:
|
||||
|
||||
1. **Docker Mode** - Uses Docker containers for isolation (recommended for production)
|
||||
2. **Podman Mode** - Uses Podman containers for isolation with rootless support
|
||||
3. **Secure Emulation Mode** - 🔒 **NEW**: Sandboxed execution on the host system
|
||||
4. **Emulation Mode** - ⚠️ **UNSAFE**: Direct execution on the host system (deprecated)
|
||||
|
||||
## Security Modes
|
||||
|
||||
### 🔒 Secure Emulation Mode (Recommended for Local Development)
|
||||
|
||||
The secure emulation mode provides comprehensive sandboxing to protect your system from potentially harmful commands while still allowing legitimate workflow operations.
|
||||
|
||||
#### Features
|
||||
|
||||
- **Command Validation**: Blocks dangerous commands like `rm -rf /`, `dd`, `sudo`, etc.
|
||||
- **Pattern Detection**: Uses regex patterns to detect dangerous command combinations
|
||||
- **Resource Limits**: Enforces CPU, memory, and execution time limits
|
||||
- **Filesystem Isolation**: Restricts file access to allowed paths only
|
||||
- **Environment Sanitization**: Filters dangerous environment variables
|
||||
- **Process Monitoring**: Tracks and limits spawned processes
|
||||
|
||||
#### Usage
|
||||
|
||||
```bash
|
||||
# Use secure emulation mode (recommended)
|
||||
wrkflw run --runtime secure-emulation .github/workflows/build.yml
|
||||
|
||||
# Or via TUI
|
||||
wrkflw tui --runtime secure-emulation
|
||||
```
|
||||
|
||||
#### Command Whitelist/Blacklist
|
||||
|
||||
**Allowed Commands (Safe):**
|
||||
- Basic utilities: `echo`, `cat`, `ls`, `grep`, `sed`, `awk`
|
||||
- Development tools: `cargo`, `npm`, `python`, `git`, `node`
|
||||
- Build tools: `make`, `cmake`, `javac`, `dotnet`
|
||||
|
||||
**Blocked Commands (Dangerous):**
|
||||
- System modification: `rm`, `dd`, `mkfs`, `mount`, `sudo`
|
||||
- Network tools: `wget`, `curl`, `ssh`, `nc`
|
||||
- Process control: `kill`, `killall`, `systemctl`
|
||||
|
||||
#### Resource Limits
|
||||
|
||||
```rust
|
||||
// Default configuration
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(300), // 5 minutes
|
||||
max_memory_mb: 512, // 512 MB
|
||||
max_cpu_percent: 80, // 80% CPU
|
||||
max_processes: 10, // Max 10 processes
|
||||
allow_network: false, // No network access
|
||||
strict_mode: true, // Whitelist-only mode
|
||||
}
|
||||
```
|
||||
|
||||
### ⚠️ Legacy Emulation Mode (Unsafe)
|
||||
|
||||
The original emulation mode executes commands directly on the host system without any sandboxing. **This mode will be deprecated and should only be used for trusted workflows.**
|
||||
|
||||
```bash
|
||||
# Legacy unsafe mode (not recommended)
|
||||
wrkflw run --runtime emulation .github/workflows/build.yml
|
||||
```
|
||||
|
||||
## Example: Blocked vs Allowed Commands
|
||||
|
||||
### ❌ Blocked Commands
|
||||
|
||||
```yaml
|
||||
# This workflow will be blocked in secure emulation mode
|
||||
steps:
|
||||
- name: Dangerous command
|
||||
run: rm -rf /tmp/* # BLOCKED: Dangerous file deletion
|
||||
|
||||
- name: System modification
|
||||
run: sudo apt-get install package # BLOCKED: sudo usage
|
||||
|
||||
- name: Network access
|
||||
run: wget https://malicious-site.com/script.sh | sh # BLOCKED: wget + shell execution
|
||||
```
|
||||
|
||||
### ✅ Allowed Commands
|
||||
|
||||
```yaml
|
||||
# This workflow will run successfully in secure emulation mode
|
||||
steps:
|
||||
- name: Build project
|
||||
run: cargo build --release # ALLOWED: Development tool
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test # ALLOWED: Testing
|
||||
|
||||
- name: List files
|
||||
run: ls -la target/ # ALLOWED: Safe file listing
|
||||
|
||||
- name: Format code
|
||||
run: cargo fmt --check # ALLOWED: Code formatting
|
||||
```
|
||||
|
||||
## Security Warnings and Messages
|
||||
|
||||
When dangerous commands are detected, wrkflw provides clear security messages:
|
||||
|
||||
```
|
||||
🚫 SECURITY BLOCK: Command 'rm' is not allowed in secure emulation mode.
|
||||
This command was blocked for security reasons.
|
||||
If you need to run this command, please use Docker or Podman mode instead.
|
||||
```
|
||||
|
||||
```
|
||||
🚫 SECURITY BLOCK: Dangerous command pattern detected: 'rm -rf /'.
|
||||
This command was blocked because it matches a known dangerous pattern.
|
||||
Please review your workflow for potentially harmful commands.
|
||||
```
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Workflow-Friendly Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::create_workflow_sandbox_config;
|
||||
|
||||
let config = create_workflow_sandbox_config();
|
||||
// - Allows network access for package downloads
|
||||
// - Higher resource limits for CI/CD workloads
|
||||
// - Less strict mode for development flexibility
|
||||
```
|
||||
|
||||
### Strict Security Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::create_strict_sandbox_config;
|
||||
|
||||
let config = create_strict_sandbox_config();
|
||||
// - No network access
|
||||
// - Very limited command set
|
||||
// - Low resource limits
|
||||
// - Strict whitelist-only mode
|
||||
```
|
||||
|
||||
### Custom Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::{SandboxConfig, Sandbox};
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
|
||||
let mut config = SandboxConfig::default();
|
||||
|
||||
// Custom allowed commands
|
||||
config.allowed_commands = ["echo", "ls", "cargo"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
|
||||
// Custom resource limits
|
||||
config.max_execution_time = Duration::from_secs(60);
|
||||
config.max_memory_mb = 256;
|
||||
|
||||
// Custom allowed paths
|
||||
config.allowed_write_paths.insert(PathBuf::from("./target"));
|
||||
config.allowed_read_paths.insert(PathBuf::from("./src"));
|
||||
|
||||
let sandbox = Sandbox::new(config)?;
|
||||
```
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From Unsafe Emulation to Secure Emulation
|
||||
|
||||
1. **Change Runtime Flag**:
|
||||
```bash
|
||||
# Old (unsafe)
|
||||
wrkflw run --runtime emulation workflow.yml
|
||||
|
||||
# New (secure)
|
||||
wrkflw run --runtime secure-emulation workflow.yml
|
||||
```
|
||||
|
||||
2. **Review Workflow Commands**: Check for any commands that might be blocked and adjust if necessary.
|
||||
|
||||
3. **Handle Security Blocks**: If legitimate commands are blocked, consider:
|
||||
- Using Docker/Podman mode for those specific workflows
|
||||
- Modifying the workflow to use allowed alternatives
|
||||
- Creating a custom sandbox configuration
|
||||
|
||||
### When to Use Each Mode
|
||||
|
||||
| Use Case | Recommended Mode | Reason |
|
||||
|----------|------------------|---------|
|
||||
| Local development | Secure Emulation | Good balance of security and convenience |
|
||||
| Untrusted workflows | Docker/Podman | Maximum isolation |
|
||||
| CI/CD pipelines | Docker/Podman | Consistent, reproducible environment |
|
||||
| Testing workflows | Secure Emulation | Fast execution with safety |
|
||||
| Trusted internal workflows | Secure Emulation | Sufficient security for known-safe code |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Command Blocked Error
|
||||
|
||||
If you encounter a security block:
|
||||
|
||||
1. **Check if the command is necessary**: Can you achieve the same result with an allowed command?
|
||||
2. **Use container mode**: Switch to Docker or Podman mode for unrestricted execution
|
||||
3. **Modify the workflow**: Use safer alternatives where possible
|
||||
|
||||
### Resource Limit Exceeded
|
||||
|
||||
If your workflow hits resource limits:
|
||||
|
||||
1. **Optimize the workflow**: Reduce resource usage where possible
|
||||
2. **Use custom configuration**: Increase limits for specific use cases
|
||||
3. **Use container mode**: For resource-intensive workflows
|
||||
|
||||
### Path Access Denied
|
||||
|
||||
If file access is denied:
|
||||
|
||||
1. **Check allowed paths**: Ensure your workflow only accesses permitted directories
|
||||
2. **Use relative paths**: Work within the project directory
|
||||
3. **Use container mode**: For workflows requiring system-wide file access
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Default to Secure Mode**: Use secure emulation mode by default for local development
|
||||
2. **Test Workflows**: Always test workflows in secure mode before deploying
|
||||
3. **Review Security Messages**: Pay attention to security blocks and warnings
|
||||
4. **Use Containers for Production**: Use Docker/Podman for production deployments
|
||||
5. **Regular Updates**: Keep wrkflw updated for the latest security improvements
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Secure emulation mode is designed to prevent **accidental** harmful commands, not to stop **determined** attackers
|
||||
- For maximum security with untrusted code, always use container modes
|
||||
- The sandbox is most effective against script errors and typos that could damage your system
|
||||
- Always review workflows from untrusted sources before execution
|
||||
|
||||
## Contributing Security Improvements
|
||||
|
||||
If you find security issues or have suggestions for improvements:
|
||||
|
||||
1. **Report Security Issues**: Use responsible disclosure for security vulnerabilities
|
||||
2. **Suggest Command Patterns**: Help improve dangerous pattern detection
|
||||
3. **Test Edge Cases**: Help us identify bypass techniques
|
||||
4. **Documentation**: Improve security documentation and examples
|
||||
|
||||
---
|
||||
|
||||
For more information, see the main [README.md](../../README.md) and [Security Policy](../../SECURITY.md).
|
||||
@@ -24,6 +24,7 @@ pub trait ContainerRuntime {
|
||||
) -> Result<String, ContainerError>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ContainerOutput {
|
||||
pub stdout: String,
|
||||
pub stderr: String,
|
||||
|
||||
@@ -10,6 +10,8 @@ use tempfile::TempDir;
|
||||
use which;
|
||||
use wrkflw_logging;
|
||||
|
||||
use ignore::{gitignore::GitignoreBuilder, Match};
|
||||
|
||||
// Global collection of resources to clean up
|
||||
static EMULATION_WORKSPACES: Lazy<Mutex<Vec<PathBuf>>> = Lazy::new(|| Mutex::new(Vec::new()));
|
||||
static EMULATION_PROCESSES: Lazy<Mutex<Vec<u32>>> = Lazy::new(|| Mutex::new(Vec::new()));
|
||||
@@ -490,14 +492,75 @@ impl ContainerRuntime for EmulationRuntime {
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// Create a gitignore matcher for the given directory
|
||||
fn create_gitignore_matcher(
|
||||
dir: &Path,
|
||||
) -> Result<Option<ignore::gitignore::Gitignore>, std::io::Error> {
|
||||
let mut builder = GitignoreBuilder::new(dir);
|
||||
|
||||
// Try to add .gitignore file if it exists
|
||||
let gitignore_path = dir.join(".gitignore");
|
||||
if gitignore_path.exists() {
|
||||
builder.add(&gitignore_path);
|
||||
}
|
||||
|
||||
// Add some common ignore patterns as fallback
|
||||
if let Err(e) = builder.add_line(None, "target/") {
|
||||
wrkflw_logging::warning(&format!("Failed to add default ignore pattern: {}", e));
|
||||
}
|
||||
if let Err(e) = builder.add_line(None, ".git/") {
|
||||
wrkflw_logging::warning(&format!("Failed to add default ignore pattern: {}", e));
|
||||
}
|
||||
|
||||
match builder.build() {
|
||||
Ok(gitignore) => Ok(Some(gitignore)),
|
||||
Err(e) => {
|
||||
wrkflw_logging::warning(&format!("Failed to build gitignore matcher: {}", e));
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn copy_directory_contents(source: &Path, dest: &Path) -> std::io::Result<()> {
|
||||
copy_directory_contents_with_gitignore(source, dest, None)
|
||||
}
|
||||
|
||||
fn copy_directory_contents_with_gitignore(
|
||||
source: &Path,
|
||||
dest: &Path,
|
||||
gitignore: Option<&ignore::gitignore::Gitignore>,
|
||||
) -> std::io::Result<()> {
|
||||
// Create the destination directory if it doesn't exist
|
||||
fs::create_dir_all(dest)?;
|
||||
|
||||
// If no gitignore provided, try to create one for the root directory
|
||||
let root_gitignore;
|
||||
let gitignore = if gitignore.is_none() {
|
||||
root_gitignore = create_gitignore_matcher(source)?;
|
||||
root_gitignore.as_ref()
|
||||
} else {
|
||||
gitignore
|
||||
};
|
||||
|
||||
// Iterate through all entries in the source directory
|
||||
for entry in fs::read_dir(source)? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
// Check if the file should be ignored according to .gitignore
|
||||
if let Some(gitignore) = gitignore {
|
||||
let relative_path = path.strip_prefix(source).unwrap_or(&path);
|
||||
match gitignore.matched(relative_path, path.is_dir()) {
|
||||
Match::Ignore(_) => {
|
||||
wrkflw_logging::debug(&format!("Skipping ignored file/directory: {path:?}"));
|
||||
continue;
|
||||
}
|
||||
Match::Whitelist(_) | Match::None => {
|
||||
// File is not ignored or explicitly whitelisted
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let file_name = match path.file_name() {
|
||||
Some(name) => name,
|
||||
None => {
|
||||
@@ -507,23 +570,19 @@ fn copy_directory_contents(source: &Path, dest: &Path) -> std::io::Result<()> {
|
||||
};
|
||||
let dest_path = dest.join(file_name);
|
||||
|
||||
// Skip hidden files (except .gitignore and .github might be useful)
|
||||
// Skip most hidden files but allow important ones
|
||||
let file_name_str = file_name.to_string_lossy();
|
||||
if file_name_str.starts_with(".")
|
||||
&& file_name_str != ".gitignore"
|
||||
&& file_name_str != ".github"
|
||||
&& !file_name_str.starts_with(".env")
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip target directory for Rust projects
|
||||
if file_name_str == "target" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if path.is_dir() {
|
||||
// Recursively copy subdirectories
|
||||
copy_directory_contents(&path, &dest_path)?;
|
||||
// Recursively copy subdirectories with the same gitignore
|
||||
copy_directory_contents_with_gitignore(&path, &dest_path, gitignore)?;
|
||||
} else {
|
||||
// Copy files
|
||||
fs::copy(&path, &dest_path)?;
|
||||
@@ -584,6 +643,15 @@ pub async fn handle_special_action(action: &str) -> Result<(), ContainerError> {
|
||||
wrkflw_logging::info(&format!("🔄 Detected Rust formatter action: {}", action));
|
||||
|
||||
check_command_available("rustfmt", "rustfmt", "rustup component add rustfmt");
|
||||
} else if action.starts_with("dtolnay/rust-toolchain@") {
|
||||
// For dtolnay/rust-toolchain action, check for Rust installation
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔄 Detected dtolnay Rust toolchain action: {}",
|
||||
action
|
||||
));
|
||||
|
||||
check_command_available("rustc", "Rust", "https://rustup.rs/");
|
||||
check_command_available("cargo", "Cargo", "https://rustup.rs/");
|
||||
} else if action.starts_with("actions/setup-node@") {
|
||||
// Node.js setup action
|
||||
wrkflw_logging::info(&format!("🔄 Detected Node.js setup action: {}", action));
|
||||
|
||||
@@ -2,3 +2,5 @@
|
||||
|
||||
pub mod container;
|
||||
pub mod emulation;
|
||||
pub mod sandbox;
|
||||
pub mod secure_emulation;
|
||||
|
||||
672
crates/runtime/src/sandbox.rs
Normal file
672
crates/runtime/src/sandbox.rs
Normal file
@@ -0,0 +1,672 @@
|
||||
use regex::Regex;
|
||||
use std::collections::HashSet;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
use tempfile::TempDir;
|
||||
use wrkflw_logging;
|
||||
|
||||
/// Configuration for sandbox execution
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SandboxConfig {
|
||||
/// Maximum execution time for commands
|
||||
pub max_execution_time: Duration,
|
||||
/// Maximum memory usage in MB
|
||||
pub max_memory_mb: u64,
|
||||
/// Maximum CPU usage percentage
|
||||
pub max_cpu_percent: u64,
|
||||
/// Allowed commands (whitelist)
|
||||
pub allowed_commands: HashSet<String>,
|
||||
/// Blocked commands (blacklist)
|
||||
pub blocked_commands: HashSet<String>,
|
||||
/// Allowed file system paths (read-only)
|
||||
pub allowed_read_paths: HashSet<PathBuf>,
|
||||
/// Allowed file system paths (read-write)
|
||||
pub allowed_write_paths: HashSet<PathBuf>,
|
||||
/// Whether to enable network access
|
||||
pub allow_network: bool,
|
||||
/// Maximum number of processes
|
||||
pub max_processes: u32,
|
||||
/// Whether to enable strict mode (more restrictive)
|
||||
pub strict_mode: bool,
|
||||
}
|
||||
|
||||
impl Default for SandboxConfig {
|
||||
fn default() -> Self {
|
||||
let mut allowed_commands = HashSet::new();
|
||||
|
||||
// Basic safe commands
|
||||
allowed_commands.insert("echo".to_string());
|
||||
allowed_commands.insert("printf".to_string());
|
||||
allowed_commands.insert("cat".to_string());
|
||||
allowed_commands.insert("head".to_string());
|
||||
allowed_commands.insert("tail".to_string());
|
||||
allowed_commands.insert("grep".to_string());
|
||||
allowed_commands.insert("sed".to_string());
|
||||
allowed_commands.insert("awk".to_string());
|
||||
allowed_commands.insert("sort".to_string());
|
||||
allowed_commands.insert("uniq".to_string());
|
||||
allowed_commands.insert("wc".to_string());
|
||||
allowed_commands.insert("cut".to_string());
|
||||
allowed_commands.insert("tr".to_string());
|
||||
allowed_commands.insert("which".to_string());
|
||||
allowed_commands.insert("pwd".to_string());
|
||||
allowed_commands.insert("env".to_string());
|
||||
allowed_commands.insert("date".to_string());
|
||||
allowed_commands.insert("basename".to_string());
|
||||
allowed_commands.insert("dirname".to_string());
|
||||
|
||||
// File operations (safe variants)
|
||||
allowed_commands.insert("ls".to_string());
|
||||
allowed_commands.insert("find".to_string());
|
||||
allowed_commands.insert("mkdir".to_string());
|
||||
allowed_commands.insert("touch".to_string());
|
||||
allowed_commands.insert("cp".to_string());
|
||||
allowed_commands.insert("mv".to_string());
|
||||
|
||||
// Development tools
|
||||
allowed_commands.insert("git".to_string());
|
||||
allowed_commands.insert("cargo".to_string());
|
||||
allowed_commands.insert("rustc".to_string());
|
||||
allowed_commands.insert("rustfmt".to_string());
|
||||
allowed_commands.insert("clippy".to_string());
|
||||
allowed_commands.insert("npm".to_string());
|
||||
allowed_commands.insert("yarn".to_string());
|
||||
allowed_commands.insert("node".to_string());
|
||||
allowed_commands.insert("python".to_string());
|
||||
allowed_commands.insert("python3".to_string());
|
||||
allowed_commands.insert("pip".to_string());
|
||||
allowed_commands.insert("pip3".to_string());
|
||||
allowed_commands.insert("java".to_string());
|
||||
allowed_commands.insert("javac".to_string());
|
||||
allowed_commands.insert("maven".to_string());
|
||||
allowed_commands.insert("gradle".to_string());
|
||||
allowed_commands.insert("go".to_string());
|
||||
allowed_commands.insert("dotnet".to_string());
|
||||
|
||||
// Compression tools
|
||||
allowed_commands.insert("tar".to_string());
|
||||
allowed_commands.insert("gzip".to_string());
|
||||
allowed_commands.insert("gunzip".to_string());
|
||||
allowed_commands.insert("zip".to_string());
|
||||
allowed_commands.insert("unzip".to_string());
|
||||
|
||||
let mut blocked_commands = HashSet::new();
|
||||
|
||||
// Dangerous system commands
|
||||
blocked_commands.insert("rm".to_string());
|
||||
blocked_commands.insert("rmdir".to_string());
|
||||
blocked_commands.insert("dd".to_string());
|
||||
blocked_commands.insert("mkfs".to_string());
|
||||
blocked_commands.insert("fdisk".to_string());
|
||||
blocked_commands.insert("mount".to_string());
|
||||
blocked_commands.insert("umount".to_string());
|
||||
blocked_commands.insert("sudo".to_string());
|
||||
blocked_commands.insert("su".to_string());
|
||||
blocked_commands.insert("passwd".to_string());
|
||||
blocked_commands.insert("chown".to_string());
|
||||
blocked_commands.insert("chmod".to_string());
|
||||
blocked_commands.insert("chgrp".to_string());
|
||||
blocked_commands.insert("chroot".to_string());
|
||||
|
||||
// Network and system tools
|
||||
blocked_commands.insert("nc".to_string());
|
||||
blocked_commands.insert("netcat".to_string());
|
||||
blocked_commands.insert("wget".to_string());
|
||||
blocked_commands.insert("curl".to_string());
|
||||
blocked_commands.insert("ssh".to_string());
|
||||
blocked_commands.insert("scp".to_string());
|
||||
blocked_commands.insert("rsync".to_string());
|
||||
|
||||
// Process control
|
||||
blocked_commands.insert("kill".to_string());
|
||||
blocked_commands.insert("killall".to_string());
|
||||
blocked_commands.insert("pkill".to_string());
|
||||
blocked_commands.insert("nohup".to_string());
|
||||
blocked_commands.insert("screen".to_string());
|
||||
blocked_commands.insert("tmux".to_string());
|
||||
|
||||
// System modification
|
||||
blocked_commands.insert("systemctl".to_string());
|
||||
blocked_commands.insert("service".to_string());
|
||||
blocked_commands.insert("crontab".to_string());
|
||||
blocked_commands.insert("at".to_string());
|
||||
blocked_commands.insert("reboot".to_string());
|
||||
blocked_commands.insert("shutdown".to_string());
|
||||
blocked_commands.insert("halt".to_string());
|
||||
blocked_commands.insert("poweroff".to_string());
|
||||
|
||||
Self {
|
||||
max_execution_time: Duration::from_secs(300), // 5 minutes
|
||||
max_memory_mb: 512,
|
||||
max_cpu_percent: 80,
|
||||
allowed_commands,
|
||||
blocked_commands,
|
||||
allowed_read_paths: HashSet::new(),
|
||||
allowed_write_paths: HashSet::new(),
|
||||
allow_network: false,
|
||||
max_processes: 10,
|
||||
strict_mode: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sandbox error types
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum SandboxError {
|
||||
#[error("Command blocked by security policy: {command}")]
|
||||
BlockedCommand { command: String },
|
||||
|
||||
#[error("Dangerous command pattern detected: {pattern}")]
|
||||
DangerousPattern { pattern: String },
|
||||
|
||||
#[error("Path access denied: {path}")]
|
||||
PathAccessDenied { path: String },
|
||||
|
||||
#[error("Resource limit exceeded: {resource}")]
|
||||
ResourceLimitExceeded { resource: String },
|
||||
|
||||
#[error("Execution timeout after {seconds} seconds")]
|
||||
ExecutionTimeout { seconds: u64 },
|
||||
|
||||
#[error("Sandbox setup failed: {reason}")]
|
||||
SandboxSetupError { reason: String },
|
||||
|
||||
#[error("Command execution failed: {reason}")]
|
||||
ExecutionError { reason: String },
|
||||
}
|
||||
|
||||
/// Secure sandbox for executing commands in emulation mode
|
||||
pub struct Sandbox {
|
||||
config: SandboxConfig,
|
||||
workspace: TempDir,
|
||||
dangerous_patterns: Vec<Regex>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
/// Create a new sandbox with the given configuration
|
||||
pub fn new(config: SandboxConfig) -> Result<Self, SandboxError> {
|
||||
let workspace = tempfile::tempdir().map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to create sandbox workspace: {}", e),
|
||||
})?;
|
||||
|
||||
let dangerous_patterns = Self::compile_dangerous_patterns();
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"Created new sandbox with workspace: {}",
|
||||
workspace.path().display()
|
||||
));
|
||||
|
||||
Ok(Self {
|
||||
config,
|
||||
workspace,
|
||||
dangerous_patterns,
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute a command in the sandbox
|
||||
pub async fn execute_command(
|
||||
&self,
|
||||
command: &[&str],
|
||||
env_vars: &[(&str, &str)],
|
||||
working_dir: &Path,
|
||||
) -> Result<crate::container::ContainerOutput, SandboxError> {
|
||||
if command.is_empty() {
|
||||
return Err(SandboxError::ExecutionError {
|
||||
reason: "Empty command".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let command_str = command.join(" ");
|
||||
|
||||
// Step 1: Validate command
|
||||
self.validate_command(&command_str)?;
|
||||
|
||||
// Step 2: Setup sandbox environment
|
||||
let sandbox_dir = self.setup_sandbox_environment(working_dir)?;
|
||||
|
||||
// Step 3: Execute with limits
|
||||
self.execute_with_limits(command, env_vars, &sandbox_dir)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Validate that a command is safe to execute
|
||||
fn validate_command(&self, command_str: &str) -> Result<(), SandboxError> {
|
||||
// Check for dangerous patterns first
|
||||
for pattern in &self.dangerous_patterns {
|
||||
if pattern.is_match(command_str) {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🚫 Blocked dangerous command pattern: {}",
|
||||
command_str
|
||||
));
|
||||
return Err(SandboxError::DangerousPattern {
|
||||
pattern: command_str.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Split command by shell operators to validate each part
|
||||
let command_parts = self.split_shell_command(command_str);
|
||||
|
||||
for part in command_parts {
|
||||
let part = part.trim();
|
||||
if part.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Extract the base command from this part
|
||||
let base_command = part.split_whitespace().next().unwrap_or("");
|
||||
let command_name = Path::new(base_command)
|
||||
.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or(base_command);
|
||||
|
||||
// Skip shell built-ins and operators
|
||||
if self.is_shell_builtin(command_name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Check blocked commands
|
||||
if self.config.blocked_commands.contains(command_name) {
|
||||
wrkflw_logging::warning(&format!("🚫 Blocked command: {}", command_name));
|
||||
return Err(SandboxError::BlockedCommand {
|
||||
command: command_name.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// In strict mode, only allow whitelisted commands
|
||||
if self.config.strict_mode && !self.config.allowed_commands.contains(command_name) {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🚫 Command not in whitelist (strict mode): {}",
|
||||
command_name
|
||||
));
|
||||
return Err(SandboxError::BlockedCommand {
|
||||
command: command_name.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
wrkflw_logging::info(&format!("✅ Command validation passed: {}", command_str));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Split shell command by operators while preserving quoted strings
|
||||
fn split_shell_command(&self, command_str: &str) -> Vec<String> {
|
||||
// Simple split by common shell operators
|
||||
// This is not a full shell parser but handles most cases
|
||||
let separators = ["&&", "||", ";", "|"];
|
||||
let mut parts = vec![command_str.to_string()];
|
||||
|
||||
for separator in separators {
|
||||
let mut new_parts = Vec::new();
|
||||
for part in parts {
|
||||
let split_parts: Vec<String> = part
|
||||
.split(separator)
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect();
|
||||
new_parts.extend(split_parts);
|
||||
}
|
||||
parts = new_parts;
|
||||
}
|
||||
|
||||
parts
|
||||
}
|
||||
|
||||
/// Check if a command is a shell built-in
|
||||
fn is_shell_builtin(&self, command: &str) -> bool {
|
||||
let builtins = [
|
||||
"true", "false", "test", "[", "echo", "printf", "cd", "pwd", "export", "set", "unset",
|
||||
"alias", "history", "jobs", "fg", "bg", "wait", "read",
|
||||
];
|
||||
builtins.contains(&command)
|
||||
}
|
||||
|
||||
/// Setup isolated sandbox environment
|
||||
fn setup_sandbox_environment(&self, working_dir: &Path) -> Result<PathBuf, SandboxError> {
|
||||
let sandbox_root = self.workspace.path();
|
||||
let sandbox_workspace = sandbox_root.join("workspace");
|
||||
|
||||
// Create sandbox directory structure
|
||||
fs::create_dir_all(&sandbox_workspace).map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to create sandbox workspace: {}", e),
|
||||
})?;
|
||||
|
||||
// Copy allowed files to sandbox (if working_dir exists and is allowed)
|
||||
if working_dir.exists() && self.is_path_allowed(working_dir, false) {
|
||||
self.copy_safe_files(working_dir, &sandbox_workspace)?;
|
||||
}
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"Sandbox environment ready: {}",
|
||||
sandbox_workspace.display()
|
||||
));
|
||||
|
||||
Ok(sandbox_workspace)
|
||||
}
|
||||
|
||||
/// Copy files safely to sandbox, excluding dangerous files
|
||||
fn copy_safe_files(&self, source: &Path, dest: &Path) -> Result<(), SandboxError> {
|
||||
for entry in fs::read_dir(source).map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to read source directory: {}", e),
|
||||
})? {
|
||||
let entry = entry.map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to read directory entry: {}", e),
|
||||
})?;
|
||||
|
||||
let path = entry.path();
|
||||
let file_name = path.file_name().and_then(|s| s.to_str()).unwrap_or("");
|
||||
|
||||
// Skip dangerous or sensitive files
|
||||
if self.should_skip_file(file_name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let dest_path = dest.join(file_name);
|
||||
|
||||
if path.is_file() {
|
||||
fs::copy(&path, &dest_path).map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to copy file: {}", e),
|
||||
})?;
|
||||
} else if path.is_dir() && !self.should_skip_directory(file_name) {
|
||||
fs::create_dir_all(&dest_path).map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to create directory: {}", e),
|
||||
})?;
|
||||
self.copy_safe_files(&path, &dest_path)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Execute command with resource limits and monitoring
|
||||
async fn execute_with_limits(
|
||||
&self,
|
||||
command: &[&str],
|
||||
env_vars: &[(&str, &str)],
|
||||
working_dir: &Path,
|
||||
) -> Result<crate::container::ContainerOutput, SandboxError> {
|
||||
// Join command parts and execute via shell for proper handling of operators
|
||||
let command_str = command.join(" ");
|
||||
|
||||
let mut cmd = Command::new("sh");
|
||||
cmd.arg("-c");
|
||||
cmd.arg(&command_str);
|
||||
cmd.current_dir(working_dir);
|
||||
cmd.stdout(Stdio::piped());
|
||||
cmd.stderr(Stdio::piped());
|
||||
|
||||
// Set environment variables (filtered)
|
||||
for (key, value) in env_vars {
|
||||
if self.is_env_var_safe(key) {
|
||||
cmd.env(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
// Add sandbox-specific environment variables
|
||||
cmd.env("WRKFLW_SANDBOXED", "true");
|
||||
cmd.env("WRKFLW_SANDBOX_MODE", "strict");
|
||||
|
||||
// Execute with timeout
|
||||
let timeout_duration = self.config.max_execution_time;
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"🏃 Executing sandboxed command: {} (timeout: {}s)",
|
||||
command.join(" "),
|
||||
timeout_duration.as_secs()
|
||||
));
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
let result = tokio::time::timeout(timeout_duration, async {
|
||||
let output = cmd.output().map_err(|e| SandboxError::ExecutionError {
|
||||
reason: format!("Command execution failed: {}", e),
|
||||
})?;
|
||||
|
||||
Ok(crate::container::ContainerOutput {
|
||||
stdout: String::from_utf8_lossy(&output.stdout).to_string(),
|
||||
stderr: String::from_utf8_lossy(&output.stderr).to_string(),
|
||||
exit_code: output.status.code().unwrap_or(-1),
|
||||
})
|
||||
})
|
||||
.await;
|
||||
|
||||
let execution_time = start_time.elapsed();
|
||||
|
||||
match result {
|
||||
Ok(output_result) => {
|
||||
wrkflw_logging::info(&format!(
|
||||
"✅ Sandboxed command completed in {:.2}s",
|
||||
execution_time.as_secs_f64()
|
||||
));
|
||||
output_result
|
||||
}
|
||||
Err(_) => {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"⏰ Sandboxed command timed out after {:.2}s",
|
||||
timeout_duration.as_secs_f64()
|
||||
));
|
||||
Err(SandboxError::ExecutionTimeout {
|
||||
seconds: timeout_duration.as_secs(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a path is allowed for access
|
||||
fn is_path_allowed(&self, path: &Path, write_access: bool) -> bool {
|
||||
let abs_path = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
if write_access {
|
||||
self.config
|
||||
.allowed_write_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
} else {
|
||||
self.config
|
||||
.allowed_read_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
|| self
|
||||
.config
|
||||
.allowed_write_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an environment variable is safe to pass through
|
||||
fn is_env_var_safe(&self, key: &str) -> bool {
|
||||
// Block dangerous environment variables
|
||||
let dangerous_env_vars = [
|
||||
"LD_PRELOAD",
|
||||
"LD_LIBRARY_PATH",
|
||||
"DYLD_INSERT_LIBRARIES",
|
||||
"DYLD_LIBRARY_PATH",
|
||||
"PATH",
|
||||
"HOME",
|
||||
"SHELL",
|
||||
];
|
||||
|
||||
!dangerous_env_vars.contains(&key)
|
||||
}
|
||||
|
||||
/// Check if a file should be skipped during copying
|
||||
fn should_skip_file(&self, filename: &str) -> bool {
|
||||
let dangerous_files = [
|
||||
".ssh",
|
||||
".gnupg",
|
||||
".aws",
|
||||
".docker",
|
||||
"id_rsa",
|
||||
"id_ed25519",
|
||||
"credentials",
|
||||
"config",
|
||||
".env",
|
||||
".secrets",
|
||||
];
|
||||
|
||||
dangerous_files
|
||||
.iter()
|
||||
.any(|pattern| filename.contains(pattern))
|
||||
|| filename.starts_with('.') && filename != ".gitignore" && filename != ".github"
|
||||
}
|
||||
|
||||
/// Check if a directory should be skipped
|
||||
fn should_skip_directory(&self, dirname: &str) -> bool {
|
||||
let skip_dirs = [
|
||||
"target",
|
||||
"node_modules",
|
||||
".git",
|
||||
".cargo",
|
||||
".npm",
|
||||
".cache",
|
||||
"build",
|
||||
"dist",
|
||||
"tmp",
|
||||
"temp",
|
||||
];
|
||||
|
||||
skip_dirs.contains(&dirname)
|
||||
}
|
||||
|
||||
/// Compile regex patterns for dangerous command detection
|
||||
fn compile_dangerous_patterns() -> Vec<Regex> {
|
||||
let patterns = [
|
||||
r"rm\s+.*-rf?\s*/", // rm -rf /
|
||||
r"dd\s+.*of=/dev/", // dd ... of=/dev/...
|
||||
r">\s*/dev/sd[a-z]", // > /dev/sda
|
||||
r"mkfs\.", // mkfs.ext4, etc.
|
||||
r"fdisk\s+/dev/", // fdisk /dev/...
|
||||
r"mount\s+.*\s+/", // mount ... /
|
||||
r"chroot\s+/", // chroot /
|
||||
r"sudo\s+", // sudo commands
|
||||
r"su\s+", // su commands
|
||||
r"bash\s+-c\s+.*rm.*-rf", // bash -c "rm -rf ..."
|
||||
r"sh\s+-c\s+.*rm.*-rf", // sh -c "rm -rf ..."
|
||||
r"eval\s+.*rm.*-rf", // eval "rm -rf ..."
|
||||
r":\(\)\{.*;\};:", // Fork bomb
|
||||
r"/proc/sys/", // /proc/sys access
|
||||
r"/etc/passwd", // /etc/passwd access
|
||||
r"/etc/shadow", // /etc/shadow access
|
||||
r"nc\s+.*-e", // netcat with exec
|
||||
r"wget\s+.*\|\s*sh", // wget ... | sh
|
||||
r"curl\s+.*\|\s*sh", // curl ... | sh
|
||||
];
|
||||
|
||||
patterns
|
||||
.iter()
|
||||
.filter_map(|pattern| {
|
||||
Regex::new(pattern)
|
||||
.map_err(|e| {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"Invalid regex pattern {}: {}",
|
||||
pattern, e
|
||||
));
|
||||
e
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a default sandbox configuration for CI/CD workflows
|
||||
pub fn create_workflow_sandbox_config() -> SandboxConfig {
|
||||
let mut allowed_read_paths = HashSet::new();
|
||||
allowed_read_paths.insert(PathBuf::from("."));
|
||||
|
||||
let mut allowed_write_paths = HashSet::new();
|
||||
allowed_write_paths.insert(PathBuf::from("."));
|
||||
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(1800), // 30 minutes
|
||||
max_memory_mb: 2048, // 2GB
|
||||
max_processes: 50,
|
||||
allow_network: true,
|
||||
strict_mode: false,
|
||||
allowed_read_paths,
|
||||
allowed_write_paths,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a strict sandbox configuration for untrusted code
|
||||
pub fn create_strict_sandbox_config() -> SandboxConfig {
|
||||
let mut allowed_read_paths = HashSet::new();
|
||||
allowed_read_paths.insert(PathBuf::from("."));
|
||||
|
||||
let mut allowed_write_paths = HashSet::new();
|
||||
allowed_write_paths.insert(PathBuf::from("."));
|
||||
|
||||
// Very limited command set
|
||||
let allowed_commands = ["echo", "cat", "ls", "pwd", "date"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(60), // 1 minute
|
||||
max_memory_mb: 128, // 128MB
|
||||
max_processes: 5,
|
||||
allow_network: false,
|
||||
strict_mode: true,
|
||||
allowed_read_paths,
|
||||
allowed_write_paths,
|
||||
allowed_commands,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_dangerous_pattern_detection() {
|
||||
let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();
|
||||
|
||||
// Should block dangerous commands
|
||||
assert!(sandbox.validate_command("rm -rf /").is_err());
|
||||
assert!(sandbox
|
||||
.validate_command("dd if=/dev/zero of=/dev/sda")
|
||||
.is_err());
|
||||
assert!(sandbox.validate_command("sudo rm -rf /home").is_err());
|
||||
assert!(sandbox.validate_command("bash -c 'rm -rf /'").is_err());
|
||||
|
||||
// Should allow safe commands
|
||||
assert!(sandbox.validate_command("echo hello").is_ok());
|
||||
assert!(sandbox.validate_command("ls -la").is_ok());
|
||||
assert!(sandbox.validate_command("cargo build").is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_command_whitelist() {
|
||||
let config = create_strict_sandbox_config();
|
||||
let sandbox = Sandbox::new(config).unwrap();
|
||||
|
||||
// Should allow whitelisted commands
|
||||
assert!(sandbox.validate_command("echo hello").is_ok());
|
||||
assert!(sandbox.validate_command("ls").is_ok());
|
||||
|
||||
// Should block non-whitelisted commands
|
||||
assert!(sandbox.validate_command("git clone").is_err());
|
||||
assert!(sandbox.validate_command("cargo build").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_file_filtering() {
|
||||
let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();
|
||||
|
||||
// Should skip dangerous files
|
||||
assert!(sandbox.should_skip_file("id_rsa"));
|
||||
assert!(sandbox.should_skip_file(".ssh"));
|
||||
assert!(sandbox.should_skip_file("credentials"));
|
||||
|
||||
// Should allow safe files
|
||||
assert!(!sandbox.should_skip_file("Cargo.toml"));
|
||||
assert!(!sandbox.should_skip_file("README.md"));
|
||||
assert!(!sandbox.should_skip_file(".gitignore"));
|
||||
}
|
||||
}
|
||||
339
crates/runtime/src/secure_emulation.rs
Normal file
339
crates/runtime/src/secure_emulation.rs
Normal file
@@ -0,0 +1,339 @@
|
||||
use crate::container::{ContainerError, ContainerOutput, ContainerRuntime};
|
||||
use crate::sandbox::{create_workflow_sandbox_config, Sandbox, SandboxConfig, SandboxError};
|
||||
use async_trait::async_trait;
|
||||
use std::path::Path;
|
||||
use wrkflw_logging;
|
||||
|
||||
/// Secure emulation runtime that uses sandboxing for safety
|
||||
pub struct SecureEmulationRuntime {
|
||||
sandbox: Sandbox,
|
||||
}
|
||||
|
||||
impl Default for SecureEmulationRuntime {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl SecureEmulationRuntime {
|
||||
/// Create a new secure emulation runtime with default workflow-friendly configuration
|
||||
pub fn new() -> Self {
|
||||
let config = create_workflow_sandbox_config();
|
||||
let sandbox = Sandbox::new(config).expect("Failed to create sandbox");
|
||||
|
||||
wrkflw_logging::info("🔒 Initialized secure emulation runtime with sandboxing");
|
||||
|
||||
Self { sandbox }
|
||||
}
|
||||
|
||||
/// Create a new secure emulation runtime with custom sandbox configuration
|
||||
pub fn new_with_config(config: SandboxConfig) -> Result<Self, ContainerError> {
|
||||
let sandbox = Sandbox::new(config).map_err(|e| {
|
||||
ContainerError::ContainerStart(format!("Failed to create sandbox: {}", e))
|
||||
})?;
|
||||
|
||||
wrkflw_logging::info("🔒 Initialized secure emulation runtime with custom config");
|
||||
|
||||
Ok(Self { sandbox })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ContainerRuntime for SecureEmulationRuntime {
|
||||
async fn run_container(
|
||||
&self,
|
||||
image: &str,
|
||||
command: &[&str],
|
||||
env_vars: &[(&str, &str)],
|
||||
working_dir: &Path,
|
||||
_volumes: &[(&Path, &Path)],
|
||||
) -> Result<ContainerOutput, ContainerError> {
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Executing sandboxed command: {} (image: {})",
|
||||
command.join(" "),
|
||||
image
|
||||
));
|
||||
|
||||
// Use sandbox to execute the command safely
|
||||
let result = self
|
||||
.sandbox
|
||||
.execute_command(command, env_vars, working_dir)
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(output) => {
|
||||
wrkflw_logging::info("✅ Sandboxed command completed successfully");
|
||||
Ok(output)
|
||||
}
|
||||
Err(SandboxError::BlockedCommand { command }) => {
|
||||
let error_msg = format!(
|
||||
"🚫 SECURITY BLOCK: Command '{}' is not allowed in secure emulation mode. \
|
||||
This command was blocked for security reasons. \
|
||||
If you need to run this command, please use Docker or Podman mode instead.",
|
||||
command
|
||||
);
|
||||
wrkflw_logging::warning(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
Err(SandboxError::DangerousPattern { pattern }) => {
|
||||
let error_msg = format!(
|
||||
"🚫 SECURITY BLOCK: Dangerous command pattern detected: '{}'. \
|
||||
This command was blocked because it matches a known dangerous pattern. \
|
||||
Please review your workflow for potentially harmful commands.",
|
||||
pattern
|
||||
);
|
||||
wrkflw_logging::warning(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
Err(SandboxError::ExecutionTimeout { seconds }) => {
|
||||
let error_msg = format!(
|
||||
"⏰ Command execution timed out after {} seconds. \
|
||||
Consider optimizing your command or increasing timeout limits.",
|
||||
seconds
|
||||
);
|
||||
wrkflw_logging::warning(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
Err(SandboxError::PathAccessDenied { path }) => {
|
||||
let error_msg = format!(
|
||||
"🚫 Path access denied: '{}'. \
|
||||
The sandbox restricts file system access for security.",
|
||||
path
|
||||
);
|
||||
wrkflw_logging::warning(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
Err(SandboxError::ResourceLimitExceeded { resource }) => {
|
||||
let error_msg = format!(
|
||||
"📊 Resource limit exceeded: {}. \
|
||||
Your command used too many system resources.",
|
||||
resource
|
||||
);
|
||||
wrkflw_logging::warning(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
Err(e) => {
|
||||
let error_msg = format!("Sandbox execution failed: {}", e);
|
||||
wrkflw_logging::error(&error_msg);
|
||||
Err(ContainerError::ContainerExecution(error_msg))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn pull_image(&self, image: &str) -> Result<(), ContainerError> {
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Secure emulation: Pretending to pull image {}",
|
||||
image
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn build_image(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Secure emulation: Pretending to build image {} from {}",
|
||||
tag,
|
||||
dockerfile.display()
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn prepare_language_environment(
|
||||
&self,
|
||||
language: &str,
|
||||
version: Option<&str>,
|
||||
_additional_packages: Option<Vec<String>>,
|
||||
) -> Result<String, ContainerError> {
|
||||
// For secure emulation runtime, we'll use a simplified approach
|
||||
// that doesn't require building custom images
|
||||
let base_image = match language {
|
||||
"python" => version.map_or("python:3.11-slim".to_string(), |v| format!("python:{}", v)),
|
||||
"node" => version.map_or("node:20-slim".to_string(), |v| format!("node:{}", v)),
|
||||
"java" => version.map_or("eclipse-temurin:17-jdk".to_string(), |v| {
|
||||
format!("eclipse-temurin:{}", v)
|
||||
}),
|
||||
"go" => version.map_or("golang:1.21-slim".to_string(), |v| format!("golang:{}", v)),
|
||||
"dotnet" => version.map_or("mcr.microsoft.com/dotnet/sdk:7.0".to_string(), |v| {
|
||||
format!("mcr.microsoft.com/dotnet/sdk:{}", v)
|
||||
}),
|
||||
"rust" => version.map_or("rust:latest".to_string(), |v| format!("rust:{}", v)),
|
||||
_ => {
|
||||
return Err(ContainerError::ContainerStart(format!(
|
||||
"Unsupported language: {}",
|
||||
language
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
// For emulation, we'll just return the base image
|
||||
// The actual package installation will be handled during container execution
|
||||
Ok(base_image)
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle special actions in secure emulation mode
|
||||
pub async fn handle_special_action_secure(action: &str) -> Result<(), ContainerError> {
|
||||
// Extract owner, repo and version from the action
|
||||
let action_parts: Vec<&str> = action.split('@').collect();
|
||||
let action_name = action_parts[0];
|
||||
let action_version = if action_parts.len() > 1 {
|
||||
action_parts[1]
|
||||
} else {
|
||||
"latest"
|
||||
};
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Processing action in secure mode: {} @ {}",
|
||||
action_name, action_version
|
||||
));
|
||||
|
||||
// In secure mode, we're more restrictive about what actions we allow
|
||||
match action_name {
|
||||
// Core GitHub actions that are generally safe
|
||||
name if name.starts_with("actions/checkout") => {
|
||||
wrkflw_logging::info("✅ Checkout action - workspace files are prepared securely");
|
||||
}
|
||||
name if name.starts_with("actions/setup-node") => {
|
||||
wrkflw_logging::info("🟡 Node.js setup - using system Node.js in secure mode");
|
||||
check_command_available_secure("node", "Node.js", "https://nodejs.org/");
|
||||
}
|
||||
name if name.starts_with("actions/setup-python") => {
|
||||
wrkflw_logging::info("🟡 Python setup - using system Python in secure mode");
|
||||
check_command_available_secure("python", "Python", "https://www.python.org/downloads/");
|
||||
}
|
||||
name if name.starts_with("actions/setup-java") => {
|
||||
wrkflw_logging::info("🟡 Java setup - using system Java in secure mode");
|
||||
check_command_available_secure("java", "Java", "https://adoptium.net/");
|
||||
}
|
||||
name if name.starts_with("actions/cache") => {
|
||||
wrkflw_logging::info("🟡 Cache action - caching disabled in secure emulation mode");
|
||||
}
|
||||
|
||||
// Rust-specific actions
|
||||
name if name.starts_with("actions-rs/cargo") => {
|
||||
wrkflw_logging::info("🟡 Rust cargo action - using system Rust in secure mode");
|
||||
check_command_available_secure("cargo", "Rust/Cargo", "https://rustup.rs/");
|
||||
}
|
||||
name if name.starts_with("actions-rs/toolchain") => {
|
||||
wrkflw_logging::info("🟡 Rust toolchain action - using system Rust in secure mode");
|
||||
check_command_available_secure("rustc", "Rust", "https://rustup.rs/");
|
||||
}
|
||||
name if name.starts_with("actions-rs/fmt") => {
|
||||
wrkflw_logging::info("🟡 Rust formatter action - using system rustfmt in secure mode");
|
||||
check_command_available_secure("rustfmt", "rustfmt", "rustup component add rustfmt");
|
||||
}
|
||||
|
||||
// Potentially dangerous actions that we warn about
|
||||
name if name.contains("docker") || name.contains("container") => {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🚫 Docker/container action '{}' is not supported in secure emulation mode. \
|
||||
Use Docker or Podman mode for container actions.",
|
||||
action_name
|
||||
));
|
||||
}
|
||||
name if name.contains("ssh") || name.contains("deploy") => {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🚫 SSH/deployment action '{}' is restricted in secure emulation mode. \
|
||||
Use Docker or Podman mode for deployment actions.",
|
||||
action_name
|
||||
));
|
||||
}
|
||||
|
||||
// Unknown actions
|
||||
_ => {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🟡 Unknown action '{}' in secure emulation mode. \
|
||||
Some functionality may be limited or unavailable.",
|
||||
action_name
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if a command is available, with security-focused messaging
|
||||
fn check_command_available_secure(command: &str, name: &str, install_url: &str) {
|
||||
use std::process::Command;
|
||||
|
||||
let is_available = Command::new("which")
|
||||
.arg(command)
|
||||
.output()
|
||||
.map(|output| output.status.success())
|
||||
.unwrap_or(false);
|
||||
|
||||
if !is_available {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🔧 {} is required but not found on the system",
|
||||
name
|
||||
));
|
||||
wrkflw_logging::info(&format!(
|
||||
"To use this action in secure mode, please install {}: {}",
|
||||
name, install_url
|
||||
));
|
||||
wrkflw_logging::info(&format!(
|
||||
"Alternatively, use Docker or Podman mode for automatic {} installation",
|
||||
name
|
||||
));
|
||||
} else {
|
||||
// Try to get version information
|
||||
if let Ok(output) = Command::new(command).arg("--version").output() {
|
||||
if output.status.success() {
|
||||
let version = String::from_utf8_lossy(&output.stdout);
|
||||
wrkflw_logging::info(&format!(
|
||||
"✅ Using system {} in secure mode: {}",
|
||||
name,
|
||||
version.trim()
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::sandbox::create_strict_sandbox_config;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secure_emulation_blocks_dangerous_commands() {
|
||||
let config = create_strict_sandbox_config();
|
||||
let runtime = SecureEmulationRuntime::new_with_config(config).unwrap();
|
||||
|
||||
// Should block dangerous commands
|
||||
let result = runtime
|
||||
.run_container(
|
||||
"alpine:latest",
|
||||
&["rm", "-rf", "/"],
|
||||
&[],
|
||||
&PathBuf::from("."),
|
||||
&[],
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(result.is_err());
|
||||
let error_msg = result.unwrap_err().to_string();
|
||||
assert!(error_msg.contains("SECURITY BLOCK"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secure_emulation_allows_safe_commands() {
|
||||
let runtime = SecureEmulationRuntime::new();
|
||||
|
||||
// Should allow safe commands
|
||||
let result = runtime
|
||||
.run_container(
|
||||
"alpine:latest",
|
||||
&["echo", "hello world"],
|
||||
&[],
|
||||
&PathBuf::from("."),
|
||||
&[],
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok());
|
||||
let output = result.unwrap();
|
||||
assert!(output.stdout.contains("hello world"));
|
||||
assert_eq!(output.exit_code, 0);
|
||||
}
|
||||
}
|
||||
56
crates/secrets/Cargo.toml
Normal file
56
crates/secrets/Cargo.toml
Normal file
@@ -0,0 +1,56 @@
|
||||
[package]
|
||||
name = "wrkflw-secrets"
|
||||
version = "0.7.1"
|
||||
edition = "2021"
|
||||
authors = ["wrkflw contributors"]
|
||||
description = "Secrets management for wrkflw workflow execution"
|
||||
license = "MIT"
|
||||
keywords = ["secrets", "workflow", "ci-cd", "github-actions"]
|
||||
categories = ["development-tools"]
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_yaml = "0.9"
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
anyhow = "1.0"
|
||||
thiserror = "1.0"
|
||||
base64 = "0.21"
|
||||
aes-gcm = "0.10"
|
||||
rand = "0.8"
|
||||
dirs = "5.0"
|
||||
tracing = "0.1"
|
||||
regex = "1.10"
|
||||
url = "2.4"
|
||||
async-trait = "0.1"
|
||||
lazy_static = "1.4"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
pbkdf2 = "0.12"
|
||||
hmac = "0.12"
|
||||
sha2 = "0.10"
|
||||
|
||||
# Optional dependencies for different secret providers (commented out for compatibility)
|
||||
# reqwest = { version = "0.11", features = ["json"], optional = true }
|
||||
# aws-sdk-secretsmanager = { version = "1.0", optional = true }
|
||||
# azure_security_keyvault = { version = "0.16", optional = true }
|
||||
|
||||
[features]
|
||||
default = ["env-provider", "file-provider"]
|
||||
env-provider = []
|
||||
file-provider = []
|
||||
# Cloud provider features are planned for future implementation
|
||||
# vault-provider = ["reqwest"]
|
||||
# aws-provider = ["aws-sdk-secretsmanager", "reqwest"]
|
||||
# azure-provider = ["azure_security_keyvault", "reqwest"]
|
||||
# gcp-provider = ["reqwest"]
|
||||
# all-providers = ["vault-provider", "aws-provider", "azure-provider", "gcp-provider"]
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.8"
|
||||
tokio-test = "0.4"
|
||||
uuid = { version = "1.6", features = ["v4"] }
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
|
||||
[[bench]]
|
||||
name = "masking_bench"
|
||||
harness = false
|
||||
387
crates/secrets/README.md
Normal file
387
crates/secrets/README.md
Normal file
@@ -0,0 +1,387 @@
|
||||
# wrkflw-secrets
|
||||
|
||||
Comprehensive secrets management for wrkflw workflow execution. This crate provides secure handling of secrets with support for multiple providers, encryption, masking, and GitHub Actions-compatible variable substitution.
|
||||
|
||||
## Features
|
||||
|
||||
- **Multiple Secret Providers**: Environment variables, files, HashiCorp Vault, AWS Secrets Manager, Azure Key Vault, Google Cloud Secret Manager
|
||||
- **Secure Storage**: AES-256-GCM encryption for secrets at rest
|
||||
- **Variable Substitution**: GitHub Actions-compatible `${{ secrets.* }}` syntax
|
||||
- **Secret Masking**: Automatic masking of secrets in logs and output with pattern detection
|
||||
- **Caching**: Optional caching with TTL for performance optimization
|
||||
- **Rate Limiting**: Built-in protection against secret access abuse
|
||||
- **Input Validation**: Comprehensive validation of secret names and values
|
||||
- **Health Checks**: Provider health monitoring and diagnostics
|
||||
- **Configuration**: Flexible YAML/JSON configuration with environment variable support
|
||||
- **Thread Safety**: Full async/await support with concurrent access
|
||||
- **Performance Optimized**: Compiled regex patterns and caching for high-throughput scenarios
|
||||
|
||||
## Quick Start
|
||||
|
||||
```rust
|
||||
use wrkflw_secrets::prelude::*;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> SecretResult<()> {
|
||||
// Create a secret manager with default configuration
|
||||
let manager = SecretManager::default().await?;
|
||||
|
||||
// Set an environment variable
|
||||
std::env::set_var("GITHUB_TOKEN", "ghp_your_token_here");
|
||||
|
||||
// Get a secret
|
||||
let secret = manager.get_secret("GITHUB_TOKEN").await?;
|
||||
println!("Token: {}", secret.value());
|
||||
|
||||
// Use secret substitution
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
let template = "curl -H 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' https://api.github.com";
|
||||
let resolved = substitution.substitute(template).await?;
|
||||
|
||||
// Mask secrets in logs
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret(secret.value());
|
||||
let safe_log = masker.mask(&resolved);
|
||||
println!("Safe log: {}", safe_log);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
# Set default provider
|
||||
export WRKFLW_DEFAULT_SECRET_PROVIDER=env
|
||||
|
||||
# Enable/disable secret masking
|
||||
export WRKFLW_SECRET_MASKING=true
|
||||
|
||||
# Set operation timeout
|
||||
export WRKFLW_SECRET_TIMEOUT=30
|
||||
```
|
||||
|
||||
### Configuration File
|
||||
|
||||
Create `~/.wrkflw/secrets.yml`:
|
||||
|
||||
```yaml
|
||||
default_provider: env
|
||||
enable_masking: true
|
||||
timeout_seconds: 30
|
||||
enable_caching: true
|
||||
cache_ttl_seconds: 300
|
||||
|
||||
providers:
|
||||
env:
|
||||
type: environment
|
||||
prefix: "WRKFLW_SECRET_"
|
||||
|
||||
file:
|
||||
type: file
|
||||
path: "~/.wrkflw/secrets.json"
|
||||
|
||||
vault:
|
||||
type: vault
|
||||
url: "https://vault.example.com"
|
||||
auth:
|
||||
method: token
|
||||
token: "${VAULT_TOKEN}"
|
||||
mount_path: "secret"
|
||||
```
|
||||
|
||||
## Secret Providers
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The simplest provider reads secrets from environment variables:
|
||||
|
||||
```rust
|
||||
// With prefix
|
||||
std::env::set_var("WRKFLW_SECRET_API_KEY", "secret_value");
|
||||
let secret = manager.get_secret_from_provider("env", "API_KEY").await?;
|
||||
|
||||
// Without prefix
|
||||
std::env::set_var("GITHUB_TOKEN", "ghp_token");
|
||||
let secret = manager.get_secret_from_provider("env", "GITHUB_TOKEN").await?;
|
||||
```
|
||||
|
||||
### File-based Storage
|
||||
|
||||
Store secrets in JSON, YAML, or environment files:
|
||||
|
||||
**JSON format** (`secrets.json`):
|
||||
```json
|
||||
{
|
||||
"API_KEY": "secret_api_key",
|
||||
"DB_PASSWORD": "secret_password"
|
||||
}
|
||||
```
|
||||
|
||||
**Environment format** (`secrets.env`):
|
||||
```bash
|
||||
API_KEY=secret_api_key
|
||||
DB_PASSWORD="quoted password"
|
||||
GITHUB_TOKEN='single quoted token'
|
||||
```
|
||||
|
||||
**YAML format** (`secrets.yml`):
|
||||
```yaml
|
||||
API_KEY: secret_api_key
|
||||
DB_PASSWORD: secret_password
|
||||
```
|
||||
|
||||
### HashiCorp Vault
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
vault:
|
||||
type: vault
|
||||
url: "https://vault.example.com"
|
||||
auth:
|
||||
method: token
|
||||
token: "${VAULT_TOKEN}"
|
||||
mount_path: "secret"
|
||||
```
|
||||
|
||||
### AWS Secrets Manager
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
aws:
|
||||
type: aws_secrets_manager
|
||||
region: "us-east-1"
|
||||
role_arn: "arn:aws:iam::123456789012:role/SecretRole" # optional
|
||||
```
|
||||
|
||||
### Azure Key Vault
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
azure:
|
||||
type: azure_key_vault
|
||||
vault_url: "https://myvault.vault.azure.net/"
|
||||
auth:
|
||||
method: service_principal
|
||||
client_id: "${AZURE_CLIENT_ID}"
|
||||
client_secret: "${AZURE_CLIENT_SECRET}"
|
||||
tenant_id: "${AZURE_TENANT_ID}"
|
||||
```
|
||||
|
||||
### Google Cloud Secret Manager
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
gcp:
|
||||
type: gcp_secret_manager
|
||||
project_id: "my-project"
|
||||
key_file: "/path/to/service-account.json" # optional
|
||||
```
|
||||
|
||||
## Variable Substitution
|
||||
|
||||
Support for GitHub Actions-compatible secret references:
|
||||
|
||||
```rust
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
// Default provider
|
||||
let template = "TOKEN=${{ secrets.GITHUB_TOKEN }}";
|
||||
let resolved = substitution.substitute(template).await?;
|
||||
|
||||
// Specific provider
|
||||
let template = "API_KEY=${{ secrets.vault:API_KEY }}";
|
||||
let resolved = substitution.substitute(template).await?;
|
||||
```
|
||||
|
||||
## Secret Masking
|
||||
|
||||
Automatically mask secrets in logs and output:
|
||||
|
||||
```rust
|
||||
let mut masker = SecretMasker::new();
|
||||
|
||||
// Add specific secrets
|
||||
masker.add_secret("secret_value");
|
||||
|
||||
// Automatic pattern detection for common secret types
|
||||
let log = "Token: ghp_1234567890123456789012345678901234567890";
|
||||
let masked = masker.mask(log);
|
||||
// Output: "Token: ghp_***"
|
||||
```
|
||||
|
||||
Supported patterns:
|
||||
- GitHub Personal Access Tokens (`ghp_*`)
|
||||
- GitHub App tokens (`ghs_*`)
|
||||
- GitHub OAuth tokens (`gho_*`)
|
||||
- AWS Access Keys (`AKIA*`)
|
||||
- JWT tokens
|
||||
- Generic API keys
|
||||
|
||||
## Encrypted Storage
|
||||
|
||||
For sensitive environments, use encrypted storage:
|
||||
|
||||
```rust
|
||||
use wrkflw_secrets::storage::{EncryptedSecretStore, KeyDerivation};
|
||||
|
||||
// Create encrypted store
|
||||
let (mut store, key) = EncryptedSecretStore::new()?;
|
||||
|
||||
// Add secrets
|
||||
store.add_secret(&key, "API_KEY", "secret_value")?;
|
||||
|
||||
// Save to file
|
||||
store.save_to_file("secrets.encrypted").await?;
|
||||
|
||||
// Load from file
|
||||
let loaded_store = EncryptedSecretStore::load_from_file("secrets.encrypted").await?;
|
||||
let secret = loaded_store.get_secret(&key, "API_KEY")?;
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
All operations return `SecretResult<T>` with comprehensive error types:
|
||||
|
||||
```rust
|
||||
match manager.get_secret("MISSING_SECRET").await {
|
||||
Ok(secret) => println!("Secret: {}", secret.value()),
|
||||
Err(SecretError::NotFound { name }) => {
|
||||
eprintln!("Secret '{}' not found", name);
|
||||
}
|
||||
Err(SecretError::ProviderNotFound { provider }) => {
|
||||
eprintln!("Provider '{}' not configured", provider);
|
||||
}
|
||||
Err(SecretError::AuthenticationFailed { provider, reason }) => {
|
||||
eprintln!("Auth failed for {}: {}", provider, reason);
|
||||
}
|
||||
Err(e) => eprintln!("Error: {}", e),
|
||||
}
|
||||
```
|
||||
|
||||
## Health Checks
|
||||
|
||||
Monitor provider health:
|
||||
|
||||
```rust
|
||||
let health_results = manager.health_check().await;
|
||||
for (provider, result) in health_results {
|
||||
match result {
|
||||
Ok(()) => println!("✓ {} is healthy", provider),
|
||||
Err(e) => println!("✗ {} failed: {}", provider, e),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
1. **Use encryption** for secrets at rest
|
||||
2. **Enable masking** to prevent secrets in logs
|
||||
3. **Rotate secrets** regularly
|
||||
4. **Use least privilege** access for secret providers
|
||||
5. **Monitor access** through health checks and logging
|
||||
6. **Use provider-specific authentication** (IAM roles, service principals)
|
||||
7. **Configure rate limiting** to prevent abuse
|
||||
8. **Validate input** - the system automatically validates secret names and values
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
Protect against abuse with built-in rate limiting:
|
||||
|
||||
```rust
|
||||
use wrkflw_secrets::rate_limit::RateLimitConfig;
|
||||
use std::time::Duration;
|
||||
|
||||
let mut config = SecretConfig::default();
|
||||
config.rate_limit = RateLimitConfig {
|
||||
max_requests: 100, // Max requests per window
|
||||
window_duration: Duration::from_secs(60), // 1 minute window
|
||||
enabled: true,
|
||||
};
|
||||
|
||||
let manager = SecretManager::new(config).await?;
|
||||
|
||||
// Rate limiting is automatically applied to all secret access operations
|
||||
match manager.get_secret("API_KEY").await {
|
||||
Ok(secret) => println!("Success: {}", secret.value()),
|
||||
Err(SecretError::RateLimitExceeded(msg)) => {
|
||||
println!("Rate limited: {}", msg);
|
||||
}
|
||||
Err(e) => println!("Other error: {}", e),
|
||||
}
|
||||
```
|
||||
|
||||
## Input Validation
|
||||
|
||||
All inputs are automatically validated:
|
||||
|
||||
```rust
|
||||
// Secret names must:
|
||||
// - Be 1-255 characters long
|
||||
// - Contain only letters, numbers, underscores, hyphens, and dots
|
||||
// - Not start or end with dots
|
||||
// - Not contain consecutive dots
|
||||
// - Not be reserved system names
|
||||
|
||||
// Secret values must:
|
||||
// - Be under 1MB in size
|
||||
// - Not contain null bytes
|
||||
// - Be valid UTF-8
|
||||
|
||||
// Invalid examples that will be rejected:
|
||||
manager.get_secret("").await; // Empty name
|
||||
manager.get_secret("invalid/name").await; // Invalid characters
|
||||
manager.get_secret(".hidden").await; // Starts with dot
|
||||
manager.get_secret("CON").await; // Reserved name
|
||||
```
|
||||
|
||||
## Performance Features
|
||||
|
||||
### Caching
|
||||
|
||||
```rust
|
||||
let config = SecretConfig {
|
||||
enable_caching: true,
|
||||
cache_ttl_seconds: 300, // 5 minutes
|
||||
..Default::default()
|
||||
};
|
||||
```
|
||||
|
||||
### Optimized Pattern Matching
|
||||
|
||||
- Pre-compiled regex patterns for secret detection
|
||||
- Global pattern cache using `OnceLock`
|
||||
- Efficient string replacement algorithms
|
||||
- Cached mask generation
|
||||
|
||||
### Benchmarking
|
||||
|
||||
Run performance benchmarks:
|
||||
|
||||
```bash
|
||||
cargo bench -p wrkflw-secrets
|
||||
```
|
||||
|
||||
## Feature Flags
|
||||
|
||||
Enable optional providers:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
wrkflw-secrets = { version = "0.1", features = ["vault-provider", "aws-provider"] }
|
||||
```
|
||||
|
||||
Available features:
|
||||
- `env-provider` (default)
|
||||
- `file-provider` (default)
|
||||
- `vault-provider`
|
||||
- `aws-provider`
|
||||
- `azure-provider`
|
||||
- `gcp-provider`
|
||||
- `all-providers`
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details.
|
||||
92
crates/secrets/benches/masking_bench.rs
Normal file
92
crates/secrets/benches/masking_bench.rs
Normal file
@@ -0,0 +1,92 @@
|
||||
// Copyright 2024 wrkflw contributors
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Benchmarks for secret masking performance
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use wrkflw_secrets::SecretMasker;
|
||||
|
||||
fn bench_basic_masking(c: &mut Criterion) {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("password123");
|
||||
masker.add_secret("api_key_abcdef123456");
|
||||
masker.add_secret("super_secret_value_that_should_be_masked");
|
||||
|
||||
let text = "The password is password123 and the API key is api_key_abcdef123456. Also super_secret_value_that_should_be_masked is here.";
|
||||
|
||||
c.bench_function("basic_masking", |b| b.iter(|| masker.mask(black_box(text))));
|
||||
}
|
||||
|
||||
fn bench_pattern_masking(c: &mut Criterion) {
|
||||
let masker = SecretMasker::new();
|
||||
|
||||
let text = "GitHub token: ghp_1234567890123456789012345678901234567890 and AWS key: AKIAIOSFODNN7EXAMPLE";
|
||||
|
||||
c.bench_function("pattern_masking", |b| {
|
||||
b.iter(|| masker.mask(black_box(text)))
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_large_text_masking(c: &mut Criterion) {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
masker.add_secret("password456");
|
||||
|
||||
// Create a large text with secrets scattered throughout
|
||||
let mut large_text = String::new();
|
||||
for i in 0..1000 {
|
||||
large_text.push_str(&format!(
|
||||
"Line {}: Some normal text here with secret123 and password456 mixed in. ",
|
||||
i
|
||||
));
|
||||
}
|
||||
|
||||
c.bench_function("large_text_masking", |b| {
|
||||
b.iter(|| masker.mask(black_box(&large_text)))
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_many_secrets(c: &mut Criterion) {
|
||||
let mut masker = SecretMasker::new();
|
||||
|
||||
// Add many secrets
|
||||
for i in 0..100 {
|
||||
masker.add_secret(format!("secret_{}", i));
|
||||
}
|
||||
|
||||
let text = "This text contains secret_50 and secret_75 but not others.";
|
||||
|
||||
c.bench_function("many_secrets", |b| b.iter(|| masker.mask(black_box(text))));
|
||||
}
|
||||
|
||||
fn bench_contains_secrets(c: &mut Criterion) {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("password123");
|
||||
masker.add_secret("api_key_abcdef123456");
|
||||
|
||||
let text_with_secrets = "The password is password123";
|
||||
let text_without_secrets = "Just some normal text";
|
||||
let text_with_patterns = "GitHub token: ghp_1234567890123456789012345678901234567890";
|
||||
|
||||
c.bench_function("contains_secrets_with", |b| {
|
||||
b.iter(|| masker.contains_secrets(black_box(text_with_secrets)))
|
||||
});
|
||||
|
||||
c.bench_function("contains_secrets_without", |b| {
|
||||
b.iter(|| masker.contains_secrets(black_box(text_without_secrets)))
|
||||
});
|
||||
|
||||
c.bench_function("contains_secrets_patterns", |b| {
|
||||
b.iter(|| masker.contains_secrets(black_box(text_with_patterns)))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
bench_basic_masking,
|
||||
bench_pattern_masking,
|
||||
bench_large_text_masking,
|
||||
bench_many_secrets,
|
||||
bench_contains_secrets
|
||||
);
|
||||
criterion_main!(benches);
|
||||
203
crates/secrets/src/config.rs
Normal file
203
crates/secrets/src/config.rs
Normal file
@@ -0,0 +1,203 @@
|
||||
use crate::rate_limit::RateLimitConfig;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Configuration for the secrets management system
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SecretConfig {
|
||||
/// Default secret provider to use when none is specified
|
||||
pub default_provider: String,
|
||||
|
||||
/// Configuration for each secret provider
|
||||
pub providers: HashMap<String, SecretProviderConfig>,
|
||||
|
||||
/// Whether to enable secret masking in logs
|
||||
pub enable_masking: bool,
|
||||
|
||||
/// Timeout for secret operations in seconds
|
||||
pub timeout_seconds: u64,
|
||||
|
||||
/// Whether to cache secrets for performance
|
||||
pub enable_caching: bool,
|
||||
|
||||
/// Cache TTL in seconds
|
||||
pub cache_ttl_seconds: u64,
|
||||
|
||||
/// Rate limiting configuration
|
||||
#[serde(skip)]
|
||||
pub rate_limit: RateLimitConfig,
|
||||
}
|
||||
|
||||
impl Default for SecretConfig {
|
||||
fn default() -> Self {
|
||||
let mut providers = HashMap::new();
|
||||
|
||||
// Add default environment variable provider
|
||||
providers.insert(
|
||||
"env".to_string(),
|
||||
SecretProviderConfig::Environment { prefix: None },
|
||||
);
|
||||
|
||||
// Add default file provider
|
||||
providers.insert(
|
||||
"file".to_string(),
|
||||
SecretProviderConfig::File {
|
||||
path: "~/.wrkflw/secrets".to_string(),
|
||||
},
|
||||
);
|
||||
|
||||
Self {
|
||||
default_provider: "env".to_string(),
|
||||
providers,
|
||||
enable_masking: true,
|
||||
timeout_seconds: 30,
|
||||
enable_caching: true,
|
||||
cache_ttl_seconds: 300, // 5 minutes
|
||||
rate_limit: RateLimitConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for different types of secret providers
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type", rename_all = "snake_case")]
|
||||
pub enum SecretProviderConfig {
|
||||
/// Environment variables provider
|
||||
Environment {
|
||||
/// Optional prefix for environment variables (e.g., "WRKFLW_SECRET_")
|
||||
prefix: Option<String>,
|
||||
},
|
||||
|
||||
/// File-based secret storage
|
||||
File {
|
||||
/// Path to the secrets file or directory
|
||||
path: String,
|
||||
},
|
||||
// Cloud providers are planned for future implementation
|
||||
// /// HashiCorp Vault provider
|
||||
// #[cfg(feature = "vault-provider")]
|
||||
// Vault {
|
||||
// /// Vault server URL
|
||||
// url: String,
|
||||
// /// Authentication method
|
||||
// auth: VaultAuth,
|
||||
// /// Optional mount path (defaults to "secret")
|
||||
// mount_path: Option<String>,
|
||||
// },
|
||||
|
||||
// /// AWS Secrets Manager provider
|
||||
// #[cfg(feature = "aws-provider")]
|
||||
// AwsSecretsManager {
|
||||
// /// AWS region
|
||||
// region: String,
|
||||
// /// Optional role ARN to assume
|
||||
// role_arn: Option<String>,
|
||||
// },
|
||||
|
||||
// /// Azure Key Vault provider
|
||||
// #[cfg(feature = "azure-provider")]
|
||||
// AzureKeyVault {
|
||||
// /// Key Vault URL
|
||||
// vault_url: String,
|
||||
// /// Authentication method
|
||||
// auth: AzureAuth,
|
||||
// },
|
||||
|
||||
// /// Google Cloud Secret Manager provider
|
||||
// #[cfg(feature = "gcp-provider")]
|
||||
// GcpSecretManager {
|
||||
// /// GCP project ID
|
||||
// project_id: String,
|
||||
// /// Optional service account key file path
|
||||
// key_file: Option<String>,
|
||||
// },
|
||||
}
|
||||
|
||||
// Cloud provider authentication types are planned for future implementation
|
||||
// /// HashiCorp Vault authentication methods
|
||||
// #[cfg(feature = "vault-provider")]
|
||||
// #[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
// #[serde(tag = "method", rename_all = "snake_case")]
|
||||
// pub enum VaultAuth {
|
||||
// /// Token-based authentication
|
||||
// Token { token: String },
|
||||
// /// AppRole authentication
|
||||
// AppRole { role_id: String, secret_id: String },
|
||||
// /// Kubernetes authentication
|
||||
// Kubernetes {
|
||||
// role: String,
|
||||
// jwt_path: Option<String>,
|
||||
// },
|
||||
// }
|
||||
|
||||
// /// Azure authentication methods
|
||||
// #[cfg(feature = "azure-provider")]
|
||||
// #[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
// #[serde(tag = "method", rename_all = "snake_case")]
|
||||
// pub enum AzureAuth {
|
||||
// /// Service Principal authentication
|
||||
// ServicePrincipal {
|
||||
// client_id: String,
|
||||
// client_secret: String,
|
||||
// tenant_id: String,
|
||||
// },
|
||||
// /// Managed Identity authentication
|
||||
// ManagedIdentity,
|
||||
// /// Azure CLI authentication
|
||||
// AzureCli,
|
||||
// }
|
||||
|
||||
impl SecretConfig {
|
||||
/// Load configuration from a file
|
||||
pub fn from_file(path: &str) -> crate::SecretResult<Self> {
|
||||
let content = std::fs::read_to_string(path)?;
|
||||
|
||||
if path.ends_with(".json") {
|
||||
Ok(serde_json::from_str(&content)?)
|
||||
} else if path.ends_with(".yml") || path.ends_with(".yaml") {
|
||||
Ok(serde_yaml::from_str(&content)?)
|
||||
} else {
|
||||
Err(crate::SecretError::invalid_config(
|
||||
"Unsupported config file format. Use .json, .yml, or .yaml",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Save configuration to a file
|
||||
pub fn to_file(&self, path: &str) -> crate::SecretResult<()> {
|
||||
let content = if path.ends_with(".json") {
|
||||
serde_json::to_string_pretty(self)?
|
||||
} else if path.ends_with(".yml") || path.ends_with(".yaml") {
|
||||
serde_yaml::to_string(self)?
|
||||
} else {
|
||||
return Err(crate::SecretError::invalid_config(
|
||||
"Unsupported config file format. Use .json, .yml, or .yaml",
|
||||
));
|
||||
};
|
||||
|
||||
std::fs::write(path, content)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load configuration from environment variables
|
||||
pub fn from_env() -> Self {
|
||||
let mut config = Self::default();
|
||||
|
||||
// Override default provider if specified
|
||||
if let Ok(provider) = std::env::var("WRKFLW_DEFAULT_SECRET_PROVIDER") {
|
||||
config.default_provider = provider;
|
||||
}
|
||||
|
||||
// Override masking setting
|
||||
if let Ok(masking) = std::env::var("WRKFLW_SECRET_MASKING") {
|
||||
config.enable_masking = masking.parse().unwrap_or(true);
|
||||
}
|
||||
|
||||
// Override timeout
|
||||
if let Ok(timeout) = std::env::var("WRKFLW_SECRET_TIMEOUT") {
|
||||
config.timeout_seconds = timeout.parse().unwrap_or(30);
|
||||
}
|
||||
|
||||
config
|
||||
}
|
||||
}
|
||||
88
crates/secrets/src/error.rs
Normal file
88
crates/secrets/src/error.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
use thiserror::Error;
|
||||
|
||||
/// Result type for secret operations
|
||||
pub type SecretResult<T> = Result<T, SecretError>;
|
||||
|
||||
/// Errors that can occur during secret operations
|
||||
#[derive(Error, Debug)]
|
||||
pub enum SecretError {
|
||||
#[error("Secret not found: {name}")]
|
||||
NotFound { name: String },
|
||||
|
||||
#[error("Secret provider '{provider}' not found")]
|
||||
ProviderNotFound { provider: String },
|
||||
|
||||
#[error("Authentication failed for provider '{provider}': {reason}")]
|
||||
AuthenticationFailed { provider: String, reason: String },
|
||||
|
||||
#[error("Network error accessing secret provider: {0}")]
|
||||
NetworkError(String),
|
||||
|
||||
#[error("Invalid secret configuration: {0}")]
|
||||
InvalidConfig(String),
|
||||
|
||||
#[error("Encryption error: {0}")]
|
||||
EncryptionError(String),
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
IoError(#[from] std::io::Error),
|
||||
|
||||
#[error("JSON parsing error: {0}")]
|
||||
JsonError(#[from] serde_json::Error),
|
||||
|
||||
#[error("YAML parsing error: {0}")]
|
||||
YamlError(#[from] serde_yaml::Error),
|
||||
|
||||
#[error("Invalid secret value format: {0}")]
|
||||
InvalidFormat(String),
|
||||
|
||||
#[error("Secret operation timeout")]
|
||||
Timeout,
|
||||
|
||||
#[error("Permission denied accessing secret: {name}")]
|
||||
PermissionDenied { name: String },
|
||||
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
|
||||
#[error("Invalid secret name: {reason}")]
|
||||
InvalidSecretName { reason: String },
|
||||
|
||||
#[error("Secret value too large: {size} bytes (max: {max_size} bytes)")]
|
||||
SecretTooLarge { size: usize, max_size: usize },
|
||||
|
||||
#[error("Rate limit exceeded: {0}")]
|
||||
RateLimitExceeded(String),
|
||||
}
|
||||
|
||||
impl SecretError {
|
||||
/// Create a new NotFound error
|
||||
pub fn not_found(name: impl Into<String>) -> Self {
|
||||
Self::NotFound { name: name.into() }
|
||||
}
|
||||
|
||||
/// Create a new ProviderNotFound error
|
||||
pub fn provider_not_found(provider: impl Into<String>) -> Self {
|
||||
Self::ProviderNotFound {
|
||||
provider: provider.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new AuthenticationFailed error
|
||||
pub fn auth_failed(provider: impl Into<String>, reason: impl Into<String>) -> Self {
|
||||
Self::AuthenticationFailed {
|
||||
provider: provider.into(),
|
||||
reason: reason.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new InvalidConfig error
|
||||
pub fn invalid_config(msg: impl Into<String>) -> Self {
|
||||
Self::InvalidConfig(msg.into())
|
||||
}
|
||||
|
||||
/// Create a new Internal error
|
||||
pub fn internal(msg: impl Into<String>) -> Self {
|
||||
Self::Internal(msg.into())
|
||||
}
|
||||
}
|
||||
247
crates/secrets/src/lib.rs
Normal file
247
crates/secrets/src/lib.rs
Normal file
@@ -0,0 +1,247 @@
|
||||
// Copyright 2024 wrkflw contributors
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! # wrkflw-secrets
|
||||
//!
|
||||
//! Comprehensive secrets management for wrkflw workflow execution.
|
||||
//! Supports multiple secret providers and secure handling throughout the execution pipeline.
|
||||
//!
|
||||
//! ## Features
|
||||
//!
|
||||
//! - **Multiple Secret Providers**: Environment variables, file-based storage, with extensibility for cloud providers
|
||||
//! - **Secret Substitution**: GitHub Actions-style secret references (`${{ secrets.SECRET_NAME }}`)
|
||||
//! - **Automatic Masking**: Intelligent secret detection and masking in logs and output
|
||||
//! - **Rate Limiting**: Built-in protection against secret access abuse
|
||||
//! - **Caching**: Configurable caching for improved performance
|
||||
//! - **Input Validation**: Comprehensive validation of secret names and values
|
||||
//! - **Thread Safety**: Full async/await support with thread-safe operations
|
||||
//!
|
||||
//! ## Quick Start
|
||||
//!
|
||||
//! ```rust
|
||||
//! use wrkflw_secrets::{SecretManager, SecretMasker, SecretSubstitution};
|
||||
//!
|
||||
//! #[tokio::main]
|
||||
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! // Initialize the secret manager with default configuration
|
||||
//! let manager = SecretManager::default().await?;
|
||||
//!
|
||||
//! // Set an environment variable for testing
|
||||
//! std::env::set_var("API_TOKEN", "secret_api_token_123");
|
||||
//!
|
||||
//! // Retrieve a secret
|
||||
//! let secret = manager.get_secret("API_TOKEN").await?;
|
||||
//! println!("Secret value: {}", secret.value());
|
||||
//!
|
||||
//! // Use secret substitution
|
||||
//! let mut substitution = SecretSubstitution::new(&manager);
|
||||
//! let template = "Using token: ${{ secrets.API_TOKEN }}";
|
||||
//! let resolved = substitution.substitute(template).await?;
|
||||
//! println!("Resolved: {}", resolved);
|
||||
//!
|
||||
//! // Set up secret masking
|
||||
//! let mut masker = SecretMasker::new();
|
||||
//! masker.add_secret("secret_api_token_123");
|
||||
//!
|
||||
//! let log_message = "Failed to authenticate with token: secret_api_token_123";
|
||||
//! let masked = masker.mask(log_message);
|
||||
//! println!("Masked: {}", masked); // Will show: "Failed to authenticate with token: se***123"
|
||||
//!
|
||||
//! // Clean up
|
||||
//! std::env::remove_var("API_TOKEN");
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ## Configuration
|
||||
//!
|
||||
//! ```rust
|
||||
//! use wrkflw_secrets::{SecretConfig, SecretProviderConfig, SecretManager};
|
||||
//! use std::collections::HashMap;
|
||||
//!
|
||||
//! #[tokio::main]
|
||||
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
//! let mut providers = HashMap::new();
|
||||
//!
|
||||
//! // Environment variable provider with prefix
|
||||
//! providers.insert(
|
||||
//! "env".to_string(),
|
||||
//! SecretProviderConfig::Environment {
|
||||
//! prefix: Some("MYAPP_SECRET_".to_string())
|
||||
//! }
|
||||
//! );
|
||||
//!
|
||||
//! // File-based provider
|
||||
//! providers.insert(
|
||||
//! "file".to_string(),
|
||||
//! SecretProviderConfig::File {
|
||||
//! path: "/path/to/secrets.json".to_string()
|
||||
//! }
|
||||
//! );
|
||||
//!
|
||||
//! let config = SecretConfig {
|
||||
//! default_provider: "env".to_string(),
|
||||
//! providers,
|
||||
//! enable_masking: true,
|
||||
//! timeout_seconds: 30,
|
||||
//! enable_caching: true,
|
||||
//! cache_ttl_seconds: 300,
|
||||
//! rate_limit: Default::default(),
|
||||
//! };
|
||||
//!
|
||||
//! let manager = SecretManager::new(config).await?;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ## Security Features
|
||||
//!
|
||||
//! ### Input Validation
|
||||
//!
|
||||
//! All secret names and values are validated to prevent injection attacks and ensure compliance
|
||||
//! with naming conventions.
|
||||
//!
|
||||
//! ### Rate Limiting
|
||||
//!
|
||||
//! Built-in rate limiting prevents abuse and denial-of-service attacks on secret providers.
|
||||
//!
|
||||
//! ### Automatic Pattern Detection
|
||||
//!
|
||||
//! The masking system automatically detects and masks common secret patterns:
|
||||
//! - GitHub Personal Access Tokens (`ghp_*`)
|
||||
//! - AWS Access Keys (`AKIA*`)
|
||||
//! - JWT tokens
|
||||
//! - API keys and tokens
|
||||
//!
|
||||
//! ### Memory Safety
|
||||
//!
|
||||
//! Secrets are handled with care to minimize exposure in memory and logs.
|
||||
//!
|
||||
//! ## Provider Support
|
||||
//!
|
||||
//! ### Environment Variables
|
||||
//!
|
||||
//! ```rust
|
||||
//! use wrkflw_secrets::{SecretProviderConfig, SecretManager, SecretConfig};
|
||||
//!
|
||||
//! // With prefix for better security
|
||||
//! let provider = SecretProviderConfig::Environment {
|
||||
//! prefix: Some("MYAPP_".to_string())
|
||||
//! };
|
||||
//! ```
|
||||
//!
|
||||
//! ### File-based Storage
|
||||
//!
|
||||
//! Supports JSON, YAML, and environment file formats:
|
||||
//!
|
||||
//! ```json
|
||||
//! {
|
||||
//! "database_password": "super_secret_password",
|
||||
//! "api_key": "your_api_key_here"
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ```yaml
|
||||
//! database_password: super_secret_password
|
||||
//! api_key: your_api_key_here
|
||||
//! ```
|
||||
//!
|
||||
//! ```bash
|
||||
//! # Environment format
|
||||
//! DATABASE_PASSWORD=super_secret_password
|
||||
//! API_KEY="your_api_key_here"
|
||||
//! ```
|
||||
|
||||
pub mod config;
|
||||
pub mod error;
|
||||
pub mod manager;
|
||||
pub mod masking;
|
||||
pub mod providers;
|
||||
pub mod rate_limit;
|
||||
pub mod storage;
|
||||
pub mod substitution;
|
||||
pub mod validation;
|
||||
|
||||
pub use config::{SecretConfig, SecretProviderConfig};
|
||||
pub use error::{SecretError, SecretResult};
|
||||
pub use manager::SecretManager;
|
||||
pub use masking::SecretMasker;
|
||||
pub use providers::{SecretProvider, SecretValue};
|
||||
pub use substitution::SecretSubstitution;
|
||||
|
||||
/// Re-export commonly used types
|
||||
pub mod prelude {
|
||||
pub use crate::{
|
||||
SecretConfig, SecretError, SecretManager, SecretMasker, SecretProvider, SecretResult,
|
||||
SecretSubstitution, SecretValue,
|
||||
};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use uuid;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_secret_management() {
|
||||
let config = SecretConfig::default();
|
||||
let manager = SecretManager::new(config)
|
||||
.await
|
||||
.expect("Failed to create manager");
|
||||
|
||||
// Use a unique test secret name to avoid conflicts
|
||||
let test_secret_name = format!(
|
||||
"TEST_SECRET_{}",
|
||||
uuid::Uuid::new_v4().to_string().replace('-', "_")
|
||||
);
|
||||
std::env::set_var(&test_secret_name, "secret_value");
|
||||
|
||||
let result = manager.get_secret(&test_secret_name).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret = result.unwrap();
|
||||
assert_eq!(secret.value(), "secret_value");
|
||||
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_substitution() {
|
||||
let config = SecretConfig::default();
|
||||
let manager = SecretManager::new(config)
|
||||
.await
|
||||
.expect("Failed to create manager");
|
||||
|
||||
// Use a unique test secret name to avoid conflicts
|
||||
let test_secret_name = format!(
|
||||
"GITHUB_TOKEN_{}",
|
||||
uuid::Uuid::new_v4().to_string().replace('-', "_")
|
||||
);
|
||||
std::env::set_var(&test_secret_name, "ghp_test_token");
|
||||
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
let input = format!("echo 'Token: ${{{{ secrets.{} }}}}'", test_secret_name);
|
||||
|
||||
let result = substitution.substitute(&input).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let output = result.unwrap();
|
||||
assert!(output.contains("ghp_test_token"));
|
||||
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_masking() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
masker.add_secret("password456");
|
||||
|
||||
let input = "The secret is secret123 and password is password456";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(masked.contains("***"));
|
||||
assert!(!masked.contains("secret123"));
|
||||
assert!(!masked.contains("password456"));
|
||||
}
|
||||
}
|
||||
267
crates/secrets/src/manager.rs
Normal file
267
crates/secrets/src/manager.rs
Normal file
@@ -0,0 +1,267 @@
|
||||
use crate::{
|
||||
config::{SecretConfig, SecretProviderConfig},
|
||||
providers::{env::EnvironmentProvider, file::FileProvider, SecretProvider, SecretValue},
|
||||
rate_limit::RateLimiter,
|
||||
validation::{validate_provider_name, validate_secret_name},
|
||||
SecretError, SecretResult,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Cached secret entry
|
||||
#[derive(Debug, Clone)]
|
||||
struct CachedSecret {
|
||||
value: SecretValue,
|
||||
expires_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
/// Central secret manager that coordinates multiple providers
|
||||
pub struct SecretManager {
|
||||
config: SecretConfig,
|
||||
providers: HashMap<String, Box<dyn SecretProvider>>,
|
||||
cache: Arc<RwLock<HashMap<String, CachedSecret>>>,
|
||||
rate_limiter: RateLimiter,
|
||||
}
|
||||
|
||||
impl SecretManager {
|
||||
/// Create a new secret manager with the given configuration
|
||||
pub async fn new(config: SecretConfig) -> SecretResult<Self> {
|
||||
let mut providers: HashMap<String, Box<dyn SecretProvider>> = HashMap::new();
|
||||
|
||||
// Initialize providers based on configuration
|
||||
for (name, provider_config) in &config.providers {
|
||||
// Validate provider name
|
||||
validate_provider_name(name)?;
|
||||
|
||||
let provider: Box<dyn SecretProvider> = match provider_config {
|
||||
SecretProviderConfig::Environment { prefix } => {
|
||||
Box::new(EnvironmentProvider::new(prefix.clone()))
|
||||
}
|
||||
SecretProviderConfig::File { path } => Box::new(FileProvider::new(path.clone())),
|
||||
// Cloud providers are planned for future implementation
|
||||
// #[cfg(feature = "vault-provider")]
|
||||
// SecretProviderConfig::Vault { url, auth, mount_path } => {
|
||||
// Box::new(crate::providers::vault::VaultProvider::new(
|
||||
// url.clone(),
|
||||
// auth.clone(),
|
||||
// mount_path.clone(),
|
||||
// ).await?)
|
||||
// }
|
||||
};
|
||||
|
||||
providers.insert(name.clone(), provider);
|
||||
}
|
||||
|
||||
let rate_limiter = RateLimiter::new(config.rate_limit.clone());
|
||||
|
||||
Ok(Self {
|
||||
config,
|
||||
providers,
|
||||
cache: Arc::new(RwLock::new(HashMap::new())),
|
||||
rate_limiter,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new secret manager with default configuration
|
||||
pub async fn default() -> SecretResult<Self> {
|
||||
Self::new(SecretConfig::default()).await
|
||||
}
|
||||
|
||||
/// Get a secret by name using the default provider
|
||||
pub async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
|
||||
validate_secret_name(name)?;
|
||||
self.get_secret_from_provider(&self.config.default_provider, name)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Get a secret from a specific provider
|
||||
pub async fn get_secret_from_provider(
|
||||
&self,
|
||||
provider_name: &str,
|
||||
name: &str,
|
||||
) -> SecretResult<SecretValue> {
|
||||
validate_provider_name(provider_name)?;
|
||||
validate_secret_name(name)?;
|
||||
|
||||
// Check rate limit
|
||||
let rate_limit_key = format!("{}:{}", provider_name, name);
|
||||
self.rate_limiter.check_rate_limit(&rate_limit_key).await?;
|
||||
|
||||
// Check cache first if caching is enabled
|
||||
if self.config.enable_caching {
|
||||
let cache_key = format!("{}:{}", provider_name, name);
|
||||
|
||||
{
|
||||
let cache = self.cache.read().await;
|
||||
if let Some(cached) = cache.get(&cache_key) {
|
||||
if chrono::Utc::now() < cached.expires_at {
|
||||
return Ok(cached.value.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get provider
|
||||
let provider = self
|
||||
.providers
|
||||
.get(provider_name)
|
||||
.ok_or_else(|| SecretError::provider_not_found(provider_name))?;
|
||||
|
||||
// Get secret from provider
|
||||
let secret = provider.get_secret(name).await?;
|
||||
|
||||
// Cache the result if caching is enabled
|
||||
if self.config.enable_caching {
|
||||
let cache_key = format!("{}:{}", provider_name, name);
|
||||
let expires_at = chrono::Utc::now()
|
||||
+ chrono::Duration::seconds(self.config.cache_ttl_seconds as i64);
|
||||
|
||||
let cached_secret = CachedSecret {
|
||||
value: secret.clone(),
|
||||
expires_at,
|
||||
};
|
||||
|
||||
let mut cache = self.cache.write().await;
|
||||
cache.insert(cache_key, cached_secret);
|
||||
}
|
||||
|
||||
Ok(secret)
|
||||
}
|
||||
|
||||
/// List all available secrets from all providers
|
||||
pub async fn list_all_secrets(&self) -> SecretResult<HashMap<String, Vec<String>>> {
|
||||
let mut all_secrets = HashMap::new();
|
||||
|
||||
for (provider_name, provider) in &self.providers {
|
||||
match provider.list_secrets().await {
|
||||
Ok(secrets) => {
|
||||
all_secrets.insert(provider_name.clone(), secrets);
|
||||
}
|
||||
Err(_) => {
|
||||
// Some providers may not support listing, ignore errors
|
||||
all_secrets.insert(provider_name.clone(), vec![]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(all_secrets)
|
||||
}
|
||||
|
||||
/// Check health of all providers
|
||||
pub async fn health_check(&self) -> HashMap<String, SecretResult<()>> {
|
||||
let mut results = HashMap::new();
|
||||
|
||||
for (provider_name, provider) in &self.providers {
|
||||
let result = provider.health_check().await;
|
||||
results.insert(provider_name.clone(), result);
|
||||
}
|
||||
|
||||
results
|
||||
}
|
||||
|
||||
/// Clear the cache
|
||||
pub async fn clear_cache(&self) {
|
||||
let mut cache = self.cache.write().await;
|
||||
cache.clear();
|
||||
}
|
||||
|
||||
/// Get configuration
|
||||
pub fn config(&self) -> &SecretConfig {
|
||||
&self.config
|
||||
}
|
||||
|
||||
/// Check if a provider exists
|
||||
pub fn has_provider(&self, name: &str) -> bool {
|
||||
self.providers.contains_key(name)
|
||||
}
|
||||
|
||||
/// Get provider names
|
||||
pub fn provider_names(&self) -> Vec<String> {
|
||||
self.providers.keys().cloned().collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_manager_creation() {
|
||||
let config = SecretConfig::default();
|
||||
let manager = SecretManager::new(config).await;
|
||||
assert!(manager.is_ok());
|
||||
|
||||
let manager = manager.unwrap();
|
||||
assert!(manager.has_provider("env"));
|
||||
assert!(manager.has_provider("file"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_manager_environment_provider() {
|
||||
// Use unique secret name to avoid test conflicts
|
||||
let test_secret_name = format!("TEST_SECRET_MANAGER_{}", std::process::id());
|
||||
std::env::set_var(&test_secret_name, "manager_test_value");
|
||||
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
let result = manager
|
||||
.get_secret_from_provider("env", &test_secret_name)
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok());
|
||||
let secret = result.unwrap();
|
||||
assert_eq!(secret.value(), "manager_test_value");
|
||||
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_manager_caching() {
|
||||
// Use unique secret name to avoid test conflicts
|
||||
let test_secret_name = format!("CACHE_TEST_SECRET_{}", std::process::id());
|
||||
std::env::set_var(&test_secret_name, "cached_value");
|
||||
|
||||
let config = SecretConfig {
|
||||
enable_caching: true,
|
||||
cache_ttl_seconds: 60, // 1 minute
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let manager = SecretManager::new(config).await.unwrap();
|
||||
|
||||
// First call should hit the provider
|
||||
let result1 = manager
|
||||
.get_secret_from_provider("env", &test_secret_name)
|
||||
.await;
|
||||
assert!(result1.is_ok());
|
||||
|
||||
// Remove the environment variable
|
||||
std::env::remove_var(&test_secret_name);
|
||||
|
||||
// Second call should hit the cache and still return the value
|
||||
let result2 = manager
|
||||
.get_secret_from_provider("env", &test_secret_name)
|
||||
.await;
|
||||
assert!(result2.is_ok());
|
||||
assert_eq!(result2.unwrap().value(), "cached_value");
|
||||
|
||||
// Clear cache and try again - should fail now
|
||||
manager.clear_cache().await;
|
||||
let result3 = manager
|
||||
.get_secret_from_provider("env", &test_secret_name)
|
||||
.await;
|
||||
assert!(result3.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_manager_health_check() {
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
let health_results = manager.health_check().await;
|
||||
|
||||
assert!(health_results.contains_key("env"));
|
||||
assert!(health_results.contains_key("file"));
|
||||
|
||||
// Environment provider should be healthy
|
||||
assert!(health_results.get("env").unwrap().is_ok());
|
||||
}
|
||||
}
|
||||
348
crates/secrets/src/masking.rs
Normal file
348
crates/secrets/src/masking.rs
Normal file
@@ -0,0 +1,348 @@
|
||||
use regex::Regex;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
/// Compiled regex patterns for common secret formats
|
||||
struct CompiledPatterns {
|
||||
github_pat: Regex,
|
||||
github_app: Regex,
|
||||
github_oauth: Regex,
|
||||
aws_access_key: Regex,
|
||||
aws_secret: Regex,
|
||||
jwt: Regex,
|
||||
api_key: Regex,
|
||||
}
|
||||
|
||||
impl CompiledPatterns {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
github_pat: Regex::new(r"ghp_[a-zA-Z0-9]{36}").unwrap(),
|
||||
github_app: Regex::new(r"ghs_[a-zA-Z0-9]{36}").unwrap(),
|
||||
github_oauth: Regex::new(r"gho_[a-zA-Z0-9]{36}").unwrap(),
|
||||
aws_access_key: Regex::new(r"AKIA[0-9A-Z]{16}").unwrap(),
|
||||
aws_secret: Regex::new(r"[A-Za-z0-9/+=]{40}").unwrap(),
|
||||
jwt: Regex::new(r"eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*").unwrap(),
|
||||
api_key: Regex::new(r"(?i)(api[_-]?key|token)[\s:=]+[a-zA-Z0-9_-]{16,}").unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Global compiled patterns (initialized once)
|
||||
static PATTERNS: OnceLock<CompiledPatterns> = OnceLock::new();
|
||||
|
||||
/// Secret masking utility to prevent secrets from appearing in logs
|
||||
pub struct SecretMasker {
|
||||
secrets: HashSet<String>,
|
||||
secret_cache: HashMap<String, String>, // Cache masked versions
|
||||
mask_char: char,
|
||||
min_length: usize,
|
||||
}
|
||||
|
||||
impl SecretMasker {
|
||||
/// Create a new secret masker
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
secrets: HashSet::new(),
|
||||
secret_cache: HashMap::new(),
|
||||
mask_char: '*',
|
||||
min_length: 3, // Don't mask very short strings
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new secret masker with custom mask character
|
||||
pub fn with_mask_char(mask_char: char) -> Self {
|
||||
Self {
|
||||
secrets: HashSet::new(),
|
||||
secret_cache: HashMap::new(),
|
||||
mask_char,
|
||||
min_length: 3,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a secret to be masked
|
||||
pub fn add_secret(&mut self, secret: impl Into<String>) {
|
||||
let secret = secret.into();
|
||||
if secret.len() >= self.min_length {
|
||||
let masked = self.create_mask(&secret);
|
||||
self.secret_cache.insert(secret.clone(), masked);
|
||||
self.secrets.insert(secret);
|
||||
}
|
||||
}
|
||||
|
||||
/// Add multiple secrets to be masked
|
||||
pub fn add_secrets(&mut self, secrets: impl IntoIterator<Item = String>) {
|
||||
for secret in secrets {
|
||||
self.add_secret(secret);
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove a secret from masking
|
||||
pub fn remove_secret(&mut self, secret: &str) {
|
||||
self.secrets.remove(secret);
|
||||
self.secret_cache.remove(secret);
|
||||
}
|
||||
|
||||
/// Clear all secrets
|
||||
pub fn clear(&mut self) {
|
||||
self.secrets.clear();
|
||||
self.secret_cache.clear();
|
||||
}
|
||||
|
||||
/// Mask secrets in the given text
|
||||
pub fn mask(&self, text: &str) -> String {
|
||||
let mut result = text.to_string();
|
||||
|
||||
// Use cached masked versions for better performance
|
||||
for secret in &self.secrets {
|
||||
if !secret.is_empty() {
|
||||
if let Some(masked) = self.secret_cache.get(secret) {
|
||||
result = result.replace(secret, masked);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Also mask potential tokens and keys with regex patterns
|
||||
result = self.mask_patterns(&result);
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Create a mask for a secret, preserving some structure for debugging
|
||||
fn create_mask(&self, secret: &str) -> String {
|
||||
let len = secret.len();
|
||||
|
||||
if len <= 3 {
|
||||
// Very short secrets - mask completely
|
||||
self.mask_char.to_string().repeat(3)
|
||||
} else if len <= 8 {
|
||||
// Short secrets - show first character
|
||||
format!(
|
||||
"{}{}",
|
||||
secret.chars().next().unwrap(),
|
||||
self.mask_char.to_string().repeat(len - 1)
|
||||
)
|
||||
} else {
|
||||
// Longer secrets - show first 2 and last 2 characters
|
||||
let chars: Vec<char> = secret.chars().collect();
|
||||
let first_two = chars.iter().take(2).collect::<String>();
|
||||
let last_two = chars.iter().skip(len - 2).collect::<String>();
|
||||
let middle_mask = self.mask_char.to_string().repeat(len - 4);
|
||||
format!("{}{}{}", first_two, middle_mask, last_two)
|
||||
}
|
||||
}
|
||||
|
||||
/// Mask common patterns that look like secrets
|
||||
fn mask_patterns(&self, text: &str) -> String {
|
||||
let patterns = PATTERNS.get_or_init(CompiledPatterns::new);
|
||||
let mut result = text.to_string();
|
||||
|
||||
// GitHub Personal Access Tokens
|
||||
result = patterns
|
||||
.github_pat
|
||||
.replace_all(&result, "ghp_***")
|
||||
.to_string();
|
||||
|
||||
// GitHub App tokens
|
||||
result = patterns
|
||||
.github_app
|
||||
.replace_all(&result, "ghs_***")
|
||||
.to_string();
|
||||
|
||||
// GitHub OAuth tokens
|
||||
result = patterns
|
||||
.github_oauth
|
||||
.replace_all(&result, "gho_***")
|
||||
.to_string();
|
||||
|
||||
// AWS Access Key IDs
|
||||
result = patterns
|
||||
.aws_access_key
|
||||
.replace_all(&result, "AKIA***")
|
||||
.to_string();
|
||||
|
||||
// AWS Secret Access Keys (basic pattern)
|
||||
// Only mask if it's clearly in a secret context (basic heuristic)
|
||||
if text.to_lowercase().contains("secret") || text.to_lowercase().contains("key") {
|
||||
result = patterns.aws_secret.replace_all(&result, "***").to_string();
|
||||
}
|
||||
|
||||
// JWT tokens (basic pattern)
|
||||
result = patterns
|
||||
.jwt
|
||||
.replace_all(&result, "eyJ***.eyJ***.***")
|
||||
.to_string();
|
||||
|
||||
// API keys with common prefixes
|
||||
result = patterns
|
||||
.api_key
|
||||
.replace_all(&result, "${1}=***")
|
||||
.to_string();
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Check if text contains any secrets
|
||||
pub fn contains_secrets(&self, text: &str) -> bool {
|
||||
for secret in &self.secrets {
|
||||
if text.contains(secret) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Also check for common patterns
|
||||
self.has_secret_patterns(text)
|
||||
}
|
||||
|
||||
/// Check if text contains common secret patterns
|
||||
fn has_secret_patterns(&self, text: &str) -> bool {
|
||||
let patterns = PATTERNS.get_or_init(CompiledPatterns::new);
|
||||
|
||||
patterns.github_pat.is_match(text)
|
||||
|| patterns.github_app.is_match(text)
|
||||
|| patterns.github_oauth.is_match(text)
|
||||
|| patterns.aws_access_key.is_match(text)
|
||||
|| patterns.jwt.is_match(text)
|
||||
}
|
||||
|
||||
/// Get the number of secrets being tracked
|
||||
pub fn secret_count(&self) -> usize {
|
||||
self.secrets.len()
|
||||
}
|
||||
|
||||
/// Check if a specific secret is being tracked
|
||||
pub fn has_secret(&self, secret: &str) -> bool {
|
||||
self.secrets.contains(secret)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SecretMasker {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_basic_masking() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
masker.add_secret("password456");
|
||||
|
||||
let input = "The secret is secret123 and password is password456";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(!masked.contains("secret123"));
|
||||
assert!(!masked.contains("password456"));
|
||||
assert!(masked.contains("***"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_preserve_structure() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("verylongsecretkey123");
|
||||
|
||||
let input = "Key: verylongsecretkey123";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
// Should preserve first 2 and last 2 characters
|
||||
assert!(masked.contains("ve"));
|
||||
assert!(masked.contains("23"));
|
||||
assert!(masked.contains("***"));
|
||||
assert!(!masked.contains("verylongsecretkey123"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_github_token_patterns() {
|
||||
let masker = SecretMasker::new();
|
||||
|
||||
let input = "Token: ghp_1234567890123456789012345678901234567890";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(!masked.contains("ghp_1234567890123456789012345678901234567890"));
|
||||
assert!(masked.contains("ghp_***"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_aws_access_key_patterns() {
|
||||
let masker = SecretMasker::new();
|
||||
|
||||
let input = "AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(!masked.contains("AKIAIOSFODNN7EXAMPLE"));
|
||||
assert!(masked.contains("AKIA***"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_jwt_token_patterns() {
|
||||
let masker = SecretMasker::new();
|
||||
|
||||
let input = "JWT: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(masked.contains("eyJ***.eyJ***.***"));
|
||||
assert!(!masked.contains("SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_contains_secrets() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
|
||||
assert!(masker.contains_secrets("The secret is secret123"));
|
||||
assert!(!masker.contains_secrets("No secrets here"));
|
||||
assert!(masker.contains_secrets("Token: ghp_1234567890123456789012345678901234567890"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_short_secrets() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("ab"); // Too short, should not be added
|
||||
masker.add_secret("abc"); // Minimum length
|
||||
|
||||
assert_eq!(masker.secret_count(), 1);
|
||||
assert!(!masker.has_secret("ab"));
|
||||
assert!(masker.has_secret("abc"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_mask_char() {
|
||||
let mut masker = SecretMasker::with_mask_char('X');
|
||||
masker.add_secret("secret123");
|
||||
|
||||
let input = "The secret is secret123";
|
||||
let masked = masker.mask(input);
|
||||
|
||||
assert!(masked.contains("XX"));
|
||||
assert!(!masked.contains("**"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_remove_secret() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
masker.add_secret("password456");
|
||||
|
||||
assert_eq!(masker.secret_count(), 2);
|
||||
|
||||
masker.remove_secret("secret123");
|
||||
assert_eq!(masker.secret_count(), 1);
|
||||
assert!(!masker.has_secret("secret123"));
|
||||
assert!(masker.has_secret("password456"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clear_secrets() {
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("secret123");
|
||||
masker.add_secret("password456");
|
||||
|
||||
assert_eq!(masker.secret_count(), 2);
|
||||
|
||||
masker.clear();
|
||||
assert_eq!(masker.secret_count(), 0);
|
||||
}
|
||||
}
|
||||
143
crates/secrets/src/providers/env.rs
Normal file
143
crates/secrets/src/providers/env.rs
Normal file
@@ -0,0 +1,143 @@
|
||||
use crate::{
|
||||
validation::validate_secret_value, SecretError, SecretProvider, SecretResult, SecretValue,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Environment variable secret provider
|
||||
pub struct EnvironmentProvider {
|
||||
prefix: Option<String>,
|
||||
}
|
||||
|
||||
impl EnvironmentProvider {
|
||||
/// Create a new environment provider
|
||||
pub fn new(prefix: Option<String>) -> Self {
|
||||
Self { prefix }
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for EnvironmentProvider {
|
||||
fn default() -> Self {
|
||||
Self::new(None)
|
||||
}
|
||||
}
|
||||
|
||||
impl EnvironmentProvider {
|
||||
/// Get the full environment variable name
|
||||
fn get_env_name(&self, name: &str) -> String {
|
||||
match &self.prefix {
|
||||
Some(prefix) => format!("{}{}", prefix, name),
|
||||
None => name.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SecretProvider for EnvironmentProvider {
|
||||
async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
|
||||
let env_name = self.get_env_name(name);
|
||||
|
||||
match std::env::var(&env_name) {
|
||||
Ok(value) => {
|
||||
// Validate the secret value
|
||||
validate_secret_value(&value)?;
|
||||
|
||||
let mut metadata = HashMap::new();
|
||||
metadata.insert("source".to_string(), "environment".to_string());
|
||||
metadata.insert("env_var".to_string(), env_name);
|
||||
|
||||
Ok(SecretValue::with_metadata(value, metadata))
|
||||
}
|
||||
Err(std::env::VarError::NotPresent) => Err(SecretError::not_found(name)),
|
||||
Err(std::env::VarError::NotUnicode(_)) => Err(SecretError::InvalidFormat(format!(
|
||||
"Environment variable '{}' contains invalid Unicode",
|
||||
env_name
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_secrets(&self) -> SecretResult<Vec<String>> {
|
||||
let mut secrets = Vec::new();
|
||||
|
||||
for (key, _) in std::env::vars() {
|
||||
if let Some(prefix) = &self.prefix {
|
||||
if key.starts_with(prefix) {
|
||||
secrets.push(key[prefix.len()..].to_string());
|
||||
}
|
||||
} else {
|
||||
// Without a prefix, we can't distinguish secrets from regular env vars
|
||||
// So we'll return an error suggesting the use of a prefix
|
||||
return Err(SecretError::internal(
|
||||
"Cannot list secrets from environment without a prefix. Configure a prefix like 'WRKFLW_SECRET_'"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(secrets)
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"environment"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_environment_provider_basic() {
|
||||
let provider = EnvironmentProvider::default();
|
||||
|
||||
// Use unique secret name to avoid test conflicts
|
||||
let test_secret_name = format!("TEST_SECRET_{}", std::process::id());
|
||||
std::env::set_var(&test_secret_name, "test_value");
|
||||
|
||||
let result = provider.get_secret(&test_secret_name).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret = result.unwrap();
|
||||
assert_eq!(secret.value(), "test_value");
|
||||
assert_eq!(
|
||||
secret.metadata.get("source"),
|
||||
Some(&"environment".to_string())
|
||||
);
|
||||
|
||||
// Clean up
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_environment_provider_with_prefix() {
|
||||
let provider = EnvironmentProvider::new(Some("WRKFLW_SECRET_".to_string()));
|
||||
|
||||
// Use unique secret name to avoid test conflicts
|
||||
let test_secret_name = format!("API_KEY_{}", std::process::id());
|
||||
let full_env_name = format!("WRKFLW_SECRET_{}", test_secret_name);
|
||||
std::env::set_var(&full_env_name, "secret_api_key");
|
||||
|
||||
let result = provider.get_secret(&test_secret_name).await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret = result.unwrap();
|
||||
assert_eq!(secret.value(), "secret_api_key");
|
||||
|
||||
// Clean up
|
||||
std::env::remove_var(&full_env_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_environment_provider_not_found() {
|
||||
let provider = EnvironmentProvider::default();
|
||||
|
||||
let result = provider.get_secret("NONEXISTENT_SECRET").await;
|
||||
assert!(result.is_err());
|
||||
|
||||
match result.unwrap_err() {
|
||||
SecretError::NotFound { name } => {
|
||||
assert_eq!(name, "NONEXISTENT_SECRET");
|
||||
}
|
||||
_ => panic!("Expected NotFound error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
288
crates/secrets/src/providers/file.rs
Normal file
288
crates/secrets/src/providers/file.rs
Normal file
@@ -0,0 +1,288 @@
|
||||
use crate::{
|
||||
validation::validate_secret_value, SecretError, SecretProvider, SecretResult, SecretValue,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
/// File-based secret provider
|
||||
pub struct FileProvider {
|
||||
path: String,
|
||||
}
|
||||
|
||||
impl FileProvider {
|
||||
/// Create a new file provider
|
||||
pub fn new(path: impl Into<String>) -> Self {
|
||||
Self { path: path.into() }
|
||||
}
|
||||
|
||||
/// Expand tilde in path
|
||||
fn expand_path(&self) -> String {
|
||||
if self.path.starts_with("~/") {
|
||||
if let Some(home) = dirs::home_dir() {
|
||||
return home.join(&self.path[2..]).to_string_lossy().to_string();
|
||||
}
|
||||
}
|
||||
self.path.clone()
|
||||
}
|
||||
|
||||
/// Load secrets from JSON file
|
||||
async fn load_json_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
|
||||
let content = tokio::fs::read_to_string(file_path).await?;
|
||||
let json: Value = serde_json::from_str(&content)?;
|
||||
|
||||
let mut secrets = HashMap::new();
|
||||
if let Value::Object(obj) = json {
|
||||
for (key, value) in obj {
|
||||
if let Value::String(secret_value) = value {
|
||||
secrets.insert(key, secret_value);
|
||||
} else {
|
||||
secrets.insert(key, value.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(secrets)
|
||||
}
|
||||
|
||||
/// Load secrets from YAML file
|
||||
async fn load_yaml_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
|
||||
let content = tokio::fs::read_to_string(file_path).await?;
|
||||
let yaml: serde_yaml::Value = serde_yaml::from_str(&content)?;
|
||||
|
||||
let mut secrets = HashMap::new();
|
||||
if let serde_yaml::Value::Mapping(map) = yaml {
|
||||
for (key, value) in map {
|
||||
if let (serde_yaml::Value::String(k), v) = (key, value) {
|
||||
let secret_value = match v {
|
||||
serde_yaml::Value::String(s) => s,
|
||||
_ => serde_yaml::to_string(&v)?.trim().to_string(),
|
||||
};
|
||||
secrets.insert(k, secret_value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(secrets)
|
||||
}
|
||||
|
||||
/// Load secrets from environment-style file
|
||||
async fn load_env_secrets(&self, file_path: &Path) -> SecretResult<HashMap<String, String>> {
|
||||
let content = tokio::fs::read_to_string(file_path).await?;
|
||||
let mut secrets = HashMap::new();
|
||||
|
||||
for line in content.lines() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some((key, value)) = line.split_once('=') {
|
||||
let key = key.trim().to_string();
|
||||
let value = value.trim();
|
||||
|
||||
// Handle quoted values
|
||||
let value = if (value.starts_with('"') && value.ends_with('"'))
|
||||
|| (value.starts_with('\'') && value.ends_with('\''))
|
||||
{
|
||||
&value[1..value.len() - 1]
|
||||
} else {
|
||||
value
|
||||
};
|
||||
|
||||
secrets.insert(key, value.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(secrets)
|
||||
}
|
||||
|
||||
/// Load all secrets from the configured path
|
||||
async fn load_secrets(&self) -> SecretResult<HashMap<String, String>> {
|
||||
let expanded_path = self.expand_path();
|
||||
let path = Path::new(&expanded_path);
|
||||
|
||||
if !path.exists() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
|
||||
if path.is_file() {
|
||||
// Single file - determine format by extension
|
||||
if let Some(extension) = path.extension().and_then(|ext| ext.to_str()) {
|
||||
match extension.to_lowercase().as_str() {
|
||||
"json" => self.load_json_secrets(path).await,
|
||||
"yml" | "yaml" => self.load_yaml_secrets(path).await,
|
||||
"env" => self.load_env_secrets(path).await,
|
||||
_ => {
|
||||
// Default to environment format for unknown extensions
|
||||
self.load_env_secrets(path).await
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No extension, try environment format
|
||||
self.load_env_secrets(path).await
|
||||
}
|
||||
} else {
|
||||
// Directory - load from multiple files
|
||||
let mut all_secrets = HashMap::new();
|
||||
let mut entries = tokio::fs::read_dir(path).await?;
|
||||
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let entry_path = entry.path();
|
||||
if entry_path.is_file() {
|
||||
if let Some(extension) = entry_path.extension().and_then(|ext| ext.to_str()) {
|
||||
let secrets = match extension.to_lowercase().as_str() {
|
||||
"json" => self.load_json_secrets(&entry_path).await?,
|
||||
"yml" | "yaml" => self.load_yaml_secrets(&entry_path).await?,
|
||||
"env" => self.load_env_secrets(&entry_path).await?,
|
||||
_ => continue, // Skip unknown file types
|
||||
};
|
||||
all_secrets.extend(secrets);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(all_secrets)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SecretProvider for FileProvider {
|
||||
async fn get_secret(&self, name: &str) -> SecretResult<SecretValue> {
|
||||
let secrets = self.load_secrets().await?;
|
||||
|
||||
if let Some(value) = secrets.get(name) {
|
||||
// Validate the secret value
|
||||
validate_secret_value(value)?;
|
||||
|
||||
let mut metadata = HashMap::new();
|
||||
metadata.insert("source".to_string(), "file".to_string());
|
||||
metadata.insert("file_path".to_string(), self.expand_path());
|
||||
|
||||
Ok(SecretValue::with_metadata(value.clone(), metadata))
|
||||
} else {
|
||||
Err(SecretError::not_found(name))
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_secrets(&self) -> SecretResult<Vec<String>> {
|
||||
let secrets = self.load_secrets().await?;
|
||||
Ok(secrets.keys().cloned().collect())
|
||||
}
|
||||
|
||||
fn name(&self) -> &str {
|
||||
"file"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::TempDir;
|
||||
|
||||
async fn create_test_json_file(dir: &TempDir, content: &str) -> String {
|
||||
let file_path = dir.path().join("secrets.json");
|
||||
tokio::fs::write(&file_path, content).await.unwrap();
|
||||
file_path.to_string_lossy().to_string()
|
||||
}
|
||||
|
||||
async fn create_test_env_file(dir: &TempDir, content: &str) -> String {
|
||||
let file_path = dir.path().join("secrets.env");
|
||||
tokio::fs::write(&file_path, content).await.unwrap();
|
||||
file_path.to_string_lossy().to_string()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_json() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let file_path = create_test_json_file(
|
||||
&temp_dir,
|
||||
r#"
|
||||
{
|
||||
"API_KEY": "secret_api_key",
|
||||
"DB_PASSWORD": "secret_password"
|
||||
}
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
|
||||
let provider = FileProvider::new(file_path);
|
||||
|
||||
let result = provider.get_secret("API_KEY").await;
|
||||
assert!(result.is_ok());
|
||||
|
||||
let secret = result.unwrap();
|
||||
assert_eq!(secret.value(), "secret_api_key");
|
||||
assert_eq!(secret.metadata.get("source"), Some(&"file".to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_env_format() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let file_path = create_test_env_file(
|
||||
&temp_dir,
|
||||
r#"
|
||||
# This is a comment
|
||||
API_KEY=secret_api_key
|
||||
DB_PASSWORD="quoted password"
|
||||
GITHUB_TOKEN='single quoted token'
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
|
||||
let provider = FileProvider::new(file_path);
|
||||
|
||||
let api_key = provider.get_secret("API_KEY").await.unwrap();
|
||||
assert_eq!(api_key.value(), "secret_api_key");
|
||||
|
||||
let password = provider.get_secret("DB_PASSWORD").await.unwrap();
|
||||
assert_eq!(password.value(), "quoted password");
|
||||
|
||||
let token = provider.get_secret("GITHUB_TOKEN").await.unwrap();
|
||||
assert_eq!(token.value(), "single quoted token");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_not_found() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let file_path = create_test_json_file(&temp_dir, "{}").await;
|
||||
|
||||
let provider = FileProvider::new(file_path);
|
||||
|
||||
let result = provider.get_secret("NONEXISTENT").await;
|
||||
assert!(result.is_err());
|
||||
|
||||
match result.unwrap_err() {
|
||||
SecretError::NotFound { name } => {
|
||||
assert_eq!(name, "NONEXISTENT");
|
||||
}
|
||||
_ => panic!("Expected NotFound error"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_file_provider_list_secrets() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let file_path = create_test_json_file(
|
||||
&temp_dir,
|
||||
r#"
|
||||
{
|
||||
"SECRET_1": "value1",
|
||||
"SECRET_2": "value2",
|
||||
"SECRET_3": "value3"
|
||||
}
|
||||
"#,
|
||||
)
|
||||
.await;
|
||||
|
||||
let provider = FileProvider::new(file_path);
|
||||
|
||||
let secrets = provider.list_secrets().await.unwrap();
|
||||
assert_eq!(secrets.len(), 3);
|
||||
assert!(secrets.contains(&"SECRET_1".to_string()));
|
||||
assert!(secrets.contains(&"SECRET_2".to_string()));
|
||||
assert!(secrets.contains(&"SECRET_3".to_string()));
|
||||
}
|
||||
}
|
||||
91
crates/secrets/src/providers/mod.rs
Normal file
91
crates/secrets/src/providers/mod.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use crate::{SecretError, SecretResult};
|
||||
use async_trait::async_trait;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub mod env;
|
||||
pub mod file;
|
||||
|
||||
// Cloud provider modules are planned for future implementation
|
||||
// #[cfg(feature = "vault-provider")]
|
||||
// pub mod vault;
|
||||
|
||||
// #[cfg(feature = "aws-provider")]
|
||||
// pub mod aws;
|
||||
|
||||
// #[cfg(feature = "azure-provider")]
|
||||
// pub mod azure;
|
||||
|
||||
// #[cfg(feature = "gcp-provider")]
|
||||
// pub mod gcp;
|
||||
|
||||
/// A secret value with metadata
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SecretValue {
|
||||
/// The actual secret value
|
||||
value: String,
|
||||
/// Optional metadata about the secret
|
||||
pub metadata: HashMap<String, String>,
|
||||
/// When this secret was retrieved (for caching)
|
||||
pub retrieved_at: chrono::DateTime<chrono::Utc>,
|
||||
}
|
||||
|
||||
impl SecretValue {
|
||||
/// Create a new secret value
|
||||
pub fn new(value: impl Into<String>) -> Self {
|
||||
Self {
|
||||
value: value.into(),
|
||||
metadata: HashMap::new(),
|
||||
retrieved_at: chrono::Utc::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new secret value with metadata
|
||||
pub fn with_metadata(value: impl Into<String>, metadata: HashMap<String, String>) -> Self {
|
||||
Self {
|
||||
value: value.into(),
|
||||
metadata,
|
||||
retrieved_at: chrono::Utc::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the secret value
|
||||
pub fn value(&self) -> &str {
|
||||
&self.value
|
||||
}
|
||||
|
||||
/// Check if this secret has expired based on TTL
|
||||
pub fn is_expired(&self, ttl_seconds: u64) -> bool {
|
||||
let now = chrono::Utc::now();
|
||||
let elapsed = now.signed_duration_since(self.retrieved_at);
|
||||
elapsed.num_seconds() > ttl_seconds as i64
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for secret providers
|
||||
#[async_trait]
|
||||
pub trait SecretProvider: Send + Sync {
|
||||
/// Get a secret by name
|
||||
async fn get_secret(&self, name: &str) -> SecretResult<SecretValue>;
|
||||
|
||||
/// List available secrets (optional, for providers that support it)
|
||||
async fn list_secrets(&self) -> SecretResult<Vec<String>> {
|
||||
Err(SecretError::internal(
|
||||
"list_secrets not supported by this provider",
|
||||
))
|
||||
}
|
||||
|
||||
/// Check if the provider is healthy/accessible
|
||||
async fn health_check(&self) -> SecretResult<()> {
|
||||
// Default implementation tries to get a non-existent secret
|
||||
// If it returns NotFound, the provider is healthy
|
||||
match self.get_secret("__health_check__").await {
|
||||
Err(SecretError::NotFound { .. }) => Ok(()),
|
||||
Err(e) => Err(e),
|
||||
Ok(_) => Ok(()), // Surprisingly, the health check secret exists
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the provider name
|
||||
fn name(&self) -> &str;
|
||||
}
|
||||
242
crates/secrets/src/rate_limit.rs
Normal file
242
crates/secrets/src/rate_limit.rs
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright 2024 wrkflw contributors
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Rate limiting for secret access operations
|
||||
|
||||
use crate::{SecretError, SecretResult};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// Rate limiter configuration
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RateLimitConfig {
|
||||
/// Maximum requests per time window
|
||||
pub max_requests: u32,
|
||||
/// Time window duration
|
||||
pub window_duration: Duration,
|
||||
/// Whether to enable rate limiting
|
||||
pub enabled: bool,
|
||||
}
|
||||
|
||||
impl Default for RateLimitConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_requests: 100,
|
||||
window_duration: Duration::from_secs(60), // 1 minute
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Track requests for a specific key
|
||||
#[derive(Debug)]
|
||||
struct RequestTracker {
|
||||
requests: Vec<Instant>,
|
||||
first_request: Instant,
|
||||
}
|
||||
|
||||
impl RequestTracker {
|
||||
fn new() -> Self {
|
||||
let now = Instant::now();
|
||||
Self {
|
||||
requests: Vec::new(),
|
||||
first_request: now,
|
||||
}
|
||||
}
|
||||
|
||||
fn add_request(&mut self, now: Instant) {
|
||||
if self.requests.is_empty() {
|
||||
self.first_request = now;
|
||||
}
|
||||
self.requests.push(now);
|
||||
}
|
||||
|
||||
fn cleanup_old_requests(&mut self, window_duration: Duration, now: Instant) {
|
||||
let cutoff = now - window_duration;
|
||||
self.requests.retain(|&req_time| req_time > cutoff);
|
||||
|
||||
if let Some(&first) = self.requests.first() {
|
||||
self.first_request = first;
|
||||
}
|
||||
}
|
||||
|
||||
fn request_count(&self) -> usize {
|
||||
self.requests.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Rate limiter for secret access operations
|
||||
pub struct RateLimiter {
|
||||
config: RateLimitConfig,
|
||||
trackers: Arc<RwLock<HashMap<String, RequestTracker>>>,
|
||||
}
|
||||
|
||||
impl RateLimiter {
|
||||
/// Create a new rate limiter with the given configuration
|
||||
pub fn new(config: RateLimitConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
trackers: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a request should be allowed for the given key
|
||||
pub async fn check_rate_limit(&self, key: &str) -> SecretResult<()> {
|
||||
if !self.config.enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
let mut trackers = self.trackers.write().await;
|
||||
|
||||
// Clean up old requests for existing tracker
|
||||
if let Some(tracker) = trackers.get_mut(key) {
|
||||
tracker.cleanup_old_requests(self.config.window_duration, now);
|
||||
|
||||
// Check if we're over the limit
|
||||
if tracker.request_count() >= self.config.max_requests as usize {
|
||||
let time_until_reset = self.config.window_duration - (now - tracker.first_request);
|
||||
return Err(SecretError::RateLimitExceeded(format!(
|
||||
"Rate limit exceeded. Try again in {} seconds",
|
||||
time_until_reset.as_secs()
|
||||
)));
|
||||
}
|
||||
|
||||
// Add the current request
|
||||
tracker.add_request(now);
|
||||
} else {
|
||||
// Create new tracker and add first request
|
||||
let mut tracker = RequestTracker::new();
|
||||
tracker.add_request(now);
|
||||
trackers.insert(key.to_string(), tracker);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Reset rate limit for a specific key
|
||||
pub async fn reset_rate_limit(&self, key: &str) {
|
||||
let mut trackers = self.trackers.write().await;
|
||||
trackers.remove(key);
|
||||
}
|
||||
|
||||
/// Clear all rate limit data
|
||||
pub async fn clear_all(&self) {
|
||||
let mut trackers = self.trackers.write().await;
|
||||
trackers.clear();
|
||||
}
|
||||
|
||||
/// Get current request count for a key
|
||||
pub async fn get_request_count(&self, key: &str) -> usize {
|
||||
let trackers = self.trackers.read().await;
|
||||
trackers.get(key).map(|t| t.request_count()).unwrap_or(0)
|
||||
}
|
||||
|
||||
/// Get rate limit configuration
|
||||
pub fn config(&self) -> &RateLimitConfig {
|
||||
&self.config
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RateLimiter {
|
||||
fn default() -> Self {
|
||||
Self::new(RateLimitConfig::default())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tokio::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rate_limit_basic() {
|
||||
let config = RateLimitConfig {
|
||||
max_requests: 3,
|
||||
window_duration: Duration::from_secs(1),
|
||||
enabled: true,
|
||||
};
|
||||
let limiter = RateLimiter::new(config);
|
||||
|
||||
// First 3 requests should succeed
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
|
||||
// 4th request should fail
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rate_limit_different_keys() {
|
||||
let config = RateLimitConfig {
|
||||
max_requests: 2,
|
||||
window_duration: Duration::from_secs(1),
|
||||
enabled: true,
|
||||
};
|
||||
let limiter = RateLimiter::new(config);
|
||||
|
||||
// Different keys should have separate limits
|
||||
assert!(limiter.check_rate_limit("key1").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("key1").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("key2").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("key2").await.is_ok());
|
||||
|
||||
// Both keys should now be at their limit
|
||||
assert!(limiter.check_rate_limit("key1").await.is_err());
|
||||
assert!(limiter.check_rate_limit("key2").await.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rate_limit_reset() {
|
||||
let config = RateLimitConfig {
|
||||
max_requests: 1,
|
||||
window_duration: Duration::from_secs(60), // Long window
|
||||
enabled: true,
|
||||
};
|
||||
let limiter = RateLimiter::new(config);
|
||||
|
||||
// Use up the limit
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_err());
|
||||
|
||||
// Reset and try again
|
||||
limiter.reset_rate_limit("test_key").await;
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rate_limit_disabled() {
|
||||
let config = RateLimitConfig {
|
||||
max_requests: 1,
|
||||
window_duration: Duration::from_secs(1),
|
||||
enabled: false,
|
||||
};
|
||||
let limiter = RateLimiter::new(config);
|
||||
|
||||
// All requests should succeed when disabled
|
||||
for _ in 0..10 {
|
||||
assert!(limiter.check_rate_limit("test_key").await.is_ok());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_request_count() {
|
||||
let config = RateLimitConfig {
|
||||
max_requests: 5,
|
||||
window_duration: Duration::from_secs(1),
|
||||
enabled: true,
|
||||
};
|
||||
let limiter = RateLimiter::new(config);
|
||||
|
||||
assert_eq!(limiter.get_request_count("test_key").await, 0);
|
||||
|
||||
limiter.check_rate_limit("test_key").await.unwrap();
|
||||
assert_eq!(limiter.get_request_count("test_key").await, 1);
|
||||
|
||||
limiter.check_rate_limit("test_key").await.unwrap();
|
||||
assert_eq!(limiter.get_request_count("test_key").await, 2);
|
||||
}
|
||||
}
|
||||
351
crates/secrets/src/storage.rs
Normal file
351
crates/secrets/src/storage.rs
Normal file
@@ -0,0 +1,351 @@
|
||||
use crate::{SecretError, SecretResult};
|
||||
use aes_gcm::{
|
||||
aead::{Aead, KeyInit, OsRng},
|
||||
Aes256Gcm, Key, Nonce,
|
||||
};
|
||||
use base64::{engine::general_purpose, Engine as _};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Encrypted secret storage for sensitive data at rest
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EncryptedSecretStore {
|
||||
/// Encrypted secrets map (base64 encoded)
|
||||
secrets: HashMap<String, String>,
|
||||
/// Salt for key derivation (base64 encoded)
|
||||
salt: String,
|
||||
/// Nonce for encryption (base64 encoded)
|
||||
nonce: String,
|
||||
}
|
||||
|
||||
impl EncryptedSecretStore {
|
||||
/// Create a new encrypted secret store with a random key
|
||||
pub fn new() -> SecretResult<(Self, [u8; 32])> {
|
||||
let key = Aes256Gcm::generate_key(&mut OsRng);
|
||||
let salt = Self::generate_salt();
|
||||
let nonce = Self::generate_nonce();
|
||||
|
||||
let store = Self {
|
||||
secrets: HashMap::new(),
|
||||
salt: general_purpose::STANDARD.encode(salt),
|
||||
nonce: general_purpose::STANDARD.encode(nonce),
|
||||
};
|
||||
|
||||
Ok((store, key.into()))
|
||||
}
|
||||
|
||||
/// Create an encrypted secret store from existing data
|
||||
pub fn from_data(secrets: HashMap<String, String>, salt: String, nonce: String) -> Self {
|
||||
Self {
|
||||
secrets,
|
||||
salt,
|
||||
nonce,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add an encrypted secret
|
||||
pub fn add_secret(&mut self, key: &[u8; 32], name: &str, value: &str) -> SecretResult<()> {
|
||||
let encrypted = self.encrypt_value(key, value)?;
|
||||
self.secrets.insert(name.to_string(), encrypted);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get and decrypt a secret
|
||||
pub fn get_secret(&self, key: &[u8; 32], name: &str) -> SecretResult<String> {
|
||||
let encrypted = self
|
||||
.secrets
|
||||
.get(name)
|
||||
.ok_or_else(|| SecretError::not_found(name))?;
|
||||
|
||||
self.decrypt_value(key, encrypted)
|
||||
}
|
||||
|
||||
/// Remove a secret
|
||||
pub fn remove_secret(&mut self, name: &str) -> bool {
|
||||
self.secrets.remove(name).is_some()
|
||||
}
|
||||
|
||||
/// List all secret names
|
||||
pub fn list_secrets(&self) -> Vec<String> {
|
||||
self.secrets.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Check if a secret exists
|
||||
pub fn has_secret(&self, name: &str) -> bool {
|
||||
self.secrets.contains_key(name)
|
||||
}
|
||||
|
||||
/// Get the number of stored secrets
|
||||
pub fn secret_count(&self) -> usize {
|
||||
self.secrets.len()
|
||||
}
|
||||
|
||||
/// Clear all secrets
|
||||
pub fn clear(&mut self) {
|
||||
self.secrets.clear();
|
||||
}
|
||||
|
||||
/// Encrypt a value
|
||||
fn encrypt_value(&self, key: &[u8; 32], value: &str) -> SecretResult<String> {
|
||||
let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(key));
|
||||
let nonce_bytes = general_purpose::STANDARD
|
||||
.decode(&self.nonce)
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Invalid nonce: {}", e)))?;
|
||||
|
||||
if nonce_bytes.len() != 12 {
|
||||
return Err(SecretError::EncryptionError(
|
||||
"Invalid nonce length".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
let ciphertext = cipher
|
||||
.encrypt(nonce, value.as_bytes())
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Encryption failed: {}", e)))?;
|
||||
|
||||
Ok(general_purpose::STANDARD.encode(&ciphertext))
|
||||
}
|
||||
|
||||
/// Decrypt a value
|
||||
fn decrypt_value(&self, key: &[u8; 32], encrypted: &str) -> SecretResult<String> {
|
||||
let cipher = Aes256Gcm::new(Key::<Aes256Gcm>::from_slice(key));
|
||||
let nonce_bytes = general_purpose::STANDARD
|
||||
.decode(&self.nonce)
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Invalid nonce: {}", e)))?;
|
||||
|
||||
if nonce_bytes.len() != 12 {
|
||||
return Err(SecretError::EncryptionError(
|
||||
"Invalid nonce length".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
let ciphertext = general_purpose::STANDARD
|
||||
.decode(encrypted)
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Invalid ciphertext: {}", e)))?;
|
||||
|
||||
let plaintext = cipher
|
||||
.decrypt(nonce, ciphertext.as_ref())
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Decryption failed: {}", e)))?;
|
||||
|
||||
String::from_utf8(plaintext)
|
||||
.map_err(|e| SecretError::EncryptionError(format!("Invalid UTF-8: {}", e)))
|
||||
}
|
||||
|
||||
/// Generate a random salt
|
||||
fn generate_salt() -> [u8; 32] {
|
||||
let mut salt = [0u8; 32];
|
||||
rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut salt);
|
||||
salt
|
||||
}
|
||||
|
||||
/// Generate a random nonce
|
||||
fn generate_nonce() -> [u8; 12] {
|
||||
let mut nonce = [0u8; 12];
|
||||
rand::RngCore::fill_bytes(&mut rand::thread_rng(), &mut nonce);
|
||||
nonce
|
||||
}
|
||||
|
||||
/// Serialize to JSON
|
||||
pub fn to_json(&self) -> SecretResult<String> {
|
||||
serde_json::to_string_pretty(self)
|
||||
.map_err(|e| SecretError::internal(format!("Serialization failed: {}", e)))
|
||||
}
|
||||
|
||||
/// Deserialize from JSON
|
||||
pub fn from_json(json: &str) -> SecretResult<Self> {
|
||||
serde_json::from_str(json)
|
||||
.map_err(|e| SecretError::internal(format!("Deserialization failed: {}", e)))
|
||||
}
|
||||
|
||||
/// Save to file
|
||||
pub async fn save_to_file(&self, path: &str) -> SecretResult<()> {
|
||||
let json = self.to_json()?;
|
||||
tokio::fs::write(path, json)
|
||||
.await
|
||||
.map_err(SecretError::IoError)
|
||||
}
|
||||
|
||||
/// Load from file
|
||||
pub async fn load_from_file(path: &str) -> SecretResult<Self> {
|
||||
let json = tokio::fs::read_to_string(path)
|
||||
.await
|
||||
.map_err(SecretError::IoError)?;
|
||||
Self::from_json(&json)
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for EncryptedSecretStore {
|
||||
fn default() -> Self {
|
||||
let (store, _) = Self::new().expect("Failed to create default encrypted store");
|
||||
store
|
||||
}
|
||||
}
|
||||
|
||||
/// Key derivation utilities
|
||||
pub struct KeyDerivation;
|
||||
|
||||
impl KeyDerivation {
|
||||
/// Derive a key from a password using PBKDF2
|
||||
pub fn derive_key_from_password(password: &str, salt: &[u8], iterations: u32) -> [u8; 32] {
|
||||
let mut key = [0u8; 32];
|
||||
let _ = pbkdf2::pbkdf2::<hmac::Hmac<sha2::Sha256>>(
|
||||
password.as_bytes(),
|
||||
salt,
|
||||
iterations,
|
||||
&mut key,
|
||||
);
|
||||
key
|
||||
}
|
||||
|
||||
/// Generate a secure random key
|
||||
pub fn generate_random_key() -> [u8; 32] {
|
||||
Aes256Gcm::generate_key(&mut OsRng).into()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_basic() {
|
||||
let (mut store, key) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
// Add a secret
|
||||
store
|
||||
.add_secret(&key, "test_secret", "secret_value")
|
||||
.unwrap();
|
||||
|
||||
// Retrieve the secret
|
||||
let value = store.get_secret(&key, "test_secret").unwrap();
|
||||
assert_eq!(value, "secret_value");
|
||||
|
||||
// Check metadata
|
||||
assert!(store.has_secret("test_secret"));
|
||||
assert_eq!(store.secret_count(), 1);
|
||||
|
||||
let secrets = store.list_secrets();
|
||||
assert_eq!(secrets.len(), 1);
|
||||
assert!(secrets.contains(&"test_secret".to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_multiple_secrets() {
|
||||
let (mut store, key) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
// Add multiple secrets
|
||||
store.add_secret(&key, "secret1", "value1").unwrap();
|
||||
store.add_secret(&key, "secret2", "value2").unwrap();
|
||||
store.add_secret(&key, "secret3", "value3").unwrap();
|
||||
|
||||
// Retrieve all secrets
|
||||
assert_eq!(store.get_secret(&key, "secret1").unwrap(), "value1");
|
||||
assert_eq!(store.get_secret(&key, "secret2").unwrap(), "value2");
|
||||
assert_eq!(store.get_secret(&key, "secret3").unwrap(), "value3");
|
||||
|
||||
assert_eq!(store.secret_count(), 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_wrong_key() {
|
||||
let (mut store, key1) = EncryptedSecretStore::new().unwrap();
|
||||
let (_, key2) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
// Add secret with key1
|
||||
store
|
||||
.add_secret(&key1, "test_secret", "secret_value")
|
||||
.unwrap();
|
||||
|
||||
// Try to retrieve with wrong key
|
||||
let result = store.get_secret(&key2, "test_secret");
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_not_found() {
|
||||
let (store, key) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
let result = store.get_secret(&key, "nonexistent");
|
||||
assert!(result.is_err());
|
||||
|
||||
match result.unwrap_err() {
|
||||
SecretError::NotFound { name } => {
|
||||
assert_eq!(name, "nonexistent");
|
||||
}
|
||||
_ => panic!("Expected NotFound error"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_remove() {
|
||||
let (mut store, key) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
store
|
||||
.add_secret(&key, "test_secret", "secret_value")
|
||||
.unwrap();
|
||||
assert!(store.has_secret("test_secret"));
|
||||
|
||||
let removed = store.remove_secret("test_secret");
|
||||
assert!(removed);
|
||||
assert!(!store.has_secret("test_secret"));
|
||||
|
||||
let removed_again = store.remove_secret("test_secret");
|
||||
assert!(!removed_again);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_encrypted_secret_store_serialization() {
|
||||
let (mut store, key) = EncryptedSecretStore::new().unwrap();
|
||||
|
||||
store.add_secret(&key, "secret1", "value1").unwrap();
|
||||
store.add_secret(&key, "secret2", "value2").unwrap();
|
||||
|
||||
// Serialize to JSON
|
||||
let json = store.to_json().unwrap();
|
||||
|
||||
// Deserialize from JSON
|
||||
let restored_store = EncryptedSecretStore::from_json(&json).unwrap();
|
||||
|
||||
// Verify secrets are still accessible
|
||||
assert_eq!(
|
||||
restored_store.get_secret(&key, "secret1").unwrap(),
|
||||
"value1"
|
||||
);
|
||||
assert_eq!(
|
||||
restored_store.get_secret(&key, "secret2").unwrap(),
|
||||
"value2"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_key_derivation() {
|
||||
let password = "test_password";
|
||||
let salt = b"test_salt_bytes_32_chars_long!!";
|
||||
let iterations = 10000;
|
||||
|
||||
let key1 = KeyDerivation::derive_key_from_password(password, salt, iterations);
|
||||
let key2 = KeyDerivation::derive_key_from_password(password, salt, iterations);
|
||||
|
||||
// Same password and salt should produce same key
|
||||
assert_eq!(key1, key2);
|
||||
|
||||
// Different salt should produce different key
|
||||
let different_salt = b"different_salt_bytes_32_chars!";
|
||||
let key3 = KeyDerivation::derive_key_from_password(password, different_salt, iterations);
|
||||
assert_ne!(key1, key3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_random_key_generation() {
|
||||
let key1 = KeyDerivation::generate_random_key();
|
||||
let key2 = KeyDerivation::generate_random_key();
|
||||
|
||||
// Random keys should be different
|
||||
assert_ne!(key1, key2);
|
||||
|
||||
// Keys should be 32 bytes
|
||||
assert_eq!(key1.len(), 32);
|
||||
assert_eq!(key2.len(), 32);
|
||||
}
|
||||
}
|
||||
252
crates/secrets/src/substitution.rs
Normal file
252
crates/secrets/src/substitution.rs
Normal file
@@ -0,0 +1,252 @@
|
||||
use crate::{SecretManager, SecretResult};
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
/// Regex to match GitHub-style secret references: ${{ secrets.SECRET_NAME }}
|
||||
static ref SECRET_PATTERN: Regex = Regex::new(
|
||||
r"\$\{\{\s*secrets\.([a-zA-Z0-9_][a-zA-Z0-9_-]*)\s*\}\}"
|
||||
).unwrap();
|
||||
|
||||
/// Regex to match provider-specific secret references: ${{ secrets.provider:SECRET_NAME }}
|
||||
static ref PROVIDER_SECRET_PATTERN: Regex = Regex::new(
|
||||
r"\$\{\{\s*secrets\.([a-zA-Z0-9_][a-zA-Z0-9_-]*):([a-zA-Z0-9_][a-zA-Z0-9_-]*)\s*\}\}"
|
||||
).unwrap();
|
||||
}
|
||||
|
||||
/// Secret substitution engine for replacing secret references in text
|
||||
pub struct SecretSubstitution<'a> {
|
||||
manager: &'a SecretManager,
|
||||
resolved_secrets: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl<'a> SecretSubstitution<'a> {
|
||||
/// Create a new secret substitution engine
|
||||
pub fn new(manager: &'a SecretManager) -> Self {
|
||||
Self {
|
||||
manager,
|
||||
resolved_secrets: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Substitute all secret references in the given text
|
||||
pub async fn substitute(&mut self, text: &str) -> SecretResult<String> {
|
||||
let mut result = text.to_string();
|
||||
|
||||
// First, handle provider-specific secrets: ${{ secrets.provider:SECRET_NAME }}
|
||||
result = self.substitute_provider_secrets(&result).await?;
|
||||
|
||||
// Then handle default provider secrets: ${{ secrets.SECRET_NAME }}
|
||||
result = self.substitute_default_secrets(&result).await?;
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Substitute provider-specific secret references
|
||||
async fn substitute_provider_secrets(&mut self, text: &str) -> SecretResult<String> {
|
||||
let mut result = text.to_string();
|
||||
|
||||
for captures in PROVIDER_SECRET_PATTERN.captures_iter(text) {
|
||||
let full_match = captures.get(0).unwrap().as_str();
|
||||
let provider = captures.get(1).unwrap().as_str();
|
||||
let secret_name = captures.get(2).unwrap().as_str();
|
||||
|
||||
let cache_key = format!("{}:{}", provider, secret_name);
|
||||
|
||||
let secret_value = if let Some(cached) = self.resolved_secrets.get(&cache_key) {
|
||||
cached.clone()
|
||||
} else {
|
||||
let secret = self
|
||||
.manager
|
||||
.get_secret_from_provider(provider, secret_name)
|
||||
.await?;
|
||||
let value = secret.value().to_string();
|
||||
self.resolved_secrets.insert(cache_key, value.clone());
|
||||
value
|
||||
};
|
||||
|
||||
result = result.replace(full_match, &secret_value);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Substitute default provider secret references
|
||||
async fn substitute_default_secrets(&mut self, text: &str) -> SecretResult<String> {
|
||||
let mut result = text.to_string();
|
||||
|
||||
for captures in SECRET_PATTERN.captures_iter(text) {
|
||||
let full_match = captures.get(0).unwrap().as_str();
|
||||
let secret_name = captures.get(1).unwrap().as_str();
|
||||
|
||||
let secret_value = if let Some(cached) = self.resolved_secrets.get(secret_name) {
|
||||
cached.clone()
|
||||
} else {
|
||||
let secret = self.manager.get_secret(secret_name).await?;
|
||||
let value = secret.value().to_string();
|
||||
self.resolved_secrets
|
||||
.insert(secret_name.to_string(), value.clone());
|
||||
value
|
||||
};
|
||||
|
||||
result = result.replace(full_match, &secret_value);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Get all resolved secrets (for masking purposes)
|
||||
pub fn resolved_secrets(&self) -> &HashMap<String, String> {
|
||||
&self.resolved_secrets
|
||||
}
|
||||
|
||||
/// Check if text contains secret references
|
||||
pub fn contains_secrets(text: &str) -> bool {
|
||||
SECRET_PATTERN.is_match(text) || PROVIDER_SECRET_PATTERN.is_match(text)
|
||||
}
|
||||
|
||||
/// Extract all secret references from text without resolving them
|
||||
pub fn extract_secret_refs(text: &str) -> Vec<SecretRef> {
|
||||
let mut refs = Vec::new();
|
||||
|
||||
// Extract provider-specific references
|
||||
for captures in PROVIDER_SECRET_PATTERN.captures_iter(text) {
|
||||
let full_match = captures.get(0).unwrap().as_str();
|
||||
let provider = captures.get(1).unwrap().as_str();
|
||||
let name = captures.get(2).unwrap().as_str();
|
||||
|
||||
refs.push(SecretRef {
|
||||
full_text: full_match.to_string(),
|
||||
provider: Some(provider.to_string()),
|
||||
name: name.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Extract default provider references
|
||||
for captures in SECRET_PATTERN.captures_iter(text) {
|
||||
let full_match = captures.get(0).unwrap().as_str();
|
||||
let name = captures.get(1).unwrap().as_str();
|
||||
|
||||
refs.push(SecretRef {
|
||||
full_text: full_match.to_string(),
|
||||
provider: None,
|
||||
name: name.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
refs
|
||||
}
|
||||
}
|
||||
|
||||
/// A reference to a secret found in text
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct SecretRef {
|
||||
/// The full text of the secret reference (e.g., "${{ secrets.API_KEY }}")
|
||||
pub full_text: String,
|
||||
/// The provider name, if specified
|
||||
pub provider: Option<String>,
|
||||
/// The secret name
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
impl SecretRef {
|
||||
/// Get the cache key for this secret reference
|
||||
pub fn cache_key(&self) -> String {
|
||||
match &self.provider {
|
||||
Some(provider) => format!("{}:{}", provider, self.name),
|
||||
None => self.name.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{SecretError, SecretManager};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_basic_secret_substitution() {
|
||||
// Use unique secret names to avoid test conflicts
|
||||
let github_token_name = format!("GITHUB_TOKEN_{}", std::process::id());
|
||||
let api_key_name = format!("API_KEY_{}", std::process::id());
|
||||
|
||||
std::env::set_var(&github_token_name, "ghp_test_token");
|
||||
std::env::set_var(&api_key_name, "secret_api_key");
|
||||
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
let input = format!(
|
||||
"Token: ${{{{ secrets.{} }}}}, API: ${{{{ secrets.{} }}}}",
|
||||
github_token_name, api_key_name
|
||||
);
|
||||
let result = substitution.substitute(&input).await.unwrap();
|
||||
|
||||
assert_eq!(result, "Token: ghp_test_token, API: secret_api_key");
|
||||
|
||||
std::env::remove_var(&github_token_name);
|
||||
std::env::remove_var(&api_key_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_provider_specific_substitution() {
|
||||
// Use unique secret name to avoid test conflicts
|
||||
let vault_secret_name = format!("VAULT_SECRET_{}", std::process::id());
|
||||
std::env::set_var(&vault_secret_name, "vault_value");
|
||||
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
let input = format!("Value: ${{{{ secrets.env:{} }}}}", vault_secret_name);
|
||||
let result = substitution.substitute(&input).await.unwrap();
|
||||
|
||||
assert_eq!(result, "Value: vault_value");
|
||||
|
||||
std::env::remove_var(&vault_secret_name);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_extract_secret_refs() {
|
||||
let input = "Token: ${{ secrets.GITHUB_TOKEN }}, Vault: ${{ secrets.vault:API_KEY }}";
|
||||
let refs = SecretSubstitution::extract_secret_refs(input);
|
||||
|
||||
assert_eq!(refs.len(), 2);
|
||||
|
||||
let github_ref = &refs.iter().find(|r| r.name == "GITHUB_TOKEN").unwrap();
|
||||
assert_eq!(github_ref.provider, None);
|
||||
assert_eq!(github_ref.full_text, "${{ secrets.GITHUB_TOKEN }}");
|
||||
|
||||
let vault_ref = &refs.iter().find(|r| r.name == "API_KEY").unwrap();
|
||||
assert_eq!(vault_ref.provider, Some("vault".to_string()));
|
||||
assert_eq!(vault_ref.full_text, "${{ secrets.vault:API_KEY }}");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_contains_secrets() {
|
||||
assert!(SecretSubstitution::contains_secrets(
|
||||
"${{ secrets.API_KEY }}"
|
||||
));
|
||||
assert!(SecretSubstitution::contains_secrets(
|
||||
"${{ secrets.vault:SECRET }}"
|
||||
));
|
||||
assert!(!SecretSubstitution::contains_secrets("${{ matrix.os }}"));
|
||||
assert!(!SecretSubstitution::contains_secrets("No secrets here"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_secret_substitution_error_handling() {
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
let input = "Token: ${{ secrets.NONEXISTENT_SECRET }}";
|
||||
let result = substitution.substitute(input).await;
|
||||
|
||||
assert!(result.is_err());
|
||||
match result.unwrap_err() {
|
||||
SecretError::NotFound { name } => {
|
||||
assert_eq!(name, "NONEXISTENT_SECRET");
|
||||
}
|
||||
_ => panic!("Expected NotFound error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
241
crates/secrets/src/validation.rs
Normal file
241
crates/secrets/src/validation.rs
Normal file
@@ -0,0 +1,241 @@
|
||||
// Copyright 2024 wrkflw contributors
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Input validation utilities for secrets management
|
||||
|
||||
use crate::{SecretError, SecretResult};
|
||||
use regex::Regex;
|
||||
|
||||
/// Maximum allowed secret value size (1MB)
|
||||
pub const MAX_SECRET_SIZE: usize = 1024 * 1024;
|
||||
|
||||
/// Maximum allowed secret name length
|
||||
pub const MAX_SECRET_NAME_LENGTH: usize = 255;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
/// Valid secret name pattern: alphanumeric, underscores, hyphens, dots
|
||||
static ref SECRET_NAME_PATTERN: Regex = Regex::new(r"^[a-zA-Z0-9_.-]+$").unwrap();
|
||||
}
|
||||
|
||||
/// Validate a secret name
|
||||
pub fn validate_secret_name(name: &str) -> SecretResult<()> {
|
||||
if name.is_empty() {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: "Secret name cannot be empty".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if name.len() > MAX_SECRET_NAME_LENGTH {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: format!(
|
||||
"Secret name too long: {} characters (max: {})",
|
||||
name.len(),
|
||||
MAX_SECRET_NAME_LENGTH
|
||||
),
|
||||
});
|
||||
}
|
||||
|
||||
if !SECRET_NAME_PATTERN.is_match(name) {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: "Secret name can only contain letters, numbers, underscores, hyphens, and dots"
|
||||
.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Check for potentially dangerous patterns
|
||||
if name.starts_with('.') || name.ends_with('.') {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: "Secret name cannot start or end with a dot".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
if name.contains("..") {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: "Secret name cannot contain consecutive dots".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Reserved names
|
||||
let reserved_names = [
|
||||
"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8",
|
||||
"COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
|
||||
];
|
||||
|
||||
if reserved_names.contains(&name.to_uppercase().as_str()) {
|
||||
return Err(SecretError::InvalidSecretName {
|
||||
reason: format!("'{}' is a reserved name", name),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate a secret value
|
||||
pub fn validate_secret_value(value: &str) -> SecretResult<()> {
|
||||
let size = value.len();
|
||||
|
||||
if size > MAX_SECRET_SIZE {
|
||||
return Err(SecretError::SecretTooLarge {
|
||||
size,
|
||||
max_size: MAX_SECRET_SIZE,
|
||||
});
|
||||
}
|
||||
|
||||
// Check for null bytes which could cause issues
|
||||
if value.contains('\0') {
|
||||
return Err(SecretError::InvalidFormat(
|
||||
"Secret value cannot contain null bytes".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate a provider name
|
||||
pub fn validate_provider_name(name: &str) -> SecretResult<()> {
|
||||
if name.is_empty() {
|
||||
return Err(SecretError::InvalidConfig(
|
||||
"Provider name cannot be empty".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if name.len() > 64 {
|
||||
return Err(SecretError::InvalidConfig(format!(
|
||||
"Provider name too long: {} characters (max: 64)",
|
||||
name.len()
|
||||
)));
|
||||
}
|
||||
|
||||
if !name
|
||||
.chars()
|
||||
.all(|c| c.is_alphanumeric() || c == '_' || c == '-')
|
||||
{
|
||||
return Err(SecretError::InvalidConfig(
|
||||
"Provider name can only contain letters, numbers, underscores, and hyphens".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sanitize input for logging to prevent log injection attacks
|
||||
pub fn sanitize_for_logging(input: &str) -> String {
|
||||
input
|
||||
.chars()
|
||||
.map(|c| match c {
|
||||
'\n' | '\r' | '\t' => ' ',
|
||||
c if c.is_control() => '?',
|
||||
c => c,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Check if a string might be a secret based on common patterns
|
||||
pub fn looks_like_secret(value: &str) -> bool {
|
||||
if value.len() < 8 {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check for high entropy (random-looking strings)
|
||||
let unique_chars: std::collections::HashSet<char> = value.chars().collect();
|
||||
let entropy_ratio = unique_chars.len() as f64 / value.len() as f64;
|
||||
|
||||
if entropy_ratio > 0.6 && value.len() > 16 {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check for common secret patterns
|
||||
let secret_patterns = [
|
||||
r"^[A-Za-z0-9+/=]{40,}$", // Base64-like
|
||||
r"^[a-fA-F0-9]{32,}$", // Hex strings
|
||||
r"^[A-Z0-9]{20,}$", // All caps alphanumeric
|
||||
r"^sk_[a-zA-Z0-9_-]+$", // Stripe-like keys
|
||||
r"^pk_[a-zA-Z0-9_-]+$", // Public keys
|
||||
r"^rk_[a-zA-Z0-9_-]+$", // Restricted keys
|
||||
];
|
||||
|
||||
for pattern in &secret_patterns {
|
||||
if let Ok(regex) = Regex::new(pattern) {
|
||||
if regex.is_match(value) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_validate_secret_name() {
|
||||
// Valid names
|
||||
assert!(validate_secret_name("API_KEY").is_ok());
|
||||
assert!(validate_secret_name("database-password").is_ok());
|
||||
assert!(validate_secret_name("service.token").is_ok());
|
||||
assert!(validate_secret_name("GITHUB_TOKEN_123").is_ok());
|
||||
|
||||
// Invalid names
|
||||
assert!(validate_secret_name("").is_err());
|
||||
assert!(validate_secret_name("name with spaces").is_err());
|
||||
assert!(validate_secret_name("name/with/slashes").is_err());
|
||||
assert!(validate_secret_name(".hidden").is_err());
|
||||
assert!(validate_secret_name("ending.").is_err());
|
||||
assert!(validate_secret_name("double..dot").is_err());
|
||||
assert!(validate_secret_name("CON").is_err());
|
||||
assert!(validate_secret_name(&"a".repeat(300)).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_validate_secret_value() {
|
||||
// Valid values
|
||||
assert!(validate_secret_value("short_secret").is_ok());
|
||||
assert!(validate_secret_value("").is_ok()); // Empty is allowed
|
||||
assert!(validate_secret_value(&"a".repeat(1000)).is_ok());
|
||||
|
||||
// Invalid values
|
||||
assert!(validate_secret_value(&"a".repeat(MAX_SECRET_SIZE + 1)).is_err());
|
||||
assert!(validate_secret_value("secret\0with\0nulls").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_validate_provider_name() {
|
||||
// Valid names
|
||||
assert!(validate_provider_name("env").is_ok());
|
||||
assert!(validate_provider_name("file").is_ok());
|
||||
assert!(validate_provider_name("aws-secrets").is_ok());
|
||||
assert!(validate_provider_name("vault_prod").is_ok());
|
||||
|
||||
// Invalid names
|
||||
assert!(validate_provider_name("").is_err());
|
||||
assert!(validate_provider_name("name with spaces").is_err());
|
||||
assert!(validate_provider_name("name/with/slashes").is_err());
|
||||
assert!(validate_provider_name(&"a".repeat(100)).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sanitize_for_logging() {
|
||||
assert_eq!(sanitize_for_logging("normal text"), "normal text");
|
||||
assert_eq!(sanitize_for_logging("line\nbreak"), "line break");
|
||||
assert_eq!(sanitize_for_logging("tab\there"), "tab here");
|
||||
assert_eq!(sanitize_for_logging("carriage\rreturn"), "carriage return");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_looks_like_secret() {
|
||||
// Should detect as secrets
|
||||
assert!(looks_like_secret("sk_test_abcdefghijklmnop1234567890"));
|
||||
assert!(looks_like_secret("abcdefghijklmnopqrstuvwxyz123456"));
|
||||
assert!(looks_like_secret("ABCDEF1234567890ABCDEF1234567890"));
|
||||
assert!(looks_like_secret(
|
||||
"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY3ODkw"
|
||||
));
|
||||
|
||||
// Should not detect as secrets
|
||||
assert!(!looks_like_secret("short"));
|
||||
assert!(!looks_like_secret("this_is_just_a_regular_variable_name"));
|
||||
assert!(!looks_like_secret("hello world this is plain text"));
|
||||
}
|
||||
}
|
||||
350
crates/secrets/tests/integration_tests.rs
Normal file
350
crates/secrets/tests/integration_tests.rs
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright 2024 wrkflw contributors
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Integration tests for the secrets crate
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::process;
|
||||
use tempfile::TempDir;
|
||||
use tokio;
|
||||
use wrkflw_secrets::{
|
||||
SecretConfig, SecretManager, SecretMasker, SecretProviderConfig, SecretSubstitution,
|
||||
};
|
||||
|
||||
/// Test end-to-end secret management workflow
|
||||
#[tokio::test]
|
||||
async fn test_end_to_end_secret_workflow() {
|
||||
// Create a temporary directory for file-based secrets
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let secrets_file = temp_dir.path().join("secrets.json");
|
||||
|
||||
// Create a secrets file
|
||||
let secrets_content = r#"
|
||||
{
|
||||
"database_password": "super_secret_db_pass_123",
|
||||
"api_token": "tk_abc123def456ghi789",
|
||||
"encryption_key": "key_zyxwvutsrqponmlkjihgfedcba9876543210"
|
||||
}
|
||||
"#;
|
||||
std::fs::write(&secrets_file, secrets_content).unwrap();
|
||||
|
||||
// Set up environment variables
|
||||
let env_secret_name = format!("GITHUB_TOKEN_{}", process::id());
|
||||
std::env::set_var(&env_secret_name, "ghp_1234567890abcdefghijklmnopqrstuvwxyz");
|
||||
|
||||
// Create configuration
|
||||
let mut providers = HashMap::new();
|
||||
providers.insert(
|
||||
"env".to_string(),
|
||||
SecretProviderConfig::Environment { prefix: None },
|
||||
);
|
||||
providers.insert(
|
||||
"file".to_string(),
|
||||
SecretProviderConfig::File {
|
||||
path: secrets_file.to_string_lossy().to_string(),
|
||||
},
|
||||
);
|
||||
|
||||
let config = SecretConfig {
|
||||
default_provider: "env".to_string(),
|
||||
providers,
|
||||
enable_masking: true,
|
||||
timeout_seconds: 30,
|
||||
enable_caching: true,
|
||||
cache_ttl_seconds: 300,
|
||||
rate_limit: Default::default(),
|
||||
};
|
||||
|
||||
// Initialize secret manager
|
||||
let manager = SecretManager::new(config).await.unwrap();
|
||||
|
||||
// Test 1: Get secret from environment provider
|
||||
let env_secret = manager.get_secret(&env_secret_name).await.unwrap();
|
||||
assert_eq!(
|
||||
env_secret.value(),
|
||||
"ghp_1234567890abcdefghijklmnopqrstuvwxyz"
|
||||
);
|
||||
assert_eq!(
|
||||
env_secret.metadata.get("source"),
|
||||
Some(&"environment".to_string())
|
||||
);
|
||||
|
||||
// Test 2: Get secret from file provider
|
||||
let file_secret = manager
|
||||
.get_secret_from_provider("file", "database_password")
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(file_secret.value(), "super_secret_db_pass_123");
|
||||
assert_eq!(
|
||||
file_secret.metadata.get("source"),
|
||||
Some(&"file".to_string())
|
||||
);
|
||||
|
||||
// Test 3: List secrets from file provider
|
||||
let all_secrets = manager.list_all_secrets().await.unwrap();
|
||||
assert!(all_secrets.contains_key("file"));
|
||||
let file_secrets = &all_secrets["file"];
|
||||
assert!(file_secrets.contains(&"database_password".to_string()));
|
||||
assert!(file_secrets.contains(&"api_token".to_string()));
|
||||
assert!(file_secrets.contains(&"encryption_key".to_string()));
|
||||
|
||||
// Test 4: Secret substitution
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
let input = format!(
|
||||
"Database: ${{{{ secrets.file:database_password }}}}, GitHub: ${{{{ secrets.{} }}}}",
|
||||
env_secret_name
|
||||
);
|
||||
let output = substitution.substitute(&input).await.unwrap();
|
||||
assert!(output.contains("super_secret_db_pass_123"));
|
||||
assert!(output.contains("ghp_1234567890abcdefghijklmnopqrstuvwxyz"));
|
||||
|
||||
// Test 5: Secret masking
|
||||
let mut masker = SecretMasker::new();
|
||||
masker.add_secret("super_secret_db_pass_123");
|
||||
masker.add_secret("ghp_1234567890abcdefghijklmnopqrstuvwxyz");
|
||||
|
||||
let log_message = "Connection failed: super_secret_db_pass_123 invalid for ghp_1234567890abcdefghijklmnopqrstuvwxyz";
|
||||
let masked = masker.mask(log_message);
|
||||
assert!(!masked.contains("super_secret_db_pass_123"));
|
||||
assert!(!masked.contains("ghp_1234567890abcdefghijklmnopqrstuvwxyz"));
|
||||
assert!(masked.contains("***"));
|
||||
|
||||
// Test 6: Health check
|
||||
let health_results = manager.health_check().await;
|
||||
assert!(health_results.get("env").unwrap().is_ok());
|
||||
assert!(health_results.get("file").unwrap().is_ok());
|
||||
|
||||
// Test 7: Caching behavior - functional test instead of timing
|
||||
// First call should succeed and populate cache
|
||||
let cached_secret = manager.get_secret(&env_secret_name).await.unwrap();
|
||||
assert_eq!(
|
||||
cached_secret.value(),
|
||||
"ghp_1234567890abcdefghijklmnopqrstuvwxyz"
|
||||
);
|
||||
|
||||
// Remove the environment variable to test if cache works
|
||||
std::env::remove_var(&env_secret_name);
|
||||
|
||||
// Second call should still succeed because value is cached
|
||||
let cached_secret_2 = manager.get_secret(&env_secret_name).await.unwrap();
|
||||
assert_eq!(
|
||||
cached_secret_2.value(),
|
||||
"ghp_1234567890abcdefghijklmnopqrstuvwxyz"
|
||||
);
|
||||
|
||||
// Restore environment variable for cleanup
|
||||
std::env::set_var(&env_secret_name, "ghp_1234567890abcdefghijklmnopqrstuvwxyz");
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var(&env_secret_name);
|
||||
}
|
||||
|
||||
/// Test error handling scenarios
|
||||
#[tokio::test]
|
||||
async fn test_error_handling() {
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
|
||||
// Test 1: Secret not found
|
||||
let result = manager.get_secret("NONEXISTENT_SECRET_12345").await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("not found"));
|
||||
|
||||
// Test 2: Invalid provider
|
||||
let result = manager
|
||||
.get_secret_from_provider("invalid_provider", "some_secret")
|
||||
.await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("not found"));
|
||||
|
||||
// Test 3: Invalid secret name
|
||||
let result = manager.get_secret("").await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("cannot be empty"));
|
||||
|
||||
// Test 4: Invalid secret name with special characters
|
||||
let result = manager.get_secret("invalid/secret/name").await;
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("can only contain"));
|
||||
}
|
||||
|
||||
/// Test rate limiting functionality
|
||||
#[tokio::test]
|
||||
async fn test_rate_limiting() {
|
||||
use std::time::Duration;
|
||||
use wrkflw_secrets::rate_limit::RateLimitConfig;
|
||||
|
||||
// Create config with very low rate limit
|
||||
let mut config = SecretConfig::default();
|
||||
config.rate_limit = RateLimitConfig {
|
||||
max_requests: 2,
|
||||
window_duration: Duration::from_secs(10),
|
||||
enabled: true,
|
||||
};
|
||||
|
||||
let manager = SecretManager::new(config).await.unwrap();
|
||||
|
||||
// Set up test secret
|
||||
let test_secret_name = format!("RATE_LIMIT_TEST_{}", process::id());
|
||||
std::env::set_var(&test_secret_name, "test_value");
|
||||
|
||||
// First two requests should succeed
|
||||
let result1 = manager.get_secret(&test_secret_name).await;
|
||||
assert!(result1.is_ok());
|
||||
|
||||
let result2 = manager.get_secret(&test_secret_name).await;
|
||||
assert!(result2.is_ok());
|
||||
|
||||
// Third request should fail due to rate limiting
|
||||
let result3 = manager.get_secret(&test_secret_name).await;
|
||||
assert!(result3.is_err());
|
||||
assert!(result3
|
||||
.unwrap_err()
|
||||
.to_string()
|
||||
.contains("Rate limit exceeded"));
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
/// Test concurrent access patterns
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_access() {
|
||||
use std::sync::Arc;
|
||||
|
||||
let manager = Arc::new(SecretManager::default().await.unwrap());
|
||||
|
||||
// Set up test secret
|
||||
let test_secret_name = format!("CONCURRENT_TEST_{}", process::id());
|
||||
std::env::set_var(&test_secret_name, "concurrent_test_value");
|
||||
|
||||
// Spawn multiple concurrent tasks
|
||||
let mut handles = Vec::new();
|
||||
for i in 0..10 {
|
||||
let manager_clone = Arc::clone(&manager);
|
||||
let secret_name = test_secret_name.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let result = manager_clone.get_secret(&secret_name).await;
|
||||
(i, result)
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// Wait for all tasks to complete
|
||||
let mut successful_requests = 0;
|
||||
for handle in handles {
|
||||
let (_, result) = handle.await.unwrap();
|
||||
if result.is_ok() {
|
||||
successful_requests += 1;
|
||||
assert_eq!(result.unwrap().value(), "concurrent_test_value");
|
||||
}
|
||||
}
|
||||
|
||||
// At least some requests should succeed (depending on rate limiting)
|
||||
assert!(successful_requests > 0);
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var(&test_secret_name);
|
||||
}
|
||||
|
||||
/// Test secret substitution edge cases
|
||||
#[tokio::test]
|
||||
async fn test_substitution_edge_cases() {
|
||||
let manager = SecretManager::default().await.unwrap();
|
||||
|
||||
// Set up test secrets
|
||||
let secret1_name = format!("EDGE_CASE_1_{}", process::id());
|
||||
let secret2_name = format!("EDGE_CASE_2_{}", process::id());
|
||||
std::env::set_var(&secret1_name, "value1");
|
||||
std::env::set_var(&secret2_name, "value2");
|
||||
|
||||
let mut substitution = SecretSubstitution::new(&manager);
|
||||
|
||||
// Test 1: Multiple references to the same secret
|
||||
let input = format!(
|
||||
"First: ${{{{ secrets.{} }}}} Second: ${{{{ secrets.{} }}}}",
|
||||
secret1_name, secret1_name
|
||||
);
|
||||
let output = substitution.substitute(&input).await.unwrap();
|
||||
assert_eq!(output, "First: value1 Second: value1");
|
||||
|
||||
// Test 2: Nested-like patterns (should not be substituted)
|
||||
let input = "This is not a secret: ${ secrets.FAKE }";
|
||||
let output = substitution.substitute(&input).await.unwrap();
|
||||
assert_eq!(input, output); // Should remain unchanged
|
||||
|
||||
// Test 3: Mixed valid and invalid references
|
||||
let input = format!(
|
||||
"Valid: ${{{{ secrets.{} }}}} Invalid: ${{{{ secrets.NONEXISTENT }}}}",
|
||||
secret1_name
|
||||
);
|
||||
let result = substitution.substitute(&input).await;
|
||||
assert!(result.is_err()); // Should fail due to missing secret
|
||||
|
||||
// Test 4: Empty input
|
||||
let output = substitution.substitute("").await.unwrap();
|
||||
assert_eq!(output, "");
|
||||
|
||||
// Test 5: No secret references
|
||||
let input = "This is just plain text with no secrets";
|
||||
let output = substitution.substitute(input).await.unwrap();
|
||||
assert_eq!(input, output);
|
||||
|
||||
// Cleanup
|
||||
std::env::remove_var(&secret1_name);
|
||||
std::env::remove_var(&secret2_name);
|
||||
}
|
||||
|
||||
/// Test masking comprehensive patterns
|
||||
#[tokio::test]
|
||||
async fn test_comprehensive_masking() {
|
||||
let mut masker = SecretMasker::new();
|
||||
|
||||
// Add various types of secrets
|
||||
masker.add_secret("password123");
|
||||
masker.add_secret("api_key_abcdef123456");
|
||||
masker.add_secret("very_long_secret_key_that_should_preserve_structure_987654321");
|
||||
|
||||
// Test various input scenarios
|
||||
let test_cases = vec![
|
||||
(
|
||||
"Password is password123 and API key is api_key_abcdef123456",
|
||||
vec!["password123", "api_key_abcdef123456"],
|
||||
),
|
||||
(
|
||||
"GitHub token: ghp_1234567890123456789012345678901234567890",
|
||||
vec!["ghp_"],
|
||||
),
|
||||
(
|
||||
"AWS key: AKIAIOSFODNN7EXAMPLE",
|
||||
vec!["AKIA"],
|
||||
),
|
||||
(
|
||||
"JWT: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c",
|
||||
vec!["eyJ", "***"],
|
||||
),
|
||||
];
|
||||
|
||||
for (input, should_not_contain) in test_cases {
|
||||
let masked = masker.mask(input);
|
||||
for pattern in should_not_contain {
|
||||
if pattern != "***" {
|
||||
assert!(
|
||||
!masked.contains(pattern)
|
||||
|| pattern == "ghp_"
|
||||
|| pattern == "AKIA"
|
||||
|| pattern == "eyJ",
|
||||
"Masked text '{}' should not contain '{}' (or only partial patterns)",
|
||||
masked,
|
||||
pattern
|
||||
);
|
||||
} else {
|
||||
assert!(
|
||||
masked.contains(pattern),
|
||||
"Masked text '{}' should contain '{}'",
|
||||
masked,
|
||||
pattern
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -12,12 +12,12 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.6.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.7.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
chrono.workspace = true
|
||||
|
||||
@@ -154,6 +154,15 @@ fn run_tui_event_loop(
|
||||
if last_tick.elapsed() >= tick_rate {
|
||||
app.tick();
|
||||
app.update_running_workflow_progress();
|
||||
|
||||
// Check for log processing updates (includes system log change detection)
|
||||
app.check_log_processing_updates();
|
||||
|
||||
// Request log processing if needed
|
||||
if app.logs_need_update {
|
||||
app.request_log_processing_update();
|
||||
}
|
||||
|
||||
last_tick = Instant::now();
|
||||
}
|
||||
|
||||
@@ -180,6 +189,25 @@ fn run_tui_event_loop(
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle help overlay scrolling
|
||||
if app.show_help {
|
||||
match key.code {
|
||||
KeyCode::Up | KeyCode::Char('k') => {
|
||||
app.scroll_help_up();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Down | KeyCode::Char('j') => {
|
||||
app.scroll_help_down();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Esc | KeyCode::Char('?') => {
|
||||
app.show_help = false;
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
match key.code {
|
||||
KeyCode::Char('q') => {
|
||||
// Exit and clean up
|
||||
@@ -214,6 +242,8 @@ fn run_tui_event_loop(
|
||||
} else {
|
||||
app.scroll_logs_up();
|
||||
}
|
||||
} else if app.selected_tab == 3 {
|
||||
app.scroll_help_up();
|
||||
} else if app.selected_tab == 0 {
|
||||
app.previous_workflow();
|
||||
} else if app.selected_tab == 1 {
|
||||
@@ -231,6 +261,8 @@ fn run_tui_event_loop(
|
||||
} else {
|
||||
app.scroll_logs_down();
|
||||
}
|
||||
} else if app.selected_tab == 3 {
|
||||
app.scroll_help_down();
|
||||
} else if app.selected_tab == 0 {
|
||||
app.next_workflow();
|
||||
} else if app.selected_tab == 1 {
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
// App state for the UI
|
||||
use crate::log_processor::{LogProcessingRequest, LogProcessor, ProcessedLogEntry};
|
||||
use crate::models::{
|
||||
ExecutionResultMsg, JobExecution, LogFilterLevel, StepExecution, Workflow, WorkflowExecution,
|
||||
WorkflowStatus,
|
||||
@@ -40,6 +41,15 @@ pub struct App {
|
||||
pub log_filter_level: Option<LogFilterLevel>, // Current log level filter
|
||||
pub log_search_matches: Vec<usize>, // Indices of logs that match the search
|
||||
pub log_search_match_idx: usize, // Current match index for navigation
|
||||
|
||||
// Help tab scrolling
|
||||
pub help_scroll: usize, // Scrolling position for help content
|
||||
|
||||
// Background log processing
|
||||
pub log_processor: LogProcessor,
|
||||
pub processed_logs: Vec<ProcessedLogEntry>,
|
||||
pub logs_need_update: bool, // Flag to trigger log processing
|
||||
pub last_system_logs_count: usize, // Track system log changes
|
||||
}
|
||||
|
||||
impl App {
|
||||
@@ -168,6 +178,7 @@ impl App {
|
||||
}
|
||||
}
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
};
|
||||
|
||||
App {
|
||||
@@ -199,6 +210,13 @@ impl App {
|
||||
log_filter_level: Some(LogFilterLevel::All),
|
||||
log_search_matches: Vec::new(),
|
||||
log_search_match_idx: 0,
|
||||
help_scroll: 0,
|
||||
|
||||
// Background log processing
|
||||
log_processor: LogProcessor::new(),
|
||||
processed_logs: Vec::new(),
|
||||
logs_need_update: true,
|
||||
last_system_logs_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,7 +232,8 @@ impl App {
|
||||
pub fn toggle_emulation_mode(&mut self) {
|
||||
self.runtime_type = match self.runtime_type {
|
||||
RuntimeType::Docker => RuntimeType::Podman,
|
||||
RuntimeType::Podman => RuntimeType::Emulation,
|
||||
RuntimeType::Podman => RuntimeType::SecureEmulation,
|
||||
RuntimeType::SecureEmulation => RuntimeType::Emulation,
|
||||
RuntimeType::Emulation => RuntimeType::Docker,
|
||||
};
|
||||
self.logs
|
||||
@@ -238,7 +257,8 @@ impl App {
|
||||
match self.runtime_type {
|
||||
RuntimeType::Docker => "Docker",
|
||||
RuntimeType::Podman => "Podman",
|
||||
RuntimeType::Emulation => "Emulation",
|
||||
RuntimeType::SecureEmulation => "Secure Emulation",
|
||||
RuntimeType::Emulation => "Emulation (Unsafe)",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -429,10 +449,9 @@ impl App {
|
||||
if let Some(idx) = self.workflow_list_state.selected() {
|
||||
if idx < self.workflows.len() && !self.execution_queue.contains(&idx) {
|
||||
self.execution_queue.push(idx);
|
||||
let timestamp = Local::now().format("%H:%M:%S").to_string();
|
||||
self.logs.push(format!(
|
||||
"[{}] Added '{}' to execution queue. Press 'Enter' to start.",
|
||||
timestamp, self.workflows[idx].name
|
||||
self.add_timestamped_log(&format!(
|
||||
"Added '{}' to execution queue. Press 'Enter' to start.",
|
||||
self.workflows[idx].name
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -635,10 +654,11 @@ impl App {
|
||||
self.log_search_active = false;
|
||||
self.log_search_query.clear();
|
||||
self.log_search_matches.clear();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
KeyCode::Backspace => {
|
||||
self.log_search_query.pop();
|
||||
self.update_log_search_matches();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
self.log_search_active = false;
|
||||
@@ -646,7 +666,7 @@ impl App {
|
||||
}
|
||||
KeyCode::Char(c) => {
|
||||
self.log_search_query.push(c);
|
||||
self.update_log_search_matches();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
@@ -658,8 +678,8 @@ impl App {
|
||||
if !self.log_search_active {
|
||||
// Don't clear the query, this allows toggling the search UI while keeping the filter
|
||||
} else {
|
||||
// When activating search, update matches
|
||||
self.update_log_search_matches();
|
||||
// When activating search, trigger update
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -670,8 +690,8 @@ impl App {
|
||||
Some(level) => Some(level.next()),
|
||||
};
|
||||
|
||||
// Update search matches when filter changes
|
||||
self.update_log_search_matches();
|
||||
// Trigger log processing update when filter changes
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
// Clear log search and filter
|
||||
@@ -680,6 +700,7 @@ impl App {
|
||||
self.log_filter_level = None;
|
||||
self.log_search_matches.clear();
|
||||
self.log_search_match_idx = 0;
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
// Update matches based on current search and filter
|
||||
@@ -790,6 +811,18 @@ impl App {
|
||||
}
|
||||
}
|
||||
|
||||
// Scroll help content up
|
||||
pub fn scroll_help_up(&mut self) {
|
||||
self.help_scroll = self.help_scroll.saturating_sub(1);
|
||||
}
|
||||
|
||||
// Scroll help content down
|
||||
pub fn scroll_help_down(&mut self) {
|
||||
// The help content has a fixed number of lines, so we set a reasonable max
|
||||
const MAX_HELP_SCROLL: usize = 30; // Adjust based on help content length
|
||||
self.help_scroll = (self.help_scroll + 1).min(MAX_HELP_SCROLL);
|
||||
}
|
||||
|
||||
// Update progress for running workflows
|
||||
pub fn update_running_workflow_progress(&mut self) {
|
||||
if let Some(idx) = self.current_execution {
|
||||
@@ -955,4 +988,82 @@ impl App {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request log processing update from background thread
|
||||
pub fn request_log_processing_update(&mut self) {
|
||||
let request = LogProcessingRequest {
|
||||
search_query: self.log_search_query.clone(),
|
||||
filter_level: self.log_filter_level.clone(),
|
||||
app_logs: self.logs.clone(),
|
||||
app_logs_count: self.logs.len(),
|
||||
system_logs_count: wrkflw_logging::get_logs().len(),
|
||||
};
|
||||
|
||||
if self.log_processor.request_update(request).is_err() {
|
||||
// Log processor channel disconnected, recreate it
|
||||
self.log_processor = LogProcessor::new();
|
||||
self.logs_need_update = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Check for and apply log processing updates
|
||||
pub fn check_log_processing_updates(&mut self) {
|
||||
// Check if system logs have changed
|
||||
let current_system_logs_count = wrkflw_logging::get_logs().len();
|
||||
if current_system_logs_count != self.last_system_logs_count {
|
||||
self.last_system_logs_count = current_system_logs_count;
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
if let Some(response) = self.log_processor.try_get_update() {
|
||||
self.processed_logs = response.processed_logs;
|
||||
self.log_search_matches = response.search_matches;
|
||||
|
||||
// Update scroll position to first match if we have search results
|
||||
if !self.log_search_matches.is_empty() && !self.log_search_query.is_empty() {
|
||||
self.log_search_match_idx = 0;
|
||||
if let Some(&idx) = self.log_search_matches.first() {
|
||||
self.log_scroll = idx;
|
||||
}
|
||||
}
|
||||
|
||||
self.logs_need_update = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Trigger log processing when search/filter changes
|
||||
pub fn mark_logs_for_update(&mut self) {
|
||||
self.logs_need_update = true;
|
||||
self.request_log_processing_update();
|
||||
}
|
||||
|
||||
/// Get combined app and system logs for background processing
|
||||
pub fn get_combined_logs(&self) -> Vec<String> {
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Add app logs
|
||||
for log in &self.logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Add system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
all_logs
|
||||
}
|
||||
|
||||
/// Add a log entry and trigger log processing update
|
||||
pub fn add_log(&mut self, message: String) {
|
||||
self.logs.push(message);
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
/// Add a formatted log entry with timestamp and trigger log processing update
|
||||
pub fn add_timestamped_log(&mut self, message: &str) {
|
||||
let timestamp = Local::now().format("%H:%M:%S").to_string();
|
||||
let formatted_message = format!("[{}] {}", timestamp, message);
|
||||
self.add_log(formatted_message);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,6 +122,7 @@ pub async fn execute_workflow_cli(
|
||||
RuntimeType::Podman
|
||||
}
|
||||
}
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
};
|
||||
|
||||
@@ -140,6 +141,7 @@ pub async fn execute_workflow_cli(
|
||||
runtime_type,
|
||||
verbose,
|
||||
preserve_containers_on_failure: false, // Default for this path
|
||||
secrets_config: None, // Use default secrets configuration
|
||||
};
|
||||
|
||||
match wrkflw_executor::execute_workflow(path, config).await {
|
||||
@@ -454,6 +456,7 @@ pub fn start_next_workflow_execution(
|
||||
RuntimeType::Podman
|
||||
}
|
||||
}
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
};
|
||||
|
||||
@@ -531,6 +534,7 @@ pub fn start_next_workflow_execution(
|
||||
runtime_type,
|
||||
verbose,
|
||||
preserve_containers_on_failure,
|
||||
secrets_config: None, // Use default secrets configuration
|
||||
};
|
||||
|
||||
let execution_result = wrkflw_utils::fd::with_stderr_to_null(|| {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
pub mod app;
|
||||
pub mod components;
|
||||
pub mod handlers;
|
||||
pub mod log_processor;
|
||||
pub mod models;
|
||||
pub mod utils;
|
||||
pub mod views;
|
||||
|
||||
305
crates/ui/src/log_processor.rs
Normal file
305
crates/ui/src/log_processor.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
// Background log processor for asynchronous log filtering and formatting
|
||||
use crate::models::LogFilterLevel;
|
||||
use ratatui::{
|
||||
style::{Color, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Cell, Row},
|
||||
};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Processed log entry ready for rendering
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProcessedLogEntry {
|
||||
pub timestamp: String,
|
||||
pub log_type: String,
|
||||
pub log_style: Style,
|
||||
pub content_spans: Vec<Span<'static>>,
|
||||
}
|
||||
|
||||
impl ProcessedLogEntry {
|
||||
/// Convert to a table row for rendering
|
||||
pub fn to_row(&self) -> Row<'static> {
|
||||
Row::new(vec![
|
||||
Cell::from(self.timestamp.clone()),
|
||||
Cell::from(self.log_type.clone()).style(self.log_style),
|
||||
Cell::from(Line::from(self.content_spans.clone())),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/// Request to update log processing parameters
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LogProcessingRequest {
|
||||
pub search_query: String,
|
||||
pub filter_level: Option<LogFilterLevel>,
|
||||
pub app_logs: Vec<String>, // Complete app logs
|
||||
pub app_logs_count: usize, // To detect changes in app logs
|
||||
pub system_logs_count: usize, // To detect changes in system logs
|
||||
}
|
||||
|
||||
/// Response with processed logs
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LogProcessingResponse {
|
||||
pub processed_logs: Vec<ProcessedLogEntry>,
|
||||
pub total_log_count: usize,
|
||||
pub filtered_count: usize,
|
||||
pub search_matches: Vec<usize>, // Indices of logs that match search
|
||||
}
|
||||
|
||||
/// Background log processor
|
||||
pub struct LogProcessor {
|
||||
request_tx: mpsc::Sender<LogProcessingRequest>,
|
||||
response_rx: mpsc::Receiver<LogProcessingResponse>,
|
||||
_worker_handle: thread::JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl LogProcessor {
|
||||
/// Create a new log processor with a background worker thread
|
||||
pub fn new() -> Self {
|
||||
let (request_tx, request_rx) = mpsc::channel::<LogProcessingRequest>();
|
||||
let (response_tx, response_rx) = mpsc::channel::<LogProcessingResponse>();
|
||||
|
||||
let worker_handle = thread::spawn(move || {
|
||||
Self::worker_loop(request_rx, response_tx);
|
||||
});
|
||||
|
||||
Self {
|
||||
request_tx,
|
||||
response_rx,
|
||||
_worker_handle: worker_handle,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a processing request (non-blocking)
|
||||
pub fn request_update(
|
||||
&self,
|
||||
request: LogProcessingRequest,
|
||||
) -> Result<(), mpsc::SendError<LogProcessingRequest>> {
|
||||
self.request_tx.send(request)
|
||||
}
|
||||
|
||||
/// Try to get the latest processed logs (non-blocking)
|
||||
pub fn try_get_update(&self) -> Option<LogProcessingResponse> {
|
||||
self.response_rx.try_recv().ok()
|
||||
}
|
||||
|
||||
/// Background worker loop
|
||||
fn worker_loop(
|
||||
request_rx: mpsc::Receiver<LogProcessingRequest>,
|
||||
response_tx: mpsc::Sender<LogProcessingResponse>,
|
||||
) {
|
||||
let mut last_request: Option<LogProcessingRequest> = None;
|
||||
let mut last_processed_time = Instant::now();
|
||||
let mut cached_logs: Vec<String> = Vec::new();
|
||||
let mut cached_app_logs_count = 0;
|
||||
let mut cached_system_logs_count = 0;
|
||||
|
||||
loop {
|
||||
// Check for new requests with a timeout to allow periodic processing
|
||||
let request = match request_rx.recv_timeout(Duration::from_millis(100)) {
|
||||
Ok(req) => Some(req),
|
||||
Err(mpsc::RecvTimeoutError::Timeout) => None,
|
||||
Err(mpsc::RecvTimeoutError::Disconnected) => break,
|
||||
};
|
||||
|
||||
// Update request if we received one
|
||||
if let Some(req) = request {
|
||||
last_request = Some(req);
|
||||
}
|
||||
|
||||
// Process if we have a request and enough time has passed since last processing
|
||||
if let Some(ref req) = last_request {
|
||||
let should_process = last_processed_time.elapsed() > Duration::from_millis(50)
|
||||
&& (cached_app_logs_count != req.app_logs_count
|
||||
|| cached_system_logs_count != req.system_logs_count
|
||||
|| cached_logs.is_empty());
|
||||
|
||||
if should_process {
|
||||
// Refresh log cache if log counts changed
|
||||
if cached_app_logs_count != req.app_logs_count
|
||||
|| cached_system_logs_count != req.system_logs_count
|
||||
|| cached_logs.is_empty()
|
||||
{
|
||||
cached_logs = Self::get_combined_logs(&req.app_logs);
|
||||
cached_app_logs_count = req.app_logs_count;
|
||||
cached_system_logs_count = req.system_logs_count;
|
||||
}
|
||||
|
||||
let response = Self::process_logs(&cached_logs, req);
|
||||
|
||||
if response_tx.send(response).is_err() {
|
||||
break; // Receiver disconnected
|
||||
}
|
||||
|
||||
last_processed_time = Instant::now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get combined app and system logs
|
||||
fn get_combined_logs(app_logs: &[String]) -> Vec<String> {
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Add app logs
|
||||
for log in app_logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Add system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
all_logs
|
||||
}
|
||||
|
||||
/// Process logs according to search and filter criteria
|
||||
fn process_logs(all_logs: &[String], request: &LogProcessingRequest) -> LogProcessingResponse {
|
||||
// Filter logs based on search query and filter level
|
||||
let mut filtered_logs = Vec::new();
|
||||
let mut search_matches = Vec::new();
|
||||
|
||||
for (idx, log) in all_logs.iter().enumerate() {
|
||||
let passes_filter = match &request.filter_level {
|
||||
None => true,
|
||||
Some(level) => level.matches(log),
|
||||
};
|
||||
|
||||
let matches_search = if request.search_query.is_empty() {
|
||||
true
|
||||
} else {
|
||||
log.to_lowercase()
|
||||
.contains(&request.search_query.to_lowercase())
|
||||
};
|
||||
|
||||
if passes_filter && matches_search {
|
||||
filtered_logs.push((idx, log));
|
||||
if matches_search && !request.search_query.is_empty() {
|
||||
search_matches.push(filtered_logs.len() - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process filtered logs into display format
|
||||
let processed_logs: Vec<ProcessedLogEntry> = filtered_logs
|
||||
.iter()
|
||||
.map(|(_, log_line)| Self::process_log_entry(log_line, &request.search_query))
|
||||
.collect();
|
||||
|
||||
LogProcessingResponse {
|
||||
processed_logs,
|
||||
total_log_count: all_logs.len(),
|
||||
filtered_count: filtered_logs.len(),
|
||||
search_matches,
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a single log entry into display format
|
||||
fn process_log_entry(log_line: &str, search_query: &str) -> ProcessedLogEntry {
|
||||
// Extract timestamp from log format [HH:MM:SS]
|
||||
let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let end = log_line.find(']').unwrap_or(0);
|
||||
if end > 1 {
|
||||
log_line[1..end].to_string()
|
||||
} else {
|
||||
"??:??:??".to_string()
|
||||
}
|
||||
} else {
|
||||
"??:??:??".to_string()
|
||||
};
|
||||
|
||||
// Determine log type and style
|
||||
let (log_type, log_style) =
|
||||
if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
|
||||
{
|
||||
("ERROR", Style::default().fg(Color::Red))
|
||||
} else if log_line.contains("Warning")
|
||||
|| log_line.contains("warning")
|
||||
|| log_line.contains("⚠️")
|
||||
{
|
||||
("WARN", Style::default().fg(Color::Yellow))
|
||||
} else if log_line.contains("Success")
|
||||
|| log_line.contains("success")
|
||||
|| log_line.contains("✅")
|
||||
{
|
||||
("SUCCESS", Style::default().fg(Color::Green))
|
||||
} else if log_line.contains("Running")
|
||||
|| log_line.contains("running")
|
||||
|| log_line.contains("⟳")
|
||||
{
|
||||
("INFO", Style::default().fg(Color::Cyan))
|
||||
} else if log_line.contains("Triggering") || log_line.contains("triggered") {
|
||||
("TRIG", Style::default().fg(Color::Magenta))
|
||||
} else {
|
||||
("INFO", Style::default().fg(Color::Gray))
|
||||
};
|
||||
|
||||
// Extract content after timestamp
|
||||
let content = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let start = log_line.find(']').unwrap_or(0) + 1;
|
||||
log_line[start..].trim()
|
||||
} else {
|
||||
log_line
|
||||
};
|
||||
|
||||
// Create content spans with search highlighting
|
||||
let content_spans = if !search_query.is_empty() {
|
||||
Self::highlight_search_matches(content, search_query)
|
||||
} else {
|
||||
vec![Span::raw(content.to_string())]
|
||||
};
|
||||
|
||||
ProcessedLogEntry {
|
||||
timestamp,
|
||||
log_type: log_type.to_string(),
|
||||
log_style,
|
||||
content_spans,
|
||||
}
|
||||
}
|
||||
|
||||
/// Highlight search matches in content
|
||||
fn highlight_search_matches(content: &str, search_query: &str) -> Vec<Span<'static>> {
|
||||
let mut spans = Vec::new();
|
||||
let lowercase_content = content.to_lowercase();
|
||||
let lowercase_query = search_query.to_lowercase();
|
||||
|
||||
if lowercase_content.contains(&lowercase_query) {
|
||||
let mut last_idx = 0;
|
||||
while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
|
||||
let real_idx = last_idx + idx;
|
||||
|
||||
// Add text before match
|
||||
if real_idx > last_idx {
|
||||
spans.push(Span::raw(content[last_idx..real_idx].to_string()));
|
||||
}
|
||||
|
||||
// Add matched text with highlight
|
||||
let match_end = real_idx + search_query.len();
|
||||
spans.push(Span::styled(
|
||||
content[real_idx..match_end].to_string(),
|
||||
Style::default().bg(Color::Yellow).fg(Color::Black),
|
||||
));
|
||||
|
||||
last_idx = match_end;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if last_idx < content.len() {
|
||||
spans.push(Span::raw(content[last_idx..].to_string()));
|
||||
}
|
||||
} else {
|
||||
spans.push(Span::raw(content.to_string()));
|
||||
}
|
||||
|
||||
spans
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LogProcessor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@@ -50,6 +50,7 @@ pub struct StepExecution {
|
||||
}
|
||||
|
||||
/// Log filter levels
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum LogFilterLevel {
|
||||
Info,
|
||||
Warning,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Help overlay rendering
|
||||
use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::Rect,
|
||||
layout::{Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, BorderType, Borders, Paragraph, Wrap},
|
||||
@@ -9,11 +9,22 @@ use ratatui::{
|
||||
};
|
||||
use std::io;
|
||||
|
||||
// Render the help tab
|
||||
pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect) {
|
||||
let help_text = vec![
|
||||
// Render the help tab with scroll support
|
||||
pub fn render_help_content(
|
||||
f: &mut Frame<CrosstermBackend<io::Stdout>>,
|
||||
area: Rect,
|
||||
scroll_offset: usize,
|
||||
) {
|
||||
// Split the area into columns for better organization
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
|
||||
.split(area);
|
||||
|
||||
// Left column content
|
||||
let left_help_text = vec![
|
||||
Line::from(Span::styled(
|
||||
"Keyboard Controls",
|
||||
"🗂 NAVIGATION",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
@@ -21,35 +32,391 @@ pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect)
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Tab",
|
||||
"Tab / Shift+Tab",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Switch between tabs"),
|
||||
]),
|
||||
// More help text would follow...
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"1-4 / w,x,l,h",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Jump to specific tab"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"↑/↓ or k/j",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Navigate lists"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Enter",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Select/View details"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Esc",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Back/Exit help"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🚀 WORKFLOW MANAGEMENT",
|
||||
Style::default()
|
||||
.fg(Color::Green)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Space",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle workflow selection"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"r",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Run selected workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"a",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Select all workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"n",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Deselect all workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Shift+R",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Reset workflow status"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"t",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Trigger remote workflow"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🔧 EXECUTION MODES",
|
||||
Style::default()
|
||||
.fg(Color::Magenta)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"e",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle emulation mode"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"v",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle validation mode"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(vec![Span::styled(
|
||||
"Runtime Modes:",
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Docker", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" - Container isolation (default)"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Podman", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" - Rootless containers"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Emulation", Style::default().fg(Color::Red)),
|
||||
Span::raw(" - Process mode (UNSAFE)"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Secure Emulation", Style::default().fg(Color::Yellow)),
|
||||
Span::raw(" - Sandboxed processes"),
|
||||
]),
|
||||
];
|
||||
|
||||
let help_widget = Paragraph::new(help_text)
|
||||
// Right column content
|
||||
let right_help_text = vec![
|
||||
Line::from(Span::styled(
|
||||
"📄 LOGS & SEARCH",
|
||||
Style::default()
|
||||
.fg(Color::Blue)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"s",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle log search"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"f",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle log filter"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"c",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Clear search & filter"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"n",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Next search match"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"↑/↓",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Scroll logs/Navigate"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"ℹ️ TAB OVERVIEW",
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"1. Workflows",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Browse & select workflows"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • View workflow files")]),
|
||||
Line::from(vec![Span::raw(" • Select multiple for batch execution")]),
|
||||
Line::from(vec![Span::raw(" • Trigger remote workflows")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"2. Execution",
|
||||
Style::default()
|
||||
.fg(Color::Green)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Monitor job progress"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • View job status and details")]),
|
||||
Line::from(vec![Span::raw(" • Enter job details with Enter")]),
|
||||
Line::from(vec![Span::raw(" • Navigate step execution")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"3. Logs",
|
||||
Style::default()
|
||||
.fg(Color::Blue)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - View execution logs"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • Search and filter logs")]),
|
||||
Line::from(vec![Span::raw(" • Real-time log streaming")]),
|
||||
Line::from(vec![Span::raw(" • Navigate search results")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"4. Help",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - This comprehensive guide"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🎯 QUICK ACTIONS",
|
||||
Style::default().fg(Color::Red).add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"?",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle help overlay"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"q",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Quit application"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"💡 TIPS",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• Use "),
|
||||
Span::styled("emulation mode", Style::default().fg(Color::Red)),
|
||||
Span::raw(" when containers"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" are unavailable or for quick testing")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• "),
|
||||
Span::styled("Secure emulation", Style::default().fg(Color::Yellow)),
|
||||
Span::raw(" provides sandboxing"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" for untrusted workflows")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• Use "),
|
||||
Span::styled("validation mode", Style::default().fg(Color::Green)),
|
||||
Span::raw(" to check"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" workflows without execution")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• "),
|
||||
Span::styled("Preserve containers", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" on failure"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" for debugging (Docker/Podman only)")]),
|
||||
];
|
||||
|
||||
// Apply scroll offset to the content
|
||||
let left_help_text = if scroll_offset < left_help_text.len() {
|
||||
left_help_text.into_iter().skip(scroll_offset).collect()
|
||||
} else {
|
||||
vec![Line::from("")]
|
||||
};
|
||||
|
||||
let right_help_text = if scroll_offset < right_help_text.len() {
|
||||
right_help_text.into_iter().skip(scroll_offset).collect()
|
||||
} else {
|
||||
vec![Line::from("")]
|
||||
};
|
||||
|
||||
// Render left column
|
||||
let left_widget = Paragraph::new(left_help_text)
|
||||
.block(
|
||||
Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Rounded)
|
||||
.title(Span::styled(" Help ", Style::default().fg(Color::Yellow))),
|
||||
.title(Span::styled(
|
||||
" WRKFLW Help - Controls & Features ",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
)
|
||||
.wrap(Wrap { trim: true });
|
||||
|
||||
f.render_widget(help_widget, area);
|
||||
// Render right column
|
||||
let right_widget = Paragraph::new(right_help_text)
|
||||
.block(
|
||||
Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Rounded)
|
||||
.title(Span::styled(
|
||||
" Interface Guide & Tips ",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
)
|
||||
.wrap(Wrap { trim: true });
|
||||
|
||||
f.render_widget(left_widget, chunks[0]);
|
||||
f.render_widget(right_widget, chunks[1]);
|
||||
}
|
||||
|
||||
// Render a help overlay
|
||||
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
|
||||
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>, scroll_offset: usize) {
|
||||
let size = f.size();
|
||||
|
||||
// Create a slightly smaller centered modal
|
||||
let width = size.width.min(60);
|
||||
let height = size.height.min(20);
|
||||
// Create a larger centered modal to accommodate comprehensive help content
|
||||
let width = (size.width * 9 / 10).min(120); // Use 90% of width, max 120 chars
|
||||
let height = (size.height * 9 / 10).min(40); // Use 90% of height, max 40 lines
|
||||
let x = (size.width - width) / 2;
|
||||
let y = (size.height - height) / 2;
|
||||
|
||||
@@ -60,10 +427,32 @@ pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
|
||||
height,
|
||||
};
|
||||
|
||||
// Create a clear background
|
||||
// Create a semi-transparent dark background for better visibility
|
||||
let clear = Block::default().style(Style::default().bg(Color::Black));
|
||||
f.render_widget(clear, size);
|
||||
|
||||
// Render the help content
|
||||
render_help_tab(f, help_area);
|
||||
// Add a border around the entire overlay for better visual separation
|
||||
let overlay_block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Double)
|
||||
.style(Style::default().bg(Color::Black).fg(Color::White))
|
||||
.title(Span::styled(
|
||||
" Press ? or Esc to close help ",
|
||||
Style::default()
|
||||
.fg(Color::Gray)
|
||||
.add_modifier(Modifier::ITALIC),
|
||||
));
|
||||
|
||||
f.render_widget(overlay_block, help_area);
|
||||
|
||||
// Create inner area for content
|
||||
let inner_area = Rect {
|
||||
x: help_area.x + 1,
|
||||
y: help_area.y + 1,
|
||||
width: help_area.width.saturating_sub(2),
|
||||
height: help_area.height.saturating_sub(2),
|
||||
};
|
||||
|
||||
// Render the help content with scroll support
|
||||
render_help_content(f, inner_area, scroll_offset);
|
||||
}
|
||||
|
||||
@@ -140,45 +140,8 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
|
||||
f.render_widget(search_block, chunks[1]);
|
||||
}
|
||||
|
||||
// Combine application logs with system logs
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Now all logs should have timestamps in the format [HH:MM:SS]
|
||||
|
||||
// Process app logs
|
||||
for log in &app.logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Process system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Filter logs based on search query and filter level
|
||||
let filtered_logs = if !app.log_search_query.is_empty() || app.log_filter_level.is_some() {
|
||||
all_logs
|
||||
.iter()
|
||||
.filter(|log| {
|
||||
let passes_filter = match &app.log_filter_level {
|
||||
None => true,
|
||||
Some(level) => level.matches(log),
|
||||
};
|
||||
|
||||
let matches_search = if app.log_search_query.is_empty() {
|
||||
true
|
||||
} else {
|
||||
log.to_lowercase()
|
||||
.contains(&app.log_search_query.to_lowercase())
|
||||
};
|
||||
|
||||
passes_filter && matches_search
|
||||
})
|
||||
.cloned()
|
||||
.collect::<Vec<String>>()
|
||||
} else {
|
||||
all_logs.clone() // Clone to avoid moving all_logs
|
||||
};
|
||||
// Use processed logs from background thread instead of processing on every frame
|
||||
let filtered_logs = &app.processed_logs;
|
||||
|
||||
// Create a table for logs for better organization
|
||||
let header_cells = ["Time", "Type", "Message"]
|
||||
@@ -189,109 +152,10 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
|
||||
.style(Style::default().add_modifier(Modifier::BOLD))
|
||||
.height(1);
|
||||
|
||||
let rows = filtered_logs.iter().map(|log_line| {
|
||||
// Parse log line to extract timestamp, type and message
|
||||
|
||||
// Extract timestamp from log format [HH:MM:SS]
|
||||
let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let end = log_line.find(']').unwrap_or(0);
|
||||
if end > 1 {
|
||||
log_line[1..end].to_string()
|
||||
} else {
|
||||
"??:??:??".to_string() // Show placeholder for malformed logs
|
||||
}
|
||||
} else {
|
||||
"??:??:??".to_string() // Show placeholder for malformed logs
|
||||
};
|
||||
|
||||
let (log_type, log_style, _) =
|
||||
if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
|
||||
{
|
||||
("ERROR", Style::default().fg(Color::Red), log_line.as_str())
|
||||
} else if log_line.contains("Warning")
|
||||
|| log_line.contains("warning")
|
||||
|| log_line.contains("⚠️")
|
||||
{
|
||||
(
|
||||
"WARN",
|
||||
Style::default().fg(Color::Yellow),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else if log_line.contains("Success")
|
||||
|| log_line.contains("success")
|
||||
|| log_line.contains("✅")
|
||||
{
|
||||
(
|
||||
"SUCCESS",
|
||||
Style::default().fg(Color::Green),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else if log_line.contains("Running")
|
||||
|| log_line.contains("running")
|
||||
|| log_line.contains("⟳")
|
||||
{
|
||||
("INFO", Style::default().fg(Color::Cyan), log_line.as_str())
|
||||
} else if log_line.contains("Triggering") || log_line.contains("triggered") {
|
||||
(
|
||||
"TRIG",
|
||||
Style::default().fg(Color::Magenta),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else {
|
||||
("INFO", Style::default().fg(Color::Gray), log_line.as_str())
|
||||
};
|
||||
|
||||
// Extract content after timestamp
|
||||
let content = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let start = log_line.find(']').unwrap_or(0) + 1;
|
||||
log_line[start..].trim()
|
||||
} else {
|
||||
log_line.as_str()
|
||||
};
|
||||
|
||||
// Highlight search matches in content if search is active
|
||||
let mut content_spans = Vec::new();
|
||||
if !app.log_search_query.is_empty() {
|
||||
let lowercase_content = content.to_lowercase();
|
||||
let lowercase_query = app.log_search_query.to_lowercase();
|
||||
|
||||
if lowercase_content.contains(&lowercase_query) {
|
||||
let mut last_idx = 0;
|
||||
while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
|
||||
let real_idx = last_idx + idx;
|
||||
|
||||
// Add text before match
|
||||
if real_idx > last_idx {
|
||||
content_spans.push(Span::raw(content[last_idx..real_idx].to_string()));
|
||||
}
|
||||
|
||||
// Add matched text with highlight
|
||||
let match_end = real_idx + app.log_search_query.len();
|
||||
content_spans.push(Span::styled(
|
||||
content[real_idx..match_end].to_string(),
|
||||
Style::default().bg(Color::Yellow).fg(Color::Black),
|
||||
));
|
||||
|
||||
last_idx = match_end;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if last_idx < content.len() {
|
||||
content_spans.push(Span::raw(content[last_idx..].to_string()));
|
||||
}
|
||||
} else {
|
||||
content_spans.push(Span::raw(content));
|
||||
}
|
||||
} else {
|
||||
content_spans.push(Span::raw(content));
|
||||
}
|
||||
|
||||
Row::new(vec![
|
||||
Cell::from(timestamp),
|
||||
Cell::from(log_type).style(log_style),
|
||||
Cell::from(Line::from(content_spans)),
|
||||
])
|
||||
});
|
||||
// Convert processed logs to table rows - this is now very fast since logs are pre-processed
|
||||
let rows = filtered_logs
|
||||
.iter()
|
||||
.map(|processed_log| processed_log.to_row());
|
||||
|
||||
let content_idx = if show_search_bar { 2 } else { 1 };
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ use std::io;
|
||||
pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
|
||||
// Check if help should be shown as an overlay
|
||||
if app.show_help {
|
||||
help_overlay::render_help_overlay(f);
|
||||
help_overlay::render_help_overlay(f, app.help_scroll);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
|
||||
}
|
||||
}
|
||||
2 => logs_tab::render_logs_tab(f, app, main_chunks[1]),
|
||||
3 => help_overlay::render_help_tab(f, main_chunks[1]),
|
||||
3 => help_overlay::render_help_content(f, main_chunks[1], app.help_scroll),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,8 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
.bg(match app.runtime_type {
|
||||
RuntimeType::Docker => Color::Blue,
|
||||
RuntimeType::Podman => Color::Cyan,
|
||||
RuntimeType::Emulation => Color::Magenta,
|
||||
RuntimeType::SecureEmulation => Color::Green,
|
||||
RuntimeType::Emulation => Color::Red,
|
||||
})
|
||||
.fg(Color::White),
|
||||
));
|
||||
@@ -108,6 +109,12 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
.fg(Color::White),
|
||||
));
|
||||
}
|
||||
RuntimeType::SecureEmulation => {
|
||||
status_items.push(Span::styled(
|
||||
" 🔒SECURE ",
|
||||
Style::default().bg(Color::Green).fg(Color::White),
|
||||
));
|
||||
}
|
||||
RuntimeType::Emulation => {
|
||||
// No need to check anything for emulation mode
|
||||
}
|
||||
@@ -174,7 +181,7 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
"[No logs to display]"
|
||||
}
|
||||
}
|
||||
3 => "[?] Toggle help overlay",
|
||||
3 => "[↑/↓] Scroll help [?] Toggle help overlay",
|
||||
_ => "",
|
||||
};
|
||||
status_items.push(Span::styled(
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,18 +12,18 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Workspace crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.6.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.6.0" }
|
||||
wrkflw-gitlab = { path = "../gitlab", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.6.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.6.0" }
|
||||
wrkflw-ui = { path = "../ui", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.6.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.7.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.7.0" }
|
||||
wrkflw-gitlab = { path = "../gitlab", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.7.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.7.0" }
|
||||
wrkflw-ui = { path = "../ui", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.7.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
clap.workspace = true
|
||||
@@ -62,4 +62,4 @@ path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "wrkflw"
|
||||
path = "src/main.rs"
|
||||
path = "src/main.rs"
|
||||
|
||||
@@ -26,6 +26,9 @@ wrkflw validate
|
||||
wrkflw validate .github/workflows/ci.yml
|
||||
wrkflw validate path/to/workflows
|
||||
|
||||
# Validate multiple files and/or directories
|
||||
wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows
|
||||
|
||||
# Run a workflow (Docker by default)
|
||||
wrkflw run .github/workflows/ci.yml
|
||||
|
||||
@@ -40,10 +43,11 @@ wrkflw tui --runtime podman
|
||||
|
||||
### Commands
|
||||
|
||||
- **validate**: Validate a workflow/pipeline file or directory
|
||||
- **validate**: Validate workflow/pipeline files and/or directories
|
||||
- GitHub (default): `.github/workflows/*.yml`
|
||||
- GitLab: `.gitlab-ci.yml` or files ending with `gitlab-ci.yml`
|
||||
- Exit code behavior (by default): `1` when validation failures are detected
|
||||
- Accepts multiple paths in a single invocation
|
||||
- Exit code behavior (by default): `1` when any validation failure is detected
|
||||
- Flags: `--gitlab`, `--exit-code`, `--no-exit-code`, `--verbose`
|
||||
|
||||
- **run**: Execute a workflow or pipeline locally
|
||||
|
||||
@@ -10,8 +10,10 @@ enum RuntimeChoice {
|
||||
Docker,
|
||||
/// Use Podman containers for isolation
|
||||
Podman,
|
||||
/// Use process emulation mode (no containers)
|
||||
/// Use process emulation mode (no containers, UNSAFE)
|
||||
Emulation,
|
||||
/// Use secure emulation mode with sandboxing (recommended for untrusted code)
|
||||
SecureEmulation,
|
||||
}
|
||||
|
||||
impl From<RuntimeChoice> for wrkflw_executor::RuntimeType {
|
||||
@@ -20,6 +22,7 @@ impl From<RuntimeChoice> for wrkflw_executor::RuntimeType {
|
||||
RuntimeChoice::Docker => wrkflw_executor::RuntimeType::Docker,
|
||||
RuntimeChoice::Podman => wrkflw_executor::RuntimeType::Podman,
|
||||
RuntimeChoice::Emulation => wrkflw_executor::RuntimeType::Emulation,
|
||||
RuntimeChoice::SecureEmulation => wrkflw_executor::RuntimeType::SecureEmulation,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -48,8 +51,9 @@ struct Wrkflw {
|
||||
enum Commands {
|
||||
/// Validate workflow or pipeline files
|
||||
Validate {
|
||||
/// Path to workflow/pipeline file or directory (defaults to .github/workflows)
|
||||
path: Option<PathBuf>,
|
||||
/// Path(s) to workflow/pipeline file(s) or directory(ies) (defaults to .github/workflows if none provided)
|
||||
#[arg(value_name = "path", num_args = 0..)]
|
||||
paths: Vec<PathBuf>,
|
||||
|
||||
/// Explicitly validate as GitLab CI/CD pipeline
|
||||
#[arg(long)]
|
||||
@@ -69,7 +73,7 @@ enum Commands {
|
||||
/// Path to workflow/pipeline file to execute
|
||||
path: PathBuf,
|
||||
|
||||
/// Container runtime to use (docker, podman, emulation)
|
||||
/// Container runtime to use (docker, podman, emulation, secure-emulation)
|
||||
#[arg(short, long, value_enum, default_value = "docker")]
|
||||
runtime: RuntimeChoice,
|
||||
|
||||
@@ -91,7 +95,7 @@ enum Commands {
|
||||
/// Path to workflow file or directory (defaults to .github/workflows)
|
||||
path: Option<PathBuf>,
|
||||
|
||||
/// Container runtime to use (docker, podman, emulation)
|
||||
/// Container runtime to use (docker, podman, emulation, secure-emulation)
|
||||
#[arg(short, long, value_enum, default_value = "docker")]
|
||||
runtime: RuntimeChoice,
|
||||
|
||||
@@ -266,6 +270,28 @@ fn is_gitlab_pipeline(path: &Path) -> bool {
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// Gracefully handle Broken pipe (EPIPE) when output is piped (e.g., to `head`)
|
||||
let default_panic_hook = std::panic::take_hook();
|
||||
std::panic::set_hook(Box::new(move |info| {
|
||||
let mut is_broken_pipe = false;
|
||||
if let Some(s) = info.payload().downcast_ref::<&str>() {
|
||||
if s.contains("Broken pipe") {
|
||||
is_broken_pipe = true;
|
||||
}
|
||||
}
|
||||
if let Some(s) = info.payload().downcast_ref::<String>() {
|
||||
if s.contains("Broken pipe") {
|
||||
is_broken_pipe = true;
|
||||
}
|
||||
}
|
||||
if is_broken_pipe {
|
||||
// Treat as a successful, short-circuited exit
|
||||
std::process::exit(0);
|
||||
}
|
||||
// Fallback to the default hook for all other panics
|
||||
default_panic_hook(info);
|
||||
}));
|
||||
|
||||
let cli = Wrkflw::parse();
|
||||
let verbose = cli.verbose;
|
||||
let debug = cli.debug;
|
||||
@@ -286,65 +312,78 @@ async fn main() {
|
||||
|
||||
match &cli.command {
|
||||
Some(Commands::Validate {
|
||||
path,
|
||||
paths,
|
||||
gitlab,
|
||||
exit_code,
|
||||
no_exit_code,
|
||||
}) => {
|
||||
// Determine the path to validate
|
||||
let validate_path = path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(".github/workflows"));
|
||||
|
||||
// Check if the path exists
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Determine the paths to validate (default to .github/workflows when none provided)
|
||||
let validate_paths: Vec<PathBuf> = if paths.is_empty() {
|
||||
vec![PathBuf::from(".github/workflows")]
|
||||
} else {
|
||||
paths.clone()
|
||||
};
|
||||
|
||||
// Determine if we're validating a GitLab pipeline based on the --gitlab flag or file detection
|
||||
let force_gitlab = *gitlab;
|
||||
let mut validation_failed = false;
|
||||
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
for validate_path in validate_paths {
|
||||
// Check if the path exists; if not, mark failure but continue
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
validation_failed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
println!("Validating {} workflow file(s)...", entries.len());
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
println!(
|
||||
"Validating {} workflow file(s) in {}...",
|
||||
entries.len(),
|
||||
validate_path.display()
|
||||
);
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
validation_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
}
|
||||
|
||||
// Set exit code if validation failed and exit_code flag is true (and no_exit_code is false)
|
||||
@@ -364,6 +403,7 @@ async fn main() {
|
||||
runtime_type: runtime.clone().into(),
|
||||
verbose,
|
||||
preserve_containers_on_failure: *preserve_containers_on_failure,
|
||||
secrets_config: None, // Use default secrets configuration
|
||||
};
|
||||
|
||||
// Check if we're explicitly or implicitly running a GitLab pipeline
|
||||
|
||||
65
examples/secrets-demo/.wrkflw/secrets.yml
Normal file
65
examples/secrets-demo/.wrkflw/secrets.yml
Normal file
@@ -0,0 +1,65 @@
|
||||
# wrkflw Secrets Configuration
|
||||
# This file demonstrates various secret provider configurations
|
||||
|
||||
# Default provider to use when no provider is specified in ${{ secrets.name }}
|
||||
default_provider: env
|
||||
|
||||
# Enable automatic masking of secrets in logs and output
|
||||
enable_masking: true
|
||||
|
||||
# Timeout for secret operations (seconds)
|
||||
timeout_seconds: 30
|
||||
|
||||
# Enable caching for performance
|
||||
enable_caching: true
|
||||
|
||||
# Cache TTL in seconds
|
||||
cache_ttl_seconds: 300
|
||||
|
||||
# Secret provider configurations
|
||||
providers:
|
||||
# Environment variable provider
|
||||
env:
|
||||
type: environment
|
||||
# Optional prefix for environment variables
|
||||
# If specified, looks for WRKFLW_SECRET_* variables
|
||||
# prefix: "WRKFLW_SECRET_"
|
||||
|
||||
# File-based secret storage
|
||||
file:
|
||||
type: file
|
||||
# Path to secrets file (supports JSON, YAML, or environment format)
|
||||
path: "~/.wrkflw/secrets.json"
|
||||
|
||||
# HashiCorp Vault (requires vault-provider feature)
|
||||
vault:
|
||||
type: vault
|
||||
url: "https://vault.example.com"
|
||||
auth:
|
||||
method: token
|
||||
token: "${VAULT_TOKEN}"
|
||||
mount_path: "secret"
|
||||
|
||||
# AWS Secrets Manager (requires aws-provider feature)
|
||||
aws:
|
||||
type: aws_secrets_manager
|
||||
region: "us-east-1"
|
||||
# Optional role to assume for cross-account access
|
||||
role_arn: "arn:aws:iam::123456789012:role/SecretRole"
|
||||
|
||||
# Azure Key Vault (requires azure-provider feature)
|
||||
azure:
|
||||
type: azure_key_vault
|
||||
vault_url: "https://myvault.vault.azure.net/"
|
||||
auth:
|
||||
method: service_principal
|
||||
client_id: "${AZURE_CLIENT_ID}"
|
||||
client_secret: "${AZURE_CLIENT_SECRET}"
|
||||
tenant_id: "${AZURE_TENANT_ID}"
|
||||
|
||||
# Google Cloud Secret Manager (requires gcp-provider feature)
|
||||
gcp:
|
||||
type: gcp_secret_manager
|
||||
project_id: "my-project-id"
|
||||
# Optional service account key file
|
||||
key_file: "/path/to/service-account.json"
|
||||
505
examples/secrets-demo/README.md
Normal file
505
examples/secrets-demo/README.md
Normal file
@@ -0,0 +1,505 @@
|
||||
# wrkflw Secrets Management Demo
|
||||
|
||||
This demo demonstrates the comprehensive secrets management system in wrkflw, addressing the critical need for secure secret handling in CI/CD workflows.
|
||||
|
||||
## The Problem
|
||||
|
||||
Without proper secrets support, workflows are severely limited because:
|
||||
|
||||
1. **No way to access sensitive data** - API keys, tokens, passwords, certificates
|
||||
2. **Security risks** - Hardcoded secrets in code or plain text in logs
|
||||
3. **Limited usefulness** - Can't integrate with real services that require authentication
|
||||
4. **Compliance issues** - Unable to meet security standards for production workflows
|
||||
|
||||
## The Solution
|
||||
|
||||
wrkflw now provides comprehensive secrets management with:
|
||||
|
||||
- **Multiple secret providers** (environment variables, files, HashiCorp Vault, AWS Secrets Manager, etc.)
|
||||
- **GitHub Actions-compatible syntax** (`${{ secrets.* }}`)
|
||||
- **Automatic secret masking** in logs and output
|
||||
- **Encrypted storage** for sensitive environments
|
||||
- **Flexible configuration** for different deployment scenarios
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Environment Variables (Simplest)
|
||||
|
||||
```bash
|
||||
# Set secrets as environment variables
|
||||
export GITHUB_TOKEN="ghp_your_token_here"
|
||||
export API_KEY="your_api_key"
|
||||
export DB_PASSWORD="secure_password"
|
||||
```
|
||||
|
||||
Create a workflow that uses secrets:
|
||||
|
||||
```yaml
|
||||
# .github/workflows/secrets-demo.yml
|
||||
name: Secrets Demo
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
test-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Use GitHub Token
|
||||
run: |
|
||||
echo "Using token to access GitHub API"
|
||||
curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
https://api.github.com/user
|
||||
|
||||
- name: Use API Key
|
||||
run: |
|
||||
echo "API Key: ${{ secrets.API_KEY }}"
|
||||
|
||||
- name: Database Connection
|
||||
env:
|
||||
DB_PASS: ${{ secrets.DB_PASSWORD }}
|
||||
run: |
|
||||
echo "Connecting to database with password: ${DB_PASS}"
|
||||
```
|
||||
|
||||
Run with wrkflw:
|
||||
|
||||
```bash
|
||||
wrkflw run .github/workflows/secrets-demo.yml
|
||||
```
|
||||
|
||||
### 2. File-based Secrets
|
||||
|
||||
Create a secrets file:
|
||||
|
||||
```json
|
||||
{
|
||||
"API_KEY": "your_api_key_here",
|
||||
"DB_PASSWORD": "secure_database_password",
|
||||
"GITHUB_TOKEN": "ghp_your_github_token"
|
||||
}
|
||||
```
|
||||
|
||||
Or environment file format:
|
||||
|
||||
```bash
|
||||
# secrets.env
|
||||
API_KEY=your_api_key_here
|
||||
DB_PASSWORD="secure database password"
|
||||
GITHUB_TOKEN=ghp_your_github_token
|
||||
```
|
||||
|
||||
Configure wrkflw to use file-based secrets:
|
||||
|
||||
```yaml
|
||||
# ~/.wrkflw/secrets.yml
|
||||
default_provider: file
|
||||
enable_masking: true
|
||||
timeout_seconds: 30
|
||||
|
||||
providers:
|
||||
file:
|
||||
type: file
|
||||
path: "./secrets.json" # or "./secrets.env"
|
||||
```
|
||||
|
||||
### 3. Advanced Configuration
|
||||
|
||||
For production environments, use external secret managers:
|
||||
|
||||
```yaml
|
||||
# ~/.wrkflw/secrets.yml
|
||||
default_provider: vault
|
||||
enable_masking: true
|
||||
timeout_seconds: 30
|
||||
enable_caching: true
|
||||
cache_ttl_seconds: 300
|
||||
|
||||
providers:
|
||||
env:
|
||||
type: environment
|
||||
prefix: "WRKFLW_SECRET_"
|
||||
|
||||
vault:
|
||||
type: vault
|
||||
url: "https://vault.company.com"
|
||||
auth:
|
||||
method: token
|
||||
token: "${VAULT_TOKEN}"
|
||||
mount_path: "secret"
|
||||
|
||||
aws:
|
||||
type: aws_secrets_manager
|
||||
region: "us-east-1"
|
||||
role_arn: "arn:aws:iam::123456789012:role/SecretRole"
|
||||
```
|
||||
|
||||
## Secret Providers
|
||||
|
||||
### Environment Variables
|
||||
|
||||
**Best for**: Development and simple deployments
|
||||
|
||||
```bash
|
||||
# With prefix
|
||||
export WRKFLW_SECRET_API_KEY="your_key"
|
||||
export WRKFLW_SECRET_DB_PASSWORD="password"
|
||||
|
||||
# Direct environment variables
|
||||
export GITHUB_TOKEN="ghp_token"
|
||||
export API_KEY="key_value"
|
||||
```
|
||||
|
||||
Use in workflows:
|
||||
```yaml
|
||||
steps:
|
||||
- name: Use prefixed secret
|
||||
run: echo "API: ${{ secrets.env:API_KEY }}"
|
||||
|
||||
- name: Use direct secret
|
||||
run: echo "Token: ${{ secrets.GITHUB_TOKEN }}"
|
||||
```
|
||||
|
||||
### File-based Storage
|
||||
|
||||
**Best for**: Local development and testing
|
||||
|
||||
Supports multiple formats:
|
||||
|
||||
**JSON** (`secrets.json`):
|
||||
```json
|
||||
{
|
||||
"GITHUB_TOKEN": "ghp_your_token",
|
||||
"API_KEY": "your_api_key",
|
||||
"DATABASE_URL": "postgresql://user:pass@localhost/db"
|
||||
}
|
||||
```
|
||||
|
||||
**YAML** (`secrets.yml`):
|
||||
```yaml
|
||||
GITHUB_TOKEN: ghp_your_token
|
||||
API_KEY: your_api_key
|
||||
DATABASE_URL: postgresql://user:pass@localhost/db
|
||||
```
|
||||
|
||||
**Environment** (`secrets.env`):
|
||||
```bash
|
||||
GITHUB_TOKEN=ghp_your_token
|
||||
API_KEY=your_api_key
|
||||
DATABASE_URL="postgresql://user:pass@localhost/db"
|
||||
```
|
||||
|
||||
### HashiCorp Vault
|
||||
|
||||
**Best for**: Production environments with centralized secret management
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
vault:
|
||||
type: vault
|
||||
url: "https://vault.company.com"
|
||||
auth:
|
||||
method: token
|
||||
token: "${VAULT_TOKEN}"
|
||||
mount_path: "secret/v2"
|
||||
```
|
||||
|
||||
Use vault secrets in workflows:
|
||||
```yaml
|
||||
steps:
|
||||
- name: Use Vault secret
|
||||
run: curl -H "X-API-Key: ${{ secrets.vault:api-key }}" api.service.com
|
||||
```
|
||||
|
||||
### AWS Secrets Manager
|
||||
|
||||
**Best for**: AWS-native deployments
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
aws:
|
||||
type: aws_secrets_manager
|
||||
region: "us-east-1"
|
||||
role_arn: "arn:aws:iam::123456789012:role/SecretRole"
|
||||
```
|
||||
|
||||
### Azure Key Vault
|
||||
|
||||
**Best for**: Azure-native deployments
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
azure:
|
||||
type: azure_key_vault
|
||||
vault_url: "https://myvault.vault.azure.net/"
|
||||
auth:
|
||||
method: service_principal
|
||||
client_id: "${AZURE_CLIENT_ID}"
|
||||
client_secret: "${AZURE_CLIENT_SECRET}"
|
||||
tenant_id: "${AZURE_TENANT_ID}"
|
||||
```
|
||||
|
||||
## Secret Masking
|
||||
|
||||
wrkflw automatically masks secrets in logs to prevent accidental exposure:
|
||||
|
||||
```bash
|
||||
# Original log:
|
||||
# "API response: {\"token\": \"ghp_1234567890abcdef\", \"status\": \"ok\"}"
|
||||
|
||||
# Masked log:
|
||||
# "API response: {\"token\": \"ghp_***\", \"status\": \"ok\"}"
|
||||
```
|
||||
|
||||
Automatically detects and masks:
|
||||
- GitHub Personal Access Tokens (`ghp_*`)
|
||||
- GitHub App tokens (`ghs_*`)
|
||||
- GitHub OAuth tokens (`gho_*`)
|
||||
- AWS Access Keys (`AKIA*`)
|
||||
- JWT tokens
|
||||
- Generic API keys
|
||||
|
||||
## Workflow Examples
|
||||
|
||||
### GitHub API Integration
|
||||
|
||||
```yaml
|
||||
name: GitHub API Demo
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
github-integration:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: List repositories
|
||||
run: |
|
||||
curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
https://api.github.com/user/repos
|
||||
|
||||
- name: Create issue
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
https://api.github.com/repos/owner/repo/issues \
|
||||
-d '{"title":"Automated issue","body":"Created by wrkflw"}'
|
||||
```
|
||||
|
||||
### Database Operations
|
||||
|
||||
```yaml
|
||||
name: Database Demo
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
database-ops:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Run migrations
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.DATABASE_URL }}
|
||||
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
|
||||
run: |
|
||||
echo "Running database migrations..."
|
||||
# Your migration commands here
|
||||
|
||||
- name: Backup database
|
||||
run: |
|
||||
pg_dump "${{ secrets.DATABASE_URL }}" > backup.sql
|
||||
```
|
||||
|
||||
### Multi-Provider Example
|
||||
|
||||
```yaml
|
||||
name: Multi-Provider Demo
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
multi-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Use environment secret
|
||||
run: echo "Env: ${{ secrets.env:API_KEY }}"
|
||||
|
||||
- name: Use file secret
|
||||
run: echo "File: ${{ secrets.file:GITHUB_TOKEN }}"
|
||||
|
||||
- name: Use Vault secret
|
||||
run: echo "Vault: ${{ secrets.vault:database-password }}"
|
||||
|
||||
- name: Use AWS secret
|
||||
run: echo "AWS: ${{ secrets.aws:prod/api/key }}"
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### 1. Use Appropriate Providers
|
||||
|
||||
- **Development**: Environment variables or files
|
||||
- **Staging**: File-based or simple vault
|
||||
- **Production**: External secret managers (Vault, AWS, Azure, GCP)
|
||||
|
||||
### 2. Enable Secret Masking
|
||||
|
||||
Always enable masking in production:
|
||||
|
||||
```yaml
|
||||
enable_masking: true
|
||||
```
|
||||
|
||||
### 3. Rotate Secrets Regularly
|
||||
|
||||
Use secret managers that support automatic rotation:
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
aws:
|
||||
type: aws_secrets_manager
|
||||
region: "us-east-1"
|
||||
# AWS Secrets Manager handles automatic rotation
|
||||
```
|
||||
|
||||
### 4. Use Least Privilege
|
||||
|
||||
Grant minimal necessary permissions:
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
vault:
|
||||
type: vault
|
||||
auth:
|
||||
method: app_role
|
||||
role_id: "${VAULT_ROLE_ID}"
|
||||
secret_id: "${VAULT_SECRET_ID}"
|
||||
# Role has access only to required secrets
|
||||
```
|
||||
|
||||
### 5. Monitor Secret Access
|
||||
|
||||
Use secret managers with audit logging:
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
azure:
|
||||
type: azure_key_vault
|
||||
vault_url: "https://myvault.vault.azure.net/"
|
||||
# Azure Key Vault provides detailed audit logs
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Secret Not Found
|
||||
|
||||
```bash
|
||||
Error: Secret 'API_KEY' not found
|
||||
|
||||
# Check:
|
||||
1. Secret exists in the provider
|
||||
2. Provider is correctly configured
|
||||
3. Authentication is working
|
||||
4. Correct provider name in ${{ secrets.provider:name }}
|
||||
```
|
||||
|
||||
### Authentication Failed
|
||||
|
||||
```bash
|
||||
Error: Authentication failed for provider 'vault'
|
||||
|
||||
# Check:
|
||||
1. Credentials are correct
|
||||
2. Network connectivity to secret manager
|
||||
3. Permissions for the service account
|
||||
4. Token/credential expiration
|
||||
```
|
||||
|
||||
### Secret Masking Not Working
|
||||
|
||||
```bash
|
||||
# Secrets appearing in logs
|
||||
|
||||
# Check:
|
||||
1. enable_masking: true in configuration
|
||||
2. Secret is properly retrieved (not hardcoded)
|
||||
3. Secret matches known patterns for auto-masking
|
||||
```
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From GitHub Actions
|
||||
|
||||
Most GitHub Actions workflows work without changes:
|
||||
|
||||
```yaml
|
||||
# This works directly in wrkflw
|
||||
steps:
|
||||
- name: Deploy
|
||||
env:
|
||||
API_TOKEN: ${{ secrets.API_TOKEN }}
|
||||
run: deploy.sh
|
||||
```
|
||||
|
||||
### From Environment Variables
|
||||
|
||||
```bash
|
||||
# Before (environment variables)
|
||||
export API_KEY="your_key"
|
||||
./script.sh
|
||||
|
||||
# After (wrkflw secrets)
|
||||
# Set in secrets.env:
|
||||
# API_KEY=your_key
|
||||
|
||||
# Use in workflow:
|
||||
# ${{ secrets.API_KEY }}
|
||||
```
|
||||
|
||||
### From CI/CD Platforms
|
||||
|
||||
Most secrets can be migrated by:
|
||||
|
||||
1. Exporting from current platform
|
||||
2. Importing into wrkflw's chosen provider
|
||||
3. Updating workflow syntax to `${{ secrets.NAME }}`
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Caching
|
||||
|
||||
Enable caching for frequently accessed secrets:
|
||||
|
||||
```yaml
|
||||
enable_caching: true
|
||||
cache_ttl_seconds: 300 # 5 minutes
|
||||
```
|
||||
|
||||
### Connection Pooling
|
||||
|
||||
For high-volume deployments, secret managers support connection pooling:
|
||||
|
||||
```yaml
|
||||
providers:
|
||||
vault:
|
||||
type: vault
|
||||
# Vault client automatically handles connection pooling
|
||||
```
|
||||
|
||||
### Timeout Configuration
|
||||
|
||||
Adjust timeouts based on network conditions:
|
||||
|
||||
```yaml
|
||||
timeout_seconds: 30 # Increase for slow networks
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
With comprehensive secrets management, wrkflw is now suitable for production workflows requiring secure access to:
|
||||
|
||||
- External APIs and services
|
||||
- Databases and storage systems
|
||||
- Cloud provider resources
|
||||
- Authentication systems
|
||||
- Deployment targets
|
||||
|
||||
The flexible provider system ensures compatibility with existing secret management infrastructure while providing a GitHub Actions-compatible developer experience.
|
||||
|
||||
**The usefulness limitation has been removed** - wrkflw can now handle real-world CI/CD scenarios securely and efficiently.
|
||||
49
examples/secrets-demo/env.example
Normal file
49
examples/secrets-demo/env.example
Normal file
@@ -0,0 +1,49 @@
|
||||
# Example environment variables for wrkflw secrets demo
|
||||
# Copy this file to .env and fill in your actual values
|
||||
|
||||
# GitHub integration
|
||||
GITHUB_TOKEN=ghp_your_github_personal_access_token
|
||||
|
||||
# Generic API credentials
|
||||
API_KEY=your_api_key_here
|
||||
API_ENDPOINT=https://api.example.com/v1
|
||||
|
||||
# Database credentials
|
||||
DB_USER=your_db_username
|
||||
DB_PASSWORD=your_secure_db_password
|
||||
DATABASE_URL=postgresql://user:password@localhost:5432/dbname
|
||||
MONGO_CONNECTION_STRING=mongodb://user:password@localhost:27017/dbname
|
||||
|
||||
# Docker registry credentials
|
||||
DOCKER_USERNAME=your_docker_username
|
||||
DOCKER_PASSWORD=your_docker_password
|
||||
|
||||
# AWS credentials
|
||||
AWS_ACCESS_KEY_ID=your_aws_access_key_id
|
||||
AWS_SECRET_ACCESS_KEY=your_aws_secret_access_key
|
||||
S3_BUCKET_NAME=your-s3-bucket-name
|
||||
|
||||
# Deployment credentials
|
||||
STAGING_DEPLOY_KEY=your_base64_encoded_ssh_private_key
|
||||
STAGING_HOST=staging.yourdomain.com
|
||||
|
||||
# Notification webhooks
|
||||
WEBHOOK_URL=https://your.webhook.endpoint/path
|
||||
SLACK_WEBHOOK=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
|
||||
|
||||
# Demo and testing secrets
|
||||
DEMO_SECRET=this_will_be_masked_in_logs
|
||||
REQUIRED_SECRET=required_for_validation_tests
|
||||
|
||||
# Prefixed secrets (if using WRKFLW_SECRET_ prefix)
|
||||
WRKFLW_SECRET_PREFIXED_KEY=prefixed_secret_value
|
||||
|
||||
# Vault credentials (if using HashiCorp Vault)
|
||||
VAULT_TOKEN=your_vault_token
|
||||
VAULT_ROLE_ID=your_vault_role_id
|
||||
VAULT_SECRET_ID=your_vault_secret_id
|
||||
|
||||
# Azure credentials (if using Azure Key Vault)
|
||||
AZURE_CLIENT_ID=your_azure_client_id
|
||||
AZURE_CLIENT_SECRET=your_azure_client_secret
|
||||
AZURE_TENANT_ID=your_azure_tenant_id
|
||||
213
examples/secrets-demo/secrets-workflow.yml
Normal file
213
examples/secrets-demo/secrets-workflow.yml
Normal file
@@ -0,0 +1,213 @@
|
||||
name: Comprehensive Secrets Demo
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
# Basic environment variable secrets
|
||||
env-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Use GitHub Token
|
||||
run: |
|
||||
echo "Fetching user info from GitHub API"
|
||||
curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
https://api.github.com/user | jq '.login'
|
||||
|
||||
- name: Use API Key
|
||||
env:
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
run: |
|
||||
echo "API Key length: ${#API_KEY}"
|
||||
# Key will be masked in logs automatically
|
||||
|
||||
- name: Database connection
|
||||
run: |
|
||||
echo "Connecting to database with credentials"
|
||||
echo "User: ${{ secrets.DB_USER }}"
|
||||
echo "Password: [MASKED]"
|
||||
# Password would be: ${{ secrets.DB_PASSWORD }}
|
||||
|
||||
# Provider-specific secrets
|
||||
provider-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Use file-based secrets
|
||||
run: |
|
||||
echo "File secret: ${{ secrets.file:FILE_SECRET }}"
|
||||
|
||||
- name: Use environment with prefix
|
||||
run: |
|
||||
echo "Prefixed secret: ${{ secrets.env:PREFIXED_KEY }}"
|
||||
|
||||
- name: Use Vault secret (if configured)
|
||||
run: |
|
||||
# This would work if Vault provider is configured
|
||||
echo "Vault secret: ${{ secrets.vault:api-key }}"
|
||||
|
||||
- name: Use AWS Secrets Manager (if configured)
|
||||
run: |
|
||||
# This would work if AWS provider is configured
|
||||
echo "AWS secret: ${{ secrets.aws:prod/database/password }}"
|
||||
|
||||
# Real-world integration examples
|
||||
github-integration:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create GitHub issue
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
-H "Content-Type: application/json" \
|
||||
https://api.github.com/repos/${{ github.repository }}/issues \
|
||||
-d '{
|
||||
"title": "Automated issue from wrkflw",
|
||||
"body": "This issue was created automatically by wrkflw secrets demo",
|
||||
"labels": ["automation", "demo"]
|
||||
}'
|
||||
|
||||
- name: List repository secrets (admin only)
|
||||
run: |
|
||||
# This would require admin permissions
|
||||
curl -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
|
||||
https://api.github.com/repos/${{ github.repository }}/actions/secrets
|
||||
|
||||
# Docker registry integration
|
||||
docker-integration:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to Docker Hub
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
run: |
|
||||
echo "Logging into Docker Hub"
|
||||
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
|
||||
|
||||
- name: Pull private image
|
||||
run: |
|
||||
docker pull private-registry.com/myapp:latest
|
||||
|
||||
- name: Push image
|
||||
run: |
|
||||
docker tag myapp:latest "${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}"
|
||||
docker push "${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}"
|
||||
|
||||
# Cloud provider integration
|
||||
aws-integration:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
AWS_DEFAULT_REGION: us-east-1
|
||||
run: |
|
||||
echo "Configuring AWS CLI"
|
||||
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"
|
||||
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"
|
||||
aws configure set default.region "$AWS_DEFAULT_REGION"
|
||||
|
||||
- name: List S3 buckets
|
||||
run: |
|
||||
aws s3 ls
|
||||
|
||||
- name: Deploy to S3
|
||||
run: |
|
||||
echo "Deploying to S3 bucket"
|
||||
aws s3 sync ./build/ s3://${{ secrets.S3_BUCKET_NAME }}/
|
||||
|
||||
# Database operations
|
||||
database-operations:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: PostgreSQL operations
|
||||
env:
|
||||
DATABASE_URL: ${{ secrets.DATABASE_URL }}
|
||||
PGPASSWORD: ${{ secrets.DB_PASSWORD }}
|
||||
run: |
|
||||
echo "Connecting to PostgreSQL database"
|
||||
psql "$DATABASE_URL" -c "SELECT version();"
|
||||
|
||||
- name: MongoDB operations
|
||||
env:
|
||||
MONGO_CONNECTION_STRING: ${{ secrets.MONGO_CONNECTION_STRING }}
|
||||
run: |
|
||||
echo "Connecting to MongoDB"
|
||||
mongosh "$MONGO_CONNECTION_STRING" --eval "db.stats()"
|
||||
|
||||
# API testing with secrets
|
||||
api-testing:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Test external API
|
||||
env:
|
||||
API_ENDPOINT: ${{ secrets.API_ENDPOINT }}
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
run: |
|
||||
echo "Testing API endpoint"
|
||||
curl -X GET \
|
||||
-H "Authorization: Bearer $API_KEY" \
|
||||
-H "Content-Type: application/json" \
|
||||
"$API_ENDPOINT/health"
|
||||
|
||||
- name: Test webhook
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"event": "test", "source": "wrkflw"}' \
|
||||
"${{ secrets.WEBHOOK_URL }}"
|
||||
|
||||
# Deployment with secrets
|
||||
deployment:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [env-secrets, api-testing]
|
||||
if: github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Deploy to staging
|
||||
env:
|
||||
DEPLOY_KEY: ${{ secrets.STAGING_DEPLOY_KEY }}
|
||||
STAGING_HOST: ${{ secrets.STAGING_HOST }}
|
||||
run: |
|
||||
echo "Deploying to staging environment"
|
||||
echo "$DEPLOY_KEY" | base64 -d > deploy_key
|
||||
chmod 600 deploy_key
|
||||
ssh -i deploy_key -o StrictHostKeyChecking=no \
|
||||
deploy@"$STAGING_HOST" 'cd /app && git pull && ./deploy.sh'
|
||||
|
||||
- name: Notify deployment
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
run: |
|
||||
curl -X POST -H 'Content-type: application/json' \
|
||||
--data '{"text":"Deployment completed successfully"}' \
|
||||
"$SLACK_WEBHOOK"
|
||||
|
||||
# Security testing
|
||||
security-demo:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Demonstrate secret masking
|
||||
run: |
|
||||
echo "This secret will be masked: ${{ secrets.DEMO_SECRET }}"
|
||||
echo "Even in complex strings: prefix_${{ secrets.DEMO_SECRET }}_suffix"
|
||||
|
||||
- name: Show environment (secrets masked)
|
||||
run: |
|
||||
env | grep -E "(SECRET|TOKEN|PASSWORD|KEY)" || echo "No secrets visible in environment"
|
||||
|
||||
- name: Test secret validation
|
||||
run: |
|
||||
# This would fail if secret doesn't exist
|
||||
if [ -z "${{ secrets.REQUIRED_SECRET }}" ]; then
|
||||
echo "ERROR: Required secret is missing"
|
||||
exit 1
|
||||
else
|
||||
echo "Required secret is present"
|
||||
fi
|
||||
21
examples/secrets-demo/secrets.json.example
Normal file
21
examples/secrets-demo/secrets.json.example
Normal file
@@ -0,0 +1,21 @@
|
||||
{
|
||||
"GITHUB_TOKEN": "ghp_example_token_replace_with_real_token",
|
||||
"API_KEY": "demo_api_key_12345",
|
||||
"DB_PASSWORD": "secure_database_password",
|
||||
"DB_USER": "application_user",
|
||||
"DOCKER_USERNAME": "your_docker_username",
|
||||
"DOCKER_PASSWORD": "your_docker_password",
|
||||
"AWS_ACCESS_KEY_ID": "AKIAIOSFODNN7EXAMPLE",
|
||||
"AWS_SECRET_ACCESS_KEY": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
"S3_BUCKET_NAME": "my-deployment-bucket",
|
||||
"DATABASE_URL": "postgresql://user:password@localhost:5432/mydb",
|
||||
"MONGO_CONNECTION_STRING": "mongodb://user:password@localhost:27017/mydb",
|
||||
"API_ENDPOINT": "https://api.example.com/v1",
|
||||
"WEBHOOK_URL": "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"STAGING_DEPLOY_KEY": "base64_encoded_ssh_private_key",
|
||||
"STAGING_HOST": "staging.example.com",
|
||||
"SLACK_WEBHOOK": "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX",
|
||||
"DEMO_SECRET": "this_will_be_masked_in_logs",
|
||||
"REQUIRED_SECRET": "required_for_validation",
|
||||
"FILE_SECRET": "stored_in_file_provider"
|
||||
}
|
||||
13
final-test.yml
Normal file
13
final-test.yml
Normal file
@@ -0,0 +1,13 @@
|
||||
name: Final Secrets Test
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
verify-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Test secrets are working
|
||||
env:
|
||||
SECRET_VAL: ${{ secrets.TEST_SECRET }}
|
||||
run: |
|
||||
echo "Secret length: ${#SECRET_VAL}"
|
||||
echo "All secrets functionality verified!"
|
||||
@@ -1,71 +1,179 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Simple script to publish all wrkflw crates to crates.io in dependency order
|
||||
# Enhanced script to manage versions and publish all wrkflw crates using cargo-workspaces
|
||||
|
||||
set -e
|
||||
|
||||
DRY_RUN=${1:-""}
|
||||
# Parse command line arguments
|
||||
COMMAND=${1:-""}
|
||||
VERSION_TYPE=${2:-""}
|
||||
DRY_RUN=""
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Testing wrkflw crates publication"
|
||||
else
|
||||
echo "🚀 Publishing wrkflw crates to crates.io"
|
||||
fi
|
||||
show_help() {
|
||||
echo "Usage: $0 <command> [options]"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " version <type> Update versions across workspace"
|
||||
echo " Types: patch, minor, major"
|
||||
echo " publish Publish all crates to crates.io"
|
||||
echo " release <type> Update versions and publish (combines version + publish)"
|
||||
echo " help Show this help message"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --dry-run Test without making changes (for publish/release)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 version minor # Bump to 0.7.0"
|
||||
echo " $0 publish --dry-run # Test publishing"
|
||||
echo " $0 release minor --dry-run # Test version bump + publish"
|
||||
echo " $0 release patch # Release patch version"
|
||||
}
|
||||
|
||||
# Check if we're logged in to crates.io
|
||||
if [ ! -f ~/.cargo/credentials.toml ] && [ ! -f ~/.cargo/credentials ]; then
|
||||
echo "❌ Not logged in to crates.io. Please run: cargo login <your-token>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Publication order (respecting dependencies)
|
||||
CRATES=(
|
||||
"models"
|
||||
"logging"
|
||||
"utils"
|
||||
"matrix"
|
||||
"validators"
|
||||
"github"
|
||||
"gitlab"
|
||||
"parser"
|
||||
"runtime"
|
||||
"evaluator"
|
||||
"executor"
|
||||
"ui"
|
||||
"wrkflw"
|
||||
)
|
||||
|
||||
echo "📦 Publishing crates in dependency order..."
|
||||
|
||||
for crate in "${CRATES[@]}"; do
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "Testing $crate..."
|
||||
cd "crates/$crate"
|
||||
cargo publish --dry-run --allow-dirty
|
||||
echo "✅ $crate dry-run successful"
|
||||
else
|
||||
echo "Publishing $crate..."
|
||||
cd "crates/$crate"
|
||||
cargo publish --allow-dirty
|
||||
echo "✅ Published $crate"
|
||||
fi
|
||||
cd - > /dev/null
|
||||
|
||||
# Small delay to avoid rate limiting (except for the last crate and in dry-run)
|
||||
if [[ "$crate" != "wrkflw" ]] && [[ "$DRY_RUN" != "--dry-run" ]]; then
|
||||
echo " Waiting 10 seconds to avoid rate limits..."
|
||||
sleep 10
|
||||
# Parse dry-run flag from any position
|
||||
for arg in "$@"; do
|
||||
if [[ "$arg" == "--dry-run" ]]; then
|
||||
DRY_RUN="--dry-run"
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🎉 All crates passed dry-run tests!"
|
||||
echo ""
|
||||
echo "To actually publish, run:"
|
||||
echo " ./publish_crates.sh"
|
||||
else
|
||||
echo "🎉 All crates published successfully!"
|
||||
echo ""
|
||||
echo "Users can now install wrkflw with:"
|
||||
echo " cargo install wrkflw"
|
||||
case "$COMMAND" in
|
||||
"help"|"-h"|"--help"|"")
|
||||
show_help
|
||||
exit 0
|
||||
;;
|
||||
"version")
|
||||
if [[ -z "$VERSION_TYPE" ]]; then
|
||||
echo "❌ Error: Version type required (patch, minor, major)"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"publish")
|
||||
# publish command doesn't need version type
|
||||
;;
|
||||
"release")
|
||||
if [[ -z "$VERSION_TYPE" ]]; then
|
||||
echo "❌ Error: Version type required for release (patch, minor, major)"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "❌ Error: Unknown command '$COMMAND'"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Check if cargo-workspaces is installed
|
||||
if ! command -v cargo-workspaces &> /dev/null; then
|
||||
echo "❌ cargo-workspaces not found. Installing..."
|
||||
cargo install cargo-workspaces
|
||||
fi
|
||||
|
||||
# Check if we're logged in to crates.io (only for publish operations)
|
||||
if [[ "$COMMAND" == "publish" ]] || [[ "$COMMAND" == "release" ]]; then
|
||||
if [ ! -f ~/.cargo/credentials.toml ] && [ ! -f ~/.cargo/credentials ]; then
|
||||
echo "❌ Not logged in to crates.io. Please run: cargo login <your-token>"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Function to update versions
|
||||
update_versions() {
|
||||
local version_type=$1
|
||||
echo "🔄 Updating workspace versions ($version_type)..."
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Simulating version update"
|
||||
echo ""
|
||||
echo "Current workspace version: $(grep '^version =' Cargo.toml | cut -d'"' -f2)"
|
||||
echo "Would execute: cargo workspaces version $version_type"
|
||||
echo ""
|
||||
echo "This would update all crates and their internal dependencies."
|
||||
echo "✅ Version update simulation completed (no changes made)"
|
||||
else
|
||||
cargo workspaces version "$version_type"
|
||||
echo "✅ Versions updated successfully"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to test build
|
||||
test_build() {
|
||||
echo "🔨 Testing workspace build..."
|
||||
if cargo build --workspace; then
|
||||
echo "✅ Workspace builds successfully"
|
||||
else
|
||||
echo "❌ Build failed. Please fix errors before publishing."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to publish crates
|
||||
publish_crates() {
|
||||
echo "📦 Publishing crates to crates.io..."
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Testing publication"
|
||||
cargo workspaces publish --dry-run
|
||||
echo "✅ All crates passed dry-run tests!"
|
||||
echo ""
|
||||
echo "To actually publish, run:"
|
||||
echo " $0 publish"
|
||||
else
|
||||
cargo workspaces publish
|
||||
echo "🎉 All crates published successfully!"
|
||||
echo ""
|
||||
echo "Users can now install wrkflw with:"
|
||||
echo " cargo install wrkflw"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to show changelog info
|
||||
show_changelog_info() {
|
||||
echo "📝 Changelog will be generated automatically by GitHub Actions workflow"
|
||||
}
|
||||
|
||||
# Execute commands based on the operation
|
||||
case "$COMMAND" in
|
||||
"version")
|
||||
update_versions "$VERSION_TYPE"
|
||||
show_changelog_info
|
||||
;;
|
||||
"publish")
|
||||
test_build
|
||||
publish_crates
|
||||
;;
|
||||
"release")
|
||||
echo "🚀 Starting release process..."
|
||||
echo ""
|
||||
|
||||
# Step 1: Update versions
|
||||
update_versions "$VERSION_TYPE"
|
||||
|
||||
# Step 2: Test build
|
||||
test_build
|
||||
|
||||
# Step 3: Show changelog info
|
||||
show_changelog_info
|
||||
|
||||
# Step 4: Publish (if not dry-run)
|
||||
if [[ "$DRY_RUN" != "--dry-run" ]]; then
|
||||
echo ""
|
||||
read -p "🤔 Continue with publishing? (y/N): " -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
publish_crates
|
||||
else
|
||||
echo "⏸️ Publishing cancelled. To publish later, run:"
|
||||
echo " $0 publish"
|
||||
fi
|
||||
else
|
||||
echo ""
|
||||
publish_crates
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
35
tests/safe_workflow.yml
Normal file
35
tests/safe_workflow.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Safe Workflow Test
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_operations:
|
||||
name: Safe Operations
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List files
|
||||
run: ls -la
|
||||
|
||||
- name: Show current directory
|
||||
run: pwd
|
||||
|
||||
- name: Echo message
|
||||
run: echo "Hello, this is a safe command!"
|
||||
|
||||
- name: Create and read file
|
||||
run: |
|
||||
echo "test content" > safe-file.txt
|
||||
cat safe-file.txt
|
||||
rm safe-file.txt
|
||||
|
||||
- name: Show environment (safe)
|
||||
run: echo "GITHUB_WORKSPACE=$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Check if Rust is available
|
||||
run: which rustc && rustc --version || echo "Rust not found"
|
||||
continue-on-error: true
|
||||
29
tests/security_comparison.yml
Normal file
29
tests/security_comparison.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
name: Security Comparison Demo
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_operations:
|
||||
name: Safe Operations (Works in Both Modes)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List files
|
||||
run: ls -la
|
||||
|
||||
- name: Create and test file
|
||||
run: |
|
||||
echo "Hello World" > test.txt
|
||||
cat test.txt
|
||||
rm test.txt
|
||||
echo "File operations completed safely"
|
||||
|
||||
- name: Environment check
|
||||
run: |
|
||||
echo "Current directory: $(pwd)"
|
||||
echo "User: $(whoami)"
|
||||
echo "Available commands: ls, echo, cat work fine"
|
||||
92
tests/security_demo.yml
Normal file
92
tests/security_demo.yml
Normal file
@@ -0,0 +1,92 @@
|
||||
name: Security Demo Workflow
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_commands:
|
||||
name: Safe Commands (Will Pass)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List project files
|
||||
run: ls -la
|
||||
|
||||
- name: Show current directory
|
||||
run: pwd
|
||||
|
||||
- name: Echo a message
|
||||
run: echo "This command is safe and will execute successfully"
|
||||
|
||||
- name: Check Rust version (if available)
|
||||
run: rustc --version || echo "Rust not installed"
|
||||
|
||||
- name: Build documentation
|
||||
run: echo "Building docs..." && mkdir -p target/doc
|
||||
|
||||
- name: Show environment
|
||||
run: env | grep GITHUB
|
||||
|
||||
dangerous_commands:
|
||||
name: Dangerous Commands (Will Be Blocked)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# These commands will be blocked in secure emulation mode
|
||||
- name: Dangerous file deletion
|
||||
run: rm -rf /tmp/* # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: System modification attempt
|
||||
run: sudo apt-get update # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: Network download attempt
|
||||
run: wget https://example.com/script.sh # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: Process manipulation
|
||||
run: kill -9 $$ # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
resource_intensive:
|
||||
name: Resource Limits Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: CPU intensive task
|
||||
run: |
|
||||
echo "Testing resource limits..."
|
||||
# This might hit CPU or time limits
|
||||
for i in {1..1000}; do
|
||||
echo "Iteration $i"
|
||||
sleep 0.1
|
||||
done
|
||||
continue-on-error: true
|
||||
|
||||
filesystem_test:
|
||||
name: Filesystem Access Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create files in allowed location
|
||||
run: |
|
||||
mkdir -p ./test-output
|
||||
echo "test content" > ./test-output/safe-file.txt
|
||||
cat ./test-output/safe-file.txt
|
||||
|
||||
- name: Attempt to access system files
|
||||
run: cat /etc/passwd # This may be blocked
|
||||
continue-on-error: true
|
||||
|
||||
- name: Show allowed file operations
|
||||
run: |
|
||||
echo "Safe file operations:"
|
||||
touch ./temp-file.txt
|
||||
echo "content" > ./temp-file.txt
|
||||
cat ./temp-file.txt
|
||||
rm ./temp-file.txt
|
||||
echo "File operations completed safely"
|
||||
46
working-secrets-test.yml
Normal file
46
working-secrets-test.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
name: Working Secrets Test
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
test-secrets:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Test environment variable secrets
|
||||
env:
|
||||
MY_SECRET: ${{ secrets.TEST_SECRET }}
|
||||
API_KEY: ${{ secrets.API_KEY }}
|
||||
run: |
|
||||
echo "Secret length: ${#MY_SECRET}"
|
||||
echo "API Key length: ${#API_KEY}"
|
||||
echo "API Key exists: $([ -n "$API_KEY" ] && echo "yes" || echo "no")"
|
||||
|
||||
- name: Test direct secret usage in commands
|
||||
run: |
|
||||
echo "Using secret directly: ${{ secrets.TEST_SECRET }}"
|
||||
echo "Using GitHub token: ${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
- name: Test secret in variable assignment
|
||||
run: |
|
||||
SECRET_VAL="${{ secrets.TEST_SECRET }}"
|
||||
echo "Secret value length: ${#SECRET_VAL}"
|
||||
|
||||
- name: Test multiple secrets in one command
|
||||
run: |
|
||||
echo "Token: ${{ secrets.GITHUB_TOKEN }}, Key: ${{ secrets.API_KEY }}"
|
||||
|
||||
test-masking:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Test automatic token masking
|
||||
run: |
|
||||
echo "GitHub token should be masked: ${{ secrets.GITHUB_TOKEN }}"
|
||||
echo "API key should be masked: ${{ secrets.API_KEY }}"
|
||||
|
||||
- name: Test pattern masking
|
||||
env:
|
||||
DEMO_TOKEN: ghp_1234567890abcdef1234567890abcdef12345678
|
||||
AWS_KEY: AKIAIOSFODNN7EXAMPLE
|
||||
run: |
|
||||
echo "Demo GitHub token: $DEMO_TOKEN"
|
||||
echo "Demo AWS key: $AWS_KEY"
|
||||
echo "These should be automatically masked"
|
||||
Reference in New Issue
Block a user