Compare commits
11 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 20af2a9f4b | |
| | c179fcd3eb | |
| | 89d16ae513 | |
| | 72867c930e | |
| | eddddd5034 | |
| | 25e646c56a | |
| | ce4c302d73 | |
| | 685ca994f9 | |
| | 285b0699ab | |
| | 7825489cfc | |
| | fb2fa31f13 | |
.github/workflows/ci.yml — 70 changed lines (vendored)

@@ -1,30 +1,76 @@
name: ci
name: ci

on:
push:
branches:
- master
- main
- prep-0.26.0
- master
release:
types: [published]

permissions:
contents: write
packages: write

jobs:
deploy:
container-build:
if: github.event_name == 'release'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Configure Git Credentials

- name: Remove unnecessary files to release disk space
run: |
git config user.name github-actions[bot]
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
- uses: actions/setup-python@v5
sudo rm -rf \
"$AGENT_TOOLSDIRECTORY" \
/opt/ghc \
/opt/google/chrome \
/opt/microsoft/msedge \
/opt/microsoft/powershell \
/opt/pipx \
/usr/lib/mono \
/usr/local/julia* \
/usr/local/lib/android \
/usr/local/lib/node_modules \
/usr/local/share/chromium \
/usr/local/share/powershell \
/usr/local/share/powershell \
/usr/share/dotnet \
/usr/share/swift

- name: Log in to GHCR
uses: docker/login-action@v3
with:
python-version: 3.x
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build & push
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
push: true
tags: |
ghcr.io/${{ github.repository }}:latest
ghcr.io/${{ github.repository }}:${{ github.ref_name }}

deploy-docs:
if: github.event_name == 'release'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Configure Git credentials
run: |
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
- uses: actions/setup-python@v5
with: { python-version: '3.x' }
- run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
- uses: actions/cache@v4
with:
key: mkdocs-material-${{ env.cache_id }}
path: .cache
restore-keys: |
mkdocs-material-
restore-keys: mkdocs-material-
- run: pip install mkdocs-material mkdocs-awesome-pages-plugin mkdocs-glightbox
- run: mkdocs gh-deploy --force
.github/workflows/test-container-build.yml — 51 added lines (vendored, new file)

@@ -0,0 +1,51 @@
name: test-container-build

on:
  push:
    branches: [ 'prep-*' ]

permissions:
  contents: read
  packages: write

jobs:
  container-build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Remove unnecessary files to release disk space
        run: |
          sudo rm -rf \
            "$AGENT_TOOLSDIRECTORY" \
            /opt/ghc \
            /opt/google/chrome \
            /opt/microsoft/msedge \
            /opt/microsoft/powershell \
            /opt/pipx \
            /usr/lib/mono \
            /usr/local/julia* \
            /usr/local/lib/android \
            /usr/local/lib/node_modules \
            /usr/local/share/chromium \
            /usr/local/share/powershell \
            /usr/local/share/powershell \
            /usr/share/dotnet \
            /usr/share/swift

      - name: Log in to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build & push
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile
          push: true
          # Tag with prep suffix to avoid conflicts with production
          tags: |
            ghcr.io/${{ github.repository }}:${{ github.ref_name }}
.github/workflows/test.yml — 5 changed lines (vendored)

@@ -42,6 +42,11 @@ jobs:
source .venv/bin/activate
uv pip install -e ".[dev]"

- name: Run linting
run: |
source .venv/bin/activate
uv run pre-commit run --all-files

- name: Setup configuration file
run: |
cp config.example.yaml config.yaml
.gitignore — 13 changed lines (vendored)

@@ -8,11 +8,20 @@
talemate_env
chroma
config.yaml
.cursor
.claude

# uv
.venv/
templates/llm-prompt/user/*.jinja2
templates/world-state/*.yaml
tts/voice/piper/*.onnx
tts/voice/piper/*.json
tts/voice/kokoro/*.pt
tts/voice/xtts2/*.wav
tts/voice/chatterbox/*.wav
tts/voice/f5tts/*.wav
tts/voice/voice-library.json
scenes/
!scenes/infinity-quest-dynamic-scenario/
!scenes/infinity-quest-dynamic-scenario/assets/

@@ -21,4 +30,6 @@ scenes/

!scenes/infinity-quest/assets/
!scenes/infinity-quest/infinity-quest.json
tts_voice_samples/*.wav
third-party-docs/
third-party-docs/
legacy-state-reinforcements.yaml
CLAUDE.md
.pre-commit-config.yaml — 16 added lines (new file)

@@ -0,0 +1,16 @@
fail_fast: false
exclude: |
  (?x)^(
    tests/data/.*
    |install-utils/.*
  )$
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.12.1
    hooks:
      # Run the linter.
      - id: ruff
        args: [ --fix ]
      # Run the formatter.
      - id: ruff-format
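For local use, the same hooks can be run outside CI; a minimal sketch using pre-commit's standard CLI (the linting step added to `.github/workflows/test.yml` earlier in this changeset invokes the same check via `uv run pre-commit run --all-files`):

```bash
# Install the git hook once, then run every configured hook against the
# whole tree, mirroring the CI "Run linting" step.
pre-commit install
pre-commit run --all-files
```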
CONTRIBUTING.md — 64 added lines (new file)

@@ -0,0 +1,64 @@
# Contributing to Talemate

## About This Project

Talemate is a **personal hobbyist project** that I maintain in my spare time. While I appreciate the community's interest and contributions, please understand that:

- This is primarily a passion project that I enjoy working on myself
- I have limited time for code reviews and prefer to spend that time developing fixes or new features myself
- Large contributions require significant review and testing time that takes away from my own development

For these reasons, I've established contribution guidelines that balance community involvement with my desire to actively develop the project myself.

## Contribution Policy

**I welcome small bugfix and small feature pull requests!** If you've found a bug and have a fix, or have a small feature improvement, I'd love to review it.

However, please note that **I am not accepting large refactors or major feature additions** at this time. This includes:
- Major architectural changes
- Large new features or significant functionality additions
- Large-scale code reorganization
- Breaking API changes
- Features that would require significant maintenance

## What is accepted

✅ **Small bugfixes** - Fixes for specific, isolated bugs

✅ **Small features** - Minor improvements that don't break existing functionality

✅ **Documentation fixes** - Typo corrections, clarifications in existing docs

✅ **Minor dependency updates** - Security patches or minor version bumps

## What is not accepted

❌ **Major features** - Large new functionality or systems

❌ **Large refactors** - Code reorganization or architectural changes

❌ **Breaking changes** - Any changes that break existing functionality

❌ **Major dependency changes** - Framework upgrades or replacements

## Submitting a PR

If you'd like to submit a bugfix or small feature:

1. **Open an issue first** - Describe the bug you've found or feature you'd like to add
2. **Keep it small** - Focus on one specific issue or small improvement
3. **Follow existing code style** - Match the project's current patterns
4. **Don't break existing functionality** - Ensure all existing tests pass
5. **Include tests** - Add or update tests that verify your fix or feature
6. **Update documentation** - If your changes affect behavior, update relevant docs

## Testing

Ensure all tests pass by running:

```bash
uv run pytest tests/ -p no:warnings
```

## Questions?

If you're unsure whether your contribution would be welcome, please open an issue to discuss it first. This saves everyone time and ensures alignment with the project's direction.
Dockerfile — 29 changed lines

@@ -35,18 +35,9 @@ COPY pyproject.toml uv.lock /app/
# Copy the Python source code (needed for editable install)
COPY ./src /app/src

# Create virtual environment and install dependencies
# Create virtual environment and install dependencies (includes CUDA support via pyproject.toml)
RUN uv sync

# Conditional PyTorch+CUDA install
ARG CUDA_AVAILABLE=false
RUN . /app/.venv/bin/activate && \
    if [ "$CUDA_AVAILABLE" = "true" ]; then \
    echo "Installing PyTorch with CUDA support..." && \
    uv pip uninstall torch torchaudio && \
    uv pip install torch~=2.7.0 torchaudio~=2.7.0 --index-url https://download.pytorch.org/whl/cu128; \
    fi

# Stage 3: Final image
FROM python:3.11-slim

@@ -54,6 +45,9 @@ WORKDIR /app

RUN apt-get update && apt-get install -y \
    bash \
    wget \
    tar \
    xz-utils \
    && rm -rf /var/lib/apt/lists/*

# Install uv in the final stage

@@ -62,6 +56,21 @@ RUN pip install uv

# Copy virtual environment from backend-build stage
COPY --from=backend-build /app/.venv /app/.venv

# Download and install FFmpeg 8.0 with shared libraries into .venv (matching Windows installer approach)
# Using BtbN FFmpeg builds which provide shared libraries - verified to work
# Note: We tried using jrottenberg/ffmpeg:8.0-ubuntu image but copying libraries from it didn't work properly,
# so we use the direct download approach which is more reliable and matches the Windows installer
RUN cd /tmp && \
    wget -q https://github.com/BtbN/FFmpeg-Builds/releases/download/latest/ffmpeg-master-latest-linux64-gpl-shared.tar.xz -O ffmpeg.tar.xz && \
    tar -xf ffmpeg.tar.xz && \
    cp -a ffmpeg-master-latest-linux64-gpl-shared/bin/* /app/.venv/bin/ && \
    cp -a ffmpeg-master-latest-linux64-gpl-shared/lib/* /app/.venv/lib/ && \
    rm -rf ffmpeg-master-latest-linux64-gpl-shared ffmpeg.tar.xz && \
    LD_LIBRARY_PATH=/app/.venv/lib /app/.venv/bin/ffmpeg -version | head -n 1

# Set LD_LIBRARY_PATH so torchcodec can find ffmpeg libraries at runtime
ENV LD_LIBRARY_PATH=/app/.venv/lib:${LD_LIBRARY_PATH}

# Copy Python source code
COPY --from=backend-build /app/src /app/src
docker-compose.manual.yml — 20 added lines (new file)

@@ -0,0 +1,20 @@
version: '3.8'

services:
  talemate:
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - "${FRONTEND_PORT:-8080}:8080"
      - "${BACKEND_PORT:-5050}:5050"
    volumes:
      - ./config.yaml:/app/config.yaml
      - ./scenes:/app/scenes
      - ./templates:/app/templates
      - ./chroma:/app/chroma
      - ./tts:/app/tts
    environment:
      - PYTHONUNBUFFERED=1
      - PYTHONPATH=/app/src:$PYTHONPATH
    command: ["uv", "run", "src/talemate/server/run.py", "runserver", "--host", "0.0.0.0", "--port", "5050", "--frontend-host", "0.0.0.0", "--frontend-port", "8080"]
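The updated Docker documentation further down in this changeset points at this file for local builds; the invocation it gives is:

```bash
# Build the image locally instead of pulling the pre-built GHCR image.
docker compose -f docker-compose.manual.yml up --build
```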
@@ -2,11 +2,7 @@ version: '3.8'

services:
talemate:
build:
context: .
dockerfile: Dockerfile
args:
- CUDA_AVAILABLE=${CUDA_AVAILABLE:-false}
image: ghcr.io/vegu-ai/talemate:latest
ports:
- "${FRONTEND_PORT:-8080}:8080"
- "${BACKEND_PORT:-5050}:5050"

@@ -15,6 +11,7 @@ services:

- ./scenes:/app/scenes
- ./templates:/app/templates
- ./chroma:/app/chroma
- ./tts:/app/tts
environment:
- PYTHONUNBUFFERED=1
- PYTHONPATH=/app/src:$PYTHONPATH
@@ -1,60 +1,63 @@

import os
import re
import subprocess
from pathlib import Path
import argparse


def find_image_references(md_file):
"""Find all image references in a markdown file."""
with open(md_file, 'r', encoding='utf-8') as f:
with open(md_file, "r", encoding="utf-8") as f:
content = f.read()

pattern = r'!\[.*?\]\((.*?)\)'

pattern = r"!\[.*?\]\((.*?)\)"
matches = re.findall(pattern, content)

cleaned_paths = []
for match in matches:
path = match.lstrip('/')
if 'img/' in path:
path = path[path.index('img/') + 4:]
path = match.lstrip("/")
if "img/" in path:
path = path[path.index("img/") + 4 :]
# Only keep references to versioned images
parts = os.path.normpath(path).split(os.sep)
if len(parts) >= 2 and parts[0].replace('.', '').isdigit():
if len(parts) >= 2 and parts[0].replace(".", "").isdigit():
cleaned_paths.append(path)

return cleaned_paths


def scan_markdown_files(docs_dir):
"""Recursively scan all markdown files in the docs directory."""
md_files = []
for root, _, files in os.walk(docs_dir):
for file in files:
if file.endswith('.md'):
if file.endswith(".md"):
md_files.append(os.path.join(root, file))
return md_files


def find_all_images(img_dir):
"""Find all image files in version subdirectories."""
image_files = []
for root, _, files in os.walk(img_dir):
# Get the relative path from img_dir to current directory
rel_dir = os.path.relpath(root, img_dir)

# Skip if we're in the root img directory
if rel_dir == '.':
if rel_dir == ".":
continue

# Check if the immediate parent directory is a version number
parent_dir = rel_dir.split(os.sep)[0]
if not parent_dir.replace('.', '').isdigit():
if not parent_dir.replace(".", "").isdigit():
continue

for file in files:
if file.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.svg')):
if file.lower().endswith((".png", ".jpg", ".jpeg", ".gif", ".svg")):
rel_path = os.path.relpath(os.path.join(root, file), img_dir)
image_files.append(rel_path)
return image_files


def grep_check_image(docs_dir, image_path):
"""
Check if versioned image is referenced anywhere using grep.

@@ -65,33 +68,46 @@ def grep_check_image(docs_dir, image_path):

parts = os.path.normpath(image_path).split(os.sep)
version = parts[0]  # e.g., "0.29.0"
filename = parts[-1]  # e.g., "world-state-suggestions-2.png"

# For versioned images, require both version and filename to match
version_pattern = f"{version}.*{filename}"
try:
result = subprocess.run(
['grep', '-r', '-l', version_pattern, docs_dir],
["grep", "-r", "-l", version_pattern, docs_dir],
capture_output=True,
text=True
text=True,
)
if result.stdout.strip():
print(f"Found reference to {image_path} with version pattern: {version_pattern}")
print(
f"Found reference to {image_path} with version pattern: {version_pattern}"
)
return True
except subprocess.CalledProcessError:
pass

except Exception as e:
print(f"Error during grep check for {image_path}: {e}")

return False


def main():
parser = argparse.ArgumentParser(description='Find and optionally delete unused versioned images in MkDocs project')
parser.add_argument('--docs-dir', type=str, required=True, help='Path to the docs directory')
parser.add_argument('--img-dir', type=str, required=True, help='Path to the images directory')
parser.add_argument('--delete', action='store_true', help='Delete unused images')
parser.add_argument('--verbose', action='store_true', help='Show all found references and files')
parser.add_argument('--skip-grep', action='store_true', help='Skip the additional grep validation')
parser = argparse.ArgumentParser(
description="Find and optionally delete unused versioned images in MkDocs project"
)
parser.add_argument(
"--docs-dir", type=str, required=True, help="Path to the docs directory"
)
parser.add_argument(
"--img-dir", type=str, required=True, help="Path to the images directory"
)
parser.add_argument("--delete", action="store_true", help="Delete unused images")
parser.add_argument(
"--verbose", action="store_true", help="Show all found references and files"
)
parser.add_argument(
"--skip-grep", action="store_true", help="Skip the additional grep validation"
)
args = parser.parse_args()

# Convert paths to absolute paths

@@ -118,7 +134,7 @@ def main():

print("\nAll versioned image references found in markdown:")
for img in sorted(used_images):
print(f"- {img}")

print("\nAll versioned images in directory:")
for img in sorted(all_images):
print(f"- {img}")

@@ -133,9 +149,11 @@ def main():

for img in unused_images:
if not grep_check_image(docs_dir, img):
actually_unused.add(img)

if len(actually_unused) != len(unused_images):
print(f"\nGrep validation found {len(unused_images) - len(actually_unused)} additional image references!")
print(
f"\nGrep validation found {len(unused_images) - len(actually_unused)} additional image references!"
)
unused_images = actually_unused

# Report findings

@@ -148,7 +166,7 @@ def main():

print("\nUnused versioned images:")
for img in sorted(unused_images):
print(f"- {img}")

if args.delete:
print("\nDeleting unused versioned images...")
for img in unused_images:

@@ -162,5 +180,6 @@ def main():

else:
print("\nNo unused versioned images found!")


if __name__ == "__main__":
main()
main()
@@ -4,12 +4,12 @@ from talemate.events import GameLoopEvent

import talemate.emit.async_signals
from talemate.emit import emit

@register()
class TestAgent(Agent):

agent_type = "test"
verbose_name = "Test"

def __init__(self, client):
self.client = client
self.is_enabled = True

@@ -20,7 +20,7 @@ class TestAgent(Agent):

description="Test",
),
}

@property
def enabled(self):
return self.is_enabled

@@ -36,7 +36,7 @@ class TestAgent(Agent):

def connect(self, scene):
super().connect(scene)
talemate.emit.async_signals.get("game_loop").connect(self.on_game_loop)

async def on_game_loop(self, emission: GameLoopEvent):
"""
Called on the beginning of every game loop

@@ -45,4 +45,8 @@ class TestAgent(Agent):

if not self.enabled:
return

emit("status", status="info", message="Annoying you with a test message every game loop.")
emit(
"status",
status="info",
message="Annoying you with a test message every game loop.",
)
@@ -1,129 +0,0 @@

"""
An attempt to write a client against the runpod serverless vllm worker.

This is close to functional, but since runpod serverless GPU availability is currently terrible, I have
been unable to properly test it.

Putting it here for now since I think it makes a decent example of how to write a client against a new service.
"""

import pydantic
import structlog
import runpod
import asyncio
import aiohttp
from talemate.client.base import ClientBase, ExtraField
from talemate.client.registry import register
from talemate.emit import emit
from talemate.config import Client as BaseClientConfig

log = structlog.get_logger("talemate.client.runpod_vllm")


class Defaults(pydantic.BaseModel):
    max_token_length: int = 4096
    model: str = ""
    runpod_id: str = ""


class ClientConfig(BaseClientConfig):
    runpod_id: str = ""


@register()
class RunPodVLLMClient(ClientBase):
    client_type = "runpod_vllm"
    conversation_retries = 5
    config_cls = ClientConfig

    class Meta(ClientBase.Meta):
        title: str = "Runpod VLLM"
        name_prefix: str = "Runpod VLLM"
        enable_api_auth: bool = True
        manual_model: bool = True
        defaults: Defaults = Defaults()
        extra_fields: dict[str, ExtraField] = {
            "runpod_id": ExtraField(
                name="runpod_id",
                type="text",
                label="Runpod ID",
                required=True,
                description="The Runpod ID to connect to.",
            )
        }

    def __init__(self, model=None, runpod_id=None, **kwargs):
        self.model_name = model
        self.runpod_id = runpod_id
        super().__init__(**kwargs)

    @property
    def experimental(self):
        return False

    def set_client(self, **kwargs):
        log.debug("set_client", kwargs=kwargs, runpod_id=self.runpod_id)
        self.runpod_id = kwargs.get("runpod_id", self.runpod_id)

    def tune_prompt_parameters(self, parameters: dict, kind: str):
        super().tune_prompt_parameters(parameters, kind)

        keys = list(parameters.keys())

        valid_keys = ["temperature", "top_p", "max_tokens"]

        for key in keys:
            if key not in valid_keys:
                del parameters[key]

    async def get_model_name(self):
        return self.model_name

    async def generate(self, prompt: str, parameters: dict, kind: str):
        """
        Generates text from the given prompt and parameters.
        """
        prompt = prompt.strip()

        self.log.debug("generate", prompt=prompt[:128] + " ...", parameters=parameters)

        try:
            async with aiohttp.ClientSession() as session:
                endpoint = runpod.AsyncioEndpoint(self.runpod_id, session)

                run_request = await endpoint.run({
                    "input": {
                        "prompt": prompt,
                    }
                    # "parameters": parameters
                })

                while (await run_request.status()) not in ["COMPLETED", "FAILED", "CANCELLED"]:
                    status = await run_request.status()
                    log.debug("generate", status=status)
                    await asyncio.sleep(0.1)

                status = await run_request.status()

                log.debug("generate", status=status)

                response = await run_request.output()

                log.debug("generate", response=response)

                return response["choices"][0]["tokens"][0]

        except Exception as e:
            self.log.error("generate error", e=e)
            emit(
                "status", message="Error during generation (check logs)", status="error"
            )
            return ""

    def reconfigure(self, **kwargs):
        if kwargs.get("model"):
            self.model_name = kwargs["model"]
        if "runpod_id" in kwargs:
            self.api_auth = kwargs["runpod_id"]
        self.set_client(**kwargs)
@@ -9,6 +9,7 @@ class Defaults(pydantic.BaseModel):

api_url: str = "http://localhost:1234"
max_token_length: int = 4096

@register()
class TestClient(ClientBase):
client_type = "test"

@@ -22,14 +23,13 @@ class TestClient(ClientBase):

self.client = AsyncOpenAI(base_url=self.api_url + "/v1", api_key="sk-1111")

def tune_prompt_parameters(self, parameters: dict, kind: str):

"""
Talemate adds a bunch of parameters to the prompt, but not all of them are valid for all clients.

This method is called before the prompt is sent to the client, and it allows the client to remove
any parameters that it doesn't support.
"""

super().tune_prompt_parameters(parameters, kind)

keys = list(parameters.keys())

@@ -41,11 +41,10 @@ class TestClient(ClientBase):

del parameters[key]

async def get_model_name(self):

"""
This should return the name of the model that is being used.
"""

return "Mock test model"

async def generate(self, prompt: str, parameters: dict, kind: str):
@@ -27,10 +27,10 @@ uv run src\talemate\server\run.py runserver --host 0.0.0.0 --port 1234

### Letting the frontend know about the new host and port

Copy `talemate_frontend/example.env.development.local` to `talemate_frontend/.env.production.local` and edit the `VUE_APP_TALEMATE_BACKEND_WEBSOCKET_URL`.
Copy `talemate_frontend/example.env.development.local` to `talemate_frontend/.env.production.local` and edit the `VITE_TALEMATE_BACKEND_WEBSOCKET_URL`.

```env
VUE_APP_TALEMATE_BACKEND_WEBSOCKET_URL=ws://localhost:1234
VITE_TALEMATE_BACKEND_WEBSOCKET_URL=ws://localhost:1234
```

Next rebuild the frontend.
@@ -1,22 +1,15 @@

!!! example "Experimental"
    Talemate through docker has not received a lot of testing from me, so please let me know if you encounter any issues.

    You can do so by creating an issue on the [:material-github: GitHub repository](https://github.com/vegu-ai/talemate)

## Quick install instructions

1. `git clone https://github.com/vegu-ai/talemate.git`
1. `cd talemate`
1. copy config file
1. linux: `cp config.example.yaml config.yaml`
1. windows: `copy config.example.yaml config.yaml`
1. If your host has a CUDA compatible Nvidia GPU
1. Windows (via PowerShell): `$env:CUDA_AVAILABLE="true"; docker compose up`
1. Linux: `CUDA_AVAILABLE=true docker compose up`
1. If your host does **NOT** have a CUDA compatible Nvidia GPU
1. Windows: `docker compose up`
1. Linux: `docker compose up`
1. windows: `copy config.example.yaml config.yaml` (or just copy the file and rename it via the file explorer)
1. `docker compose up`
1. Navigate your browser to http://localhost:8080

!!! info "Pre-built Images"
    The default setup uses pre-built images from GitHub Container Registry that include CUDA support by default. To manually build the container instead, use `docker compose -f docker-compose.manual.yml up --build`.

!!! note
    When connecting local APIs running on the host machine (e.g. text-generation-webui), you need to use `host.docker.internal` as the hostname.
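As a quick way to sanity-check that, the host-side API can be probed from inside the running container. This is only an illustration: it assumes a text-generation-webui-style API listening on port 5000 on the host, and relies on the `talemate` service name and the `wget` tool that the compose file and Dockerfile in this changeset provide.

```bash
# From inside the container, "localhost" is the container itself, so a
# host-side API must be addressed as host.docker.internal instead.
# The port and path here are just an example.
docker compose exec talemate wget -qO- http://host.docker.internal:5000/v1/models
```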
@@ -12,14 +12,6 @@

!!! note "First start can take a while"
    The initial download and dependency installation may take several minutes, especially on slow internet connections. The console will keep you updated – just wait until the Talemate logo shows up.

### Optional: CUDA support

If you have an NVIDIA GPU and want CUDA acceleration for larger embedding models:

1. Close Talemate (if it is running).
2. Double-click **`install-cuda.bat`**. This script swaps the CPU-only Torch build for the CUDA 12.8 build, roughly as sketched below.
3. Start Talemate again via **`start.bat`**.
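The exact contents of `install-cuda.bat` are not shown in this changeset, but the Dockerfile above performs the equivalent swap; a rough sketch of the same idea on the command line (package versions and index URL taken from that Dockerfile, shell syntax assumed):

```bash
# Sketch only: replace the CPU-only PyTorch wheels in the project venv with
# the CUDA 12.8 builds, mirroring the conditional install in the Dockerfile.
source .venv/bin/activate
uv pip uninstall torch torchaudio
uv pip install torch~=2.7.0 torchaudio~=2.7.0 --index-url https://download.pytorch.org/whl/cu128
```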
## Maintenance & advanced usage

| Script | Purpose |
Binary files added:

BIN docs/img/0.32.0/add-chatterbox-voice.png (new file, 35 KiB)
BIN docs/img/0.32.0/add-elevenlabs-voice.png (new file, 29 KiB)
BIN docs/img/0.32.0/add-f5tts-voice.png (new file, 43 KiB)
BIN docs/img/0.32.0/character-voice-assignment.png (new file, 65 KiB)
BIN docs/img/0.32.0/chatterbox-api-settings.png (new file, 54 KiB)
BIN docs/img/0.32.0/chatterbox-parameters.png (new file, 18 KiB)
BIN docs/img/0.32.0/client-reasoning-2.png (new file, 18 KiB)
BIN docs/img/0.32.0/client-reasoning.png (new file, 75 KiB)
BIN docs/img/0.32.0/elevenlabs-api-settings.png (new file, 61 KiB)
BIN docs/img/0.32.0/elevenlabs-copy-voice-id.png (new file, 9.6 KiB)
BIN docs/img/0.32.0/f5tts-api-settings.png (new file, 72 KiB)
BIN docs/img/0.32.0/f5tts-parameters.png (new file, 12 KiB)
BIN docs/img/0.32.0/google-tts-api-settings.png (new file, 63 KiB)
BIN docs/img/0.32.0/kokoro-mixer.png (new file, 33 KiB)
BIN docs/img/0.32.0/openai-tts-api-settings.png (new file, 61 KiB)
BIN docs/img/0.32.0/voice-agent-settings.png (new file, 107 KiB)
BIN docs/img/0.32.0/voice-agent-status-characters.png (new file, 3.0 KiB)
BIN docs/img/0.32.0/voice-library-access.png (new file, 9.3 KiB)
BIN docs/img/0.32.0/voice-library-api-status.png (new file, 6.6 KiB)
BIN docs/img/0.32.0/voice-library-interface.png (new file, 142 KiB)
BIN docs/img/0.33.0/client-lock-template-0001.png (new file, 18 KiB)
BIN docs/img/0.33.0/client-lock-template-0002.png (new file, 15 KiB)
BIN docs/img/0.33.0/client-lock-template-0003.png (new file, 17 KiB)
BIN docs/img/0.33.0/director-agent-chat-settings.png (new file, 63 KiB)
BIN docs/img/0.33.0/director-chat-0001.png (new file, 24 KiB)
BIN docs/img/0.33.0/director-chat-0002.png (new file, 64 KiB)
BIN docs/img/0.33.0/director-chat-0003.png (new file, 61 KiB)
BIN docs/img/0.33.0/director-chat-0004.png (new file, 42 KiB)
BIN docs/img/0.33.0/director-chat-confirm-off.png (new file, 4.2 KiB)
BIN docs/img/0.33.0/director-chat-confirm-on.png (new file, 3.9 KiB)
BIN docs/img/0.33.0/director-chat-expanded-function-call.png (new file, 72 KiB)
BIN docs/img/0.33.0/director-chat-interaction.png (new file, 67 KiB)
BIN docs/img/0.33.0/director-chat-mode.png (new file, 12 KiB)
BIN docs/img/0.33.0/director-chat-persona-0001.png (new file, 14 KiB)
BIN docs/img/0.33.0/director-chat-persona-0002.png (new file, 75 KiB)
BIN docs/img/0.33.0/director-chat-reject-0001.png (new file, 60 KiB)
BIN docs/img/0.33.0/director-chat-reject-0002.png (new file, 71 KiB)
BIN docs/img/0.33.0/director-console-chat.png (new file, 18 KiB)
BIN docs/img/0.33.0/history-shared-context.png (new file, 23 KiB)
BIN docs/img/0.33.0/open-director-console.png (new file, 2.6 KiB)
BIN docs/img/0.33.0/restore-from-backup-dlg.png (new file, 46 KiB)
BIN docs/img/0.33.0/restore-from-backup.png (new file, 107 KiB)
BIN docs/img/0.33.0/share-with-world.png (new file, 2.3 KiB)
BIN docs/img/0.33.0/shared-context-1.png (new file, 39 KiB)
BIN docs/img/0.33.0/shared-context-2.png (new file, 44 KiB)
BIN docs/img/0.33.0/shared-context-3.png (new file, 6.9 KiB)
BIN docs/img/0.33.0/shared-context-new-scene.png (new file, 32 KiB)
BIN docs/img/0.33.0/unshare-from-world.png (new file, 2.6 KiB)
BIN docs/img/0.33.0/world-entry-shared-context.png (new file, 7.6 KiB)
BIN docs/img/0.34.0/character-card-1.png (new file, 346 KiB)
BIN docs/img/0.34.0/character-card-2.png (new file, 702 KiB)
BIN docs/img/0.34.0/character-card-3.png (new file, 12 KiB)
BIN docs/img/0.34.0/character-card-4.png (new file, 5.5 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.agent_config.png (new file, 54 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.browse-templates.png (new file, 9.4 KiB)
BIN docs/img/0.34.0/… (new file, 3.0 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.lighting-lora.png (new file, 42 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.qwen-export.png (new file, 19 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.qwen-save.png (new file, 18 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.qwen-start.png (new file, 471 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.qwen-template.png (new file, 180 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.talemate-empty-prompt.png (new file, 12 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.talemate-prompts.png (new file, 82 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.talemate-references.png (new file, 48 KiB)
BIN docs/img/0.34.0/comfyui.workflow.setup.talemate-resulotion.png (new file, 30 KiB)
BIN docs/img/0.34.0/shared-world-1.png (new file, 70 KiB)
BIN docs/img/0.34.0/shared-world-10.png (new file, 46 KiB)
BIN docs/img/0.34.0/shared-world-11.png (new file, 45 KiB)
BIN docs/img/0.34.0/shared-world-12.png (new file, 4.9 KiB)
BIN docs/img/0.34.0/shared-world-13.png (new file, 24 KiB)
BIN docs/img/0.34.0/shared-world-14.png (new file, 411 KiB)
BIN docs/img/0.34.0/shared-world-2.png (new file, 18 KiB)
BIN docs/img/0.34.0/shared-world-3.png (new file, 9.5 KiB)
BIN docs/img/0.34.0/shared-world-4.png (new file, 21 KiB)
BIN docs/img/0.34.0/shared-world-5.png (new file, 16 KiB)
BIN docs/img/0.34.0/shared-world-6.png (new file, 27 KiB)
BIN docs/img/0.34.0/shared-world-7.png (new file, 28 KiB)
BIN docs/img/0.34.0/shared-world-8.png (new file, 371 KiB)
BIN docs/img/0.34.0/shared-world-9.png (new file, 24 KiB)
BIN docs/img/0.34.0/visual-agent-a1111-1.png (new file, 46 KiB)
BIN docs/img/0.34.0/visual-agent-a1111-2.png (new file, 45 KiB)
BIN docs/img/0.34.0/visual-agent-a1111-3.png (new file, 2.8 KiB)
BIN docs/img/0.34.0/visual-agent-comfyui-1.png (new file, 50 KiB)
BIN docs/img/0.34.0/visual-agent-comfyui-2.png (new file, 42 KiB)