Compare commits
31 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 113553c306 | |
| | 736e6702f5 | |
| | 80256012ad | |
| | bb1cf6941b | |
| | 2c8b4b8186 | |
| | 95a17197ba | |
| | d09f9f8ac4 | |
| | de16feeed5 | |
| | cdcc804ffa | |
| | 9a2bbd78a4 | |
| | ddfbd6891b | |
| | 143dd47e02 | |
| | cc7cb773d1 | |
| | 02c88f75a1 | |
| | 419371e0fb | |
| | 6e847bf283 | |
| | ceedd3019f | |
| | a28cf2a029 | |
| | 60cb271e30 | |
| | 1874234d2c | |
| | ef99539e69 | |
| | 39bd02722d | |
| | f0b627b900 | |
| | 95ae00e01f | |
| | 83027b3a0f | |
| | 27eba3bd63 | |
| | ba64050eab | |
| | 199ffd1095 | |
| | 88b9fcb8bb | |
| | 2f5944bc09 | |
| | abdfb1abbf | |
**.github/workflows/ci.yml** (vendored, new file, 30 lines)

```yaml
name: ci
on:
  push:
    branches:
      - master
      - main
      - prep-0.26.0
permissions:
  contents: write
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Configure Git Credentials
        run: |
          git config user.name github-actions[bot]
          git config user.email 41898282+github-actions[bot]@users.noreply.github.com
      - uses: actions/setup-python@v5
        with:
          python-version: 3.x
      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
      - uses: actions/cache@v4
        with:
          key: mkdocs-material-${{ env.cache_id }}
          path: .cache
          restore-keys: |
            mkdocs-material-
      - run: pip install mkdocs-material mkdocs-awesome-pages-plugin mkdocs-glightbox
      - run: mkdocs gh-deploy --force
```
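The `cache_id` step keys the MkDocs cache to the current ISO week, so the cache is rebuilt at most once a week. A small illustration of what that expression produces (the `07` value is just an example):

```bash
# %V is the ISO week number (01-53), evaluated in UTC
date --utc '+%V'                                      # prints e.g. "07"
# The workflow appends it to GITHUB_ENV so later steps can read it as env.cache_id
echo "cache_id=$(date --utc '+%V')" >> "$GITHUB_ENV"
```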
**.gitignore** (vendored, 2 lines changed)

```
@@ -9,6 +9,7 @@ talemate_env
chroma
config.yaml
templates/llm-prompt/user/*.jinja2
templates/world-state/*.yaml
scenes/
!scenes/infinity-quest-dynamic-scenario/
!scenes/infinity-quest-dynamic-scenario/assets/
@@ -16,3 +17,4 @@ scenes/
!scenes/infinity-quest-dynamic-scenario/infinity-quest.json
!scenes/infinity-quest/assets/
!scenes/infinity-quest/infinity-quest.json
tts_voice_samples/*.wav
```
**Dockerfile** (new file, 86 lines)

```dockerfile
# Stage 1: Frontend build
FROM node:21 AS frontend-build

ENV NODE_ENV=development

WORKDIR /app

# Copy the frontend directory contents into the container at /app
COPY ./talemate_frontend /app

# Install all dependencies and build
RUN npm install && npm run build

# Stage 2: Backend build
FROM python:3.11-slim AS backend-build

WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    bash \
    gcc \
    && rm -rf /var/lib/apt/lists/*

# Install poetry
RUN pip install poetry

# Copy poetry files
COPY pyproject.toml poetry.lock* /app/

# Create a virtual environment
RUN python -m venv /app/talemate_env

# Activate virtual environment and install dependencies
RUN . /app/talemate_env/bin/activate && \
    poetry config virtualenvs.create false && \
    poetry install --only main --no-root

# Copy the Python source code
COPY ./src /app/src

# Conditional PyTorch+CUDA install
ARG CUDA_AVAILABLE=false
RUN . /app/talemate_env/bin/activate && \
    if [ "$CUDA_AVAILABLE" = "true" ]; then \
        echo "Installing PyTorch with CUDA support..." && \
        pip uninstall torch torchaudio -y && \
        pip install torch~=2.4.1 torchaudio~=2.4.1 --index-url https://download.pytorch.org/whl/cu121; \
    fi

# Stage 3: Final image
FROM python:3.11-slim

WORKDIR /app

RUN apt-get update && apt-get install -y \
    bash \
    && rm -rf /var/lib/apt/lists/*

# Copy virtual environment from backend-build stage
COPY --from=backend-build /app/talemate_env /app/talemate_env

# Copy Python source code
COPY --from=backend-build /app/src /app/src

# Copy Node.js build artifacts from frontend-build stage
COPY --from=frontend-build /app/dist /app/talemate_frontend/dist

# Copy the frontend WSGI file if it exists
COPY frontend_wsgi.py /app/frontend_wsgi.py

# Copy base config
COPY config.example.yaml /app/config.yaml

# Copy essentials
COPY scenes templates chroma* /app/

# Set PYTHONPATH to include the src directory
ENV PYTHONPATH=/app/src:$PYTHONPATH

# Make ports available to the world outside this container
EXPOSE 5050
EXPOSE 8080

# Use bash as the shell, activate the virtual environment, and run backend server
CMD ["/bin/bash", "-c", "source /app/talemate_env/bin/activate && python src/talemate/server/run.py runserver --host 0.0.0.0 --port 5050 --frontend-host 0.0.0.0 --frontend-port 8080"]
```
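The `CUDA_AVAILABLE` build argument defaults to `false`, so the PyTorch/CUDA swap in stage 2 only runs when it is explicitly enabled. A sketch of building the image on its own (the `talemate` image tag is just an example; the docker-compose.yml added below passes the same argument for you):

```bash
# GPU build: replaces the default torch/torchaudio with the cu121 wheels
docker build --build-arg CUDA_AVAILABLE=true -t talemate .

# CPU-only build: the argument keeps its default of "false" and the extra install is skipped
docker build -t talemate .
```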
**README.md** (213 lines changed)

@@ -2,194 +2,43 @@

Roleplay with AI with a focus on strong narration and consistent world and game state tracking.

> :warning: **It does not run any large language models itself but relies on existing APIs. Currently supports OpenAI, text-generation-webui and LMStudio. 0.18.0 also adds support for generic OpenAI api implementations, but generation quality on that will vary.**

## Core Features

This means you need to either have:

- an [OpenAI](https://platform.openai.com/overview) api key
- setup local (or remote via runpod) LLM inference via:
  - [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui)
  - [LMStudio](https://lmstudio.ai/)
  - Any other OpenAI api implementation that implements the v1/completions endpoint
    - tested llamacpp with the `api_like_OAI.py` wrapper
    - let me know if you have tested any other implementations and they failed / worked or landed somewhere in between

- Multiple agents for dialogue, narration, summarization, direction, editing, world state management, character/scenario creation, text-to-speech, and visual generation
- Supports per agent API selection
- Long-term memory and passage of time tracking
- Narrative world state management to reinforce character and world truths
- Creative tools for managing NPCs, AI-assisted character, and scenario creation with template support
- Context management for character details, world information, past events, and pinned information
- Customizable templates for all prompts using Jinja2
- Modern, responsive UI

## Current features

## Documentation

- responsive modern ui
- agents
  - conversation: handles character dialogue
  - narration: handles narrative exposition
  - summarization: handles summarization to compress context while maintaining history
  - director: can be used to direct the story / characters
  - editor: improves AI responses (very hit and miss at the moment)
  - world state: generates world snapshot and handles passage of time (objects and characters)
  - creator: character / scenario creator
  - tts: text to speech via elevenlabs, OpenAI or local tts
  - visual: stable-diffusion client for in place visual generation via AUTOMATIC1111, ComfyUI or OpenAI
- multi-client support (agents can be connected to separate APIs)
- long term memory
  - chromadb integration
  - passage of time
- narrative world state
  - Automatically keep track and reinforce selected character and world truths / states.
- narrative tools
- creative tools
  - manage multiple NPCs
  - AI backed character creation with template support (jinja2)
  - AI backed scenario creation
- context management
  - Manage character details and attributes
  - Manage world information / past events
  - Pin important information to the context (Manually or conditionally through AI)
- runpod integration
- overridable templates for all prompts. (jinja2)
- [Installation and Getting started](https://vegu-ai.github.io/talemate/)
- [User Guide](https://vegu-ai.github.io/talemate/user-guide/interacting/)

## Planned features

## Supported APIs

Kinda making it up as I go along, but I want to lean more into gameplay through AI, keeping track of gamestates, moving away from simply roleplaying towards a more game-ified experience.

- [OpenAI](https://platform.openai.com/overview)
- [Anthropic](https://www.anthropic.com/)
- [mistral.ai](https://mistral.ai/)
- [Cohere](https://www.cohere.com/)
- [Groq](https://www.groq.com/)
- [Google Gemini](https://console.cloud.google.com/)

In no particular order:

Supported self-hosted APIs:

- [KoboldCpp](https://koboldai.org/cpp) ([Local](https://koboldai.org/cpp), [Runpod](https://koboldai.org/runpodcpp), [VastAI](https://koboldai.org/vastcpp), also includes image gen support)
- [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (local or with runpod support)
- [LMStudio](https://lmstudio.ai/)
- [TabbyAPI](https://github.com/theroyallab/tabbyAPI/)

- Extension support
  - modular agents and clients
- Improved world state
- Dynamic player choice generation
- Better creative tools
  - node based scenario / character creation
- Improved and consistent long term memory and accurate current state of the world
- Improved director agent
  - Right now this doesn't really work well on anything but GPT-4 (and even there it's debatable). It tends to steer the story in a way that introduces pacing issues. It needs a model that is creative but also reasons really well, I think.
- Gameplay loop governed by AI
  - objectives
  - quests
  - win / lose conditions

# Instructions

Please read the documents in the `docs` folder for more advanced configuration and usage.

- [Quickstart](#quickstart)
- [Installation](#installation)
- [Connecting to an LLM](#connecting-to-an-llm)
- [Text-generation-webui](#text-generation-webui)
- [Recommended Models](#recommended-models)
- [OpenAI](#openai)
- [Ready to go](#ready-to-go)
- [Load the introductory scenario "Infinity Quest"](#load-the-introductory-scenario-infinity-quest)
- [Loading character cards](#loading-character-cards)
- [Text-to-Speech (TTS)](docs/tts.md)
- [Visual Generation](docs/visual.md)
- [ChromaDB (long term memory) configuration](docs/chromadb.md)
- [Runpod Integration](docs/runpod.md)
- [Prompt template overrides](docs/templates.md)

# Quickstart

## Installation

Post [here](https://github.com/vegu-ai/talemate/issues/17) if you run into problems during installation.

There is also a [troubleshooting guide](docs/troubleshoot.md) that might help.

### Windows

1. Download and install Python 3.10 or Python 3.11 from the [official Python website](https://www.python.org/downloads/windows/). :warning: python3.12 is currently not supported.
1. Download and install Node.js v20 from the [official Node.js website](https://nodejs.org/en/download/). This will also install npm. :warning: v21 is currently not supported.
1. Download the Talemate project to your local machine. Download from [the Releases page](https://github.com/vegu-ai/talemate/releases).
1. Unpack the download and run `install.bat` by double clicking it. This will set up the project on your local machine.
1. Once the installation is complete, you can start the backend and frontend servers by running `start.bat`.
1. Navigate your browser to http://localhost:8080

### Linux

`python 3.10` or `python 3.11` is required. :warning: `python 3.12` not supported yet.

`nodejs v19 or v20` :warning: `v21` not supported yet.

1. `git clone git@github.com:vegu-ai/talemate`
1. `cd talemate`
1. `source install.sh`
1. Start the backend: `python src/talemate/server/run.py runserver --host 0.0.0.0 --port 5050`.
1. Open a new terminal, navigate to the `talemate_frontend` directory, and start the frontend server by running `npm run serve`.

## Connecting to an LLM

On the right hand side click the "Add Client" button. If there is no button, you may need to toggle the client options by clicking this button:

### Text-generation-webui

> :warning: As of version 0.13.0 the legacy text-generator-webui API `--extension api` is no longer supported, please use their new `--extension openai` api implementation instead.

In the modal if you're planning to connect to text-generation-webui, you can likely leave everything as is and just click Save.

#### Recommended Models

As of 2024.02.06 my personal regular drivers (the ones I test with) are:

- Kunoichi-7B
- sparsetral-16x7B
- Nous-Hermes-2-SOLAR-10.7B
- brucethemoose_Yi-34B-200K-RPMerge
- dolphin-2.7-mixtral-8x7b
- Mixtral-8x7B-instruct
- GPT-3.5-turbo 0125
- GPT-4-turbo 0116

That said, any of the top models in any of the size classes here should work well (I wouldn't recommend going lower than 7B):

https://www.reddit.com/r/LocalLLaMA/comments/18yp9u4/llm_comparisontest_api_edition_gpt4_vs_gemini_vs/

### OpenAI

If you want to add an OpenAI client, just change the client type and select the appropriate model.

If you are setting this up for the first time, you should now see the client, but it will have a red dot next to it, stating that it requires an API key.

Click the `SET API KEY` button. This will open a modal where you can enter your API key.

Click `Save` and after a moment the client should have a green dot next to it, indicating that it is ready to go.

## Ready to go

You will know you are good to go when the client and all the agents have a green dot next to them.

## Load the introductory scenario "Infinity Quest"

Generated using talemate creative tools, mostly used for testing / demoing.

You can load it (and any other talemate scenarios or save files) by expanding the "Load" menu in the top left corner and selecting the middle tab. Then simply search for a partial name of the scenario you want to load and click on the result.

## Loading character cards

Supports both v1 and v2 chara specs.

Expand the "Load" menu in the top left corner and either click on "Upload a character card" or simply drag and drop a character card file into the same area.

Once a character is uploaded, talemate may actually take a moment because it needs to convert it to a talemate format and will also run additional LLM prompts to generate character attributes and world state.

Make sure you save the scene after the character is loaded as it can then be loaded as normal talemate scenario in the future.

Generic OpenAI api implementations (tested and confirmed working):

- [DeepInfra](https://deepinfra.com/)
- [llamacpp](https://github.com/ggerganov/llama.cpp) with the `api_like_OAI.py` wrapper
- let me know if you have tested any other implementations and they failed / worked or landed somewhere in between
@@ -7,40 +7,6 @@ creator:

```yaml
  - a thrilling action story
  - a mysterious adventure
  - an epic sci-fi adventure
game:
  world_state:
    templates:
      state_reinforcement:
        Goals:
          auto_create: false
          description: Long term and short term goals
          favorite: true
          insert: conversation-context
          instructions: Create a long term goal and two short term goals for {character_name}. Your response must only be the long terms and two short term goals.
          interval: 20
          name: Goals
          query: Goals
          state_type: npc
        Physical Health:
          auto_create: false
          description: Keep track of health.
          favorite: true
          insert: sequential
          instructions: ''
          interval: 10
          name: Physical Health
          query: What is {character_name}'s current physical health status?
          state_type: character
        Time of day:
          auto_create: false
          description: Track night / day cycle
          favorite: true
          insert: sequential
          instructions: ''
          interval: 10
          name: Time of day
          query: What is the current time of day?
          state_type: world
```

## Long-term memory
**docker-compose.yml** (new file, 21 lines)

```yaml
version: '3.8'

services:
  talemate:
    build:
      context: .
      dockerfile: Dockerfile
      args:
        - CUDA_AVAILABLE=${CUDA_AVAILABLE:-false}
    ports:
      - "${FRONTEND_PORT:-8080}:8080"
      - "${BACKEND_PORT:-5050}:5050"
    volumes:
      - ./config.yaml:/app/config.yaml
      - ./scenes:/app/scenes
      - ./templates:/app/templates
      - ./chroma:/app/chroma
    environment:
      - PYTHONUNBUFFERED=1
      - PYTHONPATH=/app/src:$PYTHONPATH
    command: ["/bin/bash", "-c", "source /app/talemate_env/bin/activate && python src/talemate/server/run.py runserver --host 0.0.0.0 --port 5050 --frontend-host 0.0.0.0 --frontend-port 8080"]
```
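Because the build argument and published ports are read from environment variables with defaults, a plain `docker compose up` works as-is. A couple of illustrative invocations from the project root (after copying `config.example.yaml` to `config.yaml`):

```bash
# Build and run with GPU-enabled PyTorch
CUDA_AVAILABLE=true docker compose up --build

# Remap only the published host ports; the container still listens on 8080/5050
FRONTEND_PORT=8081 BACKEND_PORT=5051 docker compose up
```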
**docs/.pages** (new file, 5 lines)

```yaml
nav:
  - Home: index.md
  - Getting started: getting-started
  - User guide: user-guide
  - Developer guide: dev
```
@@ -1,62 +0,0 @@

# ChromaDB

Talemate uses ChromaDB to maintain long-term memory. The default embeddings used are really fast but also not incredibly accurate. If you want to use more accurate embeddings you can use the instructor embeddings or the openai embeddings. See below for instructions on how to enable these.

In my testing so far, instructor-xl has proved to be the most accurate (even more so than openai).

## Local instructor embeddings

If you want chromaDB to use the more accurate (but much slower) instructor embeddings add the following to `config.yaml`:

**Note**: The `xl` model takes a while to load even with cuda. Expect a minute of loading time on the first scene you load.

```yaml
chromadb:
  embeddings: instructor
  instructor_device: cpu
  instructor_model: hkunlp/instructor-xl
```

### Instructor embedding models

- `hkunlp/instructor-base` (smallest / fastest)
- `hkunlp/instructor-large`
- `hkunlp/instructor-xl` (largest / slowest) - requires about 5GB of memory

You will need to restart the backend for this change to take effect.

**NOTE** - The first time you do this it will need to download the instructor model you selected. This may take a while, and the talemate backend will be unresponsive during that time.

Once the download is finished, if talemate is still unresponsive, try reloading the front-end to reconnect. When all fails just restart the backend as well. I'll try to make this more robust in the future.

### GPU support

If you want to use the instructor embeddings with GPU support, you will need to install pytorch with CUDA support.

To do this on windows, run `install-pytorch-cuda.bat` from the project directory. Then change your device in the config to `cuda`:

```yaml
chromadb:
  embeddings: instructor
  instructor_device: cuda
  instructor_model: hkunlp/instructor-xl
```

## OpenAI embeddings

First make sure your openai key is specified in the `config.yaml` file

```yaml
openai:
  api_key: <your-key-here>
```

Then add the following to `config.yaml` for chromadb:

```yaml
chromadb:
  embeddings: openai
  openai_model: text-embedding-3-small
```

**Note**: As with everything openai, using this isn't free. It's way cheaper than their text completion though. ALSO - if you send super explicit content they may flag / ban your key, so keep that in mind (I hear they usually send warnings first though), and always monitor your usage on their dashboard.
**docs/cleanup.py** (new file, 166 lines)

```python
import os
import re
import subprocess
from pathlib import Path
import argparse


def find_image_references(md_file):
    """Find all image references in a markdown file."""
    with open(md_file, 'r', encoding='utf-8') as f:
        content = f.read()

    pattern = r'!\[.*?\]\((.*?)\)'
    matches = re.findall(pattern, content)

    cleaned_paths = []
    for match in matches:
        path = match.lstrip('/')
        if 'img/' in path:
            path = path[path.index('img/') + 4:]
        # Only keep references to versioned images
        parts = os.path.normpath(path).split(os.sep)
        if len(parts) >= 2 and parts[0].replace('.', '').isdigit():
            cleaned_paths.append(path)

    return cleaned_paths


def scan_markdown_files(docs_dir):
    """Recursively scan all markdown files in the docs directory."""
    md_files = []
    for root, _, files in os.walk(docs_dir):
        for file in files:
            if file.endswith('.md'):
                md_files.append(os.path.join(root, file))
    return md_files


def find_all_images(img_dir):
    """Find all image files in version subdirectories."""
    image_files = []
    for root, _, files in os.walk(img_dir):
        # Get the relative path from img_dir to current directory
        rel_dir = os.path.relpath(root, img_dir)

        # Skip if we're in the root img directory
        if rel_dir == '.':
            continue

        # Check if the immediate parent directory is a version number
        parent_dir = rel_dir.split(os.sep)[0]
        if not parent_dir.replace('.', '').isdigit():
            continue

        for file in files:
            if file.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.svg')):
                rel_path = os.path.relpath(os.path.join(root, file), img_dir)
                image_files.append(rel_path)
    return image_files


def grep_check_image(docs_dir, image_path):
    """
    Check if versioned image is referenced anywhere using grep.
    Returns True if any reference is found, False otherwise.
    """
    try:
        # Split the image path to get version and filename
        parts = os.path.normpath(image_path).split(os.sep)
        version = parts[0]  # e.g., "0.29.0"
        filename = parts[-1]  # e.g., "world-state-suggestions-2.png"

        # For versioned images, require both version and filename to match
        version_pattern = f"{version}.*{filename}"
        try:
            result = subprocess.run(
                ['grep', '-r', '-l', version_pattern, docs_dir],
                capture_output=True,
                text=True
            )
            if result.stdout.strip():
                print(f"Found reference to {image_path} with version pattern: {version_pattern}")
                return True
        except subprocess.CalledProcessError:
            pass

    except Exception as e:
        print(f"Error during grep check for {image_path}: {e}")

    return False


def main():
    parser = argparse.ArgumentParser(description='Find and optionally delete unused versioned images in MkDocs project')
    parser.add_argument('--docs-dir', type=str, required=True, help='Path to the docs directory')
    parser.add_argument('--img-dir', type=str, required=True, help='Path to the images directory')
    parser.add_argument('--delete', action='store_true', help='Delete unused images')
    parser.add_argument('--verbose', action='store_true', help='Show all found references and files')
    parser.add_argument('--skip-grep', action='store_true', help='Skip the additional grep validation')
    args = parser.parse_args()

    # Convert paths to absolute paths
    docs_dir = os.path.abspath(args.docs_dir)
    img_dir = os.path.abspath(args.img_dir)

    print(f"Scanning markdown files in: {docs_dir}")
    print(f"Looking for versioned images in: {img_dir}")

    # Get all markdown files
    md_files = scan_markdown_files(docs_dir)
    print(f"Found {len(md_files)} markdown files")

    # Collect all image references
    used_images = set()
    for md_file in md_files:
        refs = find_image_references(md_file)
        used_images.update(refs)

    # Get all actual images (only from version directories)
    all_images = set(find_all_images(img_dir))

    if args.verbose:
        print("\nAll versioned image references found in markdown:")
        for img in sorted(used_images):
            print(f"- {img}")

        print("\nAll versioned images in directory:")
        for img in sorted(all_images):
            print(f"- {img}")

    # Find potentially unused images
    unused_images = all_images - used_images

    # Additional grep validation if not skipped
    if not args.skip_grep and unused_images:
        print("\nPerforming additional grep validation...")
        actually_unused = set()
        for img in unused_images:
            if not grep_check_image(docs_dir, img):
                actually_unused.add(img)

        if len(actually_unused) != len(unused_images):
            print(f"\nGrep validation found {len(unused_images) - len(actually_unused)} additional image references!")
        unused_images = actually_unused

    # Report findings
    print("\nResults:")
    print(f"Total versioned images found: {len(all_images)}")
    print(f"Versioned images referenced in markdown: {len(used_images)}")
    print(f"Unused versioned images: {len(unused_images)}")

    if unused_images:
        print("\nUnused versioned images:")
        for img in sorted(unused_images):
            print(f"- {img}")

        if args.delete:
            print("\nDeleting unused versioned images...")
            for img in unused_images:
                full_path = os.path.join(img_dir, img)
                try:
                    os.remove(full_path)
                    print(f"Deleted: {img}")
                except Exception as e:
                    print(f"Error deleting {img}: {e}")
            print("\nDeletion complete")
    else:
        print("\nNo unused versioned images found!")


if __name__ == "__main__":
    main()
```
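A sketch of how the script might be run (the paths are illustrative; `--docs-dir` and `--img-dir` are the two required arguments defined above):

```bash
# Report unused versioned images without deleting anything
python docs/cleanup.py --docs-dir docs --img-dir docs/img --verbose

# Same scan, but delete what is confirmed unused (grep validation runs by default)
python docs/cleanup.py --docs-dir docs --img-dir docs/img --delete
```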
**docs/dev/index.md** (new file, 3 lines)

# Coming soon

Developer documentation is coming soon. Stay tuned!
@@ -1,4 +1,7 @@

# Template Overrides in Talemate
# Template Overrides

!!! warning "Old documentation"
    This is old documentation and needs to be updated, however may still contain useful information.

## Introduction to Templates

@@ -23,9 +26,9 @@ The creator agent templates allow for the creation of new characters within the

### Example Templates

- [Character Attributes Human Template](src/talemate/prompts/templates/creator/character-attributes-human.jinja2)
- [Character Details Human Template](src/talemate/prompts/templates/creator/character-details-human.jinja2)
- [Character Example Dialogue Human Template](src/talemate/prompts/templates/creator/character-example-dialogue-human.jinja2)
- `src/talemate/prompts/templates/creator/character-attributes-human.jinja2`
- `src/talemate/prompts/templates/creator/character-details-human.jinja2`
- `src/talemate/prompts/templates/creator/character-example-dialogue-human.jinja2`

These example templates can serve as a guide for users to create their own custom templates for the character creator.
**docs/dev/third-party-reference.md** (new file, 14 lines)

## Third Party API docs

### Chat completions

- [Anthropic](https://docs.anthropic.com/en/api/messages)
- [Cohere](https://docs.cohere.com/reference/chat)
- [Google AI](https://ai.google.dev/api/generate-content#v1beta.GenerationConfig)
- [Groq](https://console.groq.com/docs/api-reference#chat-create)
- [KoboldCpp](https://lite.koboldai.net/koboldcpp_api#/api/v1)
- [LMStudio](https://lmstudio.ai/docs/api/rest-api)
- [Mistral AI](https://docs.mistral.ai/api/)
- [OpenAI](https://platform.openai.com/docs/api-reference/completions)
- [TabbyAPI](https://theroyallab.github.io/tabbyAPI/#operation/chat_completion_request_v1_chat_completions_post)
- [Text-Generation-WebUI](https://github.com/oobabooga/text-generation-webui/blob/main/extensions/openai/typing.py)
**docs/getting-started/.pages** (new file, 5 lines)

```yaml
nav:
  - 1. Installation: installation
  - 2. Connect a client: connect-a-client.md
  - 3. Load a scene: load-a-scene.md
  - ...
```
**docs/getting-started/advanced/.pages** (new file, 3 lines)

```yaml
nav:
  - change-host-and-port.md
  - ...
```
**docs/getting-started/advanced/change-host-and-port.md** (new file, 102 lines)

# Changing host and port

## Backend

By default, the backend listens on `localhost:5050`.

To run the server on a different host and port, you need to change the values passed to the `--host` and `--port` parameters during startup and also make sure the frontend knows the new values.

### Changing the host and port for the backend

#### :material-linux: Linux

Copy `start.sh` to `start_custom.sh` and edit the `--host` and `--port` parameters in the `uvicorn` command.

```bash
#!/bin/sh
. talemate_env/bin/activate
python src/talemate/server/run.py runserver --host 0.0.0.0 --port 1234
```

#### :material-microsoft-windows: Windows

Copy `start.bat` to `start_custom.bat` and edit the `--host` and `--port` parameters in the `uvicorn` command.

```batch
start cmd /k "cd talemate_env\Scripts && activate && cd ../../ && python src\talemate\server\run.py runserver --host 0.0.0.0 --port 1234"
```

### Letting the frontend know about the new host and port

Copy `talemate_frontend/example.env.development.local` to `talemate_frontend/.env.production.local` and edit the `VUE_APP_TALEMATE_BACKEND_WEBSOCKET_URL`.

```env
VUE_APP_TALEMATE_BACKEND_WEBSOCKET_URL=ws://localhost:1234
```

Next rebuild the frontend.

```bash
cd talemate_frontend
npm run build
```

### Start the backend and frontend

Start the backend and frontend as usual.

#### :material-linux: Linux

```bash
./start_custom.sh
```

#### :material-microsoft-windows: Windows

```batch
start_custom.bat
```

## Frontend

By default, the frontend listens on `localhost:8080`.

To change the frontend host and port, you need to change the values passed to the `--frontend-host` and `--frontend-port` parameters during startup.

### Changing the host and port for the frontend

#### :material-linux: Linux

Copy `start.sh` to `start_custom.sh` and edit the `--frontend-host` and `--frontend-port` parameters.

```bash
#!/bin/sh
. talemate_env/bin/activate
python src/talemate/server/run.py runserver --host 0.0.0.0 --port 5055 \
    --frontend-host localhost --frontend-port 8082
```

#### :material-microsoft-windows: Windows

Copy `start.bat` to `start_custom.bat` and edit the `--frontend-host` and `--frontend-port` parameters.

```batch
start cmd /k "cd talemate_env\Scripts && activate && cd ../../ && python src\talemate\server\run.py runserver --host 0.0.0.0 --port 5055 --frontend-host localhost --frontend-port 8082"
```

### Start the backend and frontend

Start the backend and frontend as usual.

#### :material-linux: Linux

```bash
./start_custom.sh
```

#### :material-microsoft-windows: Windows

```batch
start_custom.bat
```
**docs/getting-started/connect-a-client.md** (new file, 68 lines)

# Connect a client

Once Talemate is up and running and you are connected, you will see a notification in the corner instructing you to configure a client.

Talemate uses client(s) to connect to local or remote AI text generation APIs like koboldcpp, text-generation-webui or OpenAI.

## Add a new client

On the right hand side click the **:material-plus-box: ADD CLIENT** button.

!!! note "No button?"
    If there is no button, you may need to toggle the client options by clicking this button

The client configuration window will appear. Here you can choose the type of client you want to add.

## Choose an API / Client Type

We have support for multiple local and remote APIs. You can choose to use one or more of them.

!!! note "Local vs remote"
    A local API runs on your machine, while a remote API runs on a server somewhere else.

Select the API you want to use and click through to follow the instructions to configure a client for it:

##### Remote APIs

- [OpenAI](/talemate/user-guide/clients/types/openai/)
- [Anthropic](/talemate/user-guide/clients/types/anthropic/)
- [mistral.ai](/talemate/user-guide/clients/types/mistral/)
- [Cohere](/talemate/user-guide/clients/types/cohere/)
- [Groq](/talemate/user-guide/clients/types/groq/)
- [Google Gemini](/talemate/user-guide/clients/types/google/)

##### Local APIs

- [KoboldCpp](/talemate/user-guide/clients/types/koboldcpp/)
- [Text-Generation-WebUI](/talemate/user-guide/clients/types/text-generation-webui/)
- [LMStudio](/talemate/user-guide/clients/types/lmstudio/)
- [TabbyAPI](/talemate/user-guide/clients/types/tabbyapi/)

##### Unofficial OpenAI API implementations

- [DeepInfra](/talemate/user-guide/clients/types/openai-compatible/#deepinfra)
- llamacpp with the `api_like_OAI.py` wrapper

## Assign the client to the agents

Whenever you add your first client, Talemate will automatically assign it to all agents. Once the client is configured and assigned, all agents should have a green dot next to them. (Or grey if the agent is currently disabled.)

You can tell the client is assigned to the agent by checking the tag beneath the agent name, which will contain the client name if it is assigned.

## It's not assigned!

If for some reason the client is not assigned to the agent, you can manually assign it to all agents by clicking the **:material-transit-connection-variant: Assign to all agents** button.
**docs/getting-started/installation/.pages** (new file, 5 lines)

```yaml
nav:
  - windows.md
  - linux.md
  - docker.md
  - ...
```
**docs/getting-started/installation/docker.md** (new file, 22 lines)

!!! example "Experimental"
    Talemate through docker has not received a lot of testing from me, so please let me know if you encounter any issues.

    You can do so by creating an issue on the [:material-github: GitHub repository](https://github.com/vegu-ai/talemate)

## Quick install instructions

1. `git clone https://github.com/vegu-ai/talemate.git`
1. `cd talemate`
1. copy config file
    1. linux: `cp config.example.yaml config.yaml`
    1. windows: `copy config.example.yaml config.yaml`
1. If your host has a CUDA compatible Nvidia GPU
    1. Windows (via PowerShell): `$env:CUDA_AVAILABLE="true"; docker compose up`
    1. Linux: `CUDA_AVAILABLE=true docker compose up`
1. If your host does **NOT** have a CUDA compatible Nvidia GPU
    1. Windows: `docker compose up`
    1. Linux: `docker compose up`
1. Navigate your browser to http://localhost:8080

!!! note
    When connecting to local APIs running on the host machine (e.g. text-generation-webui), you need to use `host.docker.internal` as the hostname.
@@ -1,3 +1,27 @@

## Quick install instructions

!!! warning
    python 3.12 is currently not supported.

### Dependencies

1. node.js and npm - see instructions [here](https://nodejs.org/en/download/package-manager/)
1. python 3.10 or 3.11 - see instructions [here](https://www.python.org/downloads/)

### Installation

1. `git clone https://github.com/vegu-ai/talemate.git`
1. `cd talemate`
1. `source install.sh`
    - When asked if you want to install pytorch with CUDA support choose `y` if you have a CUDA compatible Nvidia GPU and have installed the necessary drivers.
1. `source start.sh`

If everything went well, you can proceed to [connect a client](../../connect-a-client).

## Additional Information

### Setting Up a Virtual Environment

1. Open a terminal.
**docs/getting-started/installation/troubleshoot.md** (new file, 28 lines)

# Common issues

## Windows

### Installation fails with "Microsoft Visual C++" error

If your installation errors with a notification to upgrade "Microsoft Visual C++" go to https://visualstudio.microsoft.com/visual-cpp-build-tools/ and click "Download Build Tools" and run it.

- During installation make sure you select the C++ development package (upper left corner)
- Run `reinstall.bat` inside talemate directory

## Docker

### Docker has created `config.yaml` directory

If you do not copy the example config to `config.yaml` before running `docker compose up`, docker will create a `config.yaml` directory in the root of the project. This will cause the backend to fail to start.

This happens because we mount the config file directly as a docker volume, and if it does not exist docker will create a directory with the same name.

This will eventually be fixed, for now please make sure to copy the example config file before running the docker compose command.
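If the directory has already been created, one way to recover (a sketch, assuming you are in the project root) is to remove it and put the real config file in place before starting the stack again:

```bash
docker compose down
rm -rf config.yaml                    # remove the directory docker created by mistake
cp config.example.yaml config.yaml    # copy the example config into place
docker compose up
```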
## General

### Running behind reverse proxy with ssl

Personally, I have not been able to make this work yet, but it's on my list. The issue stems from some Vue oddities when specifying the base URLs while running in a dev environment. I expect once I start building the project for production this will be resolved.

If you do make it work, please reach out to me so I can update this documentation.
**docs/getting-started/installation/windows.md** (new file, 42 lines)

## Quick install instructions

!!! warning
    python 3.12 is currently not supported

1. Download and install Python 3.10 or Python 3.11 from the [official Python website](https://www.python.org/downloads/windows/).
    - [Click here for direct link to python 3.11.9 download](https://www.python.org/downloads/release/python-3119/)
1. Download and install Node.js from the [official Node.js website](https://nodejs.org/en/download/prebuilt-installer). This will also install npm.
1. Download the Talemate project to your local machine. Download from [the Releases page](https://github.com/vegu-ai/talemate/releases).
1. Unpack the download and run `install.bat` by double clicking it. This will set up the project on your local machine.
1. Once the installation is complete, you can start the backend and frontend servers by running `start.bat`.
1. Navigate your browser to http://localhost:8080

If everything went well, you can proceed to [connect a client](../../connect-a-client).

## Additional Information

### How to Install Python 3.10 or 3.11

1. Visit the official Python website's download page for Windows at [https://www.python.org/downloads/windows/](https://www.python.org/downloads/windows/).
2. Find the latest version of Python 3.10 or 3.11 and click on one of the download links. (You will likely want the Windows installer (64-bit).)
3. Run the installer file and follow the setup instructions. Make sure to check the box that says Add Python 3.10 to PATH before you click Install Now.

### How to Install npm

1. Download Node.js from the official site [https://nodejs.org/en/download/prebuilt-installer](https://nodejs.org/en/download/prebuilt-installer).
2. Run the installer (the .msi installer is recommended).
3. Follow the prompts in the installer (Accept the license agreement, click the NEXT button a bunch of times and accept the default installation settings).

### Usage of the Supplied bat Files

#### install.bat

This batch file is used to set up the project on your local machine. It creates a virtual environment, activates it, installs poetry, and uses poetry to install dependencies. It then navigates to the frontend directory and installs the necessary npm packages.

To run this file, simply double click on it or open a command prompt in the same directory and type `install.bat`.

#### start.bat

This batch file is used to start the backend and frontend servers. It opens two command prompts, one for the frontend and one for the backend.

To run this file, simply double click on it or open a command prompt in the same directory and type `start.bat`.
**docs/getting-started/load-a-scene.md** (new file, 57 lines)

# Load a scenario

Once you've set up a client and assigned it to all the agents, you will be presented with the `Home` screen. From here, you can load talemate scenarios and upload character cards.

To load the introductory `Infinity Quest` scenario, simply click on its entry in the `Quick Load` section.

!!! info "First time may take a moment"
    When you load a scenario for the first time, Talemate will need to initialize the long term memory model, which likely means a download. Just be patient and it will be ready soon.

## Interacting with the scenario

After a moment of loading, you will see the scenario's introductory message and be able to send a text interaction.

It's time to send the first message.

Spoken words should go into `"` and actions should be written in `*`. Talemate will automatically supply the other if you supply one.

Once sent, it's now the AI's turn to respond - depending on the service and model selected this can take a moment.

## Quick overview of UI elements

### Scenario tools

Above the chat input there is a set of tools to help you interact with the scenario.

These contain tools to, for example:

- regenerate the most recent AI response
- give directions to characters
- narrate the scene
- advance time
- save the current scene state
- and more ...

A full guide can be found in the [Scenario Tools](/talemate/user-guide/scenario-tools) section of the user guide.

### World state

Shows a summarization of the current scene state.

Each item can be expanded for more information.

Find out more about the world state in the [World State](/talemate/user-guide/world-state) section of the user guide.
**Deleted binary images**: 7 files (5.6 KiB, 24 KiB, 4.7 KiB, 32 KiB, 34 KiB, 30 KiB, 2.9 KiB); filenames are not shown in this view.
**Added binary images** (71 new files under `docs/img/0.26.0/`):

- agent-disabled.png (1.1 KiB)
- agent-enabled.png (1.6 KiB)
- agent-has-client-assigned.png (1.1 KiB)
- anthropic-settings.png (43 KiB)
- auto-progress-off.png (1.4 KiB)
- autosave-blocked.png (1.3 KiB)
- autosave-disabled.png (1.2 KiB)
- autosave-enabled.png (1.3 KiB)
- client-anthropic-no-api-key.png (8.7 KiB)
- client-anthropic-ready.png (8.0 KiB)
- client-anthropic.png (22 KiB)
- client-assigned-prompt-template.png (46 KiB)
- client-cohere-no-api-key.png (8.4 KiB)
- client-cohere-ready.png (7.0 KiB)
- client-cohere.png (20 KiB)
- client-deepinfra-ready.png (8.6 KiB)
- client-deepinfra.png (78 KiB)
- client-google-creds-missing.png (9.3 KiB)
- client-google-ready.png (7.9 KiB)
- client-google.png (26 KiB)
- client-groq-no-api-key.png (8.1 KiB)
- client-groq-ready.png (6.9 KiB)
- client-groq.png (20 KiB)
- client-hibernate-1.png (19 KiB)
- client-hibernate-2.png (10 KiB)
- client-koboldcpp-could-not-connect.png (7.5 KiB)
- client-koboldcpp-ready.png (7.8 KiB)
- client-koboldcpp.png (28 KiB)
- client-lmstudio-could-not-connect.png (7.3 KiB)
- client-lmstudio-ready.png (7.7 KiB)
- client-lmstudio.png (27 KiB)
- client-mistral-no-api-key.png (8.6 KiB)
- client-mistral-ready.png (7.6 KiB)
- client-mistral.png (21 KiB)
- client-ooba-could-not-connect.png (8.2 KiB)
- client-ooba-no-model-loaded.png (8.1 KiB)
- client-ooba-ready.png (9.2 KiB)
- client-ooba.png (29 KiB)
- client-openai-no-api-key.png (8.5 KiB)
- client-openai-ready.png (6.7 KiB)
- client-openai.png (20 KiB)
- client-tabbyapi-could-not-connect.png (7.4 KiB)
- client-tabbyapi-ready.png (8.3 KiB)
- client-tabbyapi.png (39 KiB)
- client-unknown-prompt-template-modal.png (49 KiB)
- client-unknown-prompt-template.png (18 KiB)
- cohere-settings.png (42 KiB)
- connect-a-client-add-client-modal.png (29 KiB)
- connect-a-client-add-client.png (7.1 KiB)
- connect-a-client-assign-to-all-agents.png (10 KiB)
- connect-a-client-ready.png (43 KiB)
- create-new-scene-test.png (246 KiB)
- create-new-scene.png (3.4 KiB)
- elevenlabs-ready.png (3.1 KiB)
- elevenlabs-settings.png (46 KiB)
- getting-started-first-ai-response.png (111 KiB)
- getting-started-first-interaction.png (27 KiB)
- getting-started-load-screen.png (634 KiB)
- getting-started-scene-1.png (441 KiB)
- getting-started-ui-element-tools.png (5.6 KiB)
- getting-started-world-state-1.png (289 KiB)
- getting-started-world-state-2.png (14 KiB)
- google-settings.png (67 KiB)
- groq-settings.png (41 KiB)
- inference-presets-1.png (82 KiB)
- interacting-input-act-as-character.png (5.6 KiB)
- interacting-input-act-as-narrator.png (5.8 KiB)
- interacting-input-request.png (5.4 KiB)
- mistral-settings.png (42 KiB)
- no-api-token.png (1.0 KiB)
- no-clients.png (10 KiB)