Files
talemate/tests/test_graphs.py
veguAI ce4c302d73 0.32.0 (#208)
* separate other tts apis and improve chunking

* move old tts config to voice agent config and implement config widget ux elements for table editing

* elevenlabs updated to use their client and expose model selection

* linting

* separate character class into character.pt and start on voice routing

* linting

* tts hot swapping and chunking improvements

* linting

* add support for piper-tts

* update gitignore

* linting

* support google tts
fix issue where quick_toggle agent config didnt work on standard config items

* linting

* only show agent quick toggles if the agent is enabled

* change elevenlabs to use a locally maintained voice list

* tts generate before / after events

* voice library refactor

* linting

* update openai model and voices

* tweak configs

* voice library ux

* linting

* add support for kokoro tts

* fix add / remove voice

* voice library tags

* linting

* linting

* tts api status

* api infos and add more kokoro voices

* allow voice testing before saving a new voice

* tweaks to voice library ux and some api info text

* linting

* voice mixer

* polish

* voice files go into /tts instead of templates/voice

* change default narrator voice

* xtts confirmation note

* character voice select

* koboldai format template

* polish

* skip empty chunks

* change default voice

* replace em-dash with normal dash

* adjust limit

* replace linebreaks

* chunk cleanup for whitespace

* info updated

* remove invalid endif tag

* sort voices by ready api

* Character hashable type

* clarify set_simulated_environment use to avoid unwanted character deactivated

* allow manual generation of tts and fix assorted issues with tts

* tts websocket handler router renamed

* voice mixer: when there are only 2 voices auto adjust the other weight as needed

* separate persist character functions into own mixin

* auto assign voices

* fix chara load and auto assign voice during chara load

* smart speaker separation

* tts speaker separation config

* generate tts for intro text

* fix prompting issues with anthropic, google and openrouter clients

* decensor flag off again

* only to ai assisted voice markup on narrator messages

* openrouter provider configuration

* linting

* improved sound controls

* add support for chatterbox

* fix info

* chatterbox dependencies

* remove piper and xtts2

* linting

* voice params

* linting

* tts model overrides and move tts info to tab

* reorg toolbar

* allow overriding of test text

* more tts fixes, apply intensity, chatterbox voices

* confirm voice delete

* linting

* groq updates

* reorg decorators

* tts fixes

* cancelable audio queue

* voice library uploads

* scene voice library

* Config refactor (#13)

* config refactor progress

* config nuke continues

* fix system prompts

* linting

* client fun

* client config refactor

* fix kcpp auto embedding selection

* linting

* fix proxy config

* remove cruft

* fix remaining client bugs from config refactor
always use get_config(), dont keep an instance reference

* support for reasoning models

* more reasoning tweaks

* only allow one frontend to connect at a time

* fix tests

* relock

* relock

* more client adjustments

* pattern prefill

* some tts agent fixes

* fix ai assist cond

* tts nodes

* fix config retrieval

* assign voice node and fixes

* sim suite char gen assign voice

* fix voice assign template to consider used voices

* get rid of auto break repetition which wasn't working right for a while anyhow

* linting

* generate tts node
as string node

* linting

* voice change on character event

* tweak chatterbox max length

* koboldai default template

* linting

* fix saving of existing voice

* relock

* adjust params of eva default voice

* f5tts support

* f5tts samples

* f5tts support

* f5tts tweaks

* chunk size per tts api and reorg defaul f5tts voices

* chatterbox default voice reog to match f5-tts default voices

* voice library ux polish pass

* cleanup

* f5-tts tweaks

* missing samples

* get rid of old save cmd

* add chatterbox and f5tts

* housekeeping

* fix some issues with world entry editing

* remove cruft

* replace exclamation marks

* fix save immutable check

* fix replace_exclamation_marks

* better error handling in websocket plugins and fix issue with saves

* agent config save on dialog close

* ctrl click to disable / enable agents

* fix quick config

* allow modifying response size of focal requests

* sim suite set goal always sets story intent, encourage calling of set goal during simulation start

* allow setting of model

* voice param tweaks

* tts tweaks

* fix character card load

* fix note_on_value

* add mixed speaker_separation mode

* indicate which message the audio is for and provide way to stop audio from the message

* fix issue with some tts generation failing

* linting

* fix speaker separate modes

* bad idea

* linting

* refactor speaker separation prompt

* add kimi think pattern

* fix issue with unwanted cover image replacement

* no scene analysis for visual promp generation (for now)

* linting

* tts for context investigation messages

* prompt tweaks

* tweak intro

* fix intro text tts not auto playing sometimes

* consider narrator voice when assigning voice to a character

* allow director log messages to go only into the director console

* linting

* startup performance fixes

* init time

* linting

* only show audio control for messages that can have it

* always create story intent and dont override existing saves during character card load

* fix history check in dynamic story line node
add HasHistory node

* linting

* fix intro message not having speaker separation

* voice library character manager

* sequential and cancelable auto assign all

* linting

* fix generation cancel handling

* tooltips

* fix auto assign voice from scene voices

* polish

* kokoro does not like lazy import

* update info text

* complete scene export / import

* linting

* wording

* remove cruft

* fix story intent generation during character card import

* fix generation cancelled emit status inf loop

* prompt tweak

* reasoning quick toggle, reasoning token slider, tooltips

* improved reasoning pattern handling

* fix indirect coercion response parsing

* fix streaming issue

* response length instructions

* more robust streaming

* adjust default

* adjust formatting

* linting

* remove debug output

* director console log function calls

* install cuda script updated

* linting

* add another step

* adjust default

* update dialogue examples

* fix voice selection issues

* what's happening here

* third time's the charm?

* Vite migration (#207)

* add vite config

* replace babel, webpack, vue-cli deps with vite, switch to esm modules, separate eslint config

* change process.env to import.meta.env

* update index.html for vite and move to root

* update docs for vite

* remove vue cli config

* update example env with vite

* bump frontend deps after rebase to 32.0

---------

Co-authored-by: pax-co <Pax_801@proton.me>

* properly reference data type

* what's new

* better indication of dialogue example supporting multiple lines, improve dialogue example display

* fix potential issue with cached scene analysis being reused when it shouldn't

* fix character creation issues with player character toggle

* fix issue where editing a message would sometimes lose parts of the message

* fix slider ux thumb labels (vuetify update)

* relock

* narrative conversation format

* remove planning step

* linting

* tweaks

* don't overthink

* update dialogue examples and intro

* dont dictate response length instructions when data structures are expected

* prompt tweaks

* prompt tweaks

* linting

* fix edit message not handling : well

* prompt tweaks

* fix tests

* fix manual revision when character message was generated in new narrative mode

* fix issue with message editing

* Docker packages relese (#204)

* add CI workflow for Docker image build and MkDocs deployment

* rename CI workflow from 'ci' to 'package'

* refactor CI workflow: consolidate container build and documentation deployment into a single file

* fix: correct indentation for permissions in CI workflow

* fix: correct indentation for steps in deploy-docs job in CI workflow

* build both cpu and cuda image

* docs

* docs

* expose writing style during state reinforcement

* prompt tweaks

* test container build

* test container  image

* update docker compose

* docs

* test-container-build

* test container build

* test container build

* update docker build workflows

* fix guidance prompt prefix not being dropped

* mount tts dir

* add gpt-5

* remove debug output

* docs

* openai auto toggle reasoning based on model selection

* linting

---------

Co-authored-by: pax-co <123330830+pax-co@users.noreply.github.com>
Co-authored-by: pax-co <Pax_801@proton.me>
Co-authored-by: Luis Alexandre Deschamps Brandão <brandao_luis@yahoo.com>
2025-08-08 13:56:29 +03:00

221 lines
6.1 KiB
Python

import os
import json
import pytest
import contextvars
import talemate.agents as agents
import talemate.game.engine.nodes.load_definitions # noqa: F401
import talemate.agents.director # noqa: F401
import talemate.agents.memory
from talemate.context import ActiveScene
from talemate.tale_mate import Scene
import talemate.agents.tts.voice_library as voice_library
import talemate.instance as instance
from talemate.game.engine.nodes.core import (
Graph,
GraphState,
)
import structlog
from talemate.game.engine.nodes.layout import load_graph_from_file
from talemate.game.engine.nodes.registry import import_talemate_node_definitions
from talemate.client import ClientBase
from collections import deque
# Directory containing this test module; anchors all test-data paths.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Node-graph fixtures (JSON) consumed by the test harness.
TEST_GRAPH_DIR = os.path.join(BASE_DIR, "data", "graphs")
# Expected shared-state snapshots recorded by previous runs.
RESULTS_DIR = os.path.join(BASE_DIR, "data", "graphs", "results")
# NOTE(review): presumably a global toggle to regenerate the stored result
# snapshots instead of asserting against them, but nothing in this module
# reads it — the tests pass a hard-coded False instead. Confirm intent.
UPDATE_RESULTS = False
log = structlog.get_logger("talemate.test_graphs")
@pytest.fixture(scope="session", autouse=True)
def load_node_definitions():
    """Register all talemate node definitions.

    Session-scoped and autouse, so this runs exactly once before any
    graph test executes.
    """
    import_talemate_node_definitions()
def load_test_graph(name) -> Graph:
    """Load and return the node graph stored at ``data/graphs/<name>.json``."""
    loaded_graph, _metadata = load_graph_from_file(
        os.path.join(TEST_GRAPH_DIR, f"{name}.json")
    )
    return loaded_graph
def bootstrap_engine():
    """Reset the global voice library and (re)instantiate every agent.

    The memory agent is replaced with ``MockMemoryAgent`` so that no real
    vector store is ever touched during graph tests.
    """
    voice_library.VOICE_LIBRARY = voice_library.VoiceLibrary(voices={})
    for agent_type, agent_cls in agents.AGENT_CLASSES.items():
        instance.AGENTS[agent_type] = (
            MockMemoryAgent() if agent_type == "memory" else agent_cls()
        )
# Context-local queue of canned LLM responses consumed by MockClient.send_prompt.
# NOTE(review): the mutable default deque is shared by every context that never
# calls .set(); with a default present, .get() can never raise LookupError.
client_reponses = contextvars.ContextVar("client_reponses", default=deque())
class MockClientContext:
    """Async context manager that installs a fresh, isolated response queue
    into the ``client_reponses`` context variable for the duration of the
    ``async with`` block.

    Bug fixed: ``client_reponses`` is declared with a default, so
    ``ContextVar.get()`` never raises ``LookupError``. The previous
    try/except implementation therefore always returned the shared
    module-level default deque, never set ``self.token``, and ``__aexit__``
    never reset anything — queued responses leaked between tests. We now
    unconditionally install a fresh deque and restore the prior binding on
    exit.
    """

    async def __aenter__(self):
        """Bind a new, empty response queue and return it."""
        self.client_reponses = deque()
        self.token = client_reponses.set(self.client_reponses)
        return self.client_reponses

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Restore whatever was bound before this context was entered."""
        # __aenter__ always sets self.token, so no hasattr guard is needed.
        client_reponses.reset(self.token)
class MockMemoryAgent(talemate.agents.memory.MemoryAgent):
    """Memory agent stand-in whose write and delete operations are no-ops,
    so graph tests never touch a real memory backend."""

    async def add_many(self, items: list[dict]):
        """Discard *items* instead of persisting them."""

    async def delete(self, filters: dict):
        """Ignore *filters*; nothing is ever stored, so nothing is deleted."""
class MockClient(ClientBase):
    """LLM client double: never contacts a backend, records every prompt,
    and replays canned responses queued on the ``client_reponses``
    context variable."""

    def __init__(self, name: str):
        self.name = name
        self.remote_model_name = "test-model"
        self.current_status = "idle"
        # Every prompt sent through this client, kept for later inspection.
        self.prompt_history = []

    @property
    def enabled(self):
        """The mock client is always considered enabled."""
        return True

    async def send_prompt(
        self, prompt, kind="conversation", finalize=lambda x: x, retries=2, **kwargs
    ):
        """Override send_prompt to return a pre-defined response instead of calling LLM.

        If no responses are configured, returns an empty string.

        Records the prompt in prompt_history for later inspection.
        """
        self.prompt_history.append({"prompt": prompt, "kind": kind})
        queued = client_reponses.get()
        return queued.popleft() if queued else ""
class MockScene(Scene):
    """Scene variant tailored for the graph test harness."""

    @property
    def auto_progress(self):
        # The graph tests are written under the assumption that the scene
        # always auto-progresses.
        return True
@pytest.fixture
def mock_scene():
    """Yield-free fixture providing a fully bootstrapped ``MockScene``
    wired to mock agents and a ``MockClient``."""
    fresh_scene = MockScene()
    bootstrap_scene(fresh_scene)
    return fresh_scene
def bootstrap_scene(mock_scene):
    """Instantiate all agents, attach a shared ``MockClient`` and the given
    scene to each of them, and return the commonly used agents by name."""
    bootstrap_engine()

    mock_client = MockClient("test_client")
    for agent in instance.AGENTS.values():
        agent.client = mock_client
        agent.scene = mock_scene

    # Expose the client on the scene so tests can inspect prompt history.
    mock_scene.mock_client = mock_client

    return {
        agent_name: instance.get_agent(agent_name)
        for agent_name in (
            "director",
            "conversation",
            "summarizer",
            "editor",
            "world_state",
        )
    }
def make_assert_fn(name: str, write_results: bool = False):
    """Build a graph callback that checks ``state.shared`` against the
    snapshot stored under ``RESULTS_DIR``.

    When *write_results* is True, or no snapshot exists yet, the callback
    (re)writes the snapshot instead of asserting against it.
    """
    result_path = os.path.join(RESULTS_DIR, f"{name}.json")

    async def assert_fn(state: GraphState):
        if not write_results and os.path.exists(result_path):
            with open(result_path, "r") as f:
                assert state.shared == json.load(f)
        else:
            with open(result_path, "w") as f:
                json.dump(state.shared, f, indent=4)

    return assert_fn
def make_graph_test(name: str, write_results: bool = UPDATE_RESULTS):
    """Build an async test body that loads graph *name*, attaches the
    snapshot assertion callback plus a re-raising error handler, and
    executes the graph inside an ``ActiveScene`` context.

    *write_results* now defaults to the module-level ``UPDATE_RESULTS``
    toggle (previously a hard-coded ``False``, which left that constant
    dead); the default value is unchanged, so callers are unaffected.
    Passing True regenerates the stored snapshot instead of asserting.
    """

    async def test_graph(scene):
        assert_fn = make_assert_fn(name, write_results)

        def error_handler(state, error: Exception):
            # Surface node errors as test failures instead of swallowing them.
            raise error

        with ActiveScene(scene):
            graph = load_test_graph(name)
            assert graph is not None
            graph.callbacks.append(assert_fn)
            graph.error_handlers.append(error_handler)
            await graph.execute()

    return test_graph
@pytest.mark.asyncio
async def test_graph_core(mock_scene):
    """Run the core test-harness graph. Uses the module's UPDATE_RESULTS
    toggle instead of a hard-coded False so snapshots can be regenerated."""
    fn = make_graph_test("test-harness-core", UPDATE_RESULTS)
    await fn(mock_scene)
@pytest.mark.asyncio
async def test_graph_data(mock_scene):
    """Run the data test-harness graph. Uses the module's UPDATE_RESULTS
    toggle instead of a hard-coded False so snapshots can be regenerated."""
    fn = make_graph_test("test-harness-data", UPDATE_RESULTS)
    await fn(mock_scene)
@pytest.mark.asyncio
async def test_graph_scene(mock_scene):
    """Run the scene test-harness graph. Uses the module's UPDATE_RESULTS
    toggle instead of a hard-coded False so snapshots can be regenerated."""
    fn = make_graph_test("test-harness-scene", UPDATE_RESULTS)
    await fn(mock_scene)
@pytest.mark.asyncio
async def test_graph_functions(mock_scene):
    """Run the functions test-harness graph. Uses the module's UPDATE_RESULTS
    toggle instead of a hard-coded False so snapshots can be regenerated."""
    fn = make_graph_test("test-harness-functions", UPDATE_RESULTS)
    await fn(mock_scene)
@pytest.mark.asyncio
async def test_graph_agents(mock_scene):
    """Run the agents test-harness graph. Uses the module's UPDATE_RESULTS
    toggle instead of a hard-coded False so snapshots can be regenerated."""
    fn = make_graph_test("test-harness-agents", UPDATE_RESULTS)
    await fn(mock_scene)
@pytest.mark.asyncio
async def test_graph_prompt(mock_scene):
    """Run the prompt test-harness graph with two canned LLM responses
    queued (free text first, then fenced JSON). Uses the module's
    UPDATE_RESULTS toggle instead of a hard-coded False."""
    fn = make_graph_test("test-harness-prompt", UPDATE_RESULTS)
    async with MockClientContext() as client_reponses:
        client_reponses.append("The sum of 1 and 5 is 6.")
        client_reponses.append('```json\n{\n "result": 6\n}\n```')
        await fn(mock_scene)