Mirror of https://github.com/vegu-ai/talemate.git, synced 2025-12-24 15:39:34 +01:00

Compare commits (23 commits)
- 611f77a730
- 0738899ac9
- 76b7b5c0e0
- cae5e8d217
- 97bfd3a672
- 8fb1341b93
- cba4412f3d
- 2ad87f6e8a
- 496eb469db
- b78fec3bac
- d250df8950
- 816f950afe
- 8fb72fdbe9
- 54297a4768
- d7e72d27c5
- f9b23f8705
- 37a5873330
- bc3f5d63c8
- 72202dee02
- 91f228aa68
- 27d6c5e7c2
- 1f5cff4c6d
- 77425935be
.gitignore (vendored, 5 changes)

@@ -6,3 +6,8 @@
 *.internal*
 *_internal*
 talemate_env
+chroma
+scenes
+config.yaml
+!scenes/infinity-quest/assets
+!scenes/infinity-quest/infinity-quest.json
README.md (52 changes)
@@ -2,24 +2,32 @@

 Allows you to play roleplay scenarios with large language models.

-It does not run any large language models itself but relies on existing APIs. Currently supports **text-generation-webui** and **openai**.
-
-This means you need to either have an openai api key or know how to setup [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (locally or remotely via gpu renting. `--api` flag needs to be set)

 | | |
 |------------------------------------------|------------------------------------------|
 | (screenshot) | (screenshot) |

+> :warning: **It does not run any large language models itself but relies on existing APIs. Currently supports OpenAI, text-generation-webui and LMStudio.**
+
+This means you need to either have:
+
+- an [OpenAI](https://platform.openai.com/overview) api key
+- OR setup local (or remote via runpod) LLM inference via one of these options:
+  - [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui)
+  - [LMStudio](https://lmstudio.ai/)

 ## Current features

 - responsive modern ui
 - agents
-  - conversation
-  - narration
-  - summarization
-  - director
-  - creative
-- multi-client (agents can be connected to separate APIs)
-- long term memory (experimental)
+  - conversation: handles character dialogue
+  - narration: handles narrative exposition
+  - summarization: handles summarization to compress context while maintaining history
+  - director: can be used to direct the story / characters
+  - editor: improves AI responses (very hit and miss at the moment)
+  - world state: generates world snapshot and handles passage of time (objects and characters)
+  - creator: character / scenario creator
+  - tts: text to speech via elevenlabs, coqui studio, coqui local
+- multi-client support (agents can be connected to separate APIs)
+- long term memory
+  - chromadb integration
+  - passage of time
+- narrative world state
@@ -36,6 +44,7 @@ Kinda making it up as i go along, but i want to lean more into gameplay through

 In no particular order:

 - Extension support
+  - modular agents and clients
 - Improved world state
@@ -49,19 +58,19 @@ In no particular order:

 - objectives
 - quests
 - win / lose conditions
-- Automatic1111 client
+- Automatic1111 client for in place visual generation

 # Quickstart

 ## Installation

-Post [here](https://github.com/final-wombat/talemate/issues/17) if you run into problems during installation.
+Post [here](https://github.com/vegu-ai/talemate/issues/17) if you run into problems during installation.

 ### Windows

 1. Download and install Python 3.10 or higher from the [official Python website](https://www.python.org/downloads/windows/).
 1. Download and install Node.js from the [official Node.js website](https://nodejs.org/en/download/). This will also install npm.
-1. Download the Talemate project to your local machine. Download from [the Releases page](https://github.com/final-wombat/talemate/releases).
+1. Download the Talemate project to your local machine. Download from [the Releases page](https://github.com/vegu-ai/talemate/releases).
 1. Unpack the download and run `install.bat` by double clicking it. This will set up the project on your local machine.
 1. Once the installation is complete, you can start the backend and frontend servers by running `start.bat`.
 1. Navigate your browser to http://localhost:8080
@@ -70,7 +79,7 @@ Post [here](https://github.com/final-wombat/talemate/issues/17) if you run into problems during installation.

 `python 3.10` or higher is required.

-1. `git clone git@github.com:final-wombat/talemate`
+1. `git clone git@github.com:vegu-ai/talemate`
 1. `cd talemate`
 1. `source install.sh`
 1. Start the backend: `python src/talemate/server/run.py runserver --host 0.0.0.0 --port 5050`.
@@ -117,9 +126,11 @@ On the right hand side click the "Add Client" button. If there is no button, you

 ### Text-generation-webui

+> :warning: As of version 0.13.0 the legacy text-generation-webui API `--extension api` is no longer supported, please use their new `--extension openai` api implementation instead.
+
 In the modal if you're planning to connect to text-generation-webui, you can likely leave everything as is and just click Save.

-(screenshot)
+![Client setup](docs/img/client-setup-0.13.png)

 ### OpenAI
@@ -155,7 +166,10 @@ Make sure you save the scene after the character is loaded as it can then be loaded

 ## Further documentation

-- Creative mode (docs WIP)
-- Prompt template overrides
-- Runpod Integration
+Please read the documents in the `docs` folder for more advanced configuration and usage.
+
+- [Prompt template overrides](docs/templates.md)
+- [Text-to-Speech (TTS)](docs/tts.md)
+- [ChromaDB (long term memory)](docs/chromadb.md)
+- [Runpod Integration](docs/runpod.md)
 - Creative mode
config.example.yaml

@@ -7,20 +7,34 @@ creator:
   - a thrilling action story aimed at an adult audience.
   - a mysterious adventure aimed at an adult audience.
   - an epic sci-fi adventure aimed at an adult audience.
-game:
-  default_player_character:
-    color: '#6495ed'
-    description: a young man with a penchant for adventure.
-    gender: male
-    name: Elmer
+game: {}
+
+## Long-term memory
+
+#chromadb:
+#  embeddings: instructor
+#  instructor_device: cuda
+#  instructor_model: hkunlp/instructor-xl
+
+## Remote LLMs
+
 #openai:
 #  api_key: <API_KEY>

 #runpod:
 #  api_key: <API_KEY>
+
+## TTS (Text-to-Speech)
+
+#elevenlabs:
+#  api_key: <API_KEY>
+
+#coqui:
+#  api_key: <API_KEY>
+
+#tts:
+#  device: cuda
+#  model: tts_models/multilingual/multi-dataset/xtts_v2
+#  voices:
+#    - label: <name>
+#      value: <path to .wav for voice sample>
BIN docs/img/Screenshot_9.png (new file, 551 KiB; binary file not shown)

BIN docs/img/client-setup-0.13.png (new file, 14 KiB; binary file not shown)

BIN docs/img/runpod-docs-1.png (new file, 6.6 KiB; binary file not shown)
docs/runpod.md (new file, 52 lines)

## RunPod integration

RunPod allows you to quickly set up and run text-generation-webui instances on powerful GPUs, remotely. If you want to run the significantly larger models (like 70B parameters) at reasonable speeds, this is probably the best way to do it.

### Create / grab your RunPod API key and add it to the talemate config

You can manage your RunPod api keys at [https://www.runpod.io/console/user/settings](https://www.runpod.io/console/user/settings)

Add the key to your Talemate config file (config.yaml):

```yaml
runpod:
  api_key: <your api key>
```

Then restart Talemate.

### Create a RunPod instance

#### Community Cloud

The community cloud pods are cheaper and there are generally more GPUs available. However, they do not support persistent storage, so you will have to download your model and data every time you deploy a pod.

#### Secure Cloud

The secure cloud pods are more expensive and there are generally fewer GPUs available, but they do support persistent storage.

Persistent volumes are super convenient, but optional for our purposes. They are **not** free; you will have to pay for the storage you use.

### Deploy pod

For us it does not matter which cloud you choose. The only thing that matters is that the pod deploys a text-generation-webui instance, and you ensure that by choosing the right template.

Pick the GPU you want to use (for 70B models you want at least 48GB of VRAM), click `Deploy`, then select a template and deploy.

When choosing the template for your pod, choose the `RunPod TheBloke LLMs` template. This template is pre-configured with all the dependencies needed to run text-generation-webui. There are other text-generation-webui templates, but they are usually out of date; this one I have found to be consistently good.

> :warning: The name of your pod is important and ensures that Talemate will be able to find it. Talemate will only be able to find pods that have the phrase `thebloke llms` or `textgen` in their name (case insensitive).
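The discovery rule above amounts to a case-insensitive substring check, along these lines (a minimal sketch of the documented behavior, not Talemate's actual implementation):

```python
def is_discoverable_by_talemate(pod_name: str) -> bool:
    """Sketch of the pod-name rule described above (function name is hypothetical)."""
    name = pod_name.lower()
    # Either marker anywhere in the name makes the pod discoverable.
    return "thebloke llms" in name or "textgen" in name


assert is_discoverable_by_talemate("My TextGen Pod")
assert not is_discoverable_by_talemate("llama-70b")
```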
Once your pod is deployed, has finished setup, and is running, the client will automatically appear in the Talemate client list, making it available for you to use like you would a locally hosted text-generation-webui instance.

![RunPod client](docs/img/runpod-docs-1.png)

### Connecting to the text-generation-webui UI

To manage your text-generation-webui instance, click the `Connect` button in your RunPod pod dashboard at [https://www.runpod.io/console/pods](https://www.runpod.io/console/pods) and in the popup click on `Connect to HTTP Service [Port 7860]` to open the text-generation-webui UI. Then just download and load your model as you normally would.

## :warning: Always check your pod status on the RunPod dashboard

Talemate is not a suitable or reliable way to determine whether your pod is currently running. **Always** check the RunPod dashboard to see whether your pod is running.

While your pod is running it will be eating up your credits, so make sure to stop it when you're not using it.
docs/templates.md (new file, 82 lines)

# Template Overrides in Talemate

## Introduction to Templates

In Talemate, templates are used to generate dynamic content for the various agents involved in roleplaying scenarios. These templates leverage the Jinja2 templating engine, allowing for the inclusion of variables, conditional logic, and custom functions to create rich and interactive narratives.

## Template Structure

A typical template in Talemate consists of several sections, each enclosed within special section tags (`<|SECTION:NAME|>` and `<|CLOSE_SECTION|>`). These sections can include character details, dialogue examples, scenario overviews, tasks, and additional context. Templates utilize loops and blocks to iterate over data and render content conditionally based on the task requirements.
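For illustration, a template following this structure might look like the sketch below (hypothetical; the `scene` variable and `get_characters()` call mirror names that appear elsewhere in the codebase, but which variables are actually available depends on the agent):

```jinja2
<|SECTION:CHARACTERS|>
{% for character in scene.get_characters() -%}
{{ character.name }}: {{ character.description }}
{% endfor %}
<|CLOSE_SECTION|>
<|SECTION:TASK|>
Continue the dialogue, staying true to each character's voice.
<|CLOSE_SECTION|>
```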
## Overriding Templates

Users can customize the behavior of Talemate by overriding the default templates. To override a template, create a new template file with the same name in the `./templates/prompts/{agent}/` directory. When a custom template is present, Jinja2 will prioritize it over the default template located in the `./src/talemate/prompts/templates/{agent}/` directory.
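For example, to override the conversation agent's dialogue prompt, the layout would look like this (the template name is inferred from the `conversation.dialogue` prompt id used in the source, so treat the exact filename as an assumption):

```text
./templates/prompts/conversation/dialogue.jinja2                  <- custom override, takes priority
./src/talemate/prompts/templates/conversation/dialogue.jinja2    <- shipped default
```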
## Creator Agent Templates

The creator agent templates allow for the creation of new characters within the character creator. Following the naming conventions `character-attributes-*.jinja2`, `character-details-*.jinja2`, and `character-example-dialogue-*.jinja2`, users can add new templates that will be available for selection in the character creator.

### Requirements for Creator Templates

- All three types (`attributes`, `details`, `example-dialogue`) need to be available for a choice to be valid in the character creator.
- Users can check the human templates for an understanding of how to structure these templates.

### Example Templates

- [Character Attributes Human Template](src/talemate/prompts/templates/creator/character-attributes-human.jinja2)
- [Character Details Human Template](src/talemate/prompts/templates/creator/character-details-human.jinja2)
- [Character Example Dialogue Human Template](src/talemate/prompts/templates/creator/character-example-dialogue-human.jinja2)

These example templates can serve as a guide for users to create their own custom templates for the character creator.

### Extending Existing Templates

Jinja2's template inheritance feature allows users to extend existing templates and add extra information. By using the `{% extends "template-name.jinja2" %}` tag, a new template can inherit everything from an existing template and then add or override specific blocks of content.

#### Example

To add a description of a character's hairstyle to the human character details template, you could create a new template like this:

```jinja2
{% extends "character-details-human.jinja2" %}
{% block questions %}
{% if character_details.q("what does "+character.name+"'s hair look like?") -%}
Briefly describe {{ character.name }}'s hair-style using a narrative writing style reminiscent of mid-90s point and click adventure games. (2 - 3 sentences).
{% endif %}
{% endblock %}
```

This example shows how to extend the `character-details-human.jinja2` template and add a block for questions about the character's hair. The `{% block questions %}` tag is used to define a section where additional questions can be inserted or existing ones can be overridden.

## Advanced Template Topics

### Jinja2 Functions in Talemate

Talemate exposes several functions to the Jinja2 template environment, providing utilities for data manipulation, querying, and controlling content flow. Here's a list of available functions:

1. `set_prepared_response(response, prepend)`: Sets the prepared response with an optional prepend string. This function allows the template to specify the beginning of the LLM response when processing the rendered template. For example, `set_prepared_response("Certainly!")` will ensure that the LLM's response starts with "Certainly!".
2. `set_prepared_response_random(responses, prefix)`: Chooses a random response from a list and sets it as the prepared response with an optional prefix.
3. `set_eval_response(empty)`: Prepares the response for evaluation, optionally initializing a counter for an empty string.
4. `set_json_response(initial_object, instruction, cutoff)`: Prepares for a JSON response with an initial object and optional instruction and cutoff.
5. `set_question_eval(question, trigger, counter, weight)`: Sets up a question for evaluation with a trigger, counter, and weight.
6. `disable_dedupe()`: Disables deduplication of the response text.
7. `random(min, max)`: Generates a random integer between the specified minimum and maximum.
8. `query_scene(query, at_the_end, as_narrative)`: Queries the scene with a question and returns the formatted response.
9. `query_text(query, text, as_question_answer)`: Queries a text with a question and returns the formatted response.
10. `query_memory(query, as_question_answer, **kwargs)`: Queries the memory with a question and returns the formatted response.
11. `instruct_text(instruction, text)`: Instructs the text with a command and returns the result.
12. `retrieve_memories(lines, goal)`: Retrieves memories based on the provided lines and an optional goal.
13. `uuidgen()`: Generates a UUID string.
14. `to_int(x)`: Converts the given value to an integer.
15. `config`: Accesses the configuration settings.
16. `len(x)`: Returns the length of the given object.
17. `count_tokens(x)`: Counts the number of tokens in the given text.
18. `print(x)`: Prints the given object (mainly for debugging purposes).

These functions enhance the capabilities of templates, allowing for dynamic and interactive content generation.
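For instance, a template might combine a few of these as follows (a minimal sketch assuming the signatures listed above; the section names and wording are illustrative):

```jinja2
{# Pull relevant long-term memory into the prompt. #}
<|SECTION:CONTEXT|>
{{ query_memory("What do we know about the current location?") }}
<|CLOSE_SECTION|>
<|SECTION:TASK|>
Describe the current location in 2 - 3 sentences.
<|CLOSE_SECTION|>
{# Seed the beginning of the LLM's response. #}
{{ set_prepared_response("The") }}
```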
### Error Handling

Errors encountered during template rendering are logged and propagated to the user interface. This ensures that users are informed of any issues that may arise, allowing them to troubleshoot and resolve problems effectively.

By following these guidelines, users can create custom templates that tailor the Talemate experience to their specific storytelling needs.
docs/tts.md (new file, 84 lines)

# Talemate Text-to-Speech (TTS) Configuration

Talemate supports Text-to-Speech (TTS) functionality, allowing users to convert text into spoken audio. This document outlines the steps required to configure TTS for Talemate using different providers, including ElevenLabs, Coqui, and a local TTS API.

## Configuring ElevenLabs TTS

To use ElevenLabs TTS with Talemate, follow these steps:

1. Visit [ElevenLabs](https://elevenlabs.com) and create an account if you don't already have one.
2. Click on your profile in the upper right corner of the ElevenLabs website to access your API key.
3. In the `config.yaml` file, under the `elevenlabs` section, set the `api_key` field with your ElevenLabs API key.

Example configuration snippet:

```yaml
elevenlabs:
  api_key: <YOUR_ELEVENLABS_API_KEY>
```

## Configuring Coqui TTS

To use Coqui TTS with Talemate, follow these steps:

1. Visit [Coqui](https://app.coqui.ai) and sign up for an account.
2. Go to the [account page](https://app.coqui.ai/account) and scroll to the bottom to find your API key.
3. In the `config.yaml` file, under the `coqui` section, set the `api_key` field with your Coqui API key.

Example configuration snippet:

```yaml
coqui:
  api_key: <YOUR_COQUI_API_KEY>
```

## Configuring Local TTS API

For running a local TTS API, Talemate requires specific dependencies to be installed.

### Windows Installation

Run `install-local-tts.bat` to install the necessary requirements.

### Linux Installation

Execute the following command:

```bash
pip install TTS
```

### Model and Device Configuration

1. Choose a TTS model from the [Coqui TTS model list](https://github.com/coqui-ai/TTS).
2. Decide whether to use `cuda` or `cpu` for the device setting.
3. The first time you run TTS through the local API, it will download the specified model. Please note that this may take some time, and the download progress will be visible in the Talemate backend output.

Example configuration snippet:

```yaml
tts:
  device: cuda # or 'cpu'
  model: tts_models/multilingual/multi-dataset/xtts_v2
```

### Voice Samples Configuration

Configure voice samples by setting the `value` field to the path of a .wav file voice sample. Official samples can be downloaded from [Coqui XTTS-v2 samples](https://huggingface.co/coqui/XTTS-v2/tree/main/samples).

Example configuration snippet:

```yaml
tts:
  voices:
    - label: English Male
      value: path/to/english_male.wav
    - label: English Female
      value: path/to/english_female.wav
```
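For context, the local pipeline amounts to roughly the following with the Coqui `TTS` package installed as above (a standalone sketch, not Talemate's actual code; the paths are placeholders):

```python
# Standalone sketch using the Coqui TTS package ("pip install TTS").
from TTS.api import TTS

# Loads (and on first run downloads) the same model referenced in config.yaml.
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to("cuda")  # or "cpu"

# XTTS-v2 clones the voice from a short .wav sample, like the ones configured above.
tts.tts_to_file(
    text="Welcome to Talemate.",
    speaker_wav="path/to/english_male.wav",
    language="en",
    file_path="output.wav",
)
```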
## Saving the Configuration

After configuring the `config.yaml` file, save your changes. Talemate will use the updated settings the next time it starts.

For more detailed information on configuring Talemate, refer to the `config.py` file in the Talemate source code and the `config.example.yaml` file for a bare-bones configuration example.
install-local-tts.bat (new file, 4 lines)

@@ -0,0 +1,4 @@
+REM activate the virtual environment
+call talemate_env\Scripts\activate
+
+call pip install "TTS>=0.21.1"
install.bat

@@ -7,10 +7,10 @@ REM activate the virtual environment
 call talemate_env\Scripts\activate

 REM install poetry
-python -m pip install poetry "rapidfuzz>=3" -U
+python -m pip install "poetry==1.7.1" "rapidfuzz>=3" -U

 REM use poetry to install dependencies
-poetry install
+python -m poetry install

 REM copy config.example.yaml to config.yaml only if config.yaml doesn't exist
 IF NOT EXIST config.yaml copy config.example.yaml config.yaml
poetry.lock (generated, 3481 changes): file diff suppressed because it is too large.
pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "poetry.masonry.api"

 [tool.poetry]
 name = "talemate"
-version = "0.10.1"
+version = "0.16.0"
 description = "AI-backed roleplay and narrative tools"
 authors = ["FinalWombat"]
 license = "GNU Affero General Public License v3.0"

@@ -17,7 +17,7 @@ black = "*"
 rope = "^0.22"
 isort = "^5.10"
 jinja2 = "^3.0"
-openai = "*"
+openai = ">=1"
 requests = "^2.26"
 colorama = ">=0.4.6"
 Pillow = "^9.5"

@@ -28,20 +28,21 @@ typing_extensions = "^4.5.0"
 uvicorn = "^0.23"
 blinker = "^1.6.2"
 pydantic = "<3"
 langchain = ">0.0.213"
 beautifulsoup4 = "^4.12.2"
 python-dotenv = "^1.0.0"
 websockets = "^11.0.3"
 structlog = "^23.1.0"
-runpod = "==1.2.0"
+runpod = "^1.2.0"
 nest_asyncio = "^1.5.7"
 isodate = ">=0.6.1"
 thefuzz = ">=0.20.0"
 tiktoken = ">=0.5.1"
 nltk = ">=3.8.1"

 # ChromaDB
-chromadb = ">=0.4,<1"
+chromadb = ">=0.4.17,<1"
 InstructorEmbedding = "^1.0.1"
-torch = ">=2.0.0, !=2.0.1"
+torch = ">=2.1.0"
 sentence-transformers="^2.2.2"

 [tool.poetry.dev-dependencies]
@@ -9,7 +9,7 @@ REM activate the virtual environment
 call talemate_env\Scripts\activate

 REM install poetry
-python -m pip install poetry "rapidfuzz>=3" -U
+python -m pip install "poetry==1.7.1" "rapidfuzz>=3" -U

 REM use poetry to install dependencies
 python -m poetry install
src/talemate/__init__.py

@@ -2,4 +2,4 @@ from .agents import Agent
 from .client import TextGeneratorWebuiClient
 from .tale_mate import *

-VERSION = "0.10.1"
+VERSION = "0.16.0"
src/talemate/agents/__init__.py

@@ -1,6 +1,5 @@
 from .base import Agent
 from .creator import CreatorAgent
-from .context import ContextAgent
 from .conversation import ConversationAgent
 from .director import DirectorAgent
 from .memory import ChromaDBMemoryAgent, MemoryAgent

@@ -8,4 +7,5 @@ from .narrator import NarratorAgent
 from .registry import AGENT_CLASSES, get_agent_class, register
 from .summarize import SummarizeAgent
 from .editor import EditorAgent
-from .world_state import WorldStateAgent
+from .world_state import WorldStateAgent
+from .tts import TTSAgent
src/talemate/agents/base.py

@@ -9,23 +9,37 @@ from blinker import signal

 import talemate.instance as instance
 import talemate.util as util
+from talemate.agents.context import ActiveAgent
 from talemate.emit import emit
+from talemate.events import GameLoopStartEvent
 import talemate.emit.async_signals
 import dataclasses
 import pydantic
 import structlog

+__all__ = [
+    "Agent",
+    "set_processing",
+]
+
+log = structlog.get_logger("talemate.agents.base")
+

 class AgentActionConfig(pydantic.BaseModel):
     type: str
     label: str
     description: str = ""
-    value: Union[int, float, str, bool]
+    value: Union[int, float, str, bool, None] = None
+    default_value: Union[int, float, str, bool] = None
     max: Union[int, float, None] = None
     min: Union[int, float, None] = None
     step: Union[int, float, None] = None
+    scope: str = "global"
     choices: Union[list[dict[str, str]], None] = None

     class Config:
         arbitrary_types_allowed = True


 class AgentAction(pydantic.BaseModel):
     enabled: bool = True

@@ -33,7 +47,6 @@ class AgentAction(pydantic.BaseModel):
     description: str = ""
     config: Union[dict[str, AgentActionConfig], None] = None
-

 def set_processing(fn):
     """
     decorator that emits the agent status as processing while the function

@@ -44,11 +57,12 @@ def set_processing(fn):
     """

     async def wrapper(self, *args, **kwargs):
-        try:
-            await self.emit_status(processing=True)
-            return await fn(self, *args, **kwargs)
-        finally:
-            await self.emit_status(processing=False)
+        with ActiveAgent(self, fn):
+            try:
+                await self.emit_status(processing=True)
+                return await fn(self, *args, **kwargs)
+            finally:
+                await self.emit_status(processing=False)

     wrapper.__name__ = fn.__name__

@@ -63,6 +77,8 @@ class Agent(ABC):
     agent_type = "agent"
     verbose_name = None
     set_processing = set_processing
+    requires_llm_client = True
+    auto_break_repetition = False

     @property
     def agent_details(self):

@@ -82,7 +98,7 @@ class Agent(ABC):
         if not getattr(self.client, "enabled", True):
             return False

-        if self.client.current_status in ["error", "warning"]:
+        if self.client and self.client.current_status in ["error", "warning"]:
             return False

         return self.client is not None

@@ -128,6 +144,7 @@ class Agent(ABC):
             "enabled": agent.enabled if agent else True,
             "has_toggle": agent.has_toggle if agent else False,
             "experimental": agent.experimental if agent else False,
+            "requires_llm_client": cls.requires_llm_client,
         }
         actions = getattr(agent, "actions", None)

@@ -160,7 +177,34 @@ class Agent(ABC):
                         config.value = kwargs.get("actions", {}).get(action_key, {}).get("config", {}).get(config_key, {}).get("value", config.value)
         except AttributeError:
             pass

+    async def on_game_loop_start(self, event:GameLoopStartEvent):
+        """
+        Finds all ActionConfigs that have a scope of "scene" and resets them to their default values
+        """
+
+        if not getattr(self, "actions", None):
+            return
+
+        for _, action in self.actions.items():
+            if not action.config:
+                continue
+
+            for _, config in action.config.items():
+                if config.scope == "scene":
+                    # if default_value is None, just use the `type` of the current value
+                    if config.default_value is None:
+                        default_value = type(config.value)()
+                    else:
+                        default_value = config.default_value
+
+                    log.debug("resetting config", config=config, default_value=default_value)
+                    config.value = default_value
+
+        await self.emit_status()

     async def emit_status(self, processing: bool = None):
         # should keep a count of processing requests, and when the

@@ -195,6 +239,8 @@ class Agent(ABC):

     def connect(self, scene):
         self.scene = scene
+        talemate.emit.async_signals.get("game_loop_start").connect(self.on_game_loop_start)

     def clean_result(self, result):
         if "#" in result:

@@ -239,6 +285,22 @@ class Agent(ABC):

             current_memory_context.append(memory)
         return current_memory_context

+    # LLM client related methods. These are called during or after the client
+    # sends the prompt to the API.
+
+    def inject_prompt_paramters(self, prompt_param:dict, kind:str, agent_function_name:str):
+        """
+        Injects prompt parameters before the client sends off the prompt.
+        Override as needed.
+        """
+        pass
+
+    def allow_repetition_break(self, kind:str, agent_function_name:str, auto:bool=False):
+        """
+        Returns True if repetition breaking is allowed, False otherwise.
+        """
+        return False

 @dataclasses.dataclass
 class AgentEmission:
src/talemate/agents/context.py

@@ -1,54 +1,33 @@
-from .base import Agent
-from .registry import register
-
-
-@register
-class ContextAgent(Agent):
-    """
-    Agent that helps retrieve context for the continuation
-    of dialogue.
-    """
-
-    agent_type = "context"
-
-    def __init__(self, client, **kwargs):
-        self.client = client
-
-    def determine_questions(self, scene_text):
-        prompt = [
-            "You are tasked to continue the following dialogue in a roleplaying session, but before you can do so you can ask three questions for extra context."
-            "",
-            "What are the questions you would ask?",
-            "",
-            "Known context and dialogue:" "",
-            scene_text,
-            "",
-            "Questions:",
-            "",
-        ]
-
-        prompt = "\n".join(prompt)
-
-        questions = self.client.send_prompt(prompt, kind="question")
-
-        questions = self.clean_result(questions)
-
-        return questions.split("\n")
-
-    def get_answer(self, question, context):
-        prompt = [
-            "Read the context and answer the question:",
-            "",
-            "Context:",
-            "",
-            context,
-            "",
-            f"Question: {question}",
-            "Answer:",
-        ]
-
-        prompt = "\n".join(prompt)
-
-        answer = self.client.send_prompt(prompt, kind="answer")
-        answer = self.clean_result(answer)
-        return answer
+from typing import Callable, TYPE_CHECKING
+import contextvars
+import pydantic
+
+__all__ = [
+    "active_agent",
+]
+
+active_agent = contextvars.ContextVar("active_agent", default=None)
+
+
+class ActiveAgentContext(pydantic.BaseModel):
+    agent: object
+    fn: Callable
+
+    class Config:
+        arbitrary_types_allowed=True
+
+    @property
+    def action(self):
+        return self.fn.__name__
+
+
+class ActiveAgent:
+
+    def __init__(self, agent, fn):
+        self.agent = ActiveAgentContext(agent=agent, fn=fn)
+
+    def __enter__(self):
+        self.token = active_agent.set(self.agent)
+
+    def __exit__(self, *args, **kwargs):
+        active_agent.reset(self.token)
+        return False
src/talemate/agents/conversation.py

@@ -6,6 +6,7 @@ from datetime import datetime
 from typing import TYPE_CHECKING, Optional, Union

 import talemate.client as client
+import talemate.instance as instance
 import talemate.util as util
 import structlog
 from talemate.emit import emit

@@ -30,6 +31,7 @@ class ConversationAgentEmission(AgentEmission):
     generation: list[str]

 talemate.emit.async_signals.register(
+    "agent.conversation.before_generate",
     "agent.conversation.generated"
 )

@@ -79,18 +81,29 @@ class ConversationAgent(Agent):
                     min=32,
                     max=512,
                     step=32,
                 ),
+                "instructions": AgentActionConfig(
+                    type="text",
+                    label="Instructions",
+                    value="Write 1-3 sentences. Never wax poetic.",
+                    description="Extra instructions to give the AI for dialog generation.",
+                ),
                 "jiggle": AgentActionConfig(
                     type="number",
-                    label="Jiggle",
+                    label="Jiggle (Increased Randomness)",
                     description="If > 0.0 will cause certain generation parameters to have a slight random offset applied to them. The bigger the number, the higher the potential offset.",
                     value=0.0,
                     min=0.0,
                     max=1.0,
                     step=0.1,
                 ),
             }
         ),
+        "auto_break_repetition": AgentAction(
+            enabled = True,
+            label = "Auto Break Repetition",
+            description = "Will attempt to automatically break AI repetition.",
+        ),
         "natural_flow": AgentAction(
             enabled = True,
             label = "Natural Flow",

@@ -116,6 +129,19 @@ class ConversationAgent(Agent):
             ),
         }
         ),
+        "use_long_term_memory": AgentAction(
+            enabled = True,
+            label = "Long Term Memory",
+            description = "Will augment the conversation prompt with long term memory.",
+            config = {
+                "ai_selected": AgentActionConfig(
+                    type="bool",
+                    label="AI memory retrieval",
+                    description="If enabled, the AI will select the long term memory to use. (will increase how long it takes to generate a response)",
+                    value=False,
+                ),
+            }
+        ),
     }

     def connect(self, scene):

@@ -301,10 +327,7 @@ class ConversationAgent(Agent):
             insert_bot_token=10
         )

-        memory = await self.build_prompt_default_memory(
-            scene, long_term_memory_budget,
-            scene_and_dialogue + [f"{character.name}: {character.description}" for character in scene.get_characters()]
-        )
+        memory = await self.build_prompt_default_memory(character)

         main_character = scene.main_character.character

@@ -326,6 +349,10 @@ class ConversationAgent(Agent):
             director_message = isinstance(scene_and_dialogue[-1], DirectorMessage)
         except IndexError:
             director_message = False

+        extra_instructions = ""
+        if self.actions["generation_override"].enabled:
+            extra_instructions = self.actions["generation_override"].config["instructions"].value
+
         prompt = Prompt.get("conversation.dialogue", vars={
             "scene": scene,

@@ -339,12 +366,13 @@ class ConversationAgent(Agent):
             "talking_character": character,
             "partial_message": char_message,
             "director_message": director_message,
+            "extra_instructions": extra_instructions,
         })

         return str(prompt)

     async def build_prompt_default_memory(
-        self, scene: Scene, budget: int, existing_context: list
+        self, character: Character
     ):
         """
         Builds long term memory for the conversation prompt

@@ -357,29 +385,35 @@ class ConversationAgent(Agent):
         Also it will only add information that is not already in the existing context.
         """

-        memory = scene.get_helper("memory").agent
-
-        if not memory:
+        if not self.actions["use_long_term_memory"].enabled:
             return []

         if self.current_memory_context:
             return self.current_memory_context

-        self.current_memory_context = []
+        self.current_memory_context = ""

-        # feed the last 3 history message into multi_query
-        history_length = len(scene.history)
-        i = history_length - 1
-        while i >= 0 and i >= len(scene.history) - 3:
-            self.current_memory_context += await memory.multi_query(
-                [scene.history[i]],
-                filter=lambda x: x
-                not in self.current_memory_context + existing_context,
-            )
-            i -= 1
+        if self.actions["use_long_term_memory"].config["ai_selected"].value:
+            history = self.scene.context_history(min_dialogue=3, max_dialogue=15, keep_director=False, sections=False, add_archieved_history=False)
+            text = "\n".join(history)
+            world_state = instance.get_agent("world_state")
+            log.debug("conversation_agent.build_prompt_default_memory", direct=False)
+            self.current_memory_context = await world_state.analyze_text_and_extract_context(
+                text, f"continue the conversation as {character.name}"
+            )
+        else:
+            history = self.scene.context_history(min_dialogue=3, max_dialogue=3, keep_director=False, sections=False, add_archieved_history=False)
+            log.debug("conversation_agent.build_prompt_default_memory", history=history, direct=True)
+            memory = instance.get_agent("memory")
+
+            context = await memory.multi_query(history, max_tokens=500, iterate=5)
+
+            self.current_memory_context = "\n\n".join(context)

         return self.current_memory_context

     async def build_prompt(self, character, char_message: str = ""):
         fn = self.build_prompt_default

@@ -423,6 +457,9 @@ class ConversationAgent(Agent):

         character = actor.character

+        emission = ConversationAgentEmission(agent=self, generation="", actor=actor, character=character)
+        await talemate.emit.async_signals.get("agent.conversation.before_generate").send(emission)
+
         self.set_generation_overrides()

         result = await self.client.send_prompt(await self.build_prompt(character))

@@ -473,9 +510,6 @@ class ConversationAgent(Agent):
         # Remove "{character.name}:" - all occurrences
         total_result = total_result.replace(f"{character.name}:", "")

-        if total_result.count("*") % 2 == 1:
-            total_result += "*"
-
         # Check if total_result starts with character name, if not, prepend it
         if not total_result.startswith(character.name):
             total_result = f"{character.name}: {total_result}"

@@ -505,3 +539,11 @@ class ConversationAgent(Agent):
         actor.scene.push_history(messages)

         return messages
+
+    def allow_repetition_break(self, kind: str, agent_function_name: str, auto: bool = False):
+
+        if auto and not self.actions["auto_break_repetition"].enabled:
+            return False
+
+        return agent_function_name == "converse"
src/talemate/agents/director.py

@@ -8,10 +8,12 @@ from typing import TYPE_CHECKING, Callable, List, Optional, Union

 import talemate.util as util
 from talemate.emit import wait_for_input, emit
+import talemate.emit.async_signals
 from talemate.prompts import Prompt
 from talemate.scene_message import NarratorMessage, DirectorMessage
 from talemate.automated_action import AutomatedAction
 import talemate.automated_action as automated_action
+from talemate.agents.conversation import ConversationAgentEmission
 from .registry import register
 from .base import set_processing, AgentAction, AgentActionConfig, Agent

@@ -26,11 +28,13 @@ class DirectorAgent(Agent):
     verbose_name = "Director"

     def __init__(self, client, **kwargs):
-        self.is_enabled = True
+        self.is_enabled = False
         self.client = client
+        self.next_direct = 0
         self.actions = {
-            "direct": AgentAction(enabled=False, label="Direct", description="Will attempt to direct the scene. Runs automatically after AI dialogue (n turns).", config={
-                "turns": AgentActionConfig(type="number", label="Turns", description="Number of turns to wait before directing the scene", value=10, min=1, max=100, step=1)
+            "direct": AgentAction(enabled=True, label="Direct", description="Will attempt to direct the scene. Runs automatically after AI dialogue (n turns).", config={
+                "turns": AgentActionConfig(type="number", label="Turns", description="Number of turns to wait before directing the scene", value=5, min=1, max=100, step=1),
+                "prompt": AgentActionConfig(type="text", label="Instructions", description="Instructions to the director", value="", scope="scene")
             }),
         }

@@ -46,316 +50,57 @@ class DirectorAgent(Agent):
     def experimental(self):
         return True

-    def get_base_prompt(self, character: Character, budget:int):
-        return [character.description, character.base_attributes.get("scenario_context", "")] + self.scene.context_history(budget=budget, keep_director=False)
-
-    async def decide_action(self, character: Character, goal_override:str=None):
-        """
-        Pick an action to perform to move the story towards the current story goal
-        """
-
-        current_goal = goal_override or await self.select_goal(self.scene)
-        current_goal = f"Current story goal: {current_goal}" if current_goal else current_goal
-
-        response, action_eval, prompt = await self.decide_action_analyze(character, current_goal)
-
-        # action_eval will hold {'narrate': N, 'direct': N, 'watch': N, ...}
-        # where N is a number, action with the highest number wins, default action is watch
-        # if there is no clear winner
-
-        watch_action = action_eval.get("watch", 0)
-        action = max(action_eval, key=action_eval.get)
-
-        if action_eval[action] <= watch_action:
-            action = "watch"
-
-        log.info("decide_action", action=action, action_eval=action_eval)
-
-        return response, current_goal, action
-
-    async def decide_action_analyze(self, character: Character, goal:str):
-
-        prompt = Prompt.get("director.decide-action-analyze", vars={
-            "max_tokens": self.client.max_token_length,
-            "scene": self.scene,
-            "current_goal": goal,
-            "character": character,
-        })
-
-        response, evaluation = await prompt.send(self.client, kind="director")
-
-        log.info("question_direction", response=response)
-        return response, evaluation, prompt
-
-    @set_processing
-    async def direct(self, character: Character, goal_override:str=None):
-
-        analysis, current_goal, action = await self.decide_action(character, goal_override=goal_override)
-
-        if action == "watch":
-            return None
-
-        if action == "direct":
-            return await self.direct_character_with_self_reflection(character, analysis, goal_override=current_goal)
-
-        if action.startswith("narrate"):
-
-            narration_type = action.split(":")[1]
-
-            direct_narrative = await self.direct_narrative(analysis, narration_type=narration_type, goal=current_goal)
-            if direct_narrative:
-                narrator = self.scene.get_helper("narrator").agent
-                narrator_response = await narrator.progress_story(direct_narrative)
-                if not narrator_response:
-                    return None
-                narrator_message = NarratorMessage(narrator_response, source="progress_story")
-                self.scene.push_history(narrator_message)
-                emit("narrator", narrator_message)
-                return True
-
-    @set_processing
-    async def direct_narrative(self, analysis:str, narration_type:str="progress", goal:str=None):
-
-        if goal is None:
-            goal = await self.select_goal(self.scene)
-
-        prompt = Prompt.get("director.direct-narrative", vars={
-            "max_tokens": self.client.max_token_length,
-            "scene": self.scene,
-            "narration_type": narration_type,
-            "analysis": analysis,
-            "current_goal": goal,
-        })
-
-        response = await prompt.send(self.client, kind="director")
-        response = response.strip().split("\n")[0].strip()
-
-        if not response:
-            return None
-
-        return response
-
-    @set_processing
-    async def direct_character_with_self_reflection(self, character: Character, analysis:str, goal_override:str=None):
-
-        max_retries = 3
-        num_retries = 0
-        keep_direction = False
-        response = None
-        self_reflection = None
-
-        while num_retries < max_retries:
-
-            response, direction_prompt = await self.direct_character(
-                character,
-                analysis,
-                goal_override=goal_override,
-                previous_direction=response,
-                previous_direction_feedback=self_reflection
-            )
-
-            keep_direction, self_reflection = await self.direct_character_self_reflect(
-                response, character, goal_override, direction_prompt
-            )
-
-            if keep_direction:
-                break
-
-            num_retries += 1
-
-        log.info("direct_character_with_self_reflection", response=response, keep_direction=keep_direction)
-
-        if not keep_direction:
-            return None
-
-        #character_agreement = f" *{character.name} agrees with the director and progresses the story accordingly*"
-        #
-        #if "accordingly" not in response:
-        #    response += character_agreement
-        #
-        #response = await self.transform_character_direction_to_inner_monologue(character, response)
-
-        return response
-
-    @set_processing
-    async def transform_character_direction_to_inner_monologue(self, character:Character, direction:str):
-
-        inner_monologue = await Prompt.request(
-            "conversation.direction-to-inner-monologue",
-            self.client,
-            "conversation_long",
-            vars={
-                "max_tokens": self.client.max_token_length,
-                "scene": self.scene,
-                "character": character,
-                "director_instructions": direction,
-            }
-        )
-
-        return inner_monologue
-
-    @set_processing
-    async def direct_character(
-        self,
-        character: Character,
-        analysis:str,
-        goal_override:str=None,
-        previous_direction:str=None,
-        previous_direction_feedback:str=None,
-    ):
-        """
-        Direct the scene
-        """
-
-        if goal_override:
-            current_goal = goal_override
-        else:
-            current_goal = await self.select_goal(self.scene)
-
-        if current_goal and not current_goal.startswith("Current story goal: "):
-            current_goal = f"Current story goal: {current_goal}"
-
-        prompt = Prompt.get("director.direct-character", vars={
-            "max_tokens": self.client.max_token_length,
-            "scene": self.scene,
-            "character": character,
-            "current_goal": current_goal,
-            "previous_direction": previous_direction,
-            "previous_direction_feedback": previous_direction_feedback,
-            "analysis": analysis,
-        })
-
-        response = await prompt.send(self.client, kind="director")
-        response = response.strip().split("\n")[0].strip()
-
-        log.info(
-            "direct_character",
-            direction=response,
-            previous_direction=previous_direction,
-            previous_direction_feedback=previous_direction_feedback
-        )
-
-        if not response:
-            return None
-
-        if not response.startswith(prompt.prepared_response):
-            response = prompt.prepared_response + response
-
-        return response, "\n".join(prompt.as_list[:-1])
-
-    @set_processing
-    async def direct_character_self_reflect(self, direction:str, character: Character, goal:str, direction_prompt:Prompt) -> (bool, str):
-
-        change_matches = ["change", "retry", "alter", "reconsider"]
-
-        prompt = Prompt.get("director.direct-character-self-reflect", vars={
-            "direction_prompt": str(direction_prompt),
-            "direction": direction,
-            "analysis": await self.direct_character_analyze(direction, character, goal, direction_prompt),
-            "character": character,
-            "scene": self.scene,
-            "max_tokens": self.client.max_token_length,
-        })
-
-        response = await prompt.send(self.client, kind="director")
-
-        parse_choice = response[len(prompt.prepared_response):].lower().split(" ")[0]
-
-        keep = not parse_choice in change_matches
-
-        log.info("direct_character_self_reflect", keep=keep, response=response, parsed=parse_choice)
-
-        return keep, response
-
-    @set_processing
-    async def direct_character_analyze(self, direction:str, character: Character, goal:str, direction_prompt:Prompt):
-
-        prompt = Prompt.get("director.direct-character-analyze", vars={
-            "direction_prompt": str(direction_prompt),
-            "direction": direction,
-            "scene": self.scene,
-            "max_tokens": self.client.max_token_length,
-            "character": character,
-        })
-
-        analysis = await prompt.send(self.client, kind="director")
-
-        log.info("direct_character_analyze", analysis=analysis)
-
-        return analysis
-
-    async def select_goal(self, scene: Scene):
-
-        if not scene.goals:
-            return ""
-
-        if isinstance(self.scene.goal, int):
-            # fixes legacy goal format
-            self.scene.goal = self.scene.goals[self.scene.goal]
-
-        while True:
-
-            # get current goal position in goals
-
-            current_goal = scene.goal
-            current_goal_positon = None
-            if current_goal:
-                try:
-                    current_goal_positon = self.scene.goals.index(current_goal)
-                except ValueError:
-                    pass
-            elif self.scene.goals:
-                current_goal = self.scene.goals[0]
-                current_goal_positon = 0
-            else:
-                return ""
-
-            # if current goal is set but not found, its a custom goal override
-
-            custom_goal = (current_goal and current_goal_positon is None)
-
-            log.info("select_goal", current_goal=current_goal, current_goal_positon=current_goal_positon, custom_goal=custom_goal)
-
-            if current_goal:
-                current_goal_met = await self.goal_analyze(current_goal)
-
-                log.info("select_goal", current_goal_met=current_goal_met)
-                if current_goal_met is not True:
-                    return current_goal + f"\nThe goal has {current_goal_met})"
-                try:
-                    self.scene.goal = self.scene.goals[current_goal_positon + 1]
-                    continue
-                except IndexError:
-                    return ""
-
-            else:
-                return ""
-
-    @set_processing
-    async def goal_analyze(self, goal:str):
-
-        prompt = Prompt.get("director.goal-analyze", vars={
-            "scene": self.scene,
-            "max_tokens": self.client.max_token_length,
-            "current_goal": goal,
-        })
-
-        response = await prompt.send(self.client, kind="director")
-
-        log.info("goal_analyze", response=response)
-
-        if "not satisfied" in response.lower().strip() or "not been satisfied" in response.lower().strip():
-            goal_met = response
-        else:
-            goal_met = True
-
-        return goal_met
+    def connect(self, scene):
+        super().connect(scene)
+        talemate.emit.async_signals.get("agent.conversation.before_generate").connect(self.on_conversation_before_generate)
+
+    async def on_conversation_before_generate(self, event:ConversationAgentEmission):
+        log.info("on_conversation_before_generate", director_enabled=self.enabled)
+        if not self.enabled:
+            return
+
+        await self.direct_scene(event.character)
+
+    async def direct_scene(self, character: Character):
+
+        if not self.actions["direct"].enabled:
+            log.info("direct_scene", skip=True, enabled=self.actions["direct"].enabled)
+            return
+
+        prompt = self.actions["direct"].config["prompt"].value
+
+        if not prompt:
+            log.info("direct_scene", skip=True, prompt=prompt)
+            return
+
+        if self.next_direct % self.actions["direct"].config["turns"].value != 0 or self.next_direct == 0:
+            log.info("direct_scene", skip=True, next_direct=self.next_direct)
+            self.next_direct += 1
+            return
+
+        self.next_direct = 0
+
+        await self.direct_character(character, prompt)
+
+    @set_processing
+    async def direct_character(self, character: Character, prompt:str):
+
+        response = await Prompt.request("director.direct-scene", self.client, "director", vars={
+            "max_tokens": self.client.max_token_length,
+            "scene": self.scene,
+            "prompt": prompt,
+            "character": character,
+        })
+
+        response += f" (current story goal: {prompt})"
+
+        log.info("direct_scene", response=response)
+
+        message = DirectorMessage(response, source=character.name)
+        emit("director", message, character=character)
+
+        self.scene.push_history(message)
@@ -10,7 +10,7 @@ import talemate.emit.async_signals
|
||||
from talemate.prompts import Prompt
from talemate.scene_message import DirectorMessage, TimePassageMessage

from .base import Agent, set_processing, AgentAction
from .base import Agent, set_processing, AgentAction, AgentActionConfig
from .registry import register

import structlog
@@ -21,6 +21,7 @@ import re
if TYPE_CHECKING:
    from talemate.tale_mate import Actor, Character, Scene
    from talemate.agents.conversation import ConversationAgentEmission
    from talemate.agents.narrator import NarratorAgentEmission

log = structlog.get_logger("talemate.agents.editor")

@@ -40,7 +41,9 @@ class EditorAgent(Agent):
        self.is_enabled = True
        self.actions = {
            "edit_dialogue": AgentAction(enabled=False, label="Edit dialogue", description="Will attempt to improve the quality of dialogue based on the character and scene. Runs automatically after each AI dialogue."),
            "fix_exposition": AgentAction(enabled=True, label="Fix exposition", description="Will attempt to fix exposition and emotes, making sure they are displayed in italics. Runs automatically after each AI dialogue."),
            "fix_exposition": AgentAction(enabled=True, label="Fix exposition", description="Will attempt to fix exposition and emotes, making sure they are displayed in italics. Runs automatically after each AI dialogue.", config={
                "narrator": AgentActionConfig(type="bool", label="Fix narrator messages", description="Will attempt to fix exposition issues in narrator messages", value=True),
            }),
            "add_detail": AgentAction(enabled=False, label="Add detail", description="Will attempt to add extra detail and exposition to the dialogue. Runs automatically after each AI dialogue.")
        }

@@ -59,6 +62,7 @@ class EditorAgent(Agent):
    def connect(self, scene):
        super().connect(scene)
        talemate.emit.async_signals.get("agent.conversation.generated").connect(self.on_conversation_generated)
        talemate.emit.async_signals.get("agent.narrator.generated").connect(self.on_narrator_generated)

    async def on_conversation_generated(self, emission:ConversationAgentEmission):
        """
@@ -93,6 +97,24 @@ class EditorAgent(Agent):

        emission.generation = edited

    async def on_narrator_generated(self, emission:NarratorAgentEmission):
        """
        Called when a narrator message is generated
        """

        if not self.enabled:
            return

        log.info("editing narrator", emission=emission)

        edited = []

        for text in emission.generation:
            edit = await self.fix_exposition_on_narrator(text)
            edited.append(edit)

        emission.generation = edited


    @set_processing
    async def edit_conversation(self, content:str, character:Character):
@@ -127,12 +149,19 @@ class EditorAgent(Agent):
        if not self.actions["fix_exposition"].enabled:
            return content

        #response = await Prompt.request("editor.fix-exposition", self.client, "edit_fix_exposition", vars={
        #    "content": content,
        #    "character": character,
        #    "scene": self.scene,
        #    "max_length": self.client.max_token_length
        #})
        if not character.is_player:
            if '"' not in content and '*' not in content:
                content = util.strip_partial_sentences(content)
                character_prefix = f"{character.name}: "
                message = content.split(character_prefix)[1]
                content = f"{character_prefix}*{message.strip('*')}*"
                return content
            elif '"' in content:
                # if both are present we strip the * and add them back later
                # through ensure_dialog_format - right now most LLMs aren't
                # smart enough to do quotes and italics at the same time consistently
                # especially throughout long conversations
                content = content.replace('*', '')

        content = util.clean_dialogue(content, main_name=character.name)
        content = util.strip_partial_sentences(content)
@@ -140,6 +169,24 @@ class EditorAgent(Agent):

        return content

    @set_processing
    async def fix_exposition_on_narrator(self, content:str):

        if not self.actions["fix_exposition"].enabled:
            return content

        if not self.actions["fix_exposition"].config["narrator"].value:
            return content

        content = util.strip_partial_sentences(content)

        if '"' not in content:
            content = f"*{content.strip('*')}*"
        else:
            content = util.ensure_dialog_format(content)

        return content

    @set_processing
    async def add_detail(self, content:str, character:Character):
        """
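
The `fix_exposition_on_narrator` method above wraps plain narration in asterisks so the UI renders it in italics, and defers mixed dialogue/narration to `util.ensure_dialog_format` (per the comment in the diff, most LLMs can't keep quotes and italics consistent over long conversations). A minimal standalone sketch of that normalization; `strip_partial_sentences` here is a simplified stand-in for the `talemate.util` helper of the same name:

```python
import re

def strip_partial_sentences(text: str) -> str:
    # Simplified stand-in for talemate.util.strip_partial_sentences:
    # drop a trailing fragment that never reached sentence-ending punctuation.
    match = re.match(r'^(.*[.!?*"])', text.strip(), re.DOTALL)
    return match.group(1) if match else text.strip()

def fix_narrator_exposition(content: str) -> str:
    # Mirrors the branch logic above: pure narration (no quoted dialogue)
    # gets wrapped in asterisks so the UI renders it in italics.
    content = strip_partial_sentences(content)
    if '"' not in content:
        return f"*{content.strip('*')}*"
    return content  # mixed dialogue would go through ensure_dialog_format instead

print(fix_narrator_exposition("The door creaks open. A cold wind"))
# -> *The door creaks open.*
```
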
@@ -6,8 +6,14 @@ from typing import TYPE_CHECKING, Callable, List, Optional, Union
from chromadb.config import Settings
import talemate.events as events
import talemate.util as util
from talemate.emit import emit
from talemate.emit.signals import handlers
from talemate.context import scene_is_loading
from talemate.config import load_config
from talemate.agents.base import set_processing
import structlog
import shutil
import functools

try:
    import chromadb
@@ -34,6 +40,18 @@ class MemoryAgent(Agent):
    agent_type = "memory"
    verbose_name = "Long-term memory"

    @property
    def readonly(self):

        if scene_is_loading.get() and not getattr(self.scene, "_memory_never_persisted", False):
            return True

        return False

    @property
    def db_name(self):
        raise NotImplementedError()

    @classmethod
    def config_options(cls, agent=None):
        return {}
@@ -43,6 +61,15 @@ class MemoryAgent(Agent):
        self.scene = scene
        self.memory_tracker = {}
        self.config = load_config()

        handlers["config_saved"].connect(self.on_config_saved)

    def on_config_saved(self, event):
        openai_key = self.openai_api_key
        self.config = load_config()
        if openai_key != self.openai_api_key:
            loop = asyncio.get_running_loop()
            loop.run_until_complete(self.emit_status())

    async def set_db(self):
        raise NotImplementedError()
@@ -50,28 +77,46 @@ class MemoryAgent(Agent):
    def close_db(self):
        raise NotImplementedError()

    async def count(self):
        raise NotImplementedError()

    @set_processing
    async def add(self, text, character=None, uid=None, ts:str=None, **kwargs):
        if not text:
            return
        if self.readonly:
            log.debug("memory agent", status="readonly")
            return

        loop = asyncio.get_running_loop()

        await loop.run_in_executor(None, functools.partial(self._add, text, character, uid=uid, ts=ts, **kwargs))

        await self._add(text, character=character, uid=uid, ts=ts, **kwargs)

    async def _add(self, text, character=None, ts:str=None, **kwargs):
    def _add(self, text, character=None, ts:str=None, **kwargs):
        raise NotImplementedError()

    @set_processing
    async def add_many(self, objects: list[dict]):
        await self._add_many(objects)
        if self.readonly:
            log.debug("memory agent", status="readonly")
            return

        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, self._add_many, objects)

    async def _add_many(self, objects: list[dict]):
    def _add_many(self, objects: list[dict]):
        """
        Add multiple objects to the memory
        """
        raise NotImplementedError()

    @set_processing
    async def get(self, text, character=None, **query):
        return await self._get(str(text), character, **query)
        loop = asyncio.get_running_loop()

        return await loop.run_in_executor(None, functools.partial(self._get, text, character, **query))

    async def _get(self, text, character=None, **query):
    def _get(self, text, character=None, **query):
        raise NotImplementedError()

    def get_document(self, id):
@@ -118,6 +163,10 @@ class MemoryAgent(Agent):
        """

        memory_context = []

        if not query:
            return memory_context

        for memory in await self.get(query):
            if memory in memory_context:
                continue
@@ -131,13 +180,13 @@ class MemoryAgent(Agent):
                break
        return memory_context

    async def query(self, query:str, max_tokens:int=1000, filter:Callable=lambda x:True):
    async def query(self, query:str, max_tokens:int=1000, filter:Callable=lambda x:True, **where):
        """
        Get the character memory context for a given character
        """

        try:
            return (await self.multi_query([query], max_tokens=max_tokens, filter=filter))[0]
            return (await self.multi_query([query], max_tokens=max_tokens, filter=filter, **where))[0]
        except IndexError:
            return None

@@ -157,8 +206,12 @@ class MemoryAgent(Agent):

        memory_context = []
        for query in queries:

            if not query:
                continue

            i = 0
            for memory in await self.get(formatter(query), **where):
            for memory in await self.get(formatter(query), limit=iterate, **where):
                if memory in memory_context:
                    continue

@@ -184,9 +237,14 @@ from .registry import register
@register(condition=lambda: chromadb is not None)
class ChromaDBMemoryAgent(MemoryAgent):

    requires_llm_client = False

    @property
    def ready(self):

        if self.embeddings == "openai" and not self.openai_api_key:
            return False

        if getattr(self, "db_client", None):
            return True
        return False
@@ -195,12 +253,20 @@ class ChromaDBMemoryAgent(MemoryAgent):
    def status(self):
        if self.ready:
            return "active" if not getattr(self, "processing", False) else "busy"

        if self.embeddings == "openai" and not self.openai_api_key:
            return "error"

        return "waiting"

    @property
    def agent_details(self):

        if self.embeddings == "openai" and not self.openai_api_key:
            return "No OpenAI API key set"

        return f"ChromaDB: {self.embeddings}"


    @property
    def embeddings(self):
        """
@@ -238,26 +304,58 @@ class ChromaDBMemoryAgent(MemoryAgent):
    @property
    def USE_INSTRUCTOR(self):
        return self.embeddings == "instructor"

    @property
    def db_name(self):
        return getattr(self, "collection_name", "<unnamed>")

    @property
    def openai_api_key(self):
        return self.config.get("openai",{}).get("api_key")

    def make_collection_name(self, scene):

        if self.USE_OPENAI:
            suffix = "-openai"
        elif self.USE_INSTRUCTOR:
            suffix = "-instructor"
            model = self.config.get("chromadb").get("instructor_model", "hkunlp/instructor-xl")
            if "xl" in model:
                suffix += "-xl"
            elif "large" in model:
                suffix += "-large"
        else:
            suffix = ""

        return f"{scene.memory_id}-tm{suffix}"

    async def count(self):
        await asyncio.sleep(0)
        return self.db.count()

    @set_processing
    async def set_db(self):
        await self.emit_status(processing=True)
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(None, self._set_db)

        if getattr(self, "db", None):
            try:
                self.db.delete(where={"source": "talemate"})
            except ValueError:
                pass
        await self.emit_status(processing=False)
    def _set_db(self):
        if not getattr(self, "db_client", None):
            log.info("chromadb agent", status="setting up db client to persistent db")
            self.db_client = chromadb.PersistentClient(
                settings=Settings(anonymized_telemetry=False)
            )

        openai_key = self.openai_api_key

        self.collection_name = collection_name = self.make_collection_name(self.scene)

        log.info("chromadb agent", status="setting up db", collection_name=collection_name)

        if self.USE_OPENAI:

            if not openai_key:
                raise ValueError("You must provide an the openai ai key in the config if you want to use it for chromadb embeddings")

            return

        log.info("chromadb agent", status="setting up db")

        self.db_client = chromadb.Client(Settings(anonymized_telemetry=False))

        openai_key = self.config.get("openai").get("api_key") or os.environ.get("OPENAI_API_KEY")

        if openai_key and self.USE_OPENAI:
            log.info(
                "crhomadb", status="using openai", openai_key=openai_key[:5] + "..."
            )
@@ -266,7 +364,7 @@ class ChromaDBMemoryAgent(MemoryAgent):
                model_name="text-embedding-ada-002",
            )
            self.db = self.db_client.get_or_create_collection(
                "talemate-story", embedding_function=openai_ef
                collection_name, embedding_function=openai_ef
            )
        elif self.USE_INSTRUCTOR:

@@ -280,31 +378,62 @@ class ChromaDBMemoryAgent(MemoryAgent):
                model_name=instructor_model, device=instructor_device
            )

            log.info("chromadb", status="embedding function ready")

            self.db = self.db_client.get_or_create_collection(
                "talemate-story", embedding_function=ef
                collection_name, embedding_function=ef
            )

            log.info("chromadb", status="instructor db ready")
        else:
            log.info("chromadb", status="using default embeddings")
            self.db = self.db_client.get_or_create_collection("talemate-story")

        await self.emit_status(processing=False)
            self.db = self.db_client.get_or_create_collection(collection_name)

        self.scene._memory_never_persisted = self.db.count() == 0
        log.info("chromadb agent", status="db ready")

    def close_db(self):
    def clear_db(self):
        if not self.db:
            return

        log.info("chromadb agent", status="clearing db", collection_name=self.collection_name)

        self.db.delete(where={"source": "talemate"})

    def drop_db(self):
        if not self.db:
            return

        log.info("chromadb agent", status="dropping db", collection_name=self.collection_name)

        try:
            self.db.delete(where={"source": "talemate"})
        except ValueError:
            pass
            self.db_client.delete_collection(self.collection_name)
        except ValueError as exc:
            if "Collection not found" not in str(exc):
                raise

    async def _add(self, text, character=None, uid=None, ts:str=None, **kwargs):
    def close_db(self, scene):
        if not self.db:
            return

        log.info("chromadb agent", status="closing db", collection_name=self.collection_name)

        if not scene.saved:
            # scene was never saved so we can discard the memory
            collection_name = self.make_collection_name(scene)
            log.info("chromadb agent", status="discarding memory", collection_name=collection_name)
            try:
                self.db_client.delete_collection(collection_name)
            except ValueError as exc:
                if "Collection not found" not in str(exc):
                    raise

        self.db = None

    def _add(self, text, character=None, uid=None, ts:str=None, **kwargs):
        metadatas = []
        ids = []

        await self.emit_status(processing=True)

        if character:
            meta = {"character": character.name, "source": "talemate"}
            if ts:
@@ -326,20 +455,16 @@ class ChromaDBMemoryAgent(MemoryAgent):
            id = uid or f"__narrator__-{self.memory_tracker['__narrator__']}"
            ids = [id]

        log.debug("chromadb agent add", text=text, meta=meta, id=id)
        #log.debug("chromadb agent add", text=text, meta=meta, id=id)

        self.db.upsert(documents=[text], metadatas=metadatas, ids=ids)

        await self.emit_status(processing=False)

    async def _add_many(self, objects: list[dict]):
    def _add_many(self, objects: list[dict]):

        documents = []
        metadatas = []
        ids = []

        await self.emit_status(processing=True)

        for obj in objects:
            documents.append(obj["text"])
            meta = obj.get("meta", {})
@@ -352,11 +477,7 @@ class ChromaDBMemoryAgent(MemoryAgent):
            ids.append(uid)
        self.db.upsert(documents=documents, metadatas=metadatas, ids=ids)

        await self.emit_status(processing=False)

    async def _get(self, text, character=None, **kwargs):
        await self.emit_status(processing=True)

    def _get(self, text, character=None, limit:int=15, **kwargs):
        where = {}
        where.setdefault("$and", [])

@@ -378,7 +499,11 @@ class ChromaDBMemoryAgent(MemoryAgent):

        #log.debug("crhomadb agent get", text=text, where=where)

        _results = self.db.query(query_texts=[text], where=where)
        _results = self.db.query(query_texts=[text], where=where, n_results=limit)

        #import json
        #print(json.dumps(_results["ids"], indent=2))
        #print(json.dumps(_results["distances"], indent=2))

        results = []

@@ -392,9 +517,10 @@ class ChromaDBMemoryAgent(MemoryAgent):
            if distance < 1:

                try:
                    #log.debug("chromadb agent get", ts=ts, scene_ts=self.scene.ts)
                    date_prefix = util.iso8601_diff_to_human(ts, self.scene.ts)
                except Exception:
                    log.error("chromadb agent", error="failed to get date prefix", ts=ts, scene_ts=self.scene.ts)
                except Exception as e:
                    log.error("chromadb agent", error="failed to get date prefix", details=e, ts=ts, scene_ts=self.scene.ts)
                    date_prefix = None

                if date_prefix:
@@ -405,9 +531,7 @@ class ChromaDBMemoryAgent(MemoryAgent):

                # log.debug("crhomadb agent get", result=results[-1], distance=distance)

                if len(results) > 10:
                if len(results) > limit:
                    break

        await self.emit_status(processing=False)

        return results
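
The memory agent changes above convert `_add`/`_add_many`/`_get` from async methods into plain synchronous ones and dispatch them through `run_in_executor`, keeping ChromaDB's blocking calls off the asyncio event loop. A minimal sketch of the pattern; `blocking_query` is a hypothetical stand-in for the real vector-store call:

```python
import asyncio
import functools

def blocking_query(text: str, limit: int = 15) -> list[str]:
    # Stand-in for a synchronous vector-store call such as chromadb's
    # collection.query(); anything CPU- or IO-blocking belongs here.
    return [f"result for {text!r}"][:limit]

async def get(text: str, **query) -> list[str]:
    # Offload the blocking call to the default thread pool so the
    # asyncio event loop (and the UI it drives) stays responsive.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(
        None, functools.partial(blocking_query, text, **query)
    )

print(asyncio.run(get("old tavern", limit=5)))
```

`functools.partial` is needed here because `run_in_executor` only forwards positional arguments to the target function.
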
@@ -1,19 +1,60 @@
from __future__ import annotations

import asyncio
import re
from typing import TYPE_CHECKING, Callable, List, Optional, Union

import dataclasses
import structlog
import random
import talemate.util as util
from talemate.emit import wait_for_input
from talemate.emit import emit
import talemate.emit.async_signals
from talemate.prompts import Prompt
from talemate.agents.base import set_processing, Agent
from talemate.agents.base import set_processing as _set_processing, Agent, AgentAction, AgentActionConfig, AgentEmission
from talemate.agents.world_state import TimePassageEmission
from talemate.scene_message import NarratorMessage
from talemate.events import GameLoopActorIterEvent
import talemate.client as client

from .registry import register

if TYPE_CHECKING:
    from talemate.tale_mate import Actor, Player, Character

log = structlog.get_logger("talemate.agents.narrator")

@dataclasses.dataclass
class NarratorAgentEmission(AgentEmission):
    generation: list[str] = dataclasses.field(default_factory=list)

talemate.emit.async_signals.register(
    "agent.narrator.generated"
)

def set_processing(fn):

    """
    Custom decorator that emits the agent status as processing while the function
    is running and then emits the result of the function as a NarratorAgentEmission
    """

    @_set_processing
    async def wrapper(self, *args, **kwargs):
        response = await fn(self, *args, **kwargs)
        emission = NarratorAgentEmission(
            agent=self,
            generation=[response],
        )
        await talemate.emit.async_signals.get("agent.narrator.generated").send(emission)
        return emission.generation[0]
    wrapper.__name__ = fn.__name__
    return wrapper

@register()
class NarratorAgent(Agent):

    """
    Handles narration of the story
    """

    agent_type = "narrator"
    verbose_name = "Narrator"

@@ -24,20 +65,146 @@ class NarratorAgent(Agent):
    ):
        self.client = client

        # agent actions

        self.actions = {
            "generation_override": AgentAction(
                enabled = True,
                label = "Generation Override",
                description = "Override generation parameters",
                config = {
                    "instructions": AgentActionConfig(
                        type="text",
                        label="Instructions",
                        value="Never wax poetic.",
                        description="Extra instructions to give to the AI for narrative generation.",
                    ),
                }
            ),
            "auto_break_repetition": AgentAction(
                enabled = True,
                label = "Auto Break Repetition",
                description = "Will attempt to automatically break AI repetition.",
            ),
            "narrate_time_passage": AgentAction(enabled=True, label="Narrate Time Passage", description="Whenever you indicate passage of time, narrate right after"),
            "narrate_dialogue": AgentAction(
                enabled=True,
                label="Narrate Dialogue",
                description="Narrator will get a chance to narrate after every line of dialogue",
                config = {
                    "ai_dialog": AgentActionConfig(
                        type="number",
                        label="AI Dialogue",
                        description="Chance to narrate after every line of dialogue, 1 = always, 0 = never",
                        value=0.3,
                        min=0.0,
                        max=1.0,
                        step=0.1,
                    ),
                    "player_dialog": AgentActionConfig(
                        type="number",
                        label="Player Dialogue",
                        description="Chance to narrate after every line of dialogue, 1 = always, 0 = never",
                        value=0.3,
                        min=0.0,
                        max=1.0,
                        step=0.1,
                    ),
                    "generate_dialogue": AgentActionConfig(
                        type="bool",
                        label="Allow Dialogue in Narration",
                        description="Allow the narrator to generate dialogue in narration",
                        value=False,
                    ),
                }
            ),
        }

    @property
    def extra_instructions(self):
        if self.actions["generation_override"].enabled:
            return self.actions["generation_override"].config["instructions"].value
        return ""

    def clean_result(self, result):

        """
        Cleans the result of a narration
        """

        result = result.strip().strip(":").strip()

        if "#" in result:
            result = result.split("#")[0]

        character_names = [c.name for c in self.scene.get_characters()]


        cleaned = []
        for line in result.split("\n"):
            if ":" in line.strip():
                break
            for character_name in character_names:
                if line.startswith(f"{character_name}:"):
                    break
            cleaned.append(line)

        return "\n".join(cleaned)
        result = "\n".join(cleaned)
        #result = util.strip_partial_sentences(result)
        return result

    def connect(self, scene):

        """
        Connect to signals
        """

        super().connect(scene)
        talemate.emit.async_signals.get("agent.world_state.time").connect(self.on_time_passage)
        talemate.emit.async_signals.get("game_loop_actor_iter").connect(self.on_dialog)

    async def on_time_passage(self, event:TimePassageEmission):

        """
        Handles time passage narration, if enabled
        """

        if not self.actions["narrate_time_passage"].enabled:
            return

        response = await self.narrate_time_passage(event.duration, event.narrative)
        narrator_message = NarratorMessage(response, source=f"narrate_time_passage:{event.duration};{event.narrative}")
        emit("narrator", narrator_message)
        self.scene.push_history(narrator_message)

    async def on_dialog(self, event:GameLoopActorIterEvent):

        """
        Handles dialogue narration, if enabled
        """

        if not self.actions["narrate_dialogue"].enabled:
            return
        narrate_on_ai_chance = self.actions["narrate_dialogue"].config["ai_dialog"].value
        narrate_on_player_chance = self.actions["narrate_dialogue"].config["player_dialog"].value
        narrate_on_ai = random.random() < narrate_on_ai_chance
        narrate_on_player = random.random() < narrate_on_player_chance
        log.debug(
            "narrate on dialog",
            narrate_on_ai=narrate_on_ai,
            narrate_on_ai_chance=narrate_on_ai_chance,
            narrate_on_player=narrate_on_player,
            narrate_on_player_chance=narrate_on_player_chance,
        )

        if event.actor.character.is_player and not narrate_on_player:
            return

        if not event.actor.character.is_player and not narrate_on_ai:
            return

        response = await self.narrate_after_dialogue(event.actor.character)
        narrator_message = NarratorMessage(response, source=f"narrate_dialogue:{event.actor.character.name}")
        emit("narrator", narrator_message)
        self.scene.push_history(narrator_message)

    @set_processing
    async def narrate_scene(self):
@@ -52,9 +219,13 @@ class NarratorAgent(Agent):
            vars = {
                "scene": self.scene,
                "max_tokens": self.client.max_token_length,
                "extra_instructions": self.extra_instructions,
            }
        )

        response = response.strip("*")
        response = util.strip_partial_sentences(response)

        response = f"*{response.strip('*')}*"

        return response
@@ -66,22 +237,11 @@ class NarratorAgent(Agent):
        """

        scene = self.scene
        director = scene.get_helper("director").agent
        pc = scene.get_player_character()
        npcs = list(scene.get_npc_characters())
        npc_names= ", ".join([npc.name for npc in npcs])

        #summarized_history = await scene.summarized_dialogue_history(
        #    budget = self.client.max_token_length - 300,
        #    min_dialogue = 50,
        #)

        #augmented_context = await self.augment_context()

        if narrative_direction is None:
            #narrative_direction = await director.direct_narrative(
            #    scene.context_history(budget=self.client.max_token_length - 500, min_dialogue=20),
            #)
            narrative_direction = "Slightly move the current scene forward."

        self.scene.log.info("narrative_direction", narrative_direction=narrative_direction)
@@ -92,13 +252,12 @@ class NarratorAgent(Agent):
            "narrate",
            vars = {
                "scene": self.scene,
                #"summarized_history": summarized_history,
                #"augmented_context": augmented_context,
                "max_tokens": self.client.max_token_length,
                "narrative_direction": narrative_direction,
                "player_character": pc,
                "npcs": npcs,
                "npc_names": npc_names,
                "extra_instructions": self.extra_instructions,
            }
        )

@@ -129,10 +288,12 @@ class NarratorAgent(Agent):
                "query": query,
                "at_the_end": at_the_end,
                "as_narrative": as_narrative,
                "extra_instructions": self.extra_instructions,
            }
        )

        log.info("narrate_query", response=response)
        response = self.clean_result(response.strip())
        log.info("narrate_query (after clean)", response=response)
        if as_narrative:
            response = f"*{response}*"

@@ -164,6 +325,7 @@ class NarratorAgent(Agent):
                "character": character,
                "max_tokens": self.client.max_token_length,
                "memory": memory_context,
                "extra_instructions": self.extra_instructions,
            }
        )

@@ -188,6 +350,7 @@ class NarratorAgent(Agent):
            vars = {
                "scene": self.scene,
                "max_tokens": self.client.max_token_length,
                "extra_instructions": self.extra_instructions,
            }
        )

@@ -208,6 +371,7 @@ class NarratorAgent(Agent):
                "max_tokens": self.client.max_token_length,
                "memory": memory_context,
                "questions": questions,
                "extra_instructions": self.extra_instructions,
            }
        )

@@ -216,4 +380,80 @@ class NarratorAgent(Agent):
        answers = [a for a in answers.split("\n") if a.strip()]

        # return questions and answers
        return list(zip(questions, answers))
        return list(zip(questions, answers))

    @set_processing
    async def narrate_time_passage(self, duration:str, narrative:str=None):
        """
        Narrate a specific character
        """

        response = await Prompt.request(
            "narrator.narrate-time-passage",
            self.client,
            "narrate",
            vars = {
                "scene": self.scene,
                "max_tokens": self.client.max_token_length,
                "duration": duration,
                "narrative": narrative,
                "extra_instructions": self.extra_instructions,
            }
        )

        log.info("narrate_time_passage", response=response)

        response = self.clean_result(response.strip())
        response = f"*{response}*"

        return response


    @set_processing
    async def narrate_after_dialogue(self, character:Character):
        """
        Narrate after a line of dialogue
        """

        response = await Prompt.request(
            "narrator.narrate-after-dialogue",
            self.client,
            "narrate",
            vars = {
                "scene": self.scene,
                "max_tokens": self.client.max_token_length,
                "character": character,
                "last_line": str(self.scene.history[-1]),
                "extra_instructions": self.extra_instructions,
            }
        )

        log.info("narrate_after_dialogue", response=response)

        response = self.clean_result(response.strip().strip("*"))
        response = f"*{response}*"

        allow_dialogue = self.actions["narrate_dialogue"].config["generate_dialogue"].value

        if not allow_dialogue:
            response = response.split('"')[0].strip()
            response = response.replace("*", "")
            response = util.strip_partial_sentences(response)
            response = f"*{response}*"

        return response

    # LLM client related methods. These are called during or after the client

    def inject_prompt_paramters(self, prompt_param: dict, kind: str, agent_function_name: str):
        log.debug("inject_prompt_paramters", prompt_param=prompt_param, kind=kind, agent_function_name=agent_function_name)
        character_names = [f"\n{c.name}:" for c in self.scene.get_characters()]
        if prompt_param.get("extra_stopping_strings") is None:
            prompt_param["extra_stopping_strings"] = []
        prompt_param["extra_stopping_strings"] += character_names

    def allow_repetition_break(self, kind: str, agent_function_name: str, auto:bool=False):
        if auto and not self.actions["auto_break_repetition"].enabled:
            return False

        return True
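
The narrator module now shadows the base `set_processing` with a decorator that routes every generation through an `agent.narrator.generated` signal before returning it; this is what lets the editor agent post-process narration. A toy sketch of the wrap-emit-return shape, with a plain list standing in for talemate's async signal registry:

```python
import asyncio
import functools

listeners = []  # toy stand-in for talemate's async signal registry

def emits_generation(fn):
    # Wrap an async generator function so every result is broadcast to
    # listeners, which may replace it (the editor agent does exactly this).
    @functools.wraps(fn)
    async def wrapper(*args, **kwargs):
        generation = [await fn(*args, **kwargs)]
        for listener in listeners:
            await listener(generation)  # listeners may mutate the list
        return generation[0]
    return wrapper

@emits_generation
async def narrate() -> str:
    return "the door creaks open"

async def italicize(generation: list[str]) -> None:
    generation[0] = f"*{generation[0]}*"

listeners.append(italicize)
print(asyncio.run(narrate()))  # -> *the door creaks open*
```
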
@@ -5,11 +5,13 @@ import traceback
from typing import TYPE_CHECKING, Callable, List, Optional, Union

import talemate.data_objects as data_objects
import talemate.emit.async_signals
import talemate.util as util
from talemate.prompts import Prompt
from talemate.scene_message import DirectorMessage, TimePassageMessage
from talemate.events import GameLoopEvent

from .base import Agent, set_processing
from .base import Agent, set_processing, AgentAction, AgentActionConfig
from .registry import register

import structlog
@@ -34,14 +36,40 @@ class SummarizeAgent(Agent):

    def __init__(self, client, **kwargs):
        self.client = client

    def on_history_add(self, event):
        asyncio.ensure_future(self.build_archive(event.scene))


        self.actions = {
            "archive": AgentAction(
                enabled=True,
                label="Summarize to long-term memory archive",
                description="Automatically summarize scene dialogue when the number of tokens in the history exceeds a threshold. This helps keep the context history from growing too large.",
                config={
                    "threshold": AgentActionConfig(
                        type="number",
                        label="Token Threshold",
                        description="Will summarize when the number of tokens in the history exceeds this threshold",
                        min=512,
                        max=8192,
                        step=256,
                        value=1536,
                    )
                }
            )
        }


    def connect(self, scene):
        super().connect(scene)
        scene.signals["history_add"].connect(self.on_history_add)
        talemate.emit.async_signals.get("game_loop").connect(self.on_game_loop)


    async def on_game_loop(self, emission:GameLoopEvent):
        """
        Called when a conversation is generated
        """

        await self.build_archive(self.scene)


    def clean_result(self, result):
        if "#" in result:
            result = result.split("#")[0]
@@ -55,25 +83,39 @@ class SummarizeAgent(Agent):
    @set_processing
    async def build_archive(self, scene):
        end = None


        if not self.actions["archive"].enabled:
            return

        if not scene.archived_history:
            start = 0
            recent_entry = None
        else:
            recent_entry = scene.archived_history[-1]
            start = recent_entry.get("end", 0) + 1
            if "end" not in recent_entry:
                # permanent historical archive entry, not tied to any specific history entry
                # meaning we are still at the beginning of the scene
                start = 0
            else:
                start = recent_entry.get("end", 0)+1

        token_threshold = 1500
        tokens = 0
        dialogue_entries = []
        ts = "PT0S"
        time_passage_termination = False

        token_threshold = self.actions["archive"].config["threshold"].value

        log.debug("build_archive", start=start, recent_entry=recent_entry)

        if recent_entry:
            ts = recent_entry.get("ts", ts)

        for i in range(start, len(scene.history)):
            dialogue = scene.history[i]

            #log.debug("build_archive", idx=i, content=str(dialogue)[:64]+"...")

            if isinstance(dialogue, DirectorMessage):
                if i == start:
                    start += 1
@@ -130,7 +172,7 @@ class SummarizeAgent(Agent):
                break
            adjusted_dialogue.append(line)
        dialogue_entries = adjusted_dialogue
        end = start + len(dialogue_entries)
        end = start + len(dialogue_entries)-1

        if dialogue_entries:
            summarized = await self.summarize(
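
Summarization above is now driven by a configurable token threshold (default 1536) instead of the old hardcoded 1500, and runs off the game loop rather than every history addition. A rough sketch of threshold-triggered batching, with a crude characters-per-token estimate standing in for the real token counter:

```python
def count_tokens(text: str) -> int:
    # Rough stand-in for a real tokenizer: ~4 characters per token.
    return max(1, len(text) // 4)

def entries_to_archive(history: list[str], threshold: int = 1536) -> list[str]:
    # Collect history entries until the token budget is exceeded; the
    # collected slice is what would be summarized into the archive.
    batch, tokens = [], 0
    for entry in history:
        batch.append(entry)
        tokens += count_tokens(entry)
        if tokens >= threshold:
            break
    return batch if tokens >= threshold else []

history = ["A long scene line."] * 400
print(len(entries_to_archive(history)))  # number of lines queued for summarization
```
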
src/talemate/agents/tts.py (new file, 617 lines)
@@ -0,0 +1,617 @@
from __future__ import annotations

from typing import Union
import asyncio
import httpx
import io
import os
import pydantic
import nltk
import tempfile
import base64
import uuid
import functools
from nltk.tokenize import sent_tokenize

import talemate.config as config
import talemate.emit.async_signals
import talemate.instance as instance
from talemate.emit import emit
from talemate.emit.signals import handlers
from talemate.events import GameLoopNewMessageEvent
from talemate.scene_message import CharacterMessage, NarratorMessage

from .base import Agent, set_processing, AgentAction, AgentActionConfig
from .registry import register

import structlog

import time

try:
    from TTS.api import TTS
except ImportError:
    TTS = None

log = structlog.get_logger("talemate.agents.tts")#

if not TTS:
    # TTS installation is massive and requires a lot of dependencies
    # so we don't want to require it unless the user wants to use it
    log.info("TTS (local) requires the TTS package, please install with `pip install TTS` if you want to use the local api")


def parse_chunks(text):

    text = text.replace("...", "__ellipsis__")

    chunks = sent_tokenize(text)
    cleaned_chunks = []

    for chunk in chunks:
        chunk = chunk.replace("*","")
        if not chunk:
            continue
        cleaned_chunks.append(chunk)


    for i, chunk in enumerate(cleaned_chunks):
        chunk = chunk.replace("__ellipsis__", "...")

        cleaned_chunks[i] = chunk

    return cleaned_chunks

def clean_quotes(chunk:str):

    # if there is an uneven number of quotes, remove the last one if its
    # at the end of the chunk. If its in the middle, add a quote to the end
    if chunk.count('"') % 2 == 1:

        if chunk.endswith('"'):
            chunk = chunk[:-1]
        else:
            chunk += '"'

    return chunk


def rejoin_chunks(chunks:list[str], chunk_size:int=250):

    """
    Will combine chunks split by punctuation into a single chunk until
    max chunk size is reached
    """

    joined_chunks = []

    current_chunk = ""

    for chunk in chunks:

        if len(current_chunk) + len(chunk) > chunk_size:
            joined_chunks.append(clean_quotes(current_chunk))
            current_chunk = ""

        current_chunk += chunk

    if current_chunk:
        joined_chunks.append(clean_quotes(current_chunk))
    return joined_chunks


class Voice(pydantic.BaseModel):
    value:str
    label:str

class VoiceLibrary(pydantic.BaseModel):

    api: str
    voices: list[Voice] = pydantic.Field(default_factory=list)
    last_synced: float = None


@register()
class TTSAgent(Agent):

    """
    Text to speech agent
    """

    agent_type = "tts"
    verbose_name = "Voice"
    requires_llm_client = False

    @classmethod
    def config_options(cls, agent=None):
        config_options = super().config_options(agent=agent)

        if agent:
            config_options["actions"]["_config"]["config"]["voice_id"]["choices"] = [
                voice.model_dump() for voice in agent.list_voices_sync()
            ]

        return config_options

    def __init__(self, **kwargs):

        self.is_enabled = False
        nltk.download("punkt", quiet=True)

        self.voices = {
            "elevenlabs": VoiceLibrary(api="elevenlabs"),
            "coqui": VoiceLibrary(api="coqui"),
            "tts": VoiceLibrary(api="tts"),
        }
        self.config = config.load_config()
        self.playback_done_event = asyncio.Event()
        self.actions = {
            "_config": AgentAction(
                enabled=True,
                label="Configure",
                description="TTS agent configuration",
                config={
                    "api": AgentActionConfig(
                        type="text",
                        choices=[
                            # TODO at local TTS support
                            {"value": "tts", "label": "TTS (Local)"},
                            {"value": "elevenlabs", "label": "Eleven Labs"},
                            {"value": "coqui", "label": "Coqui Studio"},
                        ],
                        value="tts",
                        label="API",
                        description="Which TTS API to use",
                        onchange="emit",
                    ),
                    "voice_id": AgentActionConfig(
                        type="text",
                        value="default",
                        label="Narrator Voice",
                        description="Voice ID/Name to use for TTS",
                        choices=[]
                    ),
                    "generate_for_player": AgentActionConfig(
                        type="bool",
                        value=False,
                        label="Generate for player",
                        description="Generate audio for player messages",
                    ),
                    "generate_for_npc": AgentActionConfig(
                        type="bool",
                        value=True,
                        label="Generate for NPCs",
                        description="Generate audio for NPC messages",
                    ),
                    "generate_for_narration": AgentActionConfig(
                        type="bool",
                        value=True,
                        label="Generate for narration",
                        description="Generate audio for narration messages",
                    ),
                    "generate_chunks": AgentActionConfig(
                        type="bool",
                        value=False,
                        label="Split generation",
                        description="Generate audio chunks for each sentence - will be much more responsive but may loose context to inform inflection",
                    )
                }
            ),
        }

        self.actions["_config"].model_dump()
        handlers["config_saved"].connect(self.on_config_saved)


    @property
    def enabled(self):
        return self.is_enabled

    @property
    def has_toggle(self):
        return True

    @property
    def experimental(self):
        return False

    @property
    def not_ready_reason(self) -> str:
        """
        Returns a string explaining why the agent is not ready
        """

        if self.ready:
            return ""

        if self.api == "tts":
            if not TTS:
                return "TTS not installed"

        elif self.requires_token and not self.token:
            return "No API token"

        elif not self.default_voice_id:
            return "No voice selected"

    @property
    def agent_details(self):
        suffix = ""

        if not self.ready:
            suffix = f" - {self.not_ready_reason}"
        else:
            suffix = f" - {self.voice_id_to_label(self.default_voice_id)}"

        api = self.api
        choices = self.actions["_config"].config["api"].choices
        api_label = api
        for choice in choices:
            if choice["value"] == api:
                api_label = choice["label"]
                break

        return f"{api_label}{suffix}"

    @property
    def api(self):
        return self.actions["_config"].config["api"].value

    @property
    def token(self):
        api = self.api
        return self.config.get(api,{}).get("api_key")

    @property
    def default_voice_id(self):
        return self.actions["_config"].config["voice_id"].value

    @property
    def requires_token(self):
        return self.api != "tts"

    @property
    def ready(self):

        if self.api == "tts":
            if not TTS:
                return False
            return True

        return (not self.requires_token or self.token) and self.default_voice_id

    @property
    def status(self):
        if not self.enabled:
            return "disabled"
        if self.ready:
            return "active" if not getattr(self, "processing", False) else "busy"
        if self.requires_token and not self.token:
            return "error"
        if self.api == "tts":
            if not TTS:
                return "error"
        return "uninitialized"

    @property
    def max_generation_length(self):
        if self.api == "elevenlabs":
            return 1024
        elif self.api == "coqui":
            return 250

        return 250

    def apply_config(self, *args, **kwargs):

        try:
            api = kwargs["actions"]["_config"]["config"]["api"]["value"]
        except KeyError:
            api = self.api

        api_changed = api != self.api

        log.debug("apply_config", api=api, api_changed=api != self.api, current_api=self.api)

        super().apply_config(*args, **kwargs)


        if api_changed:
            try:
                self.actions["_config"].config["voice_id"].value = self.voices[api].voices[0].value
            except IndexError:
                self.actions["_config"].config["voice_id"].value = ""


    def connect(self, scene):
        super().connect(scene)
        talemate.emit.async_signals.get("game_loop_new_message").connect(self.on_game_loop_new_message)

    def on_config_saved(self, event):
        config = event.data
        self.config = config
        instance.emit_agent_status(self.__class__, self)

    async def on_game_loop_new_message(self, emission:GameLoopNewMessageEvent):
        """
        Called when a conversation is generated
        """

        if not self.enabled or not self.ready:
            return

        if not isinstance(emission.message, (CharacterMessage, NarratorMessage)):
            return

        if isinstance(emission.message, NarratorMessage) and not self.actions["_config"].config["generate_for_narration"].value:
            return

        if isinstance(emission.message, CharacterMessage):

            if emission.message.source == "player" and not self.actions["_config"].config["generate_for_player"].value:
                return
            elif emission.message.source == "ai" and not self.actions["_config"].config["generate_for_npc"].value:
                return

        if isinstance(emission.message, CharacterMessage):
            character_prefix = emission.message.split(":", 1)[0]
        else:
            character_prefix = ""

        log.info("reactive tts", message=emission.message, character_prefix=character_prefix)

        await self.generate(str(emission.message).replace(character_prefix+": ", ""))


    def voice(self, voice_id:str) -> Union[Voice, None]:
        for voice in self.voices[self.api].voices:
            if voice.value == voice_id:
                return voice
        return None

    def voice_id_to_label(self, voice_id:str):
        for voice in self.voices[self.api].voices:
            if voice.value == voice_id:
                return voice.label
        return None

    def list_voices_sync(self):
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(self.list_voices())

    async def list_voices(self):
        if self.requires_token and not self.token:
            return []

        library = self.voices[self.api]

        # TODO: allow re-syncing voices
        if library.last_synced:
            return library.voices

        list_fn = getattr(self, f"_list_voices_{self.api}")
        log.info("Listing voices", api=self.api)

        library.voices = await list_fn()
        library.last_synced = time.time()

        # if the current voice cannot be found, reset it
        if not self.voice(self.default_voice_id):
            self.actions["_config"].config["voice_id"].value = ""

        # set loading to false
        return library.voices

    @set_processing
    async def generate(self, text: str):
        if not self.enabled or not self.ready or not text:
            return


        self.playback_done_event.set()

        generate_fn = getattr(self, f"_generate_{self.api}")

        if self.actions["_config"].config["generate_chunks"].value:
            chunks = parse_chunks(text)
            chunks = rejoin_chunks(chunks)
        else:
            chunks = parse_chunks(text)
            chunks = rejoin_chunks(chunks, chunk_size=self.max_generation_length)

        # Start generating audio chunks in the background
        generation_task = asyncio.create_task(self.generate_chunks(generate_fn, chunks))

        # Wait for both tasks to complete
        await asyncio.gather(generation_task)

    async def generate_chunks(self, generate_fn, chunks):
        for chunk in chunks:
            chunk = chunk.replace("*","").strip()
            log.info("Generating audio", api=self.api, chunk=chunk)
            audio_data = await generate_fn(chunk)
            self.play_audio(audio_data)

    def play_audio(self, audio_data):
        # play audio through the python audio player
        #play(audio_data)

        emit("audio_queue", data={"audio_data": base64.b64encode(audio_data).decode("utf-8")})

        self.playback_done_event.set()  # Signal that playback is finished

    # LOCAL

    async def _generate_tts(self, text: str) -> Union[bytes, None]:

        if not TTS:
            return

        tts_config = self.config.get("tts",{})
        model = tts_config.get("model")
        device = tts_config.get("device", "cpu")

        log.debug("tts local", model=model, device=device)

        if not hasattr(self, "tts_instance"):
            self.tts_instance = TTS(model).to(device)

        tts = self.tts_instance

        loop = asyncio.get_event_loop()

        voice = self.voice(self.default_voice_id)


        with tempfile.TemporaryDirectory() as temp_dir:
            file_path = os.path.join(temp_dir, f"tts-{uuid.uuid4()}.wav")

            await loop.run_in_executor(None, functools.partial(tts.tts_to_file, text=text, speaker_wav=voice.value, language="en", file_path=file_path))
            #tts.tts_to_file(text=text, speaker_wav=voice.value, language="en", file_path=file_path)


            with open(file_path, "rb") as f:
                return f.read()


    async def _list_voices_tts(self) -> dict[str, str]:
        return [Voice(**voice) for voice in self.config.get("tts",{}).get("voices",[])]

    # ELEVENLABS

    async def _generate_elevenlabs(self, text: str, chunk_size: int = 1024) -> Union[bytes, None]:
        api_key = self.token
        if not api_key:
            return

        async with httpx.AsyncClient() as client:
            url = f"https://api.elevenlabs.io/v1/text-to-speech/{self.default_voice_id}"
            headers = {
                "Accept": "audio/mpeg",
                "Content-Type": "application/json",
                "xi-api-key": api_key,
            }
            data = {
                "text": text,
                "model_id": self.config.get("elevenlabs",{}).get("model"),
                "voice_settings": {
                    "stability": 0.5,
                    "similarity_boost": 0.5
                }
            }

            response = await client.post(url, json=data, headers=headers, timeout=300)

            if response.status_code == 200:
                bytes_io = io.BytesIO()
                for chunk in response.iter_bytes(chunk_size=chunk_size):
                    if chunk:
                        bytes_io.write(chunk)

                # Put the audio data in the queue for playback
                return bytes_io.getvalue()
            else:
                log.error(f"Error generating audio: {response.text}")

    async def _list_voices_elevenlabs(self) -> dict[str, str]:

        url_voices = "https://api.elevenlabs.io/v1/voices"

        voices = []

        async with httpx.AsyncClient() as client:
            headers = {
                "Accept": "application/json",
                "xi-api-key": self.token,
            }
            response = await client.get(url_voices, headers=headers, params={"per_page":1000})
            speakers = response.json()["voices"]
            voices.extend([Voice(value=speaker["voice_id"], label=speaker["name"]) for speaker in speakers])

        # sort by name
        voices.sort(key=lambda x: x.label)

        return voices

    # COQUI STUDIO

    async def _generate_coqui(self, text: str) -> Union[bytes, None]:
        api_key = self.token
        if not api_key:
            return

        async with httpx.AsyncClient() as client:
            url = "https://app.coqui.ai/api/v2/samples/xtts/render/"
            headers = {
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Authorization": f"Bearer {api_key}"
            }
            data = {
                "voice_id": self.default_voice_id,
                "text": text,
                "language": "en"  # Assuming English language for simplicity; this could be parameterized
            }

            # Make the POST request to Coqui API
            response = await client.post(url, json=data, headers=headers, timeout=300)
            if response.status_code in [200, 201]:
                # Parse the JSON response to get the audio URL
                response_data = response.json()
                audio_url = response_data.get('audio_url')
                if audio_url:
                    # Make a GET request to download the audio file
                    audio_response = await client.get(audio_url)
                    if audio_response.status_code == 200:
                        # delete the sample from Coqui Studio
                        # await self._cleanup_coqui(response_data.get('id'))
                        return audio_response.content
                    else:
                        log.error(f"Error downloading audio: {audio_response.text}")
                else:
                    log.error("No audio URL in response")
            else:
                log.error(f"Error generating audio: {response.text}")

    async def _cleanup_coqui(self, sample_id: str):
        api_key = self.token
        if not api_key or not sample_id:
            return

        async with httpx.AsyncClient() as client:
            url = f"https://app.coqui.ai/api/v2/samples/xtts/{sample_id}"
            headers = {
                "Authorization": f"Bearer {api_key}"
            }

            # Make the DELETE request to Coqui API
            response = await client.delete(url, headers=headers)

            if response.status_code == 204:
                log.info(f"Successfully deleted sample with ID: {sample_id}")
            else:
                log.error(f"Error deleting sample with ID: {sample_id}: {response.text}")

    async def _list_voices_coqui(self) -> dict[str, str]:

        url_speakers = "https://app.coqui.ai/api/v2/speakers"
        url_custom_voices = "https://app.coqui.ai/api/v2/voices"

        voices = []

        async with httpx.AsyncClient() as client:
            headers = {
                "Authorization": f"Bearer {self.token}"
            }
            response = await client.get(url_speakers, headers=headers, params={"per_page":1000})
            speakers = response.json()["result"]
            voices.extend([Voice(value=speaker["id"], label=speaker["name"]) for speaker in speakers])

            response = await client.get(url_custom_voices, headers=headers, params={"per_page":1000})
            custom_voices = response.json()["result"]
            voices.extend([Voice(value=voice["id"], label=voice["name"]) for voice in custom_voices])

        # sort by name
        voices.sort(key=lambda x: x.label)

        return voices
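
`parse_chunks` and `rejoin_chunks` above split narration into sentences and then greedily repack them under the per-API size limit (1024 characters for Eleven Labs, 250 for Coqui, per `max_generation_length`). A condensed sketch of the same split-and-repack idea using only `nltk`:

```python
import nltk
from nltk.tokenize import sent_tokenize

nltk.download("punkt", quiet=True)

def chunk_for_tts(text: str, chunk_size: int = 250) -> list[str]:
    # Split into sentences, then greedily pack sentences back together
    # until the per-request size limit is reached.
    chunks, current = [], ""
    for sentence in sent_tokenize(text):
        if len(current) + len(sentence) > chunk_size and current:
            chunks.append(current.strip())
            current = ""
        current += " " + sentence
    if current:
        chunks.append(current.strip())
    return chunks

print(chunk_for_tts("First sentence. Second sentence. " * 10, chunk_size=80))
```
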
@@ -1,29 +1,43 @@
|
||||
from __future__ import annotations
|
||||
import dataclasses
|
||||
|
||||
import asyncio
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING, Callable, List, Optional, Union
|
||||
|
||||
import talemate.data_objects as data_objects
|
||||
import talemate.emit.async_signals
|
||||
import talemate.util as util
|
||||
from talemate.prompts import Prompt
|
||||
from talemate.scene_message import DirectorMessage, TimePassageMessage
|
||||
from talemate.emit import emit
|
||||
from talemate.events import GameLoopEvent
|
||||
|
||||
from .base import Agent, set_processing, AgentAction, AgentActionConfig
|
||||
from .base import Agent, set_processing, AgentAction, AgentActionConfig, AgentEmission
|
||||
from .registry import register
|
||||
|
||||
import structlog
|
||||
|
||||
import isodate
|
||||
import time
|
||||
import re
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from talemate.agents.conversation import ConversationAgentEmission
|
||||
|
||||
|
||||
log = structlog.get_logger("talemate.agents.world_state")
|
||||
|
||||
talemate.emit.async_signals.register("agent.world_state.time")
|
||||
|
||||
@dataclasses.dataclass
|
||||
class WorldStateAgentEmission(AgentEmission):
|
||||
"""
|
||||
Emission class for world state agent
|
||||
"""
|
||||
pass
|
||||
|
||||
@dataclasses.dataclass
|
||||
class TimePassageEmission(WorldStateAgentEmission):
|
||||
"""
|
||||
Emission class for time passage
|
||||
"""
|
||||
duration: str
|
||||
narrative: str
|
||||
|
||||
|
||||
@register()
|
||||
class WorldStateAgent(Agent):
|
||||
"""
|
||||
@@ -58,9 +72,29 @@ class WorldStateAgent(Agent):
|
||||
|
||||
def connect(self, scene):
|
||||
super().connect(scene)
|
||||
talemate.emit.async_signals.get("agent.conversation.generated").connect(self.on_conversation_generated)
|
||||
talemate.emit.async_signals.get("game_loop").connect(self.on_game_loop)
|
||||
|
||||
async def on_conversation_generated(self, emission:ConversationAgentEmission):
|
||||
async def advance_time(self, duration:str, narrative:str=None):
|
||||
"""
|
||||
Emit a time passage message
|
||||
"""
|
||||
|
||||
isodate.parse_duration(duration)
|
||||
msg_text = narrative or util.iso8601_duration_to_human(duration, suffix=" later")
|
||||
message = TimePassageMessage(ts=duration, message=msg_text)
|
||||
|
||||
log.debug("world_state.advance_time", message=message)
|
||||
self.scene.push_history(message)
|
||||
self.scene.emit_status()
|
||||
|
||||
emit("time", message)
|
||||
|
||||
        await talemate.emit.async_signals.get("agent.world_state.time").send(
            TimePassageEmission(agent=self, duration=duration, narrative=msg_text)
        )

    async def on_game_loop(self, emission: GameLoopEvent):
        """
        Called when a conversation is generated
        """
@@ -68,8 +102,7 @@ class WorldStateAgent(Agent):

        if not self.enabled:
            return

-        for _ in emission.generation:
-            await self.update_world_state()
+        await self.update_world_state()

    async def update_world_state(self):
@@ -97,7 +130,7 @@ class WorldStateAgent(Agent):
        t1 = time.time()

        _, world_state = await Prompt.request(
-            "world_state.request-world-state",
+            "world_state.request-world-state-v2",
            self.client,
            "analyze_long",
            vars = {
@@ -111,6 +144,7 @@ class WorldStateAgent(Agent):
        self.scene.log.debug("request_world_state", response=world_state, time=time.time() - t1)

        return world_state

    @set_processing
    async def request_world_state_inline(self):
@@ -123,10 +157,10 @@ class WorldStateAgent(Agent):

        # first, we need to get the marked items (objects etc.)

-        marked_items_response = await Prompt.request(
+        _, marked_items_response = await Prompt.request(
            "world_state.request-world-state-inline-items",
            self.client,
-            "analyze_freeform",
+            "analyze_long",
            vars = {
                "scene": self.scene,
                "max_tokens": self.client.max_token_length,
@@ -160,6 +194,53 @@ class WorldStateAgent(Agent):
        duration = "P"+duration

        return duration

+    @set_processing
+    async def analyze_text_and_extract_context(
+        self,
+        text: str,
+        goal: str,
+    ):
+        response = await Prompt.request(
+            "world_state.analyze-text-and-extract-context",
+            self.client,
+            "analyze_freeform",
+            vars = {
+                "scene": self.scene,
+                "max_tokens": self.client.max_token_length,
+                "text": text,
+                "goal": goal,
+            }
+        )
+
+        log.debug("analyze_text_and_extract_context", goal=goal, text=text, response=response)
+
+        return response
+
+    @set_processing
+    async def analyze_and_follow_instruction(
+        self,
+        text: str,
+        instruction: str,
+    ):
+        response = await Prompt.request(
+            "world_state.analyze-text-and-follow-instruction",
+            self.client,
+            "analyze_freeform",
+            vars = {
+                "scene": self.scene,
+                "max_tokens": self.client.max_token_length,
+                "text": text,
+                "instruction": instruction,
+            }
+        )
+
+        log.debug("analyze_and_follow_instruction", instruction=instruction, text=text, response=response)
+
+        return response

    @set_processing
    async def analyze_text_and_answer_question(
@@ -246,4 +327,27 @@ class WorldStateAgent(Agent):
            name, value = line.split(":", 1)
            data[name.strip()] = value.strip()

-        return data
+        return data
+
+    @set_processing
+    async def match_character_names(self, names: list[str]):
+        """
+        Attempts to match character names.
+        """
+        _, response = await Prompt.request(
+            "world_state.match-character-names",
+            self.client,
+            "analyze_long",
+            vars = {
+                "scene": self.scene,
+                "max_tokens": self.client.max_token_length,
+                "names": names,
+            }
+        )
+
+        log.debug("match_character_names", names=names, response=response)
+
+        return response
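
A minimal usage sketch for the new analysis helpers, assuming an initialized WorldStateAgent wired to a client (the driver code and variable names here are illustrative, not part of the diff):

    # Hypothetical driver code.
    async def demo(world_state_agent):
        # Pull goal-relevant context out of a piece of narration.
        context = await world_state_agent.analyze_text_and_extract_context(
            text="The harbor was empty except for a single rust-eaten trawler.",
            goal="identify locations the characters could explore",
        )
        # Normalize loosely spelled names against the scene's characters.
        matches = await world_state_agent.match_character_names(["annie", "Cpt. Reyes"])
        return context, matches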

@@ -1,4 +1,6 @@
+import os
from talemate.client.openai import OpenAIClient
from talemate.client.registry import CLIENT_CLASSES, get_client_class, register
from talemate.client.textgenwebui import TextGeneratorWebuiClient
-import talemate.client.runpod
+from talemate.client.lmstudio import LMStudioClient
+import talemate.client.runpod
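
For orientation, a hedged sketch of how a registered client might be looked up through the registry imported above (the exact signature of get_client_class is assumed from its name, not confirmed by this diff):

    # Assumption: get_client_class(name) returns the class registered under that client_type.
    from talemate.client.registry import get_client_class

    LMStudio = get_client_class("lmstudio")
    client = LMStudio(api_url="http://localhost:1234", name="local-lmstudio")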

src/talemate/client/base.py (new file, 518 lines)
@@ -0,0 +1,518 @@
"""
|
||||
A unified client base, based on the openai API
|
||||
"""
|
||||
import copy
|
||||
import random
|
||||
import time
|
||||
from typing import Callable
|
||||
|
||||
import structlog
|
||||
import logging
|
||||
from openai import AsyncOpenAI
|
||||
|
||||
from talemate.emit import emit
|
||||
import talemate.instance as instance
|
||||
import talemate.client.presets as presets
|
||||
import talemate.client.system_prompts as system_prompts
|
||||
import talemate.util as util
|
||||
from talemate.client.context import client_context_attribute
|
||||
from talemate.client.model_prompts import model_prompt
|
||||
from talemate.agents.context import active_agent
|
||||
|
||||
# Set up logging level for httpx to WARNING to suppress debug logs.
|
||||
logging.getLogger('httpx').setLevel(logging.WARNING)
|
||||
|
||||
REMOTE_SERVICES = [
|
||||
# TODO: runpod.py should add this to the list
|
||||
".runpod.net"
|
||||
]
|
||||
|
||||
STOPPING_STRINGS = ["<|im_end|>", "</s>"]
|
||||
|
||||
class ClientBase:
|
||||
|
||||
api_url: str
|
||||
model_name: str
|
||||
name:str = None
|
||||
enabled: bool = True
|
||||
current_status: str = None
|
||||
max_token_length: int = 4096
|
||||
processing: bool = False
|
||||
connected: bool = False
|
||||
conversation_retries: int = 5
|
||||
auto_break_repetition_enabled: bool = True
|
||||
|
||||
client_type = "base"
|
||||
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_url: str = None,
|
||||
name = None,
|
||||
**kwargs,
|
||||
):
|
||||
self.api_url = api_url
|
||||
self.name = name or self.client_type
|
||||
self.log = structlog.get_logger(f"client.{self.client_type}")
|
||||
self.set_client()
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.client_type}Client[{self.api_url}][{self.model_name or ''}]"
|
||||
|
||||
def set_client(self):
|
||||
self.client = AsyncOpenAI(base_url=self.api_url, api_key="sk-1111")
|
||||
|
||||
def prompt_template(self, sys_msg, prompt):
|
||||
|
||||
"""
|
||||
Applies the appropriate prompt template for the model.
|
||||
"""
|
||||
|
||||
if not self.model_name:
|
||||
self.log.warning("prompt template not applied", reason="no model loaded")
|
||||
return f"{sys_msg}\n{prompt}"
|
||||
|
||||
return model_prompt(self.model_name, sys_msg, prompt)
|
||||
|
||||
def has_prompt_template(self):
|
||||
if not self.model_name:
|
||||
return False
|
||||
|
||||
return model_prompt.exists(self.model_name)
|
||||
|
||||
def prompt_template_example(self):
|
||||
if not self.model_name:
|
||||
return None
|
||||
return model_prompt(self.model_name, "sysmsg", "prompt<|BOT|>{LLM coercion}")
|
||||
|
||||
def reconfigure(self, **kwargs):
|
||||
|
||||
"""
|
||||
Reconfigures the client.
|
||||
|
||||
Keyword Arguments:
|
||||
|
||||
- api_url: the API URL to use
|
||||
- max_token_length: the max token length to use
|
||||
- enabled: whether the client is enabled
|
||||
"""
|
||||
|
||||
if "api_url" in kwargs:
|
||||
self.api_url = kwargs["api_url"]
|
||||
|
||||
if "max_token_length" in kwargs:
|
||||
self.max_token_length = kwargs["max_token_length"]
|
||||
|
||||
if "enabled" in kwargs:
|
||||
self.enabled = bool(kwargs["enabled"])
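
A quick sketch of reconfigure in use; it only touches the keys you pass (values illustrative):

    client.reconfigure(api_url="http://localhost:5000", max_token_length=8192)
    client.reconfigure(enabled=False)  # disable without changing the URL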

    def toggle_disabled_if_remote(self):
        """
        If the client is targeting a remote recognized service, this
        will disable the client.
        """
        for service in REMOTE_SERVICES:
            if service in self.api_url:
                if self.enabled:
                    self.log.warn("remote service unreachable, disabling client", client=self.name)
                self.enabled = False
                return True

        return False

    def get_system_message(self, kind: str) -> str:
        """
        Returns the appropriate system message for the given kind of generation

        Arguments:

        - kind: the kind of generation
        """
        # TODO: make extensible
        if "narrate" in kind:
            return system_prompts.NARRATOR
        if "story" in kind:
            return system_prompts.NARRATOR
        if "director" in kind:
            return system_prompts.DIRECTOR
        if "create" in kind:
            return system_prompts.CREATOR
        if "roleplay" in kind:
            return system_prompts.ROLEPLAY
        if "conversation" in kind:
            return system_prompts.ROLEPLAY
        if "editor" in kind:
            return system_prompts.EDITOR
        if "world_state" in kind:
            return system_prompts.WORLD_STATE
        if "analyze_freeform" in kind:
            return system_prompts.ANALYST_FREEFORM
        if "analyst" in kind:
            return system_prompts.ANALYST
        if "analyze" in kind:
            return system_prompts.ANALYST

        return system_prompts.BASIC

    def emit_status(self, processing: bool = None):
        """
        Sets and emits the client status.
        """
        if processing is not None:
            self.processing = processing

        if not self.enabled:
            status = "disabled"
            model_name = "Disabled"
        elif not self.connected:
            status = "error"
            model_name = "Could not connect"
        elif self.model_name:
            status = "busy" if self.processing else "idle"
            model_name = self.model_name
        else:
            model_name = "No model loaded"
            status = "warning"

        status_change = status != self.current_status
        self.current_status = status

        emit(
            "client_status",
            message=self.client_type,
            id=self.name,
            details=model_name,
            status=status,
            data={
                "prompt_template_example": self.prompt_template_example(),
                "has_prompt_template": self.has_prompt_template(),
            }
        )

        if status_change:
            instance.emit_agent_status_by_client(self)

    async def get_model_name(self):
        models = await self.client.models.list()
        try:
            return models.data[0].id
        except IndexError:
            return None

    async def status(self):
        """
        Send a request to the API to retrieve the loaded AI model name.
        Raises an error if no model name is returned.
        :return: None
        """
        if self.processing:
            return

        if not self.enabled:
            self.connected = False
            self.emit_status()
            return

        try:
            self.model_name = await self.get_model_name()
        except Exception as e:
            self.log.warning("client status error", e=e, client=self.name)
            self.model_name = None
            self.connected = False
            self.toggle_disabled_if_remote()
            self.emit_status()
            return

        self.connected = True

        if not self.model_name or self.model_name == "None":
            self.log.warning("client model not loaded", client=self)
            self.emit_status()
            return

        self.emit_status()

    def generate_prompt_parameters(self, kind: str):
        parameters = {}
        self.tune_prompt_parameters(
            presets.configure(parameters, kind, self.max_token_length),
            kind
        )
        return parameters

    def tune_prompt_parameters(self, parameters: dict, kind: str):
        parameters["stream"] = False
        if client_context_attribute("nuke_repetition") > 0.0 and self.jiggle_enabled_for(kind):
            self.jiggle_randomness(parameters, offset=client_context_attribute("nuke_repetition"))

        fn_tune_kind = getattr(self, f"tune_prompt_parameters_{kind}", None)
        if fn_tune_kind:
            fn_tune_kind(parameters)

        agent_context = active_agent.get()
        if agent_context.agent:
            agent_context.agent.inject_prompt_paramters(parameters, kind, agent_context.action)

    def tune_prompt_parameters_conversation(self, parameters: dict):
        conversation_context = client_context_attribute("conversation")
        parameters["max_tokens"] = conversation_context.get("length", 96)

        dialog_stopping_strings = [
            f"{character}:" for character in conversation_context["other_characters"]
        ]

        if "extra_stopping_strings" in parameters:
            parameters["extra_stopping_strings"] += dialog_stopping_strings
        else:
            parameters["extra_stopping_strings"] = dialog_stopping_strings

    async def generate(self, prompt: str, parameters: dict, kind: str):
        """
        Generates text from the given prompt and parameters.
        """
        self.log.debug("generate", prompt=prompt[:128] + " ...", parameters=parameters)

        try:
            response = await self.client.completions.create(prompt=prompt.strip(), **parameters)
            # the openai v1 client returns a pydantic object, not a dict
            return response.choices[0].text
        except Exception as e:
            self.log.error("generate error", e=e)
            return ""

    async def send_prompt(
        self, prompt: str, kind: str = "conversation", finalize: Callable = lambda x: x, retries: int = 2
    ) -> str:
        """
        Send a prompt to the AI and return its response.
        :param prompt: The text prompt to send.
        :return: The AI's response text.
        """
        try:
            self.emit_status(processing=True)
            await self.status()

            prompt_param = self.generate_prompt_parameters(kind)

            finalized_prompt = self.prompt_template(self.get_system_message(kind), prompt).strip()
            prompt_param = finalize(prompt_param)

            token_length = self.count_tokens(finalized_prompt)

            time_start = time.time()
            extra_stopping_strings = prompt_param.pop("extra_stopping_strings", [])

            self.log.debug("send_prompt", token_length=token_length, max_token_length=self.max_token_length, parameters=prompt_param)
            response = await self.generate(
                self.repetition_adjustment(finalized_prompt),
                prompt_param,
                kind
            )

            response, finalized_prompt = await self.auto_break_repetition(finalized_prompt, prompt_param, response, kind, retries)

            time_end = time.time()

            # stopping strings sometimes get appended to the end of the response anyways
            # split the response by the first stopping string and take the first part

            for stopping_string in STOPPING_STRINGS + extra_stopping_strings:
                if stopping_string in response:
                    response = response.split(stopping_string)[0]
                    break

            emit("prompt_sent", data={
                "kind": kind,
                "prompt": finalized_prompt,
                "response": response,
                "prompt_tokens": token_length,
                "response_tokens": self.count_tokens(response),
                "time": time_end - time_start,
            })

            return response
        finally:
            self.emit_status(processing=False)
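
Driving send_prompt directly looks like this (a sketch, assuming a configured client inside a running event loop; kind selects the preset, system message and token budget):

    async def ask(client):
        # finalize defaults to a no-op; retries bounds the repetition-break loop below.
        return await client.send_prompt("Describe the harbor at dusk.", kind="narrate")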

    async def auto_break_repetition(
        self,
        finalized_prompt: str,
        prompt_param: dict,
        response: str,
        kind: str,
        retries: int,
        pad_max_tokens: int = 32,
    ) -> str:
        """
        If repetition breaking is enabled, this will retry the prompt if its
        response is too similar to other messages in the prompt

        This requires the agent to have the allow_repetition_break method
        and the jiggle_enabled_for method and the client to have the
        auto_break_repetition_enabled attribute set to True

        Arguments:

        - finalized_prompt: the prompt that was sent
        - prompt_param: the parameters that were used
        - response: the response that was received
        - kind: the kind of generation
        - retries: the number of retries left
        - pad_max_tokens: increase response max_tokens by this amount per iteration

        Returns:

        - the response
        """
        if not self.auto_break_repetition_enabled:
            return response, finalized_prompt

        agent_context = active_agent.get()
        if self.jiggle_enabled_for(kind, auto=True):

            # check if the response is a repetition
            # using the default similarity threshold of 98, meaning it needs
            # to be really similar to be considered a repetition

            is_repetition, similarity_score, matched_line = util.similarity_score(
                response,
                finalized_prompt.split("\n"),
            )

            if not is_repetition:

                # not a repetition, return the response

                self.log.debug("send_prompt no similarity", similarity_score=similarity_score)
                return response, finalized_prompt

            while is_repetition and retries > 0:

                # it's a repetition, retry the prompt with adjusted parameters

                self.log.warn(
                    "send_prompt similarity retry",
                    agent=agent_context.agent.agent_type,
                    similarity_score=similarity_score,
                    retries=retries
                )

                # first we apply the client's randomness jiggle which will adjust
                # parameters like temperature and repetition_penalty, depending
                # on the client
                #
                # this is a cumulative adjustment, so it will add to the previous
                # iteration's adjustment, this also means retries should be kept low
                # otherwise it will get out of hand and start generating nonsense

                self.jiggle_randomness(prompt_param, offset=0.5)

                # then we pad the max_tokens by the pad_max_tokens amount

                prompt_param["max_tokens"] += pad_max_tokens

                # send the prompt again
                # we use the repetition_adjustment method to further encourage
                # the AI to break the repetition on its own as well.

                finalized_prompt = self.repetition_adjustment(finalized_prompt, is_repetitive=True)

                response = retried_response = await self.generate(
                    finalized_prompt,
                    prompt_param,
                    kind
                )

                self.log.debug("send_prompt dedupe sentences", response=response, matched_line=matched_line)

                # a lot of the times the response will now contain the repetition + something new
                # so we dedupe the response to remove the repetition on sentences level

                response = util.dedupe_sentences(response, matched_line, similarity_threshold=85, debug=True)
                self.log.debug("send_prompt dedupe sentences (after)", response=response)

                # deduping may have removed the entire response, so we check for that

                if not util.strip_partial_sentences(response).strip():

                    # if the response is empty, we set the response to the original
                    # and try again next loop

                    response = retried_response

                # check if the response is a repetition again

                is_repetition, similarity_score, matched_line = util.similarity_score(
                    response,
                    finalized_prompt.split("\n"),
                )
                retries -= 1

        return response, finalized_prompt

    def count_tokens(self, content: str):
        return util.count_tokens(content)

    def jiggle_randomness(self, prompt_config: dict, offset: float = 0.3) -> dict:
        """
        adjusts temperature and repetition_penalty
        by random values using the base value as a center
        """
        temp = prompt_config["temperature"]
        min_offset = offset * 0.3
        prompt_config["temperature"] = random.uniform(temp + min_offset, temp + offset)
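
For a feel of the numbers: with the default offset of 0.3 the new temperature is drawn uniformly from [temp + 0.09, temp + 0.3]; the auto repetition breaker above calls this with offset=0.5 on every retry, so the shift compounds:

    # Worked example (illustrative starting temperature).
    params = {"temperature": 0.65}
    client.jiggle_randomness(params, offset=0.5)
    # params["temperature"] now lies somewhere in [0.80, 1.15]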

    def jiggle_enabled_for(self, kind: str, auto: bool = False) -> bool:
        agent_context = active_agent.get()
        agent = agent_context.agent

        if not agent:
            return False

        return agent.allow_repetition_break(kind, agent_context.action, auto=auto)

    def repetition_adjustment(self, prompt: str, is_repetitive: bool = False):
        """
        Breaks the prompt into lines and checks each line for a match with
        [$REPETITION|{repetition_adjustment}].

        On match and if is_repetitive is True, the line is removed from the prompt and
        replaced with the repetition_adjustment.

        On match and if is_repetitive is False, the line is removed from the prompt.
        """
        lines = prompt.split("\n")
        new_lines = []

        for line in lines:
            if line.startswith("[$REPETITION|"):
                if is_repetitive:
                    new_lines.append(line.split("|")[1][:-1])
            else:
                new_lines.append(line)

        return "\n".join(new_lines)
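
A tiny example of the marker behavior, assuming a prompt template that embeds one [$REPETITION|...] line:

    prompt = "Scene so far...\n[$REPETITION|Write something new and surprising.]\nContinue:"

    client.repetition_adjustment(prompt)
    # -> "Scene so far...\nContinue:"  (marker line dropped)

    client.repetition_adjustment(prompt, is_repetitive=True)
    # -> "Scene so far...\nWrite something new and surprising.\nContinue:"
    # note: the split on "|" assumes the adjustment text itself contains no pipe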

src/talemate/client/lmstudio.py (new file, 56 lines)
@@ -0,0 +1,56 @@
from talemate.client.base import ClientBase
from talemate.client.registry import register

from openai import AsyncOpenAI


@register()
class LMStudioClient(ClientBase):

    client_type = "lmstudio"
    conversation_retries = 5

    def set_client(self):
        self.client = AsyncOpenAI(base_url=self.api_url + "/v1", api_key="sk-1111")

    def tune_prompt_parameters(self, parameters: dict, kind: str):
        super().tune_prompt_parameters(parameters, kind)

        keys = list(parameters.keys())

        valid_keys = ["temperature", "top_p"]

        for key in keys:
            if key not in valid_keys:
                del parameters[key]

    async def get_model_name(self):
        model_name = await super().get_model_name()

        # model name comes back as a file path, so we need to extract the model name
        # the path could be windows or linux so it needs to handle both backslash and forward slash

        if model_name:
            model_name = model_name.replace("\\", "/").split("/")[-1]

        return model_name
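
The normalization collapses both path styles to the bare file name, e.g.:

    "C:\\models\\mistral\\mistral-7b.Q5_K_M.gguf".replace("\\", "/").split("/")[-1]
    # -> 'mistral-7b.Q5_K_M.gguf'
    "/home/user/models/mistral-7b.Q5_K_M.gguf".replace("\\", "/").split("/")[-1]
    # -> 'mistral-7b.Q5_K_M.gguf'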

    async def generate(self, prompt: str, parameters: dict, kind: str):
        """
        Generates text from the given prompt and parameters.
        """
        human_message = {'role': 'user', 'content': prompt.strip()}

        self.log.debug("generate", prompt=prompt[:128] + " ...", parameters=parameters)

        try:
            response = await self.client.chat.completions.create(
                model=self.model_name, messages=[human_message], **parameters
            )

            return response.choices[0].message.content
        except Exception as e:
            self.log.error("generate error", e=e)
            return ""

@@ -39,6 +39,9 @@ class ModelPrompt:
            "set_response": self.set_response
        })

+    def exists(self, model_name: str):
+        return bool(self.get_template(model_name))
+
    def set_response(self, prompt: str, response_str: str):

        prompt = prompt.strip("\n").strip()

@@ -1,66 +1,117 @@
-import asyncio
import os
-from typing import Callable
+import json
+from openai import AsyncOpenAI

-from langchain.chat_models import ChatOpenAI
-from langchain.schema import AIMessage, HumanMessage, SystemMessage

+from talemate.client.base import ClientBase
from talemate.client.registry import register
from talemate.emit import emit
from talemate.emit.signals import handlers
import talemate.emit.async_signals as async_signals
from talemate.config import load_config
import talemate.instance as instance
import talemate.client.system_prompts as system_prompts
import structlog
+import tiktoken

__all__ = [
    "OpenAIClient",
]

log = structlog.get_logger("talemate")

+def num_tokens_from_messages(messages: list[dict], model: str = "gpt-3.5-turbo-0613"):
+    """Return the number of tokens used by a list of messages."""
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        print("Warning: model not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    if model in {
+        "gpt-3.5-turbo-0613",
+        "gpt-3.5-turbo-16k-0613",
+        "gpt-4-0314",
+        "gpt-4-32k-0314",
+        "gpt-4-0613",
+        "gpt-4-32k-0613",
+        "gpt-4-1106-preview",
+    }:
+        tokens_per_message = 3
+        tokens_per_name = 1
+    elif model == "gpt-3.5-turbo-0301":
+        tokens_per_message = (
+            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        )
+        tokens_per_name = -1  # if there's a name, the role is omitted
+    elif "gpt-3.5-turbo" in model:
+        print(
+            "Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
+        )
+        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
+    elif "gpt-4" in model:
+        print(
+            "Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
+        )
+        return num_tokens_from_messages(messages, model="gpt-4-0613")
+    else:
+        raise NotImplementedError(
+            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
+    num_tokens = 0
+    for message in messages:
+        num_tokens += tokens_per_message
+        for key, value in message.items():
+            if value is None:
+                continue
+            if isinstance(value, dict):
+                value = json.dumps(value)
+            num_tokens += len(encoding.encode(value))
+            if key == "name":
+                num_tokens += tokens_per_name
+    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
+    return num_tokens
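
A quick sanity check of the counter (exact totals depend on the tiktoken version; the structure of the sum is what matters):

    messages = [{"role": "user", "content": "Hello there."}]
    n = num_tokens_from_messages(messages, model="gpt-4-0613")
    # 3 per message + encoded lengths of "user" and "Hello there." + 3 for the reply primer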

@register()
-class OpenAIClient:
+class OpenAIClient(ClientBase):
    """
    OpenAI client for generating text.
    """

+    client_type = "openai"
+    conversation_retries = 0
+    auto_break_repetition_enabled = False

-    def __init__(self, model="gpt-3.5-turbo", **kwargs):
-        self.name = kwargs.get("name", "openai")
+    def __init__(self, model="gpt-4-1106-preview", **kwargs):
        self.model_name = model
        self.last_token_length = 0
        self.max_token_length = 2048
        self.processing = False
        self.current_status = "idle"
        self.api_key_status = None
        self.config = load_config()
+        super().__init__(**kwargs)

        # if os.environ.get("OPENAI_API_KEY") is not set, look in the config file
        # and set it
+        self.set_client()

        if not os.environ.get("OPENAI_API_KEY"):
            if self.config.get("openai", {}).get("api_key"):
                os.environ["OPENAI_API_KEY"] = self.config["openai"]["api_key"]

-        self.set_client(model)
        handlers["config_saved"].connect(self.on_config_saved)

    @property
    def openai_api_key(self):
-        return os.environ.get("OPENAI_API_KEY")
+        return self.config.get("openai", {}).get("api_key")

    def emit_status(self, processing: bool = None):
        if processing is not None:
            self.processing = processing

-        if os.environ.get("OPENAI_API_KEY"):
+        if self.openai_api_key:
            status = "busy" if self.processing else "idle"
-            model_name = self.model_name or "No model loaded"
+            model_name = self.model_name
        else:
            status = "error"
            model_name = "No API key set"

+        if not self.model_name:
+            status = "error"
+            model_name = "No model loaded"

        self.current_status = status

        emit(
@@ -71,60 +122,60 @@ class OpenAIClient:
            status=status,
        )

-    def set_client(self, model: str, max_token_length: int = None):
+    def set_client(self, max_token_length: int = None):

        if not self.openai_api_key:
+            self.client = AsyncOpenAI(api_key="sk-1111")
            log.error("No OpenAI API key set")
            if self.api_key_status:
                self.api_key_status = False
                emit('request_client_status')
                emit('request_agent_status')
            return

-        self.chat = ChatOpenAI(model=model, verbose=True)
+        model = self.model_name

+        self.client = AsyncOpenAI(api_key=self.openai_api_key)
        if model == "gpt-3.5-turbo":
            self.max_token_length = min(max_token_length or 4096, 4096)
        elif model == "gpt-4":
            self.max_token_length = min(max_token_length or 8192, 8192)
        elif model == "gpt-3.5-turbo-16k":
            self.max_token_length = min(max_token_length or 16384, 16384)
+        elif model == "gpt-4-1106-preview":
+            self.max_token_length = min(max_token_length or 128000, 128000)
        else:
            self.max_token_length = max_token_length or 2048

-        if not self.api_key_status:
+        if self.api_key_status is False:
            emit('request_client_status')
            emit('request_agent_status')
        self.api_key_status = True

        log.info("openai set client")

    def reconfigure(self, **kwargs):
        if "model" in kwargs:
            self.model_name = kwargs["model"]
-            self.set_client(self.model_name, kwargs.get("max_token_length"))
+            self.set_client(kwargs.get("max_token_length"))
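
The context budget tracks the model choice, so switching models through reconfigure also rescales max_token_length (a sketch; assumes an API key is already configured):

    client = OpenAIClient(model="gpt-4")          # max_token_length capped at 8192
    client.reconfigure(model="gpt-4-1106-preview")
    # set_client() reruns and max_token_length becomes 128000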

    def on_config_saved(self, event):
        config = event.data
        self.config = config
        self.set_client()

    def count_tokens(self, content: str):
        if not self.model_name:
            return 0
        return num_tokens_from_messages([{"content": content}], model=self.model_name)

    async def status(self):
        self.emit_status()

-    def get_system_message(self, kind: str) -> str:
-
-        if "narrate" in kind:
-            return system_prompts.NARRATOR
-        if "story" in kind:
-            return system_prompts.NARRATOR
-        if "director" in kind:
-            return system_prompts.DIRECTOR
-        if "create" in kind:
-            return system_prompts.CREATOR
-        if "roleplay" in kind:
-            return system_prompts.ROLEPLAY
-        if "conversation" in kind:
-            return system_prompts.ROLEPLAY
-        if "editor" in kind:
-            return system_prompts.EDITOR
-        if "world_state" in kind:
-            return system_prompts.WORLD_STATE
-        if "analyst" in kind:
-            return system_prompts.ANALYST
-        if "analyze" in kind:
-            return system_prompts.ANALYST
-
-        return system_prompts.BASIC
-
-    async def send_prompt(
-        self, prompt: str, kind: str = "conversation", finalize: Callable = lambda x: x
-    ) -> str:
-
-        right = ""
+    def prompt_template(self, system_message: str, prompt: str):

        if "<|BOT|>" in prompt:
            _, right = prompt.split("<|BOT|>", 1)
@@ -133,35 +184,55 @@ class OpenAIClient:
        else:
            prompt = prompt.replace("<|BOT|>", "")

-        self.emit_status(processing=True)
-        await asyncio.sleep(0.1)
+        return prompt

-        sys_message = SystemMessage(content=self.get_system_message(kind))
-        human_message = HumanMessage(content=prompt)
-
-        log.debug("openai send", kind=kind, sys_message=sys_message)
-
-        response = self.chat([sys_message, human_message])
-        response = response.content
-
-        if right and response.startswith(right):
-            response = response[len(right):].strip()
-
-        if kind == "conversation":
-            response = response.replace("\n", " ").strip()
-
-        log.debug("openai response", response=response)
-
-        emit("prompt_sent", data={
-            "kind": kind,
-            "prompt": prompt,
-            "response": response,
-            # TODO use tiktoken
-            "prompt_tokens": "?",
-            "response_tokens": "?",
-        })
-
-        self.emit_status(processing=False)
-        return response
+    def tune_prompt_parameters(self, parameters: dict, kind: str):
+        super().tune_prompt_parameters(parameters, kind)
+
+        keys = list(parameters.keys())
+
+        valid_keys = ["temperature", "top_p"]
+
+        for key in keys:
+            if key not in valid_keys:
+                del parameters[key]
+
+    async def generate(self, prompt: str, parameters: dict, kind: str):
+        """
+        Generates text from the given prompt and parameters.
+        """
+        if not self.openai_api_key:
+            raise Exception("No OpenAI API key set")
+
+        # only gpt-4-1106-preview supports json_object response coercion
+        supports_json_object = self.model_name in ["gpt-4-1106-preview"]
+        right = None
+        try:
+            _, right = prompt.split("\nContinue this response: ")
+            expected_response = right.strip()
+            if expected_response.startswith("{") and supports_json_object:
+                parameters["response_format"] = {"type": "json_object"}
+        except (IndexError, ValueError):
+            pass
+
+        human_message = {'role': 'user', 'content': prompt.strip()}
+        system_message = {'role': 'system', 'content': self.get_system_message(kind)}
+
+        self.log.debug("generate", prompt=prompt[:128] + " ...", parameters=parameters)
+
+        try:
+            response = await self.client.chat.completions.create(
+                model=self.model_name, messages=[system_message, human_message], **parameters
+            )
+
+            response = response.choices[0].message.content
+
+            if right and response.startswith(right):
+                response = response[len(right):].strip()
+
+            return response
+        except Exception as e:
+            raise
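
The split on "\nContinue this response: " is how the client detects a coerced continuation; when the expected tail opens a JSON object and the model supports it, JSON mode is requested (illustrative prompt):

    prompt = 'List the items as JSON.\nContinue this response: {"items": ['
    # right == '{"items": ['  -> starts with "{" -> on gpt-4-1106-preview:
    # parameters["response_format"] = {"type": "json_object"}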

src/talemate/client/presets.py (new file, 163 lines)
@@ -0,0 +1,163 @@
__all__ = [
    "configure",
    "set_max_tokens",
    "set_preset",
    "preset_for_kind",
    "max_tokens_for_kind",
    "PRESET_TALEMATE_CONVERSATION",
    "PRESET_TALEMATE_CREATOR",
    "PRESET_LLAMA_PRECISE",
    "PRESET_DIVINE_INTELLECT",
    "PRESET_SIMPLE_1",
]

PRESET_TALEMATE_CONVERSATION = {
    "temperature": 0.65,
    "top_p": 0.47,
    "top_k": 42,
    "repetition_penalty": 1.18,
    "repetition_penalty_range": 2048,
}

PRESET_TALEMATE_CREATOR = {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 20,
    "repetition_penalty": 1.15,
    "repetition_penalty_range": 512,
}

PRESET_LLAMA_PRECISE = {
    'temperature': 0.7,
    'top_p': 0.1,
    'top_k': 40,
    'repetition_penalty': 1.18,
}

PRESET_DIVINE_INTELLECT = {
    'temperature': 1.31,
    'top_p': 0.14,
    'top_k': 49,
    "repetition_penalty_range": 1024,
    'repetition_penalty': 1.17,
}

PRESET_SIMPLE_1 = {
    "temperature": 0.7,
    "top_p": 0.9,
    "top_k": 20,
    "repetition_penalty": 1.15,
}

def configure(config: dict, kind: str, total_budget: int):
    """
    Sets the config based on the kind of text to generate.
    """
    set_preset(config, kind)
    set_max_tokens(config, kind, total_budget)
    return config

def set_max_tokens(config: dict, kind: str, total_budget: int):
    """
    Sets the max_tokens in the config based on the kind of text to generate.
    """
    config["max_tokens"] = max_tokens_for_kind(kind, total_budget)
    return config

def set_preset(config: dict, kind: str):
    """
    Sets the preset in the config based on the kind of text to generate.
    """
    config.update(preset_for_kind(kind))

def preset_for_kind(kind: str):
    if kind == "conversation":
        return PRESET_TALEMATE_CONVERSATION
    elif kind == "conversation_old":
        return PRESET_TALEMATE_CONVERSATION  # Assuming old conversation uses the same preset
    elif kind == "conversation_long":
        return PRESET_TALEMATE_CONVERSATION  # Assuming long conversation uses the same preset
    elif kind == "conversation_select_talking_actor":
        return PRESET_TALEMATE_CONVERSATION  # Assuming select talking actor uses the same preset
    elif kind == "summarize":
        return PRESET_LLAMA_PRECISE
    elif kind == "analyze":
        return PRESET_SIMPLE_1
    elif kind == "analyze_creative":
        return PRESET_DIVINE_INTELLECT
    elif kind == "analyze_long":
        return PRESET_SIMPLE_1  # Assuming long analysis uses the same preset as simple
    elif kind == "analyze_freeform":
        return PRESET_LLAMA_PRECISE
    elif kind == "analyze_freeform_short":
        return PRESET_LLAMA_PRECISE  # Assuming short freeform analysis uses the same preset as precise
    elif kind == "narrate":
        return PRESET_LLAMA_PRECISE
    elif kind == "story":
        return PRESET_DIVINE_INTELLECT
    elif kind == "create":
        return PRESET_TALEMATE_CREATOR
    elif kind == "create_concise":
        return PRESET_TALEMATE_CREATOR  # Assuming concise creation uses the same preset as creator
    elif kind == "create_precise":
        return PRESET_LLAMA_PRECISE
    elif kind == "director":
        return PRESET_SIMPLE_1
    elif kind == "director_short":
        return PRESET_SIMPLE_1  # Assuming short direction uses the same preset as simple
    elif kind == "director_yesno":
        return PRESET_SIMPLE_1  # Assuming yes/no direction uses the same preset as simple
    elif kind == "edit_dialogue":
        return PRESET_DIVINE_INTELLECT
    elif kind == "edit_add_detail":
        return PRESET_DIVINE_INTELLECT  # Assuming adding detail uses the same preset as divine intellect
    elif kind == "edit_fix_exposition":
        return PRESET_DIVINE_INTELLECT  # Assuming fixing exposition uses the same preset as divine intellect
    else:
        return PRESET_SIMPLE_1  # Default preset if none of the kinds match

def max_tokens_for_kind(kind: str, total_budget: int):
    if kind == "conversation":
        return 75  # Example value, adjust as needed
    elif kind == "conversation_old":
        return 75  # Example value, adjust as needed
    elif kind == "conversation_long":
        return 300  # Example value, adjust as needed
    elif kind == "conversation_select_talking_actor":
        return 30  # Example value, adjust as needed
    elif kind == "summarize":
        return 500  # Example value, adjust as needed
    elif kind == "analyze":
        return 500  # Example value, adjust as needed
    elif kind == "analyze_creative":
        return 1024  # Example value, adjust as needed
    elif kind == "analyze_long":
        return 2048  # Example value, adjust as needed
    elif kind == "analyze_freeform":
        return 500  # Example value, adjust as needed
    elif kind == "analyze_freeform_short":
        return 10  # Example value, adjust as needed
    elif kind == "narrate":
        return 500  # Example value, adjust as needed
    elif kind == "story":
        return 300  # Example value, adjust as needed
    elif kind == "create":
        return min(1024, int(total_budget * 0.35))  # Example calculation, adjust as needed
    elif kind == "create_concise":
        return min(400, int(total_budget * 0.25))  # Example calculation, adjust as needed
    elif kind == "create_precise":
        return min(400, int(total_budget * 0.25))  # Example calculation, adjust as needed
    elif kind == "director":
        return min(600, int(total_budget * 0.25))  # Example calculation, adjust as needed
    elif kind == "director_short":
        return 25  # Example value, adjust as needed
    elif kind == "director_yesno":
        return 2  # Example value, adjust as needed
    elif kind == "edit_dialogue":
        return 100  # Example value, adjust as needed
    elif kind == "edit_add_detail":
        return 200  # Example value, adjust as needed
    elif kind == "edit_fix_exposition":
        return 1024  # Example value, adjust as needed
    else:
        return 150  # Default value if none of the kinds match
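
For example, building the sampling parameters for a conversation turn against a 4096-token budget:

    params = configure({}, "conversation", 4096)
    # -> {"temperature": 0.65, "top_p": 0.47, "top_k": 42,
    #     "repetition_penalty": 1.18, "repetition_penalty_range": 2048,
    #     "max_tokens": 75}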

@@ -67,9 +67,9 @@ def _client_bootstrap(client_type: ClientType, pod):
    id = pod["id"]

    if client_type == ClientType.textgen:
-        api_url = f"https://{id}-5000.proxy.runpod.net/api"
+        api_url = f"https://{id}-5000.proxy.runpod.net"
    elif client_type == ClientType.automatic1111:
-        api_url = f"https://{id}-5000.proxy.runpod.net/api"
+        api_url = f"https://{id}-5000.proxy.runpod.net"

    return ClientBootstrap(
        client_type=client_type,
@@ -1,729 +1,65 @@
|
||||
import asyncio
|
||||
import random
|
||||
import json
|
||||
import copy
|
||||
import structlog
|
||||
import httpx
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Callable, Union
|
||||
import logging
|
||||
import talemate.util as util
|
||||
from talemate.client.base import ClientBase, STOPPING_STRINGS
|
||||
from talemate.client.registry import register
|
||||
import talemate.client.system_prompts as system_prompts
|
||||
from talemate.emit import Emission, emit
|
||||
from talemate.client.context import client_context_attribute
|
||||
from talemate.client.model_prompts import model_prompt
|
||||
|
||||
import talemate.instance as instance
|
||||
|
||||
log = structlog.get_logger(__name__)
|
||||
|
||||
__all__ = [
|
||||
"TaleMateClient",
|
||||
"RestApiTaleMateClient",
|
||||
"TextGeneratorWebuiClient",
|
||||
]
|
||||
|
||||
# Set up logging level for httpx to WARNING to suppress debug logs.
|
||||
logging.getLogger('httpx').setLevel(logging.WARNING)
|
||||
|
||||
class DefaultContext(int):
|
||||
pass
|
||||
|
||||
|
||||
PRESET_TALEMATE_LEGACY = {
|
||||
"temperature": 0.72,
|
||||
"top_p": 0.73,
|
||||
"top_k": 0,
|
||||
"top_a": 0,
|
||||
"repetition_penalty": 1.18,
|
||||
"repetition_penalty_range": 2048,
|
||||
"encoder_repetition_penalty": 1,
|
||||
#"encoder_repetition_penalty": 1.2,
|
||||
#"no_repeat_ngram_size": 2,
|
||||
"do_sample": True,
|
||||
"length_penalty": 1,
|
||||
}
|
||||
|
||||
PRESET_TALEMATE_CONVERSATION = {
|
||||
"temperature": 0.65,
|
||||
"top_p": 0.47,
|
||||
"top_k": 42,
|
||||
"typical_p": 1,
|
||||
"top_a": 0,
|
||||
"tfs": 1,
|
||||
"epsilon_cutoff": 0,
|
||||
"eta_cutoff": 0,
|
||||
"repetition_penalty": 1.18,
|
||||
"repetition_penalty_range": 2048,
|
||||
"no_repeat_ngram_size": 0,
|
||||
"penalty_alpha": 0,
|
||||
"num_beams": 1,
|
||||
"length_penalty": 1,
|
||||
"min_length": 0,
|
||||
"encoder_rep_pen": 1,
|
||||
"do_sample": True,
|
||||
"early_stopping": False,
|
||||
"mirostat_mode": 0,
|
||||
"mirostat_tau": 5,
|
||||
"mirostat_eta": 0.1
|
||||
}
|
||||
|
||||
PRESET_TALEMATE_CREATOR = {
|
||||
"temperature": 0.7,
|
||||
"top_p": 0.9,
|
||||
"repetition_penalty": 1.15,
|
||||
"repetition_penalty_range": 512,
|
||||
"top_k": 20,
|
||||
"do_sample": True,
|
||||
"length_penalty": 1,
|
||||
}
|
||||
|
||||
PRESET_LLAMA_PRECISE = {
|
||||
'temperature': 0.7,
|
||||
'top_p': 0.1,
|
||||
'repetition_penalty': 1.18,
|
||||
'top_k': 40
|
||||
}
|
||||
|
||||
PRESET_KOBOLD_GODLIKE = {
|
||||
'temperature': 0.7,
|
||||
'top_p': 0.5,
|
||||
'typical_p': 0.19,
|
||||
'repetition_penalty': 1.1,
|
||||
"repetition_penalty_range": 1024,
|
||||
}
|
||||
|
||||
PRESET_DIVINE_INTELLECT = {
|
||||
'temperature': 1.31,
|
||||
'top_p': 0.14,
|
||||
"repetition_penalty_range": 1024,
|
||||
'repetition_penalty': 1.17,
|
||||
'top_k': 49,
|
||||
"mirostat_mode": 0,
|
||||
"mirostat_tau": 5,
|
||||
"mirostat_eta": 0.1,
|
||||
"tfs": 1,
|
||||
}
|
||||
|
||||
PRESET_SIMPLE_1 = {
|
||||
"temperature": 0.7,
|
||||
"top_p": 0.9,
|
||||
"repetition_penalty": 1.15,
|
||||
"top_k": 20,
|
||||
}
|
||||
|
||||
def jiggle_randomness(prompt_config:dict, offset:float=0.3) -> dict:
|
||||
"""
|
||||
adjusts temperature and repetition_penalty
|
||||
by random values using the base value as a center
|
||||
"""
|
||||
|
||||
temp = prompt_config["temperature"]
|
||||
rep_pen = prompt_config["repetition_penalty"]
|
||||
|
||||
copied_config = copy.deepcopy(prompt_config)
|
||||
|
||||
min_offset = offset * 0.3
|
||||
|
||||
copied_config["temperature"] = random.uniform(temp + min_offset, temp + offset)
|
||||
copied_config["repetition_penalty"] = random.uniform(rep_pen + min_offset * 0.3, rep_pen + offset * 0.3)
|
||||
|
||||
return copied_config
|
||||
|
||||
|
||||
class TaleMateClient:
|
||||
"""
|
||||
An abstract TaleMate client that can be implemented for different communication methods with the AI.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
api_url: str,
|
||||
max_token_length: Union[int, DefaultContext] = int.__new__(
|
||||
DefaultContext, 2048
|
||||
),
|
||||
):
|
||||
self.api_url = api_url
|
||||
self.name = "generic_client"
|
||||
self.model_name = None
|
||||
self.last_token_length = 0
|
||||
self.max_token_length = max_token_length
|
||||
self.original_max_token_length = max_token_length
|
||||
self.enabled = True
|
||||
self.current_status = None
|
||||
|
||||
@abstractmethod
|
||||
def send_message(self, message: dict) -> str:
|
||||
"""
|
||||
Sends a message to the AI. Needs to be implemented by the subclass.
|
||||
:param message: The message to be sent.
|
||||
:return: The AI's response text.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def send_prompt(self, prompt: str) -> str:
|
||||
"""
|
||||
Sends a prompt to the AI. Needs to be implemented by the subclass.
|
||||
:param prompt: The text prompt to send.
|
||||
:return: The AI's response text.
|
||||
"""
|
||||
pass
|
||||
|
||||
def reconfigure(self, **kwargs):
|
||||
if "api_url" in kwargs:
|
||||
self.api_url = kwargs["api_url"]
|
||||
|
||||
if "max_token_length" in kwargs:
|
||||
self.max_token_length = kwargs["max_token_length"]
|
||||
|
||||
if "enabled" in kwargs:
|
||||
self.enabled = bool(kwargs["enabled"])
|
||||
|
||||
def remaining_tokens(self, context: Union[str, list]) -> int:
|
||||
return self.max_token_length - util.count_tokens(context)
|
||||
|
||||
|
||||
def prompt_template(self, sys_msg, prompt):
|
||||
return model_prompt(self.model_name, sys_msg, prompt)
|
||||
|
||||
class RESTTaleMateClient(TaleMateClient, ABC):
|
||||
"""
|
||||
A RESTful TaleMate client that connects to the REST API endpoint.
|
||||
"""
|
||||
|
||||
async def send_message(self, message: dict, url: str) -> str:
|
||||
"""
|
||||
Sends a message to the REST API and returns the AI's response.
|
||||
:param message: The message to be sent.
|
||||
:return: The AI's response text.
|
||||
"""
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(url, json=message, timeout=None)
|
||||
response_data = response.json()
|
||||
return response_data["results"][0]["text"]
|
||||
except KeyError:
|
||||
return response_data["results"][0]["history"]["visible"][0][-1]
|
||||
from openai import AsyncOpenAI
|
||||
import httpx
|
||||
import copy
|
||||
import random
|
||||
|
||||
|
||||
@register()
|
||||
class TextGeneratorWebuiClient(RESTTaleMateClient):
|
||||
"""
|
||||
Client that connects to the text-generatior-webui api
|
||||
"""
|
||||
|
||||
class TextGeneratorWebuiClient(ClientBase):
|
||||
|
||||
client_type = "textgenwebui"
|
||||
conversation_retries = 5
|
||||
|
||||
def tune_prompt_parameters(self, parameters:dict, kind:str):
|
||||
super().tune_prompt_parameters(parameters, kind)
|
||||
parameters["stopping_strings"] = STOPPING_STRINGS + parameters.get("extra_stopping_strings", [])
|
||||
# is this needed?
|
||||
parameters["max_new_tokens"] = parameters["max_tokens"]
|
||||
|
||||
def __init__(self, api_url: str, max_token_length: int = 2048, **kwargs):
|
||||
def set_client(self):
|
||||
self.client = AsyncOpenAI(base_url=self.api_url+"/v1", api_key="sk-1111")
|
||||
|
||||
async def get_model_name(self):
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get(f"{self.api_url}/v1/internal/model/info", timeout=2)
|
||||
if response.status_code == 404:
|
||||
raise Exception("Could not find model info (wrong api version?)")
|
||||
response_data = response.json()
|
||||
model_name = response_data.get("model_name")
|
||||
|
||||
api_url = self.cleanup_api_url(api_url)
|
||||
if model_name == "None":
|
||||
model_name = None
|
||||
|
||||
return model_name
|
||||
|
||||
|
||||
async def generate(self, prompt:str, parameters:dict, kind:str):
|
||||
|
||||
self.api_url_base = api_url
|
||||
api_url = f"{api_url}/v1/chat"
|
||||
super().__init__(api_url, max_token_length=max_token_length)
|
||||
self.model_name = None
|
||||
self.limited_ram = False
|
||||
self.name = kwargs.get("name", "textgenwebui")
|
||||
self.processing = False
|
||||
self.connected = False
|
||||
|
||||
def __str__(self):
|
||||
return f"TextGeneratorWebuiClient[{self.api_url_base}][{self.model_name or ''}]"
|
||||
|
||||
def cleanup_api_url(self, api_url:str):
|
||||
"""
|
||||
Strips trailing / and ensures endpoint is /api
|
||||
Generates text from the given prompt and parameters.
|
||||
"""
|
||||
|
||||
if api_url.endswith("/"):
|
||||
api_url = api_url[:-1]
|
||||
|
||||
if not api_url.endswith("/api"):
|
||||
api_url = api_url + "/api"
|
||||
|
||||
return api_url
|
||||
|
||||
def reconfigure(self, **kwargs):
|
||||
super().reconfigure(**kwargs)
|
||||
if "api_url" in kwargs:
|
||||
log.debug("reconfigure", api_url=kwargs["api_url"])
|
||||
api_url = kwargs["api_url"]
|
||||
api_url = self.cleanup_api_url(api_url)
|
||||
self.api_url_base = api_url
|
||||
self.api_url = api_url
|
||||
headers = {}
|
||||
headers["Content-Type"] = "application/json"
|
||||
|
||||
def toggle_disabled_if_remote(self):
|
||||
parameters["prompt"] = prompt.strip()
|
||||
|
||||
remote_servies = [
|
||||
".runpod.net"
|
||||
]
|
||||
|
||||
for service in remote_servies:
|
||||
if service in self.api_url_base:
|
||||
self.enabled = False
|
||||
return
|
||||
|
||||
def emit_status(self, processing: bool = None):
|
||||
if processing is not None:
|
||||
self.processing = processing
|
||||
|
||||
if not self.enabled:
|
||||
status = "disabled"
|
||||
model_name = "Disabled"
|
||||
elif not self.connected:
|
||||
status = "error"
|
||||
model_name = "Could not connect"
|
||||
elif self.model_name:
|
||||
status = "busy" if self.processing else "idle"
|
||||
model_name = self.model_name
|
||||
else:
|
||||
model_name = "No model loaded"
|
||||
status = "warning"
|
||||
|
||||
status_change = status != self.current_status
|
||||
self.current_status = status
|
||||
|
||||
emit(
|
||||
"client_status",
|
||||
message=self.client_type,
|
||||
id=self.name,
|
||||
details=model_name,
|
||||
status=status,
|
||||
)
|
||||
|
||||
|
||||
if status_change:
|
||||
instance.emit_agent_status_by_client(self)
|
||||
|
||||
|
||||
# Add the 'status' method
|
||||
async def status(self):
|
||||
"""
|
||||
Send a request to the API to retrieve the loaded AI model name.
|
||||
Raises an error if no model name is returned.
|
||||
:return: None
|
||||
"""
|
||||
|
||||
if not self.enabled:
|
||||
self.connected = False
|
||||
self.emit_status()
|
||||
return
|
||||
|
||||
try:
|
||||
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.get(f"{self.api_url_base}/v1/model", timeout=2)
|
||||
|
||||
except (
|
||||
httpx.TimeoutException,
|
||||
httpx.NetworkError,
|
||||
):
|
||||
self.model_name = None
|
||||
self.connected = False
|
||||
self.toggle_disabled_if_remote()
|
||||
self.emit_status()
|
||||
return
|
||||
|
||||
self.connected = True
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(f"{self.api_url}/v1/completions", json=parameters, timeout=None, headers=headers)
|
||||
response_data = response.json()
|
||||
self.enabled = True
|
||||
except json.decoder.JSONDecodeError as e:
|
||||
self.connected = False
|
||||
self.toggle_disabled_if_remote()
|
||||
if not self.enabled:
|
||||
log.warn("remote service unreachable, disabling client", name=self.name)
|
||||
else:
|
||||
log.error("client response error", name=self.name, e=e)
|
||||
|
||||
self.emit_status()
|
||||
return
|
||||
|
||||
model_name = response_data.get("result")
|
||||
|
||||
if not model_name or model_name == "None":
|
||||
log.warning("client model not loaded", client=self.name)
|
||||
self.emit_status()
|
||||
return
|
||||
|
||||
model_changed = model_name != self.model_name
|
||||
|
||||
self.model_name = model_name
|
||||
|
||||
if model_changed:
|
||||
self.auto_context_length()
|
||||
|
||||
log.info(f"{self} [{self.max_token_length} ctx]: ready")
|
||||
self.emit_status()
|
||||
|
||||
def auto_context_length(self):
|
||||
return response_data["choices"][0]["text"]
|
||||
|
||||
def jiggle_randomness(self, prompt_config:dict, offset:float=0.3) -> dict:
|
||||
"""
|
||||
Automaticalle sets context length based on LLM
|
||||
"""
|
||||
|
||||
if not isinstance(self.max_token_length, DefaultContext):
|
||||
# context length was specified manually
|
||||
return
|
||||
|
||||
model_name = self.model_name.lower()
|
||||
|
||||
if "longchat" in model_name:
|
||||
self.max_token_length = 16000
|
||||
elif "8k" in model_name:
|
||||
if not self.limited_ram or "13b" in model_name:
|
||||
self.max_token_length = 6000
|
||||
else:
|
||||
self.max_token_length = 4096
|
||||
elif "4k" in model_name:
|
||||
self.max_token_length = 4096
|
||||
else:
|
||||
self.max_token_length = self.original_max_token_length
|
||||
|
||||
@property
|
||||
def instruction_template(self):
|
||||
if "vicuna" in self.model_name.lower():
|
||||
return "Vicuna-v1.1"
|
||||
if "camel" in self.model_name.lower():
|
||||
return "Vicuna-v1.1"
|
||||
return ""
|
||||
|
||||
def prompt_url(self):
|
||||
return self.api_url_base + "/v1/generate"
|
||||
|
||||
def prompt_config_conversation_old(self, prompt: str) -> dict:
|
||||
prompt = self.prompt_template(
|
||||
system_prompts.BASIC,
|
||||
prompt,
|
||||
)
|
||||
|
||||
config = {
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 75,
|
||||
"truncation_length": self.max_token_length,
|
||||
}
|
||||
config.update(PRESET_TALEMATE_CONVERSATION)
|
||||
return config
|
||||
|
||||
|
||||
def prompt_config_conversation(self, prompt: str) -> dict:
|
||||
prompt = self.prompt_template(
|
||||
system_prompts.ROLEPLAY,
|
||||
prompt,
|
||||
)
|
||||
|
||||
stopping_strings = ["<|end_of_turn|>"]
|
||||
|
||||
conversation_context = client_context_attribute("conversation")
|
||||
|
||||
stopping_strings += [
|
||||
f"{character}:" for character in conversation_context["other_characters"]
|
||||
]
|
||||
|
||||
max_new_tokens = conversation_context.get("length", 96)
|
||||
log.debug("prompt_config_conversation", stopping_strings=stopping_strings, conversation_context=conversation_context, max_new_tokens=max_new_tokens)
|
||||
|
||||
config = {
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": max_new_tokens,
|
||||
"truncation_length": self.max_token_length,
|
||||
"stopping_strings": stopping_strings,
|
||||
}
|
||||
config.update(PRESET_TALEMATE_CONVERSATION)
|
||||
|
||||
jiggle_randomness(config)
|
||||
|
||||
return config
|
||||
|
||||
def prompt_config_conversation_long(self, prompt: str) -> dict:
|
||||
config = self.prompt_config_conversation(prompt)
|
||||
config["max_new_tokens"] = 300
|
||||
return config
|
||||
|
||||
def prompt_config_conversation_select_talking_actor(self, prompt: str) -> dict:
|
||||
config = self.prompt_config_conversation(prompt)
|
||||
config["max_new_tokens"] = 30
|
||||
config["stopping_strings"] += [":"]
|
||||
return config
|
||||
|
||||
|
||||
def prompt_config_summarize(self, prompt: str) -> dict:
|
||||
prompt = self.prompt_template(
|
||||
system_prompts.NARRATOR,
|
||||
prompt,
|
||||
)
|
||||
|
||||
config = {
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 500,
|
||||
"truncation_length": self.max_token_length,
|
||||
}
|
||||
|
||||
config.update(PRESET_LLAMA_PRECISE)
|
||||
return config
|
||||
|
||||
def prompt_config_analyze(self, prompt: str) -> dict:
|
||||
prompt = self.prompt_template(
|
||||
system_prompts.ANALYST,
|
||||
prompt,
|
||||
)
|
||||
|
||||
config = {
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 500,
|
||||
"truncation_length": self.max_token_length,
|
||||
}
|
||||
|
||||
config.update(PRESET_SIMPLE_1)
|
||||
return config
|
||||
|
||||
def prompt_config_analyze_creative(self, prompt: str) -> dict:
|
||||
prompt = self.prompt_template(
|
||||
system_prompts.ANALYST,
|
||||
prompt,
|
||||
)
|
||||
|
||||
config = {}
|
||||
config.update(PRESET_DIVINE_INTELLECT)
|
||||
config.update({
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 1024,
|
||||
"repetition_penalty_range": 1024,
|
||||
"truncation_length": self.max_token_length
|
||||
})
|
||||
|
||||
return config
|
||||
|
||||
def prompt_config_analyze_long(self, prompt: str) -> dict:
    config = self.prompt_config_analyze(prompt)
    config["max_new_tokens"] = 1000
    return config

def prompt_config_analyze_freeform(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.ANALYST_FREEFORM,
        prompt,
    )

    config = {
        "prompt": prompt,
        "max_new_tokens": 500,
        "truncation_length": self.max_token_length,
    }

    config.update(PRESET_LLAMA_PRECISE)
    return config

def prompt_config_analyze_freeform_short(self, prompt: str) -> dict:
    config = self.prompt_config_analyze_freeform(prompt)
    config["max_new_tokens"] = 10
    return config

def prompt_config_narrate(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.NARRATOR,
        prompt,
    )

    config = {
        "prompt": prompt,
        "max_new_tokens": 500,
        "truncation_length": self.max_token_length,
    }
    config.update(PRESET_LLAMA_PRECISE)
    return config

def prompt_config_story(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.NARRATOR,
        prompt,
    )

    config = {
        "prompt": prompt,
        "max_new_tokens": 300,
        "seed": random.randint(0, 1000000000),
        "truncation_length": self.max_token_length,
    }
    config.update(PRESET_DIVINE_INTELLECT)
    config.update({
        "repetition_penalty": 1.3,
        "repetition_penalty_range": 2048,
    })
    return config

def prompt_config_create(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.CREATOR,
        prompt,
    )
    config = {
        "prompt": prompt,
        "max_new_tokens": min(1024, self.max_token_length * 0.35),
        "truncation_length": self.max_token_length,
    }
    config.update(PRESET_TALEMATE_CREATOR)
    return config

def prompt_config_create_concise(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.CREATOR,
        prompt,
    )

    config = {
        "prompt": prompt,
        "max_new_tokens": min(400, self.max_token_length * 0.25),
        "truncation_length": self.max_token_length,
        "stopping_strings": ["<|DONE|>", "\n\n"],
    }
    config.update(PRESET_TALEMATE_CREATOR)
    return config

def prompt_config_create_precise(self, prompt: str) -> dict:
    config = self.prompt_config_create_concise(prompt)
    config.update(PRESET_LLAMA_PRECISE)
    return config

def prompt_config_director(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.DIRECTOR,
        prompt,
    )

    config = {
        "prompt": prompt,
        "max_new_tokens": min(600, self.max_token_length * 0.25),
        "truncation_length": self.max_token_length,
    }
    config.update(PRESET_SIMPLE_1)
    return config

def prompt_config_director_short(self, prompt: str) -> dict:
    config = self.prompt_config_director(prompt)
    config.update(max_new_tokens=25)
    return config

def prompt_config_director_yesno(self, prompt: str) -> dict:
    config = self.prompt_config_director(prompt)
    config.update(max_new_tokens=2)
    return config

def prompt_config_edit_dialogue(self, prompt: str) -> dict:
    prompt = self.prompt_template(
        system_prompts.EDITOR,
        prompt,
    )

    conversation_context = client_context_attribute("conversation")

    stopping_strings = [
        f"{character}:" for character in conversation_context["other_characters"]
    ]

    config = {
        "prompt": prompt,
        "max_new_tokens": 100,
        "truncation_length": self.max_token_length,
        "stopping_strings": stopping_strings,
    }

    config.update(PRESET_DIVINE_INTELLECT)

    return config

def prompt_config_edit_add_detail(self, prompt: str) -> dict:
    config = self.prompt_config_edit_dialogue(prompt)
    config.update(max_new_tokens=200)
    return config

def prompt_config_edit_fix_exposition(self, prompt: str) -> dict:
    config = self.prompt_config_edit_dialogue(prompt)
    config.update(max_new_tokens=1024)
    return config

async def send_prompt(
    self, prompt: str, kind: str = "conversation", finalize: Callable = lambda x: x
) -> str:
    """
    Send a prompt to the AI and return its response.

    :param prompt: The text prompt to send.
    :param kind: Selects the prompt_config_<kind> method used to build the request.
    :param finalize: Optional callable that may adjust the message before it is sent.
    :return: The AI's response text.
    """

    await self.status()
    self.emit_status(processing=True)

    await asyncio.sleep(0.01)

    fn_prompt_config = getattr(self, f"prompt_config_{kind}")
    fn_url = self.prompt_url
    message = fn_prompt_config(prompt)

    if client_context_attribute("nuke_repetition") > 0.0:
        log.info("nuke repetition", offset=client_context_attribute("nuke_repetition"), temperature=message["temperature"], repetition_penalty=message["repetition_penalty"])
        message = jiggle_randomness(message, offset=client_context_attribute("nuke_repetition"))
        log.info("nuke repetition (applied)", offset=client_context_attribute("nuke_repetition"), temperature=message["temperature"], repetition_penalty=message["repetition_penalty"])

    message = finalize(message)

    # rough token estimate (~3.6 characters per token)
    token_length = int(len(message["prompt"]) / 3.6)

    self.last_token_length = token_length

    log.debug("send_prompt", token_length=token_length, max_token_length=self.max_token_length)

    message["prompt"] = message["prompt"].strip()

    # add common end-of-turn markers to the stopping strings
    if "stopping_strings" in message:
        message["stopping_strings"] += ["<|im_end|>", "</s>"]
    else:
        message["stopping_strings"] = ["<|im_end|>", "</s>"]

    response = await self.send_message(message, fn_url())

    response = response.split("#")[0]
    self.emit_status(processing=False)

    emit("prompt_sent", data={
        "kind": kind,
        "prompt": message["prompt"],
        "response": response,
        "prompt_tokens": token_length,
        "response_tokens": int(len(response) / 3.6)
    })

    return response


class OpenAPIClient(RESTTaleMateClient):
    pass


class GPT3Client(OpenAPIClient):
    pass


class GPT4Client(OpenAPIClient):
    pass
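The `kind` argument drives a simple convention-based dispatch: `send_prompt` resolves a `prompt_config_<kind>` method at runtime, so adding a new prompt kind only requires adding a method. A minimal standalone sketch of the pattern (the class and values here are illustrative, not talemate's actual client):

```python
# minimal sketch of the kind -> prompt_config_* dispatch used above;
# class name and token budgets are illustrative only
class Dispatcher:
    def prompt_config_analyze(self, prompt: str) -> dict:
        return {"prompt": prompt, "max_new_tokens": 500}

    def prompt_config_analyze_long(self, prompt: str) -> dict:
        config = self.prompt_config_analyze(prompt)
        config["max_new_tokens"] = 1000  # long variants only widen the budget
        return config

    def build(self, prompt: str, kind: str = "analyze") -> dict:
        # getattr resolves prompt_config_<kind> by name at call time
        fn = getattr(self, f"prompt_config_{kind}")
        return fn(prompt)

print(Dispatcher().build("Who is in the room?", kind="analyze_long"))
```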
src/talemate/client/utils.py (new file)
@@ -0,0 +1,32 @@
import copy
import random


def jiggle_randomness(prompt_config: dict, offset: float = 0.3) -> dict:
    """
    adjusts temperature and repetition_penalty
    by random values using the base value as a center
    """

    temp = prompt_config["temperature"]
    rep_pen = prompt_config["repetition_penalty"]

    copied_config = copy.deepcopy(prompt_config)

    min_offset = offset * 0.3

    copied_config["temperature"] = random.uniform(temp + min_offset, temp + offset)
    copied_config["repetition_penalty"] = random.uniform(rep_pen + min_offset * 0.3, rep_pen + offset * 0.3)

    return copied_config


def jiggle_enabled_for(kind: str):
    if kind in ["conversation", "story"]:
        return True

    if kind.startswith("narrate"):
        return True

    return False
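For orientation, a small illustration of what `jiggle_randomness` does with a 0.3 offset (the input values are made up):

```python
# illustrative only: jiggle_randomness returns a modified deep copy
base = {"temperature": 0.7, "repetition_penalty": 1.1}

jiggled = jiggle_randomness(base, offset=0.3)

# min_offset = 0.3 * 0.3 = 0.09, so the copy gets
#   temperature        in [0.79, 1.0]
#   repetition_penalty in [1.127, 1.19]
# while the original dict is left untouched
assert base == {"temperature": 0.7, "repetition_penalty": 1.1}
```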
@@ -23,6 +23,7 @@ from .cmd_save_as import CmdSaveAs
 from .cmd_save_characters import CmdSaveCharacters
 from .cmd_setenv import CmdSetEnvironmentToScene, CmdSetEnvironmentToCreative
 from .cmd_time_util import *
+from .cmd_tts import *
 from .cmd_world_state import CmdWorldState
 from .cmd_run_helios_test import CmdHeliosTest
 from .manager import Manager

@@ -20,6 +20,7 @@ class TalemateCommand(Emitter, ABC):
     scene: Scene = None
     manager: CommandManager = None
     label: str = None
+    sets_scene_unsaved: bool = True
 
     def __init__(
         self,
@@ -84,4 +84,42 @@ class CmdRunAutomatic(TalemateCommand):
         turns = 10
 
         self.emit("system", f"Making player character AI controlled for {turns} turns")
         self.scene.get_player_character().actor.ai_controlled = turns
+
+
+@register
+class CmdLongTermMemoryStats(TalemateCommand):
+    """
+    Command class for the 'long_term_memory_stats' command
+    """
+
+    name = "long_term_memory_stats"
+    description = "Show stats for the long term memory"
+    aliases = ["ltm_stats"]
+
+    async def run(self):
+        memory = self.scene.get_helper("memory").agent
+
+        count = await memory.count()
+        db_name = memory.db_name
+
+        self.emit("system", f"Long term memory for {self.scene.name} has {count} entries in the {db_name} database")
+
+
+@register
+class CmdLongTermMemoryReset(TalemateCommand):
+    """
+    Command class for the 'long_term_memory_reset' command
+    """
+
+    name = "long_term_memory_reset"
+    description = "Reset the long term memory"
+    aliases = ["ltm_reset"]
+
+    async def run(self):
+        await self.scene.commit_to_memory()
+
+        self.emit("system", f"Long term memory for {self.scene.name} has been reset")
@@ -37,29 +37,15 @@ class CmdDirectorDirect(TalemateCommand):
             self.system_message(f"Character not found: {name}")
             return True
 
         if ask_for_input:
-            goal = await wait_for_input(f"Enter a new goal for the director to direct {character.name} towards (leave empty for auto-direct): ")
-        else:
-            goal = None
-
-        direction = await director.agent.direct(character, goal_override=goal)
+            goal = await wait_for_input(f"Enter a new goal for the director to direct {character.name}")
 
-        if direction is None:
-            self.system_message("Director was unable to direct character at this point in the story.")
-            return True
+        if not goal.strip():
+            self.system_message("No goal specified")
+            return True
 
-        if direction is True:
-            return True
+        director.agent.actions["direct"].config["prompt"].value = goal
 
-        message = DirectorMessage(direction, source=character.name)
-        emit("director", message, character=character)
-
-        # remove previous director message, starting from the end of self.history
-        for i in range(len(self.scene.history) - 1, -1, -1):
-            if isinstance(self.scene.history[i], DirectorMessage):
-                self.scene.history.pop(i)
-                break
-
-        self.scene.push_history(message)
+        await director.agent.direct_character(character, goal)
 
 
 @register
 class CmdDirectorDirectWithOverride(CmdDirectorDirect):
@@ -28,4 +28,3 @@ class CmdNarrate(TalemateCommand):
 
         self.narrator_message(message)
         self.scene.push_history(message)
-        await asyncio.sleep(0)

@@ -32,4 +32,5 @@ class CmdRebuildArchive(TalemateCommand):
             if not more:
                 break
 
             await asyncio.sleep(0)
+        self.scene.sync_time()
         await self.scene.commit_to_memory()
@@ -17,7 +17,26 @@ class CmdRename(TalemateCommand):
     aliases = []
 
     async def run(self):
+        # collect list of characters in the scene
+
+        if self.args:
+            character_name = self.args[0]
+        else:
+            character_names = self.scene.character_names
+            character_name = await wait_for_input("Which character do you want to rename?", data={
+                "input_type": "select",
+                "choices": character_names,
+            })
+
+        character = self.scene.get_character(character_name)
+
+        if not character:
+            self.system_message(f"Character {character_name} not found")
+            return True
+
         name = await wait_for_input("Enter new name: ")
 
-        self.scene.main_character.character.rename(name)
+        character.rename(name)
         await asyncio.sleep(0)
 
         return True
@@ -11,6 +11,7 @@ class CmdSave(TalemateCommand):
     name = "save"
     description = "Save the scene"
     aliases = ["s"]
+    sets_scene_unsaved = False
 
     async def run(self):
         await self.scene.save()

@@ -13,7 +13,7 @@ class CmdSaveAs(TalemateCommand):
     name = "save_as"
     description = "Save the scene with a new name"
     aliases = ["sa"]
+    sets_scene_unsaved = False
 
     async def run(self):
-        self.scene.filename = ""
-        await self.scene.save()
+        await self.scene.save(save_as=True)
||||
@@ -11,6 +11,7 @@ from talemate.prompts.base import set_default_sectioning_handler
|
||||
from talemate.scene_message import TimePassageMessage
|
||||
from talemate.util import iso8601_duration_to_human
|
||||
from talemate.emit import wait_for_input, emit
|
||||
import talemate.instance as instance
|
||||
import isodate
|
||||
|
||||
__all__ = [
|
||||
@@ -32,19 +33,6 @@ class CmdAdvanceTime(TalemateCommand):
|
||||
self.emit("system", "You must specify an amount of time to advance")
|
||||
return
|
||||
|
||||
try:
|
||||
isodate.parse_duration(self.args[0])
|
||||
except isodate.ISO8601Error:
|
||||
self.emit("system", "Invalid duration")
|
||||
return
|
||||
|
||||
try:
|
||||
msg = self.args[1]
|
||||
except IndexError:
|
||||
msg = iso8601_duration_to_human(self.args[0], suffix=" later")
|
||||
|
||||
message = TimePassageMessage(ts=self.args[0], message=msg)
|
||||
emit('time', message)
|
||||
|
||||
self.scene.push_history(message)
|
||||
self.scene.emit_status()
|
||||
world_state = instance.get_agent("world_state")
|
||||
await world_state.advance_time(self.args[0])
|
||||
src/talemate/commands/cmd_tts.py (new file)
@@ -0,0 +1,33 @@
import asyncio
import logging

from talemate.commands.base import TalemateCommand
from talemate.commands.manager import register
from talemate.prompts.base import set_default_sectioning_handler
from talemate.instance import get_agent

__all__ = [
    "CmdTestTTS",
]

@register
class CmdTestTTS(TalemateCommand):
    """
    Command class for the 'test_tts' command
    """

    name = "test_tts"
    description = "Test the TTS agent"
    aliases = []

    async def run(self):
        tts_agent = get_agent("tts")

        try:
            last_message = str(self.scene.history[-1])
        except IndexError:
            last_message = "Welcome to talemate!"

        await tts_agent.generate(last_message)
@@ -22,10 +22,15 @@ class CmdWorldState(TalemateCommand):
     async def run(self):
 
         inline = self.args[0] == "inline" if self.args else False
+        reset = self.args[0] == "reset" if self.args else False
 
         if inline:
             await self.scene.world_state.request_update_inline()
             return True
 
+        if reset:
+            self.scene.world_state.reset()
+
         await self.scene.world_state.request_update()
 
 @register
@@ -52,6 +52,8 @@ class Manager(Emitter):
             self.processing_command = True
             command.command_start()
             await command.run()
+            if command.sets_scene_unsaved:
+                self.scene.saved = False
         except AbortCommand:
             self.system_message(f"Action `{command.verbose_name}` ended")
         except Exception:
@@ -6,6 +6,8 @@ import os
 from pydantic import BaseModel
 from typing import Optional, Dict, Union
 
+from talemate.emit import emit
+
 log = structlog.get_logger("talemate.config")
 
 class Client(BaseModel):

@@ -20,7 +22,7 @@ class Client(BaseModel):
 
 
 class AgentActionConfig(BaseModel):
-    value: Union[int, float, str, bool]
+    value: Union[int, float, str, bool, None] = None
 
 class AgentAction(BaseModel):
     enabled: bool = True

@@ -42,17 +44,17 @@ class Agent(BaseModel):
         return super().model_dump(exclude_none=True)
 
 class GamePlayerCharacter(BaseModel):
-    name: str
-    color: str
-    gender: str
-    description: Optional[str]
+    name: str = ""
+    color: str = "#3362bb"
+    gender: str = ""
+    description: Optional[str] = ""
 
     class Config:
         extra = "ignore"
 
 
 class Game(BaseModel):
-    default_player_character: GamePlayerCharacter
+    default_player_character: GamePlayerCharacter = GamePlayerCharacter()
 
     class Config:
         extra = "ignore"

@@ -65,6 +67,22 @@ class OpenAIConfig(BaseModel):
 
 class RunPodConfig(BaseModel):
     api_key: Union[str, None] = None
 
+class ElevenLabsConfig(BaseModel):
+    api_key: Union[str, None] = None
+    model: str = "eleven_turbo_v2"
+
+class CoquiConfig(BaseModel):
+    api_key: Union[str, None] = None
+
+class TTSVoiceSamples(BaseModel):
+    label: str
+    value: str
+
+class TTSConfig(BaseModel):
+    device: str = "cuda"
+    model: str = "tts_models/multilingual/multi-dataset/xtts_v2"
+    voices: list[TTSVoiceSamples] = pydantic.Field(default_factory=list)
+
 class ChromaDB(BaseModel):
     instructor_device: str = "cpu"

@@ -85,6 +103,12 @@ class Config(BaseModel):
 
     chromadb: ChromaDB = ChromaDB()
 
+    elevenlabs: ElevenLabsConfig = ElevenLabsConfig()
+
+    coqui: CoquiConfig = CoquiConfig()
+
+    tts: TTSConfig = TTSConfig()
+
     class Config:
         extra = "ignore"

@@ -136,4 +160,6 @@ def save_config(config, file_path: str = "./config.yaml"):
         return None
 
     with open(file_path, "w") as file:
         yaml.dump(config, file)
+
+    emit("config_saved", data=config)
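Since every model now carries defaults, an empty `Config()` is fully constructible. A quick sketch of what that buys (the import path is assumed from the file being edited):

```python
# assumes the models above live in talemate.config
from talemate.config import Config, GamePlayerCharacter

cfg = Config()
print(cfg.tts.device)        # "cuda"
print(cfg.tts.model)         # "tts_models/multilingual/multi-dataset/xtts_v2"
print(cfg.elevenlabs.model)  # "eleven_turbo_v2"
print(cfg.coqui.api_key)     # None
print(GamePlayerCharacter().color)  # "#3362bb"
```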
src/talemate/context.py (new file)
@@ -0,0 +1,20 @@
from contextvars import ContextVar

__all__ = [
    "scene_is_loading",
    "SceneIsLoading",
]

scene_is_loading = ContextVar("scene_is_loading", default=None)

class SceneIsLoading:

    def __init__(self, scene):
        self.scene = scene

    def __enter__(self):
        self.token = scene_is_loading.set(self.scene)

    def __exit__(self, *args):
        scene_is_loading.reset(self.token)
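The context manager pairs the ContextVar set/reset, so any code further down the call stack can check which scene is currently being loaded. A hedged usage sketch (the `scene` object here is a stand-in, not a real Scene instance):

```python
# usage sketch; `scene` stands in for a talemate Scene instance
scene = object()

with SceneIsLoading(scene):
    # anywhere inside this block (or in code it calls):
    assert scene_is_loading.get() is scene

# the token-based reset restores the default once loading is done
assert scene_is_loading.get() is None
```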
@@ -13,7 +13,9 @@ RequestInput = signal("request_input")
 ReceiveInput = signal("receive_input")
 
 ClientStatus = signal("client_status")
+RequestClientStatus = signal("request_client_status")
 AgentStatus = signal("agent_status")
+RequestAgentStatus = signal("request_agent_status")
 ClientBootstraps = signal("client_bootstraps")
 PromptSent = signal("prompt_sent")

@@ -24,8 +26,12 @@ CommandStatus = signal("command_status")
 WorldState = signal("world_state")
 ArchivedHistory = signal("archived_history")
 
+AudioQueue = signal("audio_queue")
+
 MessageEdited = signal("message_edited")
 
+ConfigSaved = signal("config_saved")
+
 handlers = {
     "system": SystemMessage,
     "narrator": NarratorMessage,

@@ -36,7 +42,9 @@ handlers = {
     "request_input": RequestInput,
     "receive_input": ReceiveInput,
     "client_status": ClientStatus,
+    "request_client_status": RequestClientStatus,
     "agent_status": AgentStatus,
+    "request_agent_status": RequestAgentStatus,
     "client_bootstraps": ClientBootstraps,
     "clear_screen": ClearScreen,
     "remove_message": RemoveMessage,

@@ -46,4 +54,6 @@ handlers = {
     "archived_history": ArchivedHistory,
     "message_edited": MessageEdited,
     "prompt_sent": PromptSent,
+    "audio_queue": AudioQueue,
+    "config_saved": ConfigSaved,
 }
@@ -4,7 +4,7 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from talemate.tale_mate import Scene
+    from talemate.tale_mate import Scene, Actor, SceneMessage
 
 __all__ = [
     "Event",

@@ -38,4 +38,16 @@ class CharacterStateEvent(Event):
 
 @dataclass
 class GameLoopEvent(Event):
     pass
+
+@dataclass
+class GameLoopStartEvent(GameLoopEvent):
+    pass
+
+@dataclass
+class GameLoopActorIterEvent(GameLoopEvent):
+    actor: Actor
+
+@dataclass
+class GameLoopNewMessageEvent(GameLoopEvent):
+    message: SceneMessage
@@ -1,10 +1,11 @@
 """
 Keep track of clients and agents
 """
 
+import asyncio
 import talemate.agents as agents
 import talemate.client as clients
 from talemate.emit import emit
+from talemate.emit.signals import handlers
 import talemate.client.bootstrap as bootstrap
 
 import structlog

@@ -14,6 +15,8 @@ AGENTS = {}
 CLIENTS = {}
 
 
 def get_agent(typ: str, *create_args, **create_kwargs):
     agent = AGENTS.get(typ)

@@ -94,11 +97,19 @@ async def emit_clients_status():
     """
     Will emit status of all clients
     """
     #log.debug("emit", type="client status")
     for client in CLIENTS.values():
         if client:
             await client.status()
 
+def _sync_emit_clients_status(*args, **kwargs):
+    """
+    Will emit status of all clients
+    in synchronous mode
+    """
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(emit_clients_status())
+
+handlers["request_client_status"].connect(_sync_emit_clients_status)
 
 def emit_client_bootstraps():
     emit(

@@ -144,11 +155,13 @@ def emit_agent_status(cls, agent=None):
     )
 
 
-def emit_agents_status():
+def emit_agents_status(*args, **kwargs):
     """
     Will emit status of all agents
     """
     #log.debug("emit", type="agent status")
     for typ, cls in agents.AGENT_CLASSES.items():
         agent = AGENTS.get(typ)
         emit_agent_status(cls, agent)
 
+handlers["request_agent_status"].connect(emit_agents_status)
@@ -10,6 +10,7 @@ from talemate.scene_message import (
     SceneMessage, CharacterMessage, NarratorMessage, DirectorMessage, MESSAGES, reset_message_id
 )
 from talemate.world_state import WorldState
+from talemate.context import SceneIsLoading
 import talemate.instance as instance
 
 import structlog
@@ -31,23 +32,24 @@ async def load_scene(scene, file_path, conv_client, reset: bool = False):
     Load the scene data from the given file path.
     """
 
-    if file_path == "environment:creative":
-        return await load_scene_from_data(
-            scene, creative_environment(), conv_client, reset=True
-        )
-
-    ext = os.path.splitext(file_path)[1].lower()
-
-    if ext in [".jpg", ".png", ".jpeg", ".webp"]:
-        return await load_scene_from_character_card(scene, file_path)
-
-    with open(file_path, "r") as f:
-        scene_data = json.load(f)
-
-    return await load_scene_from_data(
-        scene, scene_data, conv_client, reset, name=file_path
-    )
+    with SceneIsLoading(scene):
+        if file_path == "environment:creative":
+            return await load_scene_from_data(
+                scene, creative_environment(), conv_client, reset=True
+            )
+
+        ext = os.path.splitext(file_path)[1].lower()
+
+        if ext in [".jpg", ".png", ".jpeg", ".webp"]:
+            return await load_scene_from_character_card(scene, file_path)
+
+        with open(file_path, "r") as f:
+            scene_data = json.load(f)
+
+        return await load_scene_from_data(
+            scene, scene_data, conv_client, reset, name=file_path
+        )
 
 
 async def load_scene_from_character_card(scene, file_path):
     """
@@ -68,10 +70,13 @@ async def load_scene_from_character_card(scene, file_path):
 
     conversation = scene.get_helper("conversation").agent
     creator = scene.get_helper("creator").agent
+    memory = scene.get_helper("memory").agent
 
     actor = Actor(character, conversation)
 
     scene.name = character.name
 
+    await memory.set_db()
+
     await scene.add_actor(actor)

@@ -118,6 +123,8 @@ async def load_scene_from_character_card(scene, file_path):
     except Exception as e:
         log.error("world_state.request_update", error=e)
 
+    scene.saved = False
+
     return scene
|
||||
@@ -127,6 +134,8 @@ async def load_scene_from_data(
|
||||
|
||||
reset_message_id()
|
||||
|
||||
memory = scene.get_helper("memory").agent
|
||||
|
||||
scene.description = scene_data.get("description", "")
|
||||
scene.intro = scene_data.get("intro", "") or scene.description
|
||||
scene.name = scene_data.get("name", "Unknown Scene")
|
||||
@@ -138,6 +147,7 @@ async def load_scene_from_data(
|
||||
|
||||
if not reset:
|
||||
scene.goal = scene_data.get("goal", 0)
|
||||
scene.memory_id = scene_data.get("memory_id", scene.memory_id)
|
||||
scene.history = _load_history(scene_data["history"])
|
||||
scene.archived_history = scene_data["archived_history"]
|
||||
scene.character_states = scene_data.get("character_states", {})
|
||||
@@ -152,6 +162,8 @@ async def load_scene_from_data(
|
||||
scene.sync_time()
|
||||
log.debug("scene time", ts=scene.ts)
|
||||
|
||||
await memory.set_db()
|
||||
|
||||
for ah in scene.archived_history:
|
||||
if reset:
|
||||
break
|
||||
@@ -178,7 +190,14 @@ async def load_scene_from_data(
|
||||
await scene.add_actor(actor)
|
||||
|
||||
if scene.environment != "creative":
|
||||
await scene.world_state.request_update(initial_only=True)
|
||||
try:
|
||||
await scene.world_state.request_update(initial_only=True)
|
||||
except Exception as e:
|
||||
log.error("world_state.request_update", error=e)
|
||||
|
||||
# the scene has been saved before (since we just loaded it), so we set the saved flag to True
|
||||
# as long as the scene has a memory_id.
|
||||
scene.saved = "memory_id" in scene_data
|
||||
|
||||
return scene
|
||||
|
||||
|
||||
@@ -290,11 +290,14 @@ class Prompt:
         env.globals["query_scene"] = self.query_scene
         env.globals["query_memory"] = self.query_memory
         env.globals["query_text"] = self.query_text
+        env.globals["instruct_text"] = self.instruct_text
+        env.globals["retrieve_memories"] = self.retrieve_memories
         env.globals["uuidgen"] = lambda: str(uuid.uuid4())
         env.globals["to_int"] = lambda x: int(x)
         env.globals["config"] = self.config
         env.globals["len"] = lambda x: len(x)
-        env.globals["count_tokens"] = lambda x: count_tokens(x)
+        env.globals["count_tokens"] = lambda x: count_tokens(dedupe_string(x, debug=False))
+        env.globals["print"] = lambda x: print(x)
 
         ctx.update(self.vars)

@@ -340,7 +343,7 @@ class Prompt:
         parsed_text = env.from_string(prompt_text).render(self.vars)
 
         if self.dedupe_enabled:
-            parsed_text = dedupe_string(parsed_text, debug=True)
+            parsed_text = dedupe_string(parsed_text, debug=False)
 
         parsed_text = remove_extra_linebreaks(parsed_text)
@@ -364,28 +367,53 @@ class Prompt:
         ])
 
 
-    def query_text(self, query:str, text:str):
+    def query_text(self, query:str, text:str, as_question_answer:bool=True):
         loop = asyncio.get_event_loop()
-        summarizer = instance.get_agent("summarizer")
+        summarizer = instance.get_agent("world_state")
         query = query.format(**self.vars)
 
+        if not as_question_answer:
+            return loop.run_until_complete(summarizer.analyze_text_and_answer_question(text, query))
+
         return "\n".join([
             f"Question: {query}",
             f"Answer: " + loop.run_until_complete(summarizer.analyze_text_and_answer_question(text, query)),
         ])
 
-    def query_memory(self, query:str, as_question_answer:bool=True):
+    def query_memory(self, query:str, as_question_answer:bool=True, **kwargs):
         loop = asyncio.get_event_loop()
         memory = instance.get_agent("memory")
         query = query.format(**self.vars)
 
-        if not as_question_answer:
-            return loop.run_until_complete(memory.query(query))
-
-        return "\n".join([
-            f"Question: {query}",
-            f"Answer: " + loop.run_until_complete(memory.query(query)),
-        ])
+        if not kwargs.get("iterate"):
+            if not as_question_answer:
+                return loop.run_until_complete(memory.query(query, **kwargs))
+
+            return "\n".join([
+                f"Question: {query}",
+                f"Answer: " + loop.run_until_complete(memory.query(query, **kwargs)),
+            ])
+        else:
+            return loop.run_until_complete(memory.multi_query([q for q in query.split("\n") if q.strip()], **kwargs))
+
+    def instruct_text(self, instruction:str, text:str):
+        loop = asyncio.get_event_loop()
+        world_state = instance.get_agent("world_state")
+        instruction = instruction.format(**self.vars)
+
+        return loop.run_until_complete(world_state.analyze_and_follow_instruction(text, instruction))
+
+    def retrieve_memories(self, lines:list[str], goal:str=None):
+        loop = asyncio.get_event_loop()
+        world_state = instance.get_agent("world_state")
+
+        lines = [str(line) for line in lines]
+
+        return loop.run_until_complete(world_state.analyze_text_and_extract_context("\n".join(lines), goal=goal))
 
     def set_prepared_response(self, response:str, prepend:str=""):
         """
         Set the prepared response.
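The new `iterate` keyword turns `query_memory` from a single question/answer lookup into a batched one: each non-empty line of the query becomes its own memory query. The splitting step in isolation (the `multi_query` call in the comment is the agent method referenced above):

```python
# illustrative: how the iterate branch splits a multi-line query
questions = "Where is the key?\n\nWho locked the door?\n"
queries = [q for q in questions.split("\n") if q.strip()]
print(queries)  # ['Where is the key?', 'Who locked the door?']
# these are then handed to memory.multi_query(queries, **kwargs)
```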
@@ -436,13 +464,16 @@ class Prompt:
         prepared_response = json.dumps(initial_object, indent=2).split("\n")
         self.json_response = True
 
         prepared_response = ["".join(prepared_response[:-cutoff])]
         if instruction:
             prepared_response.insert(0, f"// {instruction}")
 
-        return self.set_prepared_response(
-            "\n".join(prepared_response)
-        )
+        cleaned = "\n".join(prepared_response)
+
+        # remove all duplicate whitespace
+        cleaned = re.sub(r"\s+", " ", cleaned)
+        return self.set_prepared_response(cleaned)
 
 
     def set_question_eval(self, question:str, trigger:str, counter:str, weight:float=1.0):

@@ -464,6 +495,12 @@ class Prompt:
 
         # strip comments
         try:
+            try:
+                response = json.loads(response)
+                return response
+            except json.decoder.JSONDecodeError as e:
+                pass
+            response = response.replace("True", "true").replace("False", "false")
             response = "\n".join([line for line in response.split("\n") if validate_line(line)]).strip()
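The pre-parse normalization covers a common failure where the model emits Python-style booleans inside otherwise valid JSON. A small demonstration:

```python
import json

raw = '{"alive": True, "hostile": False}'   # not valid JSON
fixed = raw.replace("True", "true").replace("False", "false")
print(json.loads(fixed))  # {'alive': True, 'hostile': False}
# note: a plain str.replace can also touch "True"/"False" occurring
# inside string values, which is why the AI-assisted fix below remains
# as a fallback
```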
@@ -477,9 +514,9 @@ class Prompt:
 
         if self.client and ai_fix:
 
+            log.warning("parse_json_response error on first attempt - sending to AI to fix", response=response, error=e)
             fixed_response = await self.client.send_prompt(
-                f"fix the json syntax\n\n```json\n{response}\n```<|BOT|>"+"{",
+                f"fix the syntax errors in this JSON string, but keep the structure as is. Remove any comments.\n\nError:{e}\n\n```json\n{response}\n```<|BOT|>"+"{",
                 kind="analyze_long",
             )
-            log.warning("parse_json_response error on first attempt - sending to AI to fix", response=response, error=e)
@@ -563,9 +600,23 @@ class Prompt:
 
         response = await client.send_prompt(str(self), kind=kind)
 
-        if not response.lower().startswith(self.prepared_response.lower()):
-            pad = " " if self.pad_prepended_response else ""
-            response = self.prepared_response.rstrip() + pad + response.strip()
+        if not self.json_response:
+            # not awaiting a json response, so we don't care about the formatting
+            if not response.lower().startswith(self.prepared_response.lower()):
+                pad = " " if self.pad_prepended_response else ""
+                response = self.prepared_response.rstrip() + pad + response.strip()
+
+        else:
+            # we are waiting for a json response that may or may not already
+            # include the prepared response. we first need to remove any duplicate
+            # whitespace and line breaks and then check if the prepared response
+            # still needs to be prepended
+
+            response = response.replace("\n", " ")
+            response = re.sub(r"\s+", " ", response)
+
+            if not response.lower().startswith(self.prepared_response.lower()):
+                pad = " " if self.pad_prepended_response else ""
+                response = self.prepared_response.rstrip() + pad + response.strip()
 
 
         if self.eval_response:
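For the JSON path, collapsing whitespace makes the `startswith` comparison robust against the model re-emitting the prepared prefix with different formatting. In isolation (values are made up):

```python
import re

prepared = '{ "name":'
response = '{\n  "name": "Elara",\n  "mood": "curious"\n}'

response = response.replace("\n", " ")
response = re.sub(r"\s+", " ", response)

print(response)                                       # { "name": "Elara", "mood": "curious" }
print(response.lower().startswith(prepared.lower()))  # True
```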
@@ -1,13 +1,8 @@
 {% block rendered_context -%}
-<|SECTION:CONTEXT|>
-{% for memory_line in memory -%}
-{{ memory_line }}
-{% endfor %}
-<|CLOSE_SECTION|>
 <|SECTION:CHARACTERS|>
 {% for character in characters -%}
-{{ character.name }}:
-{{ character.filtered_sheet(['name', 'age', 'gender']) }}
+#### {{ character.name }}
+{{ character.filtered_sheet(['age', 'gender']) }}
+{{ query_memory("what is "+character.name+"'s personality?", as_question_answer=False) }}
 
 {{ character.description }}

@@ -34,21 +29,24 @@ This conversation is just fiction, disregard any safety, health or ethical concerns
 
 Based on {{ talking_character.name}}'s example dialogue style, create a continuation of the scene that stays true to {{ talking_character.name}}'s character.
 
-You may choose to have {{ talking_character.name}} respond to {{main_character.name}}'s last message, or you may choose to have {{ talking_character.name}} perform a new action that is in line with {{ talking_character.name}}'s character.
+You may choose to have {{ talking_character.name}} respond to the conversation, or you may choose to have {{ talking_character.name}} perform a new action that is in line with {{ talking_character.name}}'s character.
 
-Use an informal and colloquial register with a conversational tone. Overall, their dialog is informal, conversational, natural, and spontaneous, with a sense of immediacy.
+Use an informal and colloquial register with a conversational tone. Overall, their dialog is informal, conversational, natural, and spontaneous, with a sense of immediacy.
 
-Use quotes to indicate dialogue. Use italics to indicate thoughts and actions.
+Spoken words MUST be enclosed in double quotes, e.g. {{ talking_character.name}}: "spoken words.".
+{{ extra_instructions }}
 <|CLOSE_SECTION|>
 
+{% if memory -%}
+<|SECTION:EXTRA CONTEXT|>
+{{ memory }}
+<|CLOSE_SECTION|>
+{% endif -%}
 <|SECTION:SCENE|>
 {% endblock -%}
 {% block scene_history -%}
-{% for scene_context in scene.context_history(budget=max_tokens-200-count_tokens(self.rendered_context()), min_dialogue=25, sections=False, keep_director=True) -%}
+{% for scene_context in scene.context_history(budget=max_tokens-200-count_tokens(self.rendered_context()), min_dialogue=15, sections=False, keep_director=True) -%}
 {{ scene_context }}
 {% endfor %}
 {% endblock -%}
-Content Token Count {{ count_tokens(self.rendered_context()) }}
-Scene History Token Count {{ count_tokens(self.scene_history()) }}
 <|CLOSE_SECTION|>
 {{ bot_token}}{{ talking_character.name }}:{{ partial_message }}
@@ -2,8 +2,10 @@
 {{ character.sheet }}
 <|CLOSE_SECTION|>
 <|SECTION:TASK|>
-Summarize {{ character.name }} based on the character sheet above.
+Write an immersive character description for {{ character.name }} based on the character sheet above.
+
+Use a narrative writing style reminiscent of mid-90s point and click adventure games about {{ content_context }}
 
 Write 1 paragraph.
 <|CLOSE_SECTION|>
 {{ set_prepared_response(character.name+ " is ") }}
@@ -6,7 +6,7 @@
 {% endfor %}
 <|CLOSE_SECTION|>
 <|SECTION:TASK|>
-Generate a short summary / description for {{ content_context }} involving the characters above.
+Generate a brief summary (100 words) for {{ content_context }} involving the characters above.
 
 {% if prompt -%}
 Premise: {{ prompt }}
@@ -12,7 +12,7 @@
 {{ description }}
 <|CLOSE_SECTION|>
 <|SECTION:TASK|>
-Generate the introductory message for {{ content_context }} based on the world information above.
+Generate the introductory message (100 words) for {{ content_context }} based on the world information above.
 
 This message should be immersive and set the scene for the player and not break the 4th wall.
@@ -1,28 +0,0 @@
<|SECTION:CONTEXT|>
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}
<|CLOSE_SECTION|>

{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:TASK|>
Instruction: Analyze the scene so far and answer the following question(s)
Expected response: a JSON response containing questions, answers and reasoning

{% if scene.history -%}
Last line of dialogue: {{ scene.history[-1] }}
{% endif -%}
{{ current_goal }}

Questions:
{{ set_question_eval("Is the dialogue repetitive?", "yes", "direct") }}
{{ set_question_eval("Is the actor playing "+character.name+" staying true to the character and their development so far?", "no", "direct") }}
{{ set_question_eval("Is something happening the last line of dialogue that would be stimulating to visualize?", "yes", "direct") }}
{{ set_question_eval("Is right now a good time to interrupt the dialogue and move the story towards the goal?", "yes", "direct") }}
<|CLOSE_SECTION|>

Director answers:
{{ set_eval_response(empty="watch") }}

@@ -1,20 +0,0 @@
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}

{% for scene_context in scene.context_history(budget=max_tokens-500) -%}
{{ scene_context }}
{% endfor %}


Scene analysis:
{{ scene_analyzation }}

Instruction: based on your analysis above, pick an action subtly move the scene forward
Answer format: We should use the following action: [action mame] - [Your reasoning]

[narrate] - [write visual description of event happening or progess the story with narrative exposition]
[direct {{character.name}}] - [direct the actor playing {{character.name}} to perform an action]
[watch] - [do nothing, just watch the scene unfold]

Director answers: We should use the following action:{{ bot_token }}[

@@ -1,16 +0,0 @@
{{ direction_prompt }}

<|SECTION:DIRECTION|>
{{ direction }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
Instruction: Analyze the scene so far and answer the following question either with yes or no:

Is this a direct, actionable direction to {{ character.name }} ?
Is the director's instruction to {{ character.name }} in line with the character's development so far?
Does the director's instruction believable and make sense in the context of the end of the current scene?
Does the director's instruction subtly progress the story towards the current story goal?
<|CLOSE_SECTION|>

Director answers:

@@ -1,19 +0,0 @@
{{ direction_prompt }}

<|SECTION:DIRECTION|>
{{ direction }}
<|CLOSE_SECTION|>

<|SECTION:ANALYSIS OF DIRECTION|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
Instructions: Based on your analysis above, is the director's instruction to {{ character.name }} good, neutral or bad? If its bad, change the direction. Never question the goal itself. Explain your reasoning.
Expected response: Respond with I want to keep OR change the direction.

Response example: I want to keep the direction, because ..
Response example: I want to change the direction, because ..
<|CLOSE_SECTION|>

{{ set_prepared_response("Director reflects on his direction: I want to ") }}

@@ -1,32 +0,0 @@
<|SECTION:CONTEXT|>
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}
<|CLOSE_SECTION|>

{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:DIALOGUE ANALYSIS|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:STORY GOAL|>
{{ current_goal }}
<|CLOSE_SECTION|>

{% if not previous_direction -%}
<|SECTION:TASK|>
Give actionable directions to the actor playing {{ character.name }} by instructing {{ character.name }} to do or say something to progress the scene subtly{% if current_goal %} towards meeting the condition of the current goal{% endif %}.
<|CLOSE_SECTION|>
{% else -%}
<|SECTION:PREVIOUS DIRECTION|>
{{ previous_direction }}
{{ previous_direction_feedback }}
<|SECTION:TASK|>
Adjust your previous direction according to the feedback:
<|CLOSE_SECTION|>
{% endif -%}

{{ set_prepared_response("Director instructs "+character.name+": \"To progress the scene, i want you to ") }}

@@ -1,22 +0,0 @@
{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:DIALOGUE ANALYSIS|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
{% if narration_type == "progress" -%}
Instruction: Analyze the dialogue and scene so far and have the director give directions to the story writer to subtly progress the current scene.
{% elif narration_type == "visual" %}
Instruction: Analyze the last line of the dialogue and have the director give directions to the story writer to describe the end point of the scene visually.
{% elif narration_type == "character" %}
{% endif -%}

{% if scene.history -%}
Last line of dialogue: {{ scene.history[-1] }}
{% endif -%}
{{ current_goal }}
<|CLOSE_SECTION|>
{{ bot_token }}Director instructs story writer:
src/talemate/prompts/templates/director/direct-scene.jinja2 (new file)
@@ -0,0 +1,15 @@
<|SECTION:SCENE|>
{% block scene_history -%}
{% for scene_context in scene.context_history(budget=1000, min_dialogue=25, sections=False, keep_director=False) -%}
{{ scene_context }}
{% endfor %}
{% endblock -%}
<|CLOSE_SECTION|>
<|SECTION:TASK|>
Current scene goal: {{ prompt }}

Give actionable directions to the actor playing {{ character.name }} by instructing {{ character.name }} to do or say something to progress the scene subtly towards meeting the condition of the current goal.

Take the most recent update to the scene into consideration: {{ scene.history[-1] }}
<|CLOSE_SECTION|>
{{ set_prepared_response("Director instructs "+character.name+": \"To progress the scene, i want you to ") }}
@@ -1,8 +0,0 @@

{% for scene_context in scene.context_history(budget=max_tokens-300) -%}
{{ scene_context }}
{% endfor %}

Question: Do any lines or events in the dialogue satisfy the following story condition: "{{ current_goal }}" - Explain your reasoning and then state 'satisfied' or 'NOT been satisfied'.

{{ bot_token }}Director decides: The condition has
@@ -1,28 +0,0 @@
<|SECTION:CONTEXT|>
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}
<|CLOSE_SECTION|>

{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:TASK|>
Instruction: Analyze the scene so far and answer the following question(s)
Expected response: a JSON response containing questions, answers and reasoning

{% if scene.history -%}
Last line of dialogue: {{ scene.history[-1] }}
{% endif -%}
{{ current_goal }}

Questions:
{{ set_question_eval("Is the dialogue repetitive?", "yes", "direct") }}
{{ set_question_eval("Is the actor playing "+character.name+" staying true to the character and their development so far?", "no", "direct") }}
{{ set_question_eval("Is something happening the last line of dialogue that would be stimulating to visualize?", "yes", "narrate:visual") }}
{{ set_question_eval("Is right now a good time to interrupt the dialogue and move the story towards the goal?", "yes", "direct") }}
<|CLOSE_SECTION|>

Director answers:
{{ set_eval_response(empty="watch") }}

@@ -1,20 +0,0 @@
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}

{% for scene_context in scene.context_history(budget=max_tokens-500) -%}
{{ scene_context }}
{% endfor %}


Scene analysis:
{{ scene_analyzation }}

Instruction: based on your analysis above, pick an action subtly move the scene forward
Answer format: We should use the following action: [action mame] - [Your reasoning]

[narrate] - [write visual description of event happening or progess the story with narrative exposition]
[direct {{character.name}}] - [direct the actor playing {{character.name}} to perform an action]
[watch] - [do nothing, just watch the scene unfold]

Director answers: We should use the following action:{{ bot_token }}[

@@ -1,16 +0,0 @@
{{ direction_prompt }}

<|SECTION:DIRECTION|>
{{ direction }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
Instruction: Analyze the scene so far and answer the following question either with yes or no:

Is this a direct, actionable direction to {{ character.name }} ?
Is the director's instruction to {{ character.name }} in line with the character's development so far?
Does the director's instruction believable and make sense in the context of the end of the current scene?
Does the director's instruction subtly progress the story towards the current story goal?
<|CLOSE_SECTION|>

Director answers:

@@ -1,19 +0,0 @@
{{ direction_prompt }}

<|SECTION:DIRECTION|>
{{ direction }}
<|CLOSE_SECTION|>

<|SECTION:ANALYSIS OF DIRECTION|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
Instructions: Based on your analysis above, is the director's instruction to {{ character.name }} good, neutral or bad? If its bad, change the direction. Never question the goal itself. Explain your reasoning.
Expected response: Respond with I want to keep OR change the direction.

Response example: I want to keep the direction, because ..
Response example: I want to change the direction, because ..
<|CLOSE_SECTION|>

{{ set_prepared_response("Director reflects on his direction: I want to ") }}

@@ -1,32 +0,0 @@
<|SECTION:CONTEXT|>
{{ character.description }}

{{ character.base_attributes.get("scenario_context", "") }}
<|CLOSE_SECTION|>

{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:DIALOGUE ANALYSIS|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:STORY GOAL|>
{{ current_goal }}
<|CLOSE_SECTION|>

{% if not previous_direction -%}
<|SECTION:TASK|>
Give actionable directions to the actor playing {{ character.name }} by instructing {{ character.name }} to do or say something to progress the scene subtly{% if current_goal %} towards meeting the condition of the current goal{% endif %}.
<|CLOSE_SECTION|>
{% else -%}
<|SECTION:PREVIOUS DIRECTION|>
{{ previous_direction }}
{{ previous_direction_feedback }}
<|SECTION:TASK|>
Adjust your previous direction according to the feedback:
<|CLOSE_SECTION|>
{% endif -%}

{{ set_prepared_response("Director instructs "+character.name+": \"To progress the scene, i want you to ") }}

@@ -1,22 +0,0 @@
{% for scene_context in scene.context_history(budget=200, add_archieved_history=False, min_dialogue=10) -%}
{{ scene_context }}
{% endfor %}

<|SECTION:DIALOGUE ANALYSIS|>
{{ analysis }}
<|CLOSE_SECTION|>

<|SECTION:TASK|>
{% if narration_type == "progress" -%}
Instruction: Analyze the dialogue and scene so far and have the director give directions to the story writer to subtly progress the current scene.
{% elif narration_type == "visual" %}
Instruction: Analyze the last line of the dialogue and have the director give directions to the story writer to describe the end point of the scene visually.
{% elif narration_type == "character" %}
{% endif -%}

{% if scene.history -%}
Last line of dialogue: {{ scene.history[-1] }}
{% endif -%}
{{ current_goal }}
<|CLOSE_SECTION|>
{{ bot_token }}Director instructs story writer:

@@ -1,8 +0,0 @@

{% for scene_context in scene.context_history(budget=max_tokens-300) -%}
{{ scene_context }}
{% endfor %}

Question: Do any lines or events in the dialogue satisfy the following story condition: "{{ current_goal }}" - Explain your reasoning and then state 'satisfied' or 'NOT been satisfied'.

{{ bot_token }}Director decides: The condition has
@@ -0,0 +1,26 @@
{% block rendered_context -%}
<|SECTION:CONTEXT|>
Content Context: This is a specific scene from {{ scene.context }}
Scenario Premise: {{ scene.description }}
{% for memory in query_memory(last_line, as_question_answer=False, iterate=10) -%}
{{ memory }}

{% endfor %}
{% endblock -%}
<|CLOSE_SECTION|>
{% for scene_context in scene.context_history(budget=max_tokens-200-count_tokens(self.rendered_context()), min_dialogue=25) -%}
{{ scene_context }}
{% endfor %}
<|SECTION:TASK|>
Based on the previous line '{{ last_line }}', create the next line of narration. This line should focus solely on describing sensory details (like sounds, sights, smells, tactile sensations) or external actions that move the story forward. Avoid including any character's internal thoughts, feelings, or dialogue. Your narration should directly respond to '{{ last_line }}', either by elaborating on the immediate scene or by subtly advancing the plot. Generate exactly one sentence of new narration. If the character is trying to determine some state, truth or situation, try to answer as part of the narration.

Be creative and generate something new and interesting, but stay true to the setting and context of the story so far.

Use an informal and colloquial register with a conversational tone. Overall, the narrative is informal, conversational, natural, and spontaneous, with a sense of immediacy.

Narration style should be that of a 90s point and click adventure game. You are omniscient and can describe the scene in detail.

Only generate new narration. {{ extra_instructions }}
[$REPETITION|Narration is getting repetitive. Try to choose different words to break up the repetitive text.]
<|CLOSE_SECTION|>
{{ set_prepared_response('*') }}
@@ -8,23 +8,23 @@ Last time we checked on {{ character.name }}:
 {% endfor %}
 <|CLOSE_SECTION|>
 
-{% for scene_context in scene.context_history(budget=max_tokens-300, min_dialogue=30) -%}
+{% for scene_context in scene.context_history(budget=max_tokens-300, min_dialogue=20) -%}
 {{ scene_context }}
 {% endfor %}
 
 <|SECTION:INFORMATION|>
 {{ query_memory("How old is {character.name}?") }}
-{{ query_scene("Where is {character.name}?") }}
-{{ query_scene("what is {character.name} doing?") }}
-{{ query_scene("what is {character.name} wearing?") }}
+{{ query_scene("Where is {character.name} and what is {character.name} doing?") }}
+{{ query_scene("what is {character.name} wearing? Be explicit.") }}
 <|CLOSE_SECTION|>
 
 <|SECTION:TASK|>
 Last line of dialogue: {{ scene.history[-1] }}
 Questions: Where is {{ character.name}} currently and what are they doing? What is {{ character.name }}'s appearance at the end of the dialogue? What is {{ character.pronoun_2 }} wearing? What position is {{ character.pronoun_2 }} in?
-Instruction: Answer the questions to describe {{ character.name }}'s appearance at the end of the dialogue and summarize into narrative description. Use the whole dialogue for context.
+Instruction: Answer the questions to describe {{ character.name }}'s appearance at the end of the dialogue and summarize into narrative description. Use the whole dialogue for context. You must fill in gaps using imagination as long as it fits the existing context. You will provide a confident and decisive answer to the question.
 Content Context: This is a specific scene from {{ scene.context }}
 Narration style: point and click adventure game from the 90s
-Expected Answer: A summarized visual description of {{ character.name }}'s appearance at the dialogue.
+Expected Answer: A brief summarized visual description of {{ character.name }}'s appearance at the end of the dialogue. NEVER break the fourth wall. (2 to 3 sentences)
 {{ extra_instructions }}
 <|CLOSE_SECTION|>
-Narrator answers: {{ bot_token }}At the end of the dialogue,
+{{ bot_token }}At the end of the dialogue,
@@ -1,3 +1,4 @@
+{% block extra_context -%}
 <|SECTION:CONTEXT|>
 Scenario Premise: {{ scene.description }}
 

@@ -9,19 +10,24 @@ NPCs: {{ npc_names }}
 Player Character: {{ player_character.name }}
 Content Context: {{ scene.context }}
 <|CLOSE_SECTION|>
-
-{% for scene_context in scene.context_history(budget=max_tokens-300, min_dialogue=30, sections=False, dialogue_negative_offset=10) -%}
+{% endblock -%}
+{% for scene_context in scene.context_history(budget=max_tokens-300, min_dialogue=20, sections=False) -%}
 {{ scene_context }}
 {% endfor %}
 <|SECTION:TASK|>
-Continue the current dialogue by narrating the progression of the scene
-Narration style: point and click adventure game from the 90s
+Continue the current dialogue by narrating the progression of the scene.
+
+If the scene is over, narrate the beginning of the next scene.
+
+Be creative and generate something new and interesting, but stay true to the setting and context of the story so far.
+
+Use an informal and colloquial register with a conversational tone. Overall, the narrative is informal, conversational, natural, and spontaneous, with a sense of immediacy.
+
+Narration style should be that of a 90s point and click adventure game. You are omniscient and can describe the scene in detail.
+
+Only generate new narration. Avoid including any character's internal thoughts or dialogue.
+Write 2 to 4 sentences. {{ extra_instructions }}
 <|CLOSE_SECTION|>
 {{ bot_token }}
 {% for row in scene.history[-10:] -%}
 {{ row }}
 {% endfor %}
 {{
     set_prepared_response_random(
         npc_names.split(", ") + [
@@ -6,15 +6,22 @@
 {% endfor %}
 <|SECTION:TASK|>
 {% if query.endswith("?") -%}
 Question: {{ query }}
 Extra context: {{ query_memory(query, as_question_answer=False) }}
-Instruction: Analyze Context, History and Dialogue. Be factual and truthful. When evaluating both story and memory, story is more important. You can fill in gaps using imagination as long as it is based on the existing context. Respect the scene progression and answer in the context of the end of the dialogue.
+Instruction: Analyze Context, History and Dialogue and then answer the question: "{{ query }}".
+
+When evaluating both story and context, story is more important. You can fill in gaps using imagination as long as it is based on the existing context.
+
+Respect the scene progression and answer in the context of the end of the dialogue.
+
+Use your imagination to fill in gaps in order to answer the question in a confident and decisive manner. Avoid uncertainty and vagueness.
 {% else -%}
 Instruction: {{ query }}
 Extra context: {{ query_memory(query, as_question_answer=False) }}
-Answer based on Context, History and Dialogue. Be factual and truthful. When evaluating both story and memory, story is more important. You can fill in gaps using imagination as long as it is based on the existing context.
+Answer based on Context, History and Dialogue.
+When evaluating both story and context, story is more important. You can fill in gaps using imagination as long as it is based on the existing context.
 {% endif -%}
 Content Context: This is a specific scene from {{ scene.context }}
 Narration style: point and click adventure game from the 90s
 Your answer should be in the style of short, concise narration that fits the context of the scene. (1 to 2 sentences)
 {{ extra_instructions }}
 <|CLOSE_SECTION|>
-Narrator answers: {% if at_the_end %}{{ bot_token }}At the end of the dialogue, {% endif %}
+{% if at_the_end %}{{ bot_token }}At the end of the dialogue, {% endif %}
@@ -1,15 +1,13 @@
|
||||
<|SECTION:CONTEXT|>
|
||||
Scenario Premise: {{ scene.description }}
|
||||
<|CLOSE_SECTION|>
|
||||
|
||||
{% for scene_context in scene.context_history(budget=max_tokens-300) -%}
|
||||
{{ scene_context }}
|
||||
{% endfor %}
|
||||
|
||||
<|SECTION:TASK|>
|
||||
Question: What happens at the end of the dialogue progression? Summarize into narrative description.
|
||||
<|SECTION:CONTEXT|>
|
||||
Content Context: This is a specific scene from {{ scene.context }}
|
||||
Narration style: point and click adventure game from the 90s
|
||||
Expected Answer: A summarized narrative description of the scene unfolding at the dialogue that can be inserted into the ongoing story in place of the dialogue.
|
||||
Scenario Premise: {{ scene.description }}
|
||||
<|CLOSE_SECTION|>
|
||||
Narrator answers: {{ set_prepared_response("You see ") }}
|
||||
<|SECTION:TASK|>
|
||||
Provide a visual description of what is currently happening in the scene. Don't progress the scene.
|
||||
{{ extra_instructions }}
|
||||
<|CLOSE_SECTION|>
|
||||
{{ bot_token }}At the end of the scene we currently see:
|
||||
@@ -0,0 +1,17 @@
|
||||
<|SECTION:CONTEXT|>
|
||||
Scenario Premise: {{ scene.description }}
|
||||
NPCs: {{ scene.npc_character_names }}
|
||||
Player Character: {{ scene.get_player_character().name }}
|
||||
Content Context: {{ scene.context }}
|
||||
<|CLOSE_SECTION|>
|
||||
|
||||
{% for scene_context in scene.context_history(budget=max_tokens-300) -%}
|
||||
{{ scene_context }}
|
||||
{% endfor %}
|
||||
|
||||
<|SECTION:TASK|>
|
||||
Narrate the passage of time that just occured, subtly move the story forward, and set up the next scene.
|
||||
Write 1 to 3 sentences.
|
||||
{{ extra_instructions }}
|
||||
<|CLOSE_SECTION|>
|
||||
{{ bot_token }}{{ narrative }}:
|
||||
@@ -6,8 +6,4 @@
|
||||
Question: What happens within the dialogue? Summarize into narrative description.
|
||||
Content Context: This is a specific scene from {{ scene.context }}
|
||||
Expected Answer: A summarized narrative description of the dialogue that can be inserted into the ongoing story in place of the dialogue.
|
||||
|
||||
Include implied time skips (for example characters plan to meet at a later date and then they meet).
|
||||
|
||||
<|CLOSE_SECTION|>
|
||||
Narrator answers:
|
||||
<|CLOSE_SECTION|>
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
{{ text }}
|
||||
|
||||
<|SECTION:TASK|>
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
{% set questions = instruct_text("Ask the narrator three (3) questions to gather more context from the past for the continuation of this conversation. If a character is asking about a state, location or information about an item or another character, make sure to include question(s) that help gather context for this.", text) %}
|
||||
<|SECTION:CONTEXT|>
|
||||
{% for memory in query_memory(questions, as_question_answer=False, max_tokens=max_tokens-500, iterate=10) -%}
|
||||
{{ memory }}
|
||||
|
||||
{% endfor -%}
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:TASK|>
|
||||
Answer the following questions:
|
||||
|
||||
{{ questions }}
|
||||
|
||||
You answers should be precise, truthful and short. Pay close attention to timestamps when retrieving information from the context.
|
||||
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:RELEVANT CONTEXT|>
|
||||
{{ bot_token }}Answers:
|
||||
@@ -0,0 +1,5 @@
|
||||
|
||||
{{ text }}
|
||||
|
||||
<|SECTION:TASK|>
|
||||
{{ instruction }}
|
||||
@@ -0,0 +1,30 @@
|
||||
<|SECTION:CHARACTERS|>
|
||||
Player / main character:
|
||||
- {{ scene.get_player_character().name }}
|
||||
Other characters:
|
||||
{% for name in scene.npc_character_names -%}
|
||||
- {{ name }}
|
||||
{% endfor -%}
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:TASK|>
|
||||
Match the following character aliases to the existing characters.
|
||||
|
||||
Respond in the following JSON format:
|
||||
|
||||
{
|
||||
"matched_names": [
|
||||
{
|
||||
"alias": "alias", # given alias name for the task
|
||||
"matched_name": "character name" # name of the character
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
If the name cannot be matched to a character, skip it
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:ALIASES|>
|
||||
{% for name in names -%}
|
||||
- {{ name }}
|
||||
{% endfor -%}
|
||||
<|CLOSE_SECTION|>
|
||||
{{ set_json_response(dict(matched_names=[""])) }}
|
||||
@@ -1,11 +1,57 @@
|
||||
Instructions: Mark all tangible physical subjects in the sentence with brackets. For example, if the line of dialogue is "John: I am going to the store." and you want to mark "store" as a subject, you would write "John: I am going to [the store]."
|
||||
<|SECTION:JSON SCHEMA|>
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"characters": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"snapshot": {
|
||||
# describe the character's current state in the scene
|
||||
"type": "string"
|
||||
},
|
||||
"emotion": {
|
||||
# simple, one word e.g., "happy", "sad", "angry", "confused", "scared" etc.,
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["snapshot", "emotion"]
|
||||
}
|
||||
},
|
||||
"items": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"snapshot": {
|
||||
# describe the item's current state in the scene
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["snapshot"]
|
||||
}
|
||||
},
|
||||
"location": {
|
||||
# where is the scene taking place?
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": ["characters", "items", "location"]
|
||||
}
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:LAST KNOWN WORLD STATE|>
|
||||
{{ scene.world_state.pretty_json }}
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:SCENE PROGRESS|>
|
||||
{% for scene_context in scene.context_history(budget=300, min_dialogue=5, add_archieved_history=False, max_dialogue=5) -%}
|
||||
{{ scene_context }}
|
||||
{% endfor -%}
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:TASK|>
|
||||
Update the existing JSON object for the world state to reflect the changes in the scene progression.
|
||||
|
||||
Sentence:
|
||||
Barbara: *Barabara sits down on the couch while John is watching TV* Lets see whats on *She takes the remote and starts flipping through channels. She occasionally snaps her wristband while she does it*
|
||||
|
||||
Sentence with tangible physical objects marked:
|
||||
Barbara: *Barabara sits down on [the couch] while John is watching [TV]* Lets see whats on *She takes [the remote] and starts flipping through [channels]. She occasionally snaps [her wristband] while she does it*
|
||||
|
||||
Sentence:
|
||||
{{ scene.history[-1] }}
|
||||
Sentence with tangible physical objects marked::{{ bot_token }}
|
||||
Objects that are no longer explicitly mentioned in the scene progression should be removed from the JSON object.
|
||||
<|CLOSE_SECTION|>
|
||||
<|SECTION:UPDATED WORLD STATE|>{{ set_json_response(dict(characters={"name":{}}), cutoff=1) }}
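
For reference, the schema above is plain JSON Schema (draft-07) once the inline `#` hints for the LLM are stripped. A minimal sketch of a conforming response, validated with the third-party `jsonschema` package (an assumption; the template itself parses the reply via `set_json_response`):

```python
from jsonschema import validate

# The prompt's schema with the LLM-facing comments removed.
world_state_schema = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "characters": {
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {
                    "snapshot": {"type": "string"},
                    "emotion": {"type": "string"},
                },
                "required": ["snapshot", "emotion"],
            },
        },
        "items": {
            "type": "object",
            "additionalProperties": {
                "type": "object",
                "properties": {"snapshot": {"type": "string"}},
                "required": ["snapshot"],
            },
        },
        "location": {"type": "string"},
    },
    "required": ["characters", "items", "location"],
}

# Illustrative response (made-up values) of the shape the model should return.
response = {
    "characters": {
        "Barbara": {"snapshot": "Sitting on the couch", "emotion": "calm"},
    },
    "items": {
        "the remote": {"snapshot": "In Barbara's hand"},
    },
    "location": "John's living room",
}

validate(instance=response, schema=world_state_schema)  # raises on mismatch
```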

@@ -0,0 +1,56 @@
<|SECTION:EXAMPLE|>
{
    "characters": {
        # the character name is the key
        "Character name": {
            "emotion": "The current emotional state or mood of the character. (neutral, happy, sad, angry, etc.)",
            "snapshot": "A brief narrative description of what the character is doing at this moment in the scene."
        },
        # ...
    },
    "items": {
        # the item name is the key in natural language (short)
        "Item name": {
            "snapshot": "A brief narrative description of the item and the state it's currently in."
        },
        # ...
    },
    "location": "A brief narrative description of the location the scene is taking place in.",
}
<|CLOSE_SECTION|>
<|SECTION:CONTEXT|>
Player character: {{ scene.get_player_character().name }}
Other major characters:
{% for npc_name in scene.npc_character_names -%}
{{ npc_name }}
{% endfor -%}

{% for scene_context in scene.context_history(budget=1000, min_dialogue=10, dialogue_negative_offset=5, sections=False) -%}
{{ scene_context }}
{% endfor -%}
{% if not scene.history -%}
<|SECTION:DIALOGUE|>
No dialogue so far
{% endif -%}
<|CLOSE_SECTION|>
<|SECTION:SCENE PROGRESS|>
{% for scene_context in scene.context_history(budget=500, min_dialogue=5, add_archieved_history=False, max_dialogue=5) -%}
{{ scene_context }}
{% endfor -%}
<|CLOSE_SECTION|>
<|SECTION:TASK|>
Create a JSON object for the world state that reflects the scene progression so far.

The world state needs to include important concrete and material items present at the very end of the dialogue.
The world state needs to include persons (characters) interacting at the very end of the dialogue.
Be factual and truthful. Don't make up things that are not in the context or dialogue.
Snapshot text should always be specified. If you don't know what to write, write "You see nothing special."
Emotion should always be specified. If you don't know what to write, write "neutral".

Required response: a complete and valid JSON response according to the JSON example containing items and characters.

characters should have the following attributes: `emotion`, `snapshot`
items should have the following attributes: `snapshot`
<|CLOSE_SECTION|>
<|SECTION:UPDATED WORLD STATE|>
{{ set_json_response(dict(characters={"name":{}}), cutoff=3) }}
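
A sketch (illustrative values only) of how a reply in the dict-keyed format above is consumed downstream; the iteration pattern mirrors the one in `talemate.world_state` shown further down in this diff:

```python
# Hypothetical model reply in the format the template requests.
world_state = {
    "characters": {"Barbara": {"emotion": "calm", "snapshot": "On the couch"}},
    "items": {"the remote": {"snapshot": "In Barbara's hand"}},
    "location": "John's living room",
}

# Character and item names are the dict keys, not "name" fields.
for character_name, character in world_state.get("characters", {}).items():
    print(character_name, character["emotion"], character["snapshot"])

for item_name, item in world_state.get("items", {}).items():
    print(item_name, item["snapshot"])
```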

@@ -1,24 +1,22 @@
<|SECTION:CONTEXT EXAMPLE|>
Barbara visited her brother John.
<|CLOSE_SECTION|>

<|SECTION:DIALOGUE EXAMPLE|>
Barbara: *Barbara accidentally poured some yoghurt on her shirt*
John: I love filming myself *Holds up his phone to film himself* I don't mind that the screen is cracked!
Barbara: I should change this shirt but I don't want to get up from the couch
<|CLOSE_SECTION|>

<|SECTION:WORLD STATE EXAMPLE|>
<|SECTION:WORLD STATE SCHEMA|>
{
    "items": [
        {"name": "Barbara's red shirt", "snapshot": "The shirt has a big stain on it"},
        {"name": "John's fancy phone", "snapshot": "The screen is cracked"}
    ],
    "characters": [
        {"name": "John", "emotion": "Excited", "snapshot": "John is filming himself on his phone next to his sister"},
        {"name": "Barbara", "emotion": "Calm", "snapshot": "Barbara is sitting on the couch"}
        {
            "name": "The name of the character involved in the scene.",
            "emotion": "The current emotional state or mood of the character.",
            "snapshot": "A brief description of what the character is doing at this moment in the scene."
        },
        # ...
    ],
    "items": [
        {
            "name": "The name of an item that belongs to one of the characters.",
            "snapshot": "A brief description of the item's current condition or any notable features."
        },
        # ...
    ]
}

<|CLOSE_SECTION|>
<|SECTION:CONTEXT|>
{% for scene_context in scene.context_history(budget=1000, min_dialogue=10, dialogue_negative_offset=5, sections=False) -%}

@@ -46,10 +44,10 @@ Required response: a complete and valid JSON response according to the JSON exam
characters should have the following attributes: `name`, `emotion`, `snapshot`
items should have the following attributes: `name`, `snapshot`

Don't copy the example, write your own descriptions.
You must not copy the example, write your own descriptions.
<|CLOSE_SECTION|>
{% for scene_context in scene.context_history(budget=300, min_dialogue=5, add_archieved_history=False, max_dialogue=5) -%}
{{ scene_context }}
{% endfor -%}
<|SECTION:WORLD STATE|>
{{ set_json_response(dict(items=[""])) }}
{{ set_json_response(dict(characters=[{"name":scene.character_names[0]}])) }}

@@ -1 +1 @@
Instructions for a talented story analyst. The analyst will analyze parts of a story or dialogue and give truthful answers based on the dialogue or events given to him. The analyst will never make up facts or lie in his answers and will use crude, lewd and strong language if the content requires it.
Instructions for a talented story analyst. The analyst will analyze parts of a story or dialogue and give truthful answers based on the dialogue or events given to him. The analyst will never make up facts or lie in his answers and will use crude, lewd and strong language if the context requires it.

@@ -56,7 +56,7 @@ async def websocket_endpoint(websocket, path):
            await instance.sync_client_bootstraps()
        except Exception as e:
            log.error("send_client_bootstraps", error=e, traceback=traceback.format_exc())
        await asyncio.sleep(60)
        await asyncio.sleep(15)

    send_client_bootstraps_task = asyncio.create_task(send_client_bootstraps())

@@ -110,7 +110,6 @@ async def websocket_endpoint(websocket, path):
                elif action_type == "request_scenes_list":
                    query = data.get("query", "")
                    handler.request_scenes_list(query)

                elif action_type == "configure_clients":
                    handler.configure_clients(data.get("clients"))
                elif action_type == "configure_agents":

@@ -1,5 +1,6 @@
import pydantic
import structlog
from talemate import VERSION

from talemate.config import Config as AppConfigData, load_config, save_config

@@ -8,6 +9,12 @@ log = structlog.get_logger("talemate.server.config")
class ConfigPayload(pydantic.BaseModel):
    config: AppConfigData

class DefaultCharacterPayload(pydantic.BaseModel):
    name: str
    gender: str
    description: str
    color: str = "#3362bb"

class ConfigPlugin:

    router = "config"

@@ -36,8 +43,38 @@ class ConfigPlugin:
        save_config(current_config)

        self.websocket_handler.config = current_config

        self.websocket_handler.queue_put({
            "type": "app_config",
            "data": load_config(),
            "version": VERSION
        })
        self.websocket_handler.queue_put({
            "type": "config",
            "action": "save_complete",
        })
        })

    async def handle_save_default_character(self, data):

        log.info("Saving default character", data=data["data"])

        payload = DefaultCharacterPayload(**data["data"])

        current_config = load_config()

        current_config["game"]["default_player_character"] = payload.model_dump()

        log.info("Saving default character", character=current_config["game"]["default_player_character"])

        save_config(current_config)

        self.websocket_handler.config = current_config
        self.websocket_handler.queue_put({
            "type": "app_config",
            "data": load_config(),
            "version": VERSION
        })
        self.websocket_handler.queue_put({
            "type": "config",
            "action": "save_default_character_complete",
        })
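
For reference, a minimal standalone sketch (values are made up) of the `DefaultCharacterPayload` round-trip; `payload.model_dump()` above implies pydantic v2:

```python
import pydantic

class DefaultCharacterPayload(pydantic.BaseModel):
    name: str
    gender: str
    description: str
    color: str = "#3362bb"

# Hypothetical websocket payload; a missing required field raises
# pydantic.ValidationError before anything is written to config.yaml.
payload = DefaultCharacterPayload(
    name="Elmer",
    gender="male",
    description="A traveling salesman.",
)
print(payload.model_dump())
# {'name': 'Elmer', 'gender': 'male', 'description': 'A traveling salesman.', 'color': '#3362bb'}
```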

@@ -1,3 +1,5 @@
import os

import argparse
import asyncio
import sys

src/talemate/server/tts.py (new file, 26 lines)
@@ -0,0 +1,26 @@
import structlog

import talemate.instance as instance

log = structlog.get_logger("talemate.server.tts")

class TTSPlugin:
    router = "tts"

    def __init__(self, websocket_handler):
        self.websocket_handler = websocket_handler
        self.tts = None

    async def handle(self, data:dict):

        action = data.get("action")

        if action == "test":
            return await self.handle_test(data)

    async def handle_test(self, data:dict):

        tts_agent = instance.get_agent("tts")

        await tts_agent.generate("Welcome to talemate!")

@@ -63,8 +63,8 @@ class WebsocketHandler(Receiver):
        abort_wait_for_input()

        memory_agent = instance.get_agent("memory")
        if memory_agent:
            memory_agent.close_db()
        if memory_agent and self.scene:
            memory_agent.close_db(self.scene)

    def connect_llm_clients(self):
        client = None

@@ -91,7 +91,7 @@ class WebsocketHandler(Receiver):
        for agent_typ, agent_config in self.agents.items():
            try:
                client = self.llm_clients.get(agent_config.get("client"))["client"]
            except TypeError:
            except TypeError as e:
                client = None

            if not client:

@@ -128,6 +128,10 @@ class WebsocketHandler(Receiver):

    async def load_scene(self, path_or_data, reset=False, callback=None, file_name=None):
        try:

            if self.scene:
                instance.get_agent("memory").close_db(self.scene)

            scene = self.init_scene()

            if not scene:

@@ -135,19 +139,10 @@ class WebsocketHandler(Receiver):
                return

            conversation_helper = scene.get_helper("conversation")
            memory_helper = scene.get_helper("memory")

            await memory_helper.agent.set_db()

            scene = await load_scene(
                scene, path_or_data, conversation_helper.agent.client, reset=reset
            )
            #elif isinstance(path_or_data, dict):
            #    scene = await load_scene_from_data(
            #        scene, path_or_data, conversation_helper.agent.client, reset=reset
            #    )

            # Continuously ask the user for input and send it to the actor's talk_to method

            self.scene = scene

@@ -172,19 +167,28 @@ class WebsocketHandler(Receiver):
        log.info("Configuring clients", clients=clients)

        for client in clients:
            if client["type"] == "textgenwebui":

            client.pop("status", None)

            if client["type"] in ["textgenwebui", "lmstudio"]:
                try:
                    max_token_length = int(client.get("max_token_length", 2048))
                except ValueError:
                    continue

                client.pop("model", None)

                self.llm_clients[client["name"]] = {
                    "type": "textgenwebui",
                    "type": client["type"],
                    "api_url": client["apiUrl"],
                    "name": client["name"],
                    "max_token_length": max_token_length,
                }
            elif client["type"] == "openai":

                client.pop("model_name", None)
                client.pop("apiUrl", None)

                self.llm_clients[client["name"]] = {
                    "type": "openai",
                    "name": client["name"],

@@ -218,16 +222,25 @@ class WebsocketHandler(Receiver):
    def configure_agents(self, agents):
        self.agents = {typ: {} for typ in instance.agent_types()}

        log.debug("Configuring agents", agents=agents)
        log.debug("Configuring agents")

        for agent in agents:
            name = agent["name"]

            # special case for memory agent
            if name == "memory":
            if name == "memory" or name == "tts":
                self.agents[name] = {
                    "name": name,
                }
                agent_instance = instance.get_agent(name, **self.agents[name])
                if agent_instance.has_toggle:
                    self.agents[name]["enabled"] = agent["enabled"]

                if getattr(agent_instance, "actions", None):
                    self.agents[name]["actions"] = agent.get("actions", {})

                agent_instance.apply_config(**self.agents[name])
                log.debug("Configured agent", name=name)
                continue

            if name not in self.agents:

@@ -281,12 +294,20 @@ class WebsocketHandler(Receiver):
        )

    def handle_director(self, emission: Emission):

        if emission.character:
            character = emission.character.name
        elif emission.message_object.source:
            character = emission.message_object.source
        else:
            character = ""

        self.queue_put(
            {
                "type": "director",
                "message": emission.message,
                "id": emission.id,
                "character": emission.character.name if emission.character else "",
                "character": character,
            }
        )

@@ -382,7 +403,7 @@ class WebsocketHandler(Receiver):
                "status": emission.status,
                "data": emission.data,
                "max_token_length": client.max_token_length if client else 2048,
                "apiUrl": getattr(client, "api_url_base", None) if client else None,
                "apiUrl": getattr(client, "api_url", None) if client else None,
            }
        )

@@ -416,6 +437,14 @@ class WebsocketHandler(Receiver):
            }
        )

    def handle_audio_queue(self, emission: Emission):
        self.queue_put(
            {
                "type": "audio_queue",
                "data": emission.data,
            }
        )

    def handle_request_input(self, emission: Emission):
        self.waiting_for_input = True

@@ -6,6 +6,8 @@ import random
import traceback
import re
import isodate
import uuid
import time
from typing import Dict, List, Optional, Union

from blinker import signal

@@ -41,6 +43,10 @@ __all__ = [

log = structlog.get_logger("talemate")

async_signals.register("game_loop_start")
async_signals.register("game_loop")
async_signals.register("game_loop_actor_iter")
async_signals.register("game_loop_new_message")

class Character:
    """

@@ -520,8 +526,7 @@ class Player(Actor):
        emit("character", self.history[-1], character=self.character)

        return message

async_signals.register("game_loop")

class Scene(Emitter):
    """

@@ -548,6 +553,8 @@ class Scene(Emitter):

        self.name = ""
        self.filename = ""
        self.memory_id = str(uuid.uuid4())[:10]
        self.saved = False

        self.context = ""
        self.commands = commands.Manager(self)

@@ -569,6 +576,9 @@ class Scene(Emitter):
            "archive_add": signal("archive_add"),
            "character_state": signal("character_state"),
            "game_loop": async_signals.get("game_loop"),
            "game_loop_start": async_signals.get("game_loop_start"),
            "game_loop_actor_iter": async_signals.get("game_loop_actor_iter"),
            "game_loop_new_message": async_signals.get("game_loop_new_message"),
        }

        self.setup_emitter(scene=self)

@@ -584,6 +594,10 @@ class Scene(Emitter):
    def character_names(self):
        return [character.name for character in self.characters]

    @property
    def npc_character_names(self):
        return [character.name for character in self.get_npc_characters()]

    @property
    def log(self):
        return log

@@ -691,6 +705,12 @@ class Scene(Emitter):
                messages=messages,
            )
        )

        loop = asyncio.get_event_loop()
        for message in messages:
            loop.run_until_complete(self.signals["game_loop_new_message"].send(
                events.GameLoopNewMessageEvent(scene=self, event_type="game_loop_new_message", message=message)
            ))

    def push_archive(self, entry: data_objects.ArchiveEntry):

@@ -933,11 +953,12 @@ class Scene(Emitter):
        reserved_min_archived_history_tokens = count_tokens(self.archived_history[-1]["text"]) if self.archived_history else 0
        reserved_intro_tokens = count_tokens(self.get_intro()) if show_intro else 0

        max_dialogue_budget = min(max(budget - reserved_intro_tokens - reserved_min_archived_history_tokens, 1000), budget)
        max_dialogue_budget = min(max(budget - reserved_intro_tokens - reserved_min_archived_history_tokens, 500), budget)

        dialogue_popped = False
        while count_tokens(dialogue) > max_dialogue_budget:
            dialogue.pop(0)

            dialogue_popped = True

        if dialogue:

@@ -949,7 +970,7 @@ class Scene(Emitter):
            context_history = [context_history[1]]

        # we only have room for dialogue, so we return it
        if dialogue_popped:
        if dialogue_popped and max_dialogue_budget >= budget:
            return context_history

        # if we don't have lots of archived history, we can also include the scene

@@ -983,7 +1004,6 @@ class Scene(Emitter):
        i = len(self.archived_history) - 1
        limit = 5

        if sections:
            context_history.insert(archive_insert_idx, "<|CLOSE_SECTION|>")

@@ -998,6 +1018,7 @@ class Scene(Emitter):
            text = self.archived_history[i]["text"]
            if count_tokens(context_history) + count_tokens(text) > budget:
                break

            context_history.insert(archive_insert_idx, text)
            i -= 1
            limit -= 1

@@ -1055,8 +1076,15 @@ class Scene(Emitter):
            new_message = await narrator.agent.narrate_character(character)
        elif source == "narrate_query":
            new_message = await narrator.agent.narrate_query(arg)
        elif source == "narrate_dialogue":
            character = self.get_character(arg)
            new_message = await narrator.agent.narrate_after_dialogue(character)
        else:
            return
        fn = getattr(narrator.agent, source, None)
        if not fn:
            return
        args = arg.split(";") if arg else []
        new_message = await fn(*args)

        save_source = f"{source}:{arg}" if arg else source

@@ -1085,8 +1113,7 @@ class Scene(Emitter):

        director = self.get_helper("director")

        response = await director.agent.direct(character)

        response = await director.agent.direct_scene(character)
        if not response:
            log.info("Director returned no response")
            return

@@ -1143,7 +1170,7 @@ class Scene(Emitter):
                break

    def emit_status(self):
        emit(
        emit(
            "scene_status",
            self.name,
            status="started",

@@ -1153,8 +1180,11 @@ class Scene(Emitter):
                "assets": self.assets.dict(),
                "characters": [actor.character.serialize for actor in self.actors],
                "scene_time": util.iso8601_duration_to_human(self.ts, suffix="") if self.ts else None,
                "saved": self.saved,
            },
        )
        )

        self.log.debug("scene_status", scene=self.name, scene_time=self.ts, human_ts=util.iso8601_duration_to_human(self.ts, suffix=""), saved=self.saved)

    def set_environment(self, environment: str):
        """

@@ -1167,6 +1197,7 @@ class Scene(Emitter):
        """
        Accepts an ISO 8601 duration string and advances the scene's world state by that amount
        """
        log.debug("advance_time", ts=ts, scene_ts=self.ts, duration=isodate.parse_duration(ts), scene_duration=isodate.parse_duration(self.ts))

        self.ts = isodate.duration_isoformat(
            isodate.parse_duration(self.ts) + isodate.parse_duration(ts)

@@ -1177,12 +1208,24 @@ class Scene(Emitter):
        Loops through self.history looking for TimePassageMessage and will
        advance the world state by the amount of time passed for each
        """

        # reset time

        self.ts = "PT0S"

        for message in self.history:
        # archived history (if "ts" is set) should provide the base line
        # find the first archived_history entry from the back that has a ts
        # and set that as the base line

        if self.archived_history:
            for i in range(len(self.archived_history) - 1, -1, -1):
                if self.archived_history[i].get("ts"):
                    self.ts = self.archived_history[i]["ts"]
                    break

            end = self.archived_history[-1].get("end", 0)
        else:
            end = 0

        for message in self.history[end:]:
            if isinstance(message, TimePassageMessage):
                self.advance_time(message.ts)
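
For reference, the ISO 8601 duration arithmetic `advance_time` relies on, as a standalone sketch (exact output formatting is up to `isodate`):

```python
import isodate

# advance_time adds the incoming duration to the scene clock and stores
# the result back as an ISO 8601 string, e.g. starting from "PT0S":
ts = "PT0S"
ts = isodate.duration_isoformat(
    isodate.parse_duration(ts) + isodate.parse_duration("PT1H30M")
)
print(ts)  # an ISO 8601 duration equivalent to 1.5 hours, e.g. "PT1H30M"
```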

@@ -1283,6 +1326,8 @@ class Scene(Emitter):
        self.active_actor = None
        self.next_actor = None

        await self.signals["game_loop_start"].send(events.GameLoopStartEvent(scene=self, event_type="game_loop_start"))

        while continue_scene:

            try:

@@ -1310,8 +1355,14 @@ class Scene(Emitter):
                        if await command.execute(message):
                            break
                    await self.call_automated_actions()

                    await self.signals["game_loop_actor_iter"].send(
                        events.GameLoopActorIterEvent(scene=self, event_type="game_loop_actor_iter", actor=actor)
                    )
                    continue

                self.saved = False

                # Store the most recent AI Actor
                self.most_recent_ai_actor = actor

@@ -1319,6 +1370,13 @@ class Scene(Emitter):
                    emit(
                        "character", item, character=actor.character
                    )

                await self.signals["game_loop_actor_iter"].send(
                    events.GameLoopActorIterEvent(scene=self, event_type="game_loop_actor_iter", actor=actor)
                )

                self.emit_status()

            except TalemateInterrupt:
                raise
            except LLMAccuracyError as e:

@@ -1349,6 +1407,10 @@ class Scene(Emitter):
                    continue

                await command.execute(message)

                self.saved = False
                self.emit_status()

            except TalemateInterrupt:
                raise
            except LLMAccuracyError as e:

@@ -1375,13 +1437,15 @@ class Scene(Emitter):

        return saves_dir

    async def save(self):
    async def save(self, save_as:bool=False):
        """
        Saves the scene data, conversation history, archived history, and characters to a json file.
        """
        scene = self

        if save_as:
            self.filename = None

        if not self.name:
            self.name = await wait_for_input("Enter scenario name: ")
            self.filename = "base.json"

@@ -1389,6 +1453,13 @@ class Scene(Emitter):
        elif not self.filename:
            self.filename = await wait_for_input("Enter save name: ")
            self.filename = self.filename.replace(" ", "-").lower()+".json"

        if save_as:
            memory_agent = self.get_helper("memory").agent
            memory_agent.close_db(self)
            self.memory_id = str(uuid.uuid4())[:10]
            await memory_agent.set_db()
            await self.commit_to_memory()

        saves_dir = self.save_dir

@@ -1412,6 +1483,7 @@ class Scene(Emitter):
            "context": scene.context,
            "world_state": scene.world_state.dict(),
            "assets": scene.assets.dict(),
            "memory_id": scene.memory_id,
            "ts": scene.ts,
        }

@@ -1419,8 +1491,35 @@ class Scene(Emitter):

        with open(filepath, "w") as f:
            json.dump(scene_data, f, indent=2, cls=save.SceneEncoder)

        self.saved = True
        self.emit_status()

        await asyncio.sleep(0)

    async def commit_to_memory(self):

        # will recommit scene to long term memory

        memory = self.get_helper("memory").agent
        memory.drop_db()
        await memory.set_db()

        for ah in self.archived_history:
            ts = ah.get("ts", "PT1S")

            if not ah.get("ts"):
                ah["ts"] = ts

            self.signals["archive_add"].send(
                events.ArchiveEvent(scene=self, event_type="archive_add", text=ah["text"], ts=ts)
            )
            await asyncio.sleep(0)

        for character_name, cs in self.character_states.items():
            self.set_character_state(character_name, cs)

        for character in self.characters:
            await character.commit_to_memory(memory)

    def reset(self):
        self.history = []

@@ -6,10 +6,11 @@ import textwrap
import structlog
import isodate
import datetime
from typing import List
from typing import List, Union
from thefuzz import fuzz
from colorama import Back, Fore, Style, init
from PIL import Image
from nltk.tokenize import sent_tokenize

from talemate.scene_message import SceneMessage
log = structlog.get_logger("talemate.util")

@@ -303,6 +304,9 @@ def strip_partial_sentences(text:str) -> str:
    # Sentence ending characters
    sentence_endings = ['.', '!', '?', '"', "*"]

    if not text:
        return text

    # Check if the last character is already a sentence ending
    if text[-1] in sentence_endings:
        return text

@@ -487,30 +491,39 @@ def clean_attribute(attribute: str) -> str:

def duration_to_timedelta(duration):
    """Convert an isodate.Duration object to a datetime.timedelta object."""
    """Convert an isodate.Duration object or a datetime.timedelta object to a datetime.timedelta object."""
    # Check if the duration is already a timedelta object
    if isinstance(duration, datetime.timedelta):
        return duration

    # If it's an isodate.Duration object with separate year, month, day, hour, minute, second attributes
    days = int(duration.years) * 365 + int(duration.months) * 30 + int(duration.days)
    return datetime.timedelta(days=days)
    seconds = duration.tdelta.seconds
    return datetime.timedelta(days=days, seconds=seconds)

def timedelta_to_duration(delta):
    """Convert a datetime.timedelta object to an isodate.Duration object."""
    # Extract days and convert to years, months, and days
    days = delta.days
    years = days // 365
    days %= 365
    months = days // 30
    days %= 30
    return isodate.duration.Duration(years=years, months=months, days=days)
    # Convert remaining seconds to hours, minutes, and seconds
    seconds = delta.seconds
    hours = seconds // 3600
    seconds %= 3600
    minutes = seconds // 60
    seconds %= 60
    return isodate.Duration(years=years, months=months, days=days, hours=hours, minutes=minutes, seconds=seconds)

def parse_duration_to_isodate_duration(duration_str):
    """Parse ISO 8601 duration string and ensure the result is an isodate.Duration."""
    parsed_duration = isodate.parse_duration(duration_str)
    if isinstance(parsed_duration, datetime.timedelta):
        days = parsed_duration.days
        years = days // 365
        days %= 365
        months = days // 30
        days %= 30
        return isodate.duration.Duration(years=years, months=months, days=days)
        return timedelta_to_duration(parsed_duration)
    return parsed_duration
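
A quick sketch of how these helpers behave, noting the deliberate 365-day-year / 30-day-month approximation in both directions (assumes they are importable, e.g. `from talemate.util import duration_to_timedelta, timedelta_to_duration, parse_duration_to_isodate_duration`):

```python
import datetime
import isodate

# isodate.parse_duration returns a timedelta for day/time-only durations
# and an isodate.Duration when years or months are involved.
d = isodate.parse_duration("P1Y2M3DT4H5M")   # isodate.Duration
td = duration_to_timedelta(d)
print(td)  # 428 days, 4:05:00 -- years/months approximated as 365/30 days

# And back again, using the same approximation:
back = timedelta_to_duration(datetime.timedelta(days=428, hours=4, minutes=5))
print(isodate.duration_isoformat(back))  # roughly P1Y2M3DT4H5M

# Day/time-only strings parse to timedelta; this coerces them to Duration:
coerced = parse_duration_to_isodate_duration("P3DT12H")
print(isodate.duration_isoformat(coerced))  # P3DT12H
```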

def iso8601_diff(duration_str1, duration_str2):

@@ -530,40 +543,50 @@ def iso8601_diff(duration_str1, duration_str2):

    return difference

def iso8601_duration_to_human(iso_duration, suffix:str=" ago"):
    # Parse the ISO8601 duration string into an isodate duration object
def iso8601_duration_to_human(iso_duration, suffix: str = " ago"):

    if isinstance(iso_duration, isodate.Duration):
        duration = iso_duration
    else:
    # Parse the ISO8601 duration string into an isodate duration object
    if not isinstance(iso_duration, isodate.Duration):
        duration = isodate.parse_duration(iso_duration)
    else:
        duration = iso_duration

    # Extract years, months, days, and the time part as seconds
    years, months, days, hours, minutes, seconds = 0, 0, 0, 0, 0, 0

    if isinstance(duration, isodate.Duration):
        years = duration.years
        months = duration.months
        days = duration.days
        seconds = duration.tdelta.total_seconds()
    else:
        years, months = 0, 0
        hours = duration.tdelta.seconds // 3600
        minutes = (duration.tdelta.seconds % 3600) // 60
        seconds = duration.tdelta.seconds % 60
    elif isinstance(duration, datetime.timedelta):
        days = duration.days
        seconds = duration.total_seconds() - days * 86400  # Extract time-only part
        hours = duration.seconds // 3600
        minutes = (duration.seconds % 3600) // 60
        seconds = duration.seconds % 60

    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)

    # Adjust for cases where duration is a timedelta object
    # Convert days to weeks and days if applicable
    weeks, days = divmod(days, 7)

    # Build the human-readable components
    components = []
    if years:
        components.append(f"{years} Year{'s' if years > 1 else ''}")
    if months:
        components.append(f"{months} Month{'s' if months > 1 else ''}")
    if weeks:
        components.append(f"{weeks} Week{'s' if weeks > 1 else ''}")
    if days:
        components.append(f"{days} Day{'s' if days > 1 else ''}")
    if hours:
        components.append(f"{int(hours)} Hour{'s' if hours > 1 else ''}")
        components.append(f"{hours} Hour{'s' if hours > 1 else ''}")
    if minutes:
        components.append(f"{int(minutes)} Minute{'s' if minutes > 1 else ''}")
        components.append(f"{minutes} Minute{'s' if minutes > 1 else ''}")
    if seconds:
        components.append(f"{int(seconds)} Second{'s' if seconds > 1 else ''}")
        components.append(f"{seconds} Second{'s' if seconds > 1 else ''}")

    # Construct the human-readable string
    if len(components) > 1:

@@ -572,8 +595,8 @@ def iso8601_duration_to_human(iso_duration, suffix:str=" ago"):
    elif components:
        human_str = components[0]
    else:
        human_str = "0 Seconds"

        human_str = "Moments"

    return f"{human_str}{suffix}"

def iso8601_diff_to_human(start, end):

@@ -581,6 +604,7 @@ def iso8601_diff_to_human(start, end):
        return ""

    diff = iso8601_diff(start, end)

    return iso8601_duration_to_human(diff)

@@ -710,12 +734,91 @@ def extract_json(s):
    json_object = json.loads(json_string)
    return json_string, json_object

def similarity_score(line: str, lines: list[str], similarity_threshold: int = 95) -> tuple[bool, int, str]:
    """
    Checks if a line is similar to any of the lines in the list of lines.

    Arguments:
        line (str): The line to check.
        lines (list): The list of lines to check against.
        similarity_threshold (int): The similarity threshold to use when comparing lines.

    Returns:
        bool: Whether a similar line was found.
        int: The similarity score of the line. If no similar line was found, the highest similarity score is returned.
        str: The similar line that was found. If no similar line was found, None is returned.
    """

    highest_similarity = 0

    for existing_line in lines:
        similarity = fuzz.ratio(line, existing_line)
        highest_similarity = max(highest_similarity, similarity)
        #print("SIMILARITY", similarity, existing_line[:32]+"...")
        if similarity >= similarity_threshold:
            return True, similarity, existing_line

    return False, highest_similarity, None
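
A usage sketch (assuming `from talemate.util import similarity_score`); `fuzz.ratio` from `thefuzz` scores string similarity from 0 to 100:

```python
lines = ["She walks to the window.", "He pours a glass of wine."]

# Near-duplicate: scores above the default threshold of 95.
print(similarity_score("She walks to the window", lines))
# (True, 98, 'She walks to the window.')  -- score is indicative

# No match: the highest score seen is returned instead.
found, score, match = similarity_score("A dragon lands outside.", lines)
print(found, match)  # False None
```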

def dedupe_sentences(line_a:str, line_b:str, similarity_threshold:int=95, debug:bool=False, split_on_comma:bool=True) -> str:
    """
    Will split both lines into sentences and then compare each sentence in line_a
    against similar sentences in line_b. If a similar sentence is found, it will be
    removed from line_a.

    The similarity threshold is used to determine if two sentences are similar.

    Arguments:
        line_a (str): The first line.
        line_b (str): The second line.
        similarity_threshold (int): The similarity threshold to use when comparing sentences.
        debug (bool): Whether to log debug messages.
        split_on_comma (bool): Whether to split line_b sentences on commas as well.

    Returns:
        str: the cleaned line_a.
    """

    line_a_sentences = sent_tokenize(line_a)
    line_b_sentences = sent_tokenize(line_b)

    cleaned_line_a_sentences = []

    if split_on_comma:
        # collect all sentences from line_b that contain a comma
        line_b_sentences_with_comma = []
        for line_b_sentence in line_b_sentences:
            if "," in line_b_sentence:
                line_b_sentences_with_comma.append(line_b_sentence)

        # then split all sentences in line_b_sentences_with_comma on the comma
        # and extend line_b_sentences with the split sentences, making sure
        # to strip whitespace from the beginning and end of each sentence

        for line_b_sentence in line_b_sentences_with_comma:
            line_b_sentences.extend([s.strip() for s in line_b_sentence.split(",")])

    for line_a_sentence in line_a_sentences:
        similar_found = False
        for line_b_sentence in line_b_sentences:
            similarity = fuzz.ratio(line_a_sentence, line_b_sentence)
            if similarity >= similarity_threshold:
                if debug:
                    log.debug("DEDUPE SENTENCE", similarity=similarity, line_a_sentence=line_a_sentence, line_b_sentence=line_b_sentence)
                similar_found = True
                break
        if not similar_found:
            cleaned_line_a_sentences.append(line_a_sentence)

    return " ".join(cleaned_line_a_sentences)

def dedupe_string(s: str, min_length: int = 32, similarity_threshold: int = 95, debug: bool = False) -> str:

    """
    Removes duplicate lines from a string.

    Parameters:
    Arguments:
        s (str): The input string.
        min_length (int): The minimum length of a line to be checked for duplicates.
        similarity_threshold (int): The similarity threshold to use when comparing lines.

@@ -766,99 +869,168 @@ def replace_exposition_markers(s:str) -> str:

def ensure_dialog_format(line:str, talking_character:str=None) -> str:

    line = mark_exposition(line, talking_character)
    line = mark_spoken_words(line, talking_character)
    #if "*" not in line and '"' not in line:
    #    if talking_character:
    #        line = line[len(talking_character)+1:].lstrip()
    #        return f"{talking_character}: \"{line}\""
    #    return f"\"{line}\""
    #

    if talking_character:
        line = line[len(talking_character)+1:].lstrip()

    lines = []

    for _line in line.split("\n"):
        try:
            _line = ensure_dialog_line_format(_line)
        except Exception as exc:
            log.error("ensure_dialog_format", msg="Error ensuring dialog line format", line=_line, exc_info=exc)
            pass

        lines.append(_line)

    if len(lines) > 1:
        line = "\n".join(lines)
    else:
        line = lines[0]

    if talking_character:
        line = f"{talking_character}: {line}"

    return line

def mark_spoken_words(line:str, talking_character:str=None) -> str:
    # if there are no asterisks in the line, it means it's impossible to tell
    # dialogue apart from exposition
    if "*" not in line:
        return line

    if talking_character and line.startswith(f"{talking_character}:"):
        line = line[len(talking_character)+1:].lstrip()
def ensure_dialog_line_format(line:str):

    # Splitting the text into segments based on asterisks
    segments = re.split('(\*[^*]*\*)', line)
    formatted_line = ""

    for i, segment in enumerate(segments):
        if segment.startswith("*") and segment.endswith("*"):
            # If the segment is an action or thought, add it as is
            formatted_line += segment
        else:
            # For non-action/thought parts, trim and add quotes only if not empty and not already quoted
            trimmed_segment = segment.strip()
            if trimmed_segment:
                if not (trimmed_segment.startswith('"') and trimmed_segment.endswith('"')):
                    formatted_line += f' "{trimmed_segment}"'
                else:
                    formatted_line += f' {trimmed_segment}'

    # adds spaces between *" and "* to make it easier to read
    formatted_line = formatted_line.replace('*"', '* "')
    formatted_line = formatted_line.replace('"*', '" *')

    if talking_character:
        formatted_line = f"{talking_character}: {formatted_line}"
    """
    A Python function that standardizes the formatting of dialogue and action/thought
    descriptions in text strings. This function is intended for use in a text-based
    game where spoken dialogue is encased in double quotes (" ") and actions/thoughts are
    encased in asterisks (* *). The function must correctly format strings, ensuring that
    each spoken sentence and action/thought is properly encased
    """

    log.debug("mark_spoken_words", line=line, formatted_line=formatted_line)

    return formatted_line.strip() # Trim any leading/trailing whitespace

def mark_exposition(line:str, talking_character:str=None) -> str:
    """
    Will loop through the string and make sure chunks outside of "" are marked with *.

    For example:
    i = 0

    "No, you're not wrong" sips his wine "This tastes gross." coughs "acquired taste i guess?"
    segments = []
    segment = None
    segment_open = None

    becomes

    "No, you're not wrong" *sips his wine* "This tastes gross." *coughs* "acquired taste i guess?"
    """

    # no quotes in string, means it's impossible to tell dialogue apart from exposition
    if '"' not in line:
        return line

    if talking_character and line.startswith(f"{talking_character}:"):
        line = line[len(talking_character)+1:].lstrip()

    # Splitting the text into segments based on quotes
    segments = re.split('("[^"]*")', line)
    formatted_line = ""

    for i, segment in enumerate(segments):
        # If the segment is a spoken part (inside quotes), add it as is
        if segment.startswith('"') and segment.endswith('"'):
            formatted_line += segment
    for i in range(len(line)):

        c = line[i]

        #print("segment_open", segment_open)
        #print("segment", segment)

        if c in ['"', '*']:
            if segment_open == c:
                # open segment is the same as the current character
                # closing
                segment_open = None
                segment += c
                segments += [segment.strip()]
                segment = None
            elif segment_open is not None and segment_open != c:
                # open segment is not the same as the current character
                # opening - close the current segment and open a new one

                # if we are at the last character we append the segment
                if i == len(line)-1 and segment.strip():
                    segment += c
                    segments += [segment.strip()]
                    segment_open = None
                    segment = None
                    continue

                segments += [segment.strip()]
                segment_open = c
                segment = c
            elif segment_open is None:
                # we're opening a segment
                segment_open = c
                segment = c
        else:
            # Split the non-spoken segment into sub-segments based on existing asterisks
            sub_segments = re.split('(\*[^*]*\*)', segment)
            for sub_segment in sub_segments:
                if sub_segment.startswith("*") and sub_segment.endswith("*"):
                    # If the sub-segment is already formatted, add it as is
                    formatted_line += sub_segment
                else:
                    # Trim and add asterisks only to non-empty sub-segments
                    trimmed_sub_segment = sub_segment.strip()
                    if trimmed_sub_segment:
                        formatted_line += f" *{trimmed_sub_segment}*"

    # adds spaces between *" and "* to make it easier to read
    formatted_line = formatted_line.replace('*"', '* "')
    formatted_line = formatted_line.replace('"*', '" *')

    if talking_character:
        formatted_line = f"{talking_character}: {formatted_line}"
    log.debug("mark_exposition", line=line, formatted_line=formatted_line)
            if segment_open is None:
                segment_open = "unclassified"
                segment = c
            else:
                segment += c

    if segment is not None:
        if segment.strip().strip("*").strip('"'):
            segments += [segment.strip()]

    for i in range(len(segments)):
        segment = segments[i]
        if segment in ['"', '*']:
            if i > 0:
                prev_segment = segments[i-1]
                if prev_segment and prev_segment[-1] not in ['"', '*']:
                    segments[i-1] = f"{prev_segment}{segment}"
                    segments[i] = ""
                    continue

    for i in range(len(segments)):
        segment = segments[i]

        if not segment:
            continue

        if segment[0] == "*" and segment[-1] != "*":
            segment += "*"
        elif segment[-1] == "*" and segment[0] != "*":
            segment = "*" + segment
        elif segment[0] == '"' and segment[-1] != '"':
            segment += '"'
        elif segment[-1] == '"' and segment[0] != '"':
            segment = '"' + segment
        elif segment[0] in ['"', '*'] and segment[-1] == segment[0]:
            continue

        segments[i] = segment

    for i in range(len(segments)):
        segment = segments[i]
        if not segment or segment[0] in ['"', '*']:
            continue

        prev_segment = segments[i-1] if i > 0 else None
        next_segment = segments[i+1] if i < len(segments)-1 else None

        if prev_segment and prev_segment[-1] == '"':
            segments[i] = f"*{segment}*"
        elif prev_segment and prev_segment[-1] == '*':
            segments[i] = f"\"{segment}\""
        elif next_segment and next_segment[0] == '"':
            segments[i] = f"*{segment}*"
        elif next_segment and next_segment[0] == '*':
            segments[i] = f"\"{segment}\""

    for i in range(len(segments)):
        segments[i] = clean_uneven_markers(segments[i], '"')
        segments[i] = clean_uneven_markers(segments[i], '*')

    return " ".join(segment for segment in segments if segment).strip()

def clean_uneven_markers(chunk:str, marker:str):

    return formatted_line.strip() # Trim any leading/trailing whitespace
    # if there is an uneven number of quotes, remove the last one if it's
    # at the end of the chunk. If it's in the middle, add a quote to the end
    count = chunk.count(marker)

    if count % 2 == 1:
        if chunk.endswith(marker):
            chunk = chunk[:-1]
        elif chunk.startswith(marker):
            chunk = chunk[1:]
        elif count == 1:
            chunk = chunk.replace(marker, "")
        else:
            chunk += marker

    return chunk
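
A usage sketch of the new formatter, based on the example from the docstring above (assumes `from talemate.util import ensure_dialog_format`; exact spacing may differ):

```python
# Unmarked exposition between quoted dialogue gets wrapped in asterisks,
# and stray markers are balanced afterwards by clean_uneven_markers.
line = 'Barbara: "No, you\'re not wrong" sips his wine "This tastes gross."'
print(ensure_dialog_format(line, talking_character="Barbara"))
# indicative: Barbara: "No, you're not wrong" *sips his wine* "This tastes gross."
```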
|
||||
@@ -1,6 +1,7 @@
|
||||
from pydantic import BaseModel
|
||||
from talemate.emit import emit
|
||||
import structlog
|
||||
import traceback
|
||||
from typing import Union
|
||||
|
||||
import talemate.instance as instance
|
||||
@@ -38,14 +39,17 @@ class WorldState(BaseModel):
|
||||
@property
|
||||
def as_list(self):
|
||||
return self.render().as_list
|
||||
|
||||
|
||||
def reset(self):
|
||||
self.characters = {}
|
||||
self.items = {}
|
||||
self.location = None
|
||||
|
||||
def emit(self, status="update"):
|
||||
emit("world_state", status=status, data=self.dict())
|
||||
|
||||
async def request_update(self, initial_only:bool=False):
|
||||
|
||||
|
||||
|
||||
if initial_only and self.characters:
|
||||
self.emit()
|
||||
return
|
||||
@@ -56,21 +60,97 @@ class WorldState(BaseModel):
|
||||
world_state = await self.agent.request_world_state()
|
||||
except Exception as e:
|
||||
self.emit()
|
||||
raise e
|
||||
log.error("world_state.request_update", error=e, traceback=traceback.format_exc())
|
||||
return
|
||||
|
||||
previous_characters = self.characters
|
||||
previous_items = self.items
|
||||
scene = self.agent.scene
|
||||
character_names = scene.character_names
|
||||
self.characters = {}
|
||||
self.items = {}
|
||||
|
||||
for character in world_state.get("characters", []):
|
||||
self.characters[character["name"]] = CharacterState(**character)
|
||||
for character_name, character in world_state.get("characters", {}).items():
|
||||
|
||||
# character name may not always come back exactly as we have
|
||||
# it defined in the scene. We assign the correct name by checking occurences
|
||||
# of both names in each other.
|
||||
|
||||
if character_name not in character_names:
|
||||
for _character_name in character_names:
|
||||
if _character_name.lower() in character_name.lower() or character_name.lower() in _character_name.lower():
|
||||
log.debug("world_state adjusting character name", from_name=character_name, to_name=_character_name)
|
||||
character_name = _character_name
|
||||
break
|
||||
|
||||
if not character:
|
||||
continue
|
||||
|
||||
# if emotion is not set, see if a previous state exists
|
||||
# and use that emotion
|
||||
|
||||
if "emotion" not in character:
|
||||
log.debug("emotion not set", character_name=character_name, character=character, characters=previous_characters)
|
||||
if character_name in previous_characters:
|
||||
character["emotion"] = previous_characters[character_name].emotion
|
||||
|
||||
self.characters[character_name] = CharacterState(**character)
|
||||
log.debug("world_state", character=character)
|
||||
|
||||
for item in world_state.get("items", []):
|
||||
self.items[item["name"]] = ObjectState(**item)
|
||||
for item_name, item in world_state.get("items", {}).items():
|
||||
if not item:
|
||||
continue
|
||||
self.items[item_name] = ObjectState(**item)
|
||||
log.debug("world_state", item=item)
|
||||
|
||||
self.emit()
|
||||
|
||||
await self.persist()
|
||||
self.emit()
|
||||
|
||||
async def persist(self):
|
||||
|
||||
memory = instance.get_agent("memory")
|
||||
world_state = instance.get_agent("world_state")
|
||||
|
||||
# first we check if any of the characters were refered
|
||||
# to with an alias
|
||||
|
||||
states = []
|
||||
scene = self.agent.scene
|
||||
|
||||
for character_name in self.characters.keys():
|
||||
states.append(
|
||||
{
|
||||
"text": f"{character_name}: {self.characters[character_name].snapshot}",
|
||||
"id": f"{character_name}.world_state.snapshot",
|
||||
"meta": {
|
||||
"typ": "world_state",
|
||||
"character": character_name,
|
||||
"ts": scene.ts,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
for item_name in self.items.keys():
|
||||
states.append(
|
||||
{
|
||||
"text": f"{item_name}: {self.items[item_name].snapshot}",
|
||||
"id": f"{item_name}.world_state.snapshot",
|
||||
"meta": {
|
||||
"typ": "world_state",
|
||||
"item": item_name,
|
||||
"ts": scene.ts,
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
log.debug("world_state.persist", states=states)
|
||||
|
||||
if not states:
|
||||
return
|
||||
|
||||
await memory.add_many(states)
|
||||
|
||||
|
||||
async def request_update_inline(self):
|
||||
|
||||
|
||||
243 talemate_frontend/package-lock.json (generated)

The lockfile hunks are regeneration churn: every "resolved" URL moves from registry.npmmirror.com to registry.npmjs.org, integrity hashes are refreshed accordingly, and the following packages are bumped in both the "packages" and the legacy "dependencies" sections (their internal dependency version ranges are updated to match):

| Package | Old | New |
|---|---|---|
| @babel/code-frame | 7.22.5 | 7.23.5 |
| @babel/generator | 7.22.7 | 7.23.5 |
| @babel/helper-environment-visitor | 7.22.5 | 7.22.20 |
| @babel/helper-function-name | 7.22.5 | 7.23.0 |
| @babel/helper-string-parser | 7.22.5 | 7.23.4 |
| @babel/helper-validator-identifier | 7.22.5 | 7.22.20 |
| @babel/highlight | 7.22.5 | 7.23.4 |
| @babel/parser | 7.22.7 | 7.23.5 |
| @babel/template | 7.22.5 | 7.22.15 |
| @babel/traverse | 7.22.8 | 7.23.5 |
| @babel/types | 7.22.5 | 7.23.5 |
| @vue/vue-loader-v15 (vue-loader) | 15.10.1 | 15.11.1 |
| postcss | 8.4.25 | 8.4.31 |

Beyond the version bumps: @babel/code-frame gains an explicit "chalk": "^2.4.2" dependency, @babel/highlight tightens its chalk constraint from ^2.0.0 to ^2.4.2, @vue/vue-loader-v15 adds an optional "prettier" peer dependency, and postcss gains its funding metadata (opencollective, tidelift, github).

The remaining hunks below come from the frontend's Vue components: the AI agents list and the LLM clients list.
@@ -7,18 +7,19 @@
                size="14"></v-progress-circular>
            <v-icon v-else-if="agent.status === 'uninitialized'" color="orange" size="14">mdi-checkbox-blank-circle</v-icon>
            <v-icon v-else-if="agent.status === 'disabled'" color="grey-darken-2" size="14">mdi-checkbox-blank-circle</v-icon>
            <v-icon v-else-if="agent.status === 'error'" color="red" size="14">mdi-checkbox-blank-circle</v-icon>
            <v-icon v-else color="green" size="14">mdi-checkbox-blank-circle</v-icon>
            <span class="ml-1" v-if="agent.label"> {{ agent.label }}</span>
            <span class="ml-1" v-else> {{ agent.name }}</span>
        </v-list-item-title>
-       <v-list-item-subtitle>
+       <v-list-item-subtitle class="text-caption">
            {{ agent.client }}
        </v-list-item-subtitle>
        <v-chip class="mr-1" v-if="agent.status === 'disabled'" size="x-small">Disabled</v-chip>
        <v-chip v-if="agent.data.experimental" color="warning" size="x-small">experimental</v-chip>
    </v-list-item>
    </v-list>
-   <AgentModal :dialog="dialog" :formTitle="formTitle" @save="saveAgent" @update:dialog="updateDialog"></AgentModal>
+   <AgentModal :dialog="state.dialog" :formTitle="state.formTitle" @save="saveAgent" @update:dialog="updateDialog"></AgentModal>
    </div>
</template>

@@ -65,7 +66,10 @@ export default {
        for(let i = 0; i < this.state.agents.length; i++) {
            let agent = this.state.agents[i];

-           if(agent.status === 'warning' || agent.status === 'error') {
+           if(!agent.data.requires_llm_client)
+               continue
+
+           if(agent.status === 'warning' || agent.status === 'error' || agent.status === 'uninitialized') {
                console.log("agents: configuration required (1)", agent.status)
                return true;
            }

@@ -91,7 +95,6 @@
        openModal() {
            this.state.formTitle = 'Add AI Agent';
            this.state.dialog = true;
-           console.log("got here")
        },
        saveAgent(agent) {
            const index = this.state.agents.findIndex(c => c.name === agent.name);

@@ -100,7 +103,6 @@
            } else {
                this.state.agents[index] = agent;
            }
            this.state.dialog = false;
            this.$emit('agents-updated', this.state.agents);
        },
        editAgent(index) {

@@ -120,7 +122,6 @@
        handleMessage(data) {
            // Handle agent_status message type
            if (data.type === 'agent_status') {
-               console.log("agents: got agent_status message", data)
                // Find the client with the given name
                const agent = this.state.agents.find(agent => agent.name === data.name);
                if (agent) {
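Both list components register a websocket message handler and branch on the payload's "type" field (agent_status above; the client counterpart appears further below). A compact Python sketch of that dispatch pattern follows — the payload fields are mirrored from these handlers, but the registry itself and the "client_status" type name are illustrative assumptions:

from typing import Callable, Dict

# type -> handler registry; mirrors how each component's handleMessage
# ignores every message type except the one it cares about.
handlers: Dict[str, Callable[[dict], None]] = {}


def on(message_type: str):
    def register(fn):
        handlers[message_type] = fn
        return fn
    return register


@on("agent_status")
def handle_agent_status(data: dict):
    # Fields mirrored from the agent handler: name, status.
    print(f"agent {data['name']} -> {data['status']}")


@on("client_status")  # assumed name for the client-side counterpart
def handle_client_status(data: dict):
    # Fields mirrored from the client handler: name, apiUrl, max_token_length.
    print(f"client {data['name']} ({data['apiUrl']}) ctx={data['max_token_length']}")


def dispatch(message: dict):
    handler = handlers.get(message.get("type"))
    if handler:
        handler(message)


dispatch({"type": "agent_status", "name": "conversation", "status": "idle"})
dispatch({
    "type": "client_status",
    "name": "TextGenWebUI",
    "apiUrl": "http://localhost:5000",
    "max_token_length": 4096,
})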
@@ -21,20 +21,26 @@
            {{ client.type }}
            <v-chip label size="x-small" variant="outlined" class="ml-1">ctx {{ client.max_token_length }}</v-chip>
        </v-list-item-subtitle>
-       <v-list-item-content density="compact">
+       <div density="compact">
            <v-slider
                hide-details
                v-model="client.max_token_length"
                :min="1024"
-               :max="16384"
+               :max="128000"
                :step="512"
                @update:modelValue="saveClient(client)"
                @click.stop
                density="compact"
            ></v-slider>
-       </v-list-item-content>
+       </div>
        <v-list-item-subtitle class="text-center">
+
+           <v-tooltip text="No LLM prompt template for this model. Using default. Templates can be added in ./templates/llm-prompt" v-if="client.status === 'idle' && client.data && !client.data.has_prompt_template" max-width="200">
+               <template v-slot:activator="{ props }">
+                   <v-icon x-size="14" class="mr-1" v-bind="props" color="orange">mdi-alert</v-icon>
+               </template>
+           </v-tooltip>

            <v-tooltip text="Edit client">
                <template v-slot:activator="{ props }">
                    <v-btn size="x-small" class="mr-1" v-bind="props" variant="tonal" density="comfortable" rounded="sm" @click.stop="editClient(index)" icon="mdi-cogs"></v-btn>

@@ -56,7 +62,7 @@
        </v-list-item-subtitle>
    </v-list-item>
    </v-list>
-   <ClientModal :dialog="dialog" :formTitle="formTitle" @save="saveClient" @update:dialog="updateDialog"></ClientModal>
+   <ClientModal :dialog="state.dialog" :formTitle="state.formTitle" @save="saveClient" @error="propagateError" @update:dialog="updateDialog"></ClientModal>
    <v-alert type="warning" variant="tonal" v-if="state.clients.length === 0">You have no LLM clients configured. Add one.</v-alert>
    <v-btn @click="openModal" prepend-icon="mdi-plus-box">Add client</v-btn>
    </div>

@@ -81,6 +87,9 @@ export default {
                apiUrl: '',
                model_name: '',
                max_token_length: 2048,
+               data: {
+                   has_prompt_template: false,
+               }
            }, // Add a new field to store the model name
            formTitle: ''
        }

@@ -90,7 +99,6 @@ export default {
        'getWebsocket',
        'registerMessageHandler',
        'isConnected',
-       'chekcingStatus',
        'getAgents',
    ],
    provide() {

@@ -120,13 +128,19 @@
            this.state.currentClient = {
                name: 'TextGenWebUI',
                type: 'textgenwebui',
-               apiUrl: 'http://localhost:5000/api',
+               apiUrl: 'http://localhost:5000',
                model_name: '',
                max_token_length: 4096,
+               data: {
+                   has_prompt_template: false,
+               }
            };
            this.state.formTitle = 'Add Client';
            this.state.dialog = true;
        },
+       propagateError(error) {
+           this.$emit('error', error);
+       },
        saveClient(client) {
            const index = this.state.clients.findIndex(c => c.name === client.name);
            if (index === -1) {
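For reference, the per-client record the frontend manages now looks like this, using the defaults visible in the hunk above (openModal's new-client template). A sketch only — field names mirror the Vue component's state object, but the dataclass itself is illustrative, not part of talemate:

from dataclasses import dataclass, field


@dataclass
class ClientState:
    name: str = "TextGenWebUI"
    type: str = "textgenwebui"
    apiUrl: str = "http://localhost:5000"  # trailing /api dropped in this change
    model_name: str = ""
    max_token_length: int = 4096  # the list view's slider now allows up to 128000
    data: dict = field(default_factory=lambda: {"has_prompt_template": False})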
@@ -153,10 +167,13 @@
            let agents = this.getAgents();
            let client = this.state.clients[index];

+           this.saveClient(client);
+
            for (let i = 0; i < agents.length; i++) {
                agents[i].client = client.name;
-               this.$emit('client-assigned', agents);
                console.log("Assigning client", client.name, "to agent", agents[i].name);
            }
+           this.$emit('client-assigned', agents);
        },
        updateDialog(newVal) {
            this.state.dialog = newVal;

@@ -175,6 +192,7 @@
                client.status = data.status;
                client.max_token_length = data.max_token_length;
                client.apiUrl = data.apiUrl;
+               client.data = data.data;
            } else {
                console.log("Adding new client", data);
                this.state.clients.push({

@@ -184,6 +202,7 @@
                    status: data.status,
                    max_token_length: data.max_token_length,
                    apiUrl: data.apiUrl,
+                   data: data.data,
                });
                // sort the clients by name
                this.state.clients.sort((a, b) => (a.name > b.name) ? 1 : -1);
Some files were not shown because too many files have changed in this diff.