Mirror of https://github.com/vegu-ai/talemate.git (synced 2025-12-16 19:57:47 +01:00)
* no " or *: just treat as spoken words
* chromadb: persist to db
* collection name should contain the embedding, so switching between chromadb configurations doesn't brick your scenes (see the sketch after this list)
* fix save-as long term memory transfer
* add chroma
* director agent refactor
* tweak director command, prompt reset, ux display
* tweak director message ux
* allow clearing of prompt log
* remove auto-adding of quotes if neither quotes nor * are present
* command to reset long term memory for the scene
* improve summarization template, as it would cause some llms to add extra details
* rebuilding history will now also rebuild long term memory
* direct scene template
* fix scene time reset
* dialogue template tweaks
* better dialogue format fixing
* some dialogue template adjustments
* adjust default values of director agent
* keep track of scene saved/unsaved status and confirm loading a different scene if the current scene is unsaved
* prompt fixes
* remove the collection on recommitting the scene to memory, as the embeddings may have changed
* change to the official python api for the openai client and make it async
* prompt tweaks
* world state prompt parsing fixes
* improve handling of json responses
* "0 seconds ago" changed to "moments ago"
* move memory context closer to scene
* token counts for openai client
* narrator agent option: narrate passage of time
* gitignore
* remove memory id
* refactor world state with persistence to chromadb (wip)
* remove world state update instructions
* don't display blank emotion in world state
* openai gpt-4 turbo support
* conversation agent extra instructions
* track prompt response times
* Yi and UtopiaXL
* long term memory retrieval improvements during conversations
* narrate scene tweaks
* conversation ltm augment tweaks
* hide subconfig if parent config isn't enabled
* ai-assisted memory recall during conversation defaults to off
* openai json_object coercion only on models that support it
* openai client: emit prompt processing time
* 0.12.0
* remove prompt number from prompt debug list
* add prompt number back in, but shift it to the upper row
* narrate time passage: hard content limit restriction for now, as gpt-4 would just write a whole chapter
* relock
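The collection-naming item above is the subtle one. A minimal sketch of the idea, assuming chromadb's PersistentClient; the scene_collection_name helper and the naming scheme are hypothetical illustrations, not talemate's actual code:

```python
import chromadb

def scene_collection_name(scene_id: str, embedding_model: str) -> str:
    """Hypothetical helper: bake the embedding model into the collection
    name so a scene opened under a different embedding configuration gets
    a fresh collection instead of querying incompatible vectors."""
    safe = embedding_model.replace("/", "-")
    return f"scene-{scene_id}-{safe}"

client = chromadb.PersistentClient(path="./chromadb")  # persisted to disk

# Same scene, different embeddings -> two distinct collections, so neither
# configuration ever reads vectors of the wrong model or dimensionality.
col_a = client.get_or_create_collection(scene_collection_name("abc123", "all-MiniLM-L6-v2"))
col_b = client.get_or_create_collection(scene_collection_name("abc123", "text-embedding-ada-002"))
```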
96 lines · 3.0 KiB · Vue
<template>
    <v-list-subheader class="text-uppercase">
        <v-icon>mdi-post-outline</v-icon> Prompts
        <v-chip size="x-small" color="primary">{{ max_prompts }}</v-chip>
        <v-icon color="primary" class="ml-2" @click="clearPrompts">mdi-close</v-icon>
    </v-list-subheader>

    <v-list-item density="compact">
        <v-slider density="compact" v-model="max_prompts" min="1" max="250" step="1" hide-details color="primary"></v-slider>
    </v-list-item>

    <v-list-item v-for="(prompt, index) in prompts" :key="index" @click="openPromptView(prompt)">
        <v-list-item-title class="text-caption">
            <v-row>
                <v-col cols="2" class="text-info">#{{ prompt.num }}</v-col>
                <v-col cols="10" class="text-right">{{ prompt.kind }}</v-col>
            </v-row>
        </v-list-item-title>
        <v-list-item-subtitle>
            <v-chip size="x-small" class="mr-1" color="primary">{{ prompt.prompt_tokens }}<v-icon size="14" class="ml-1">mdi-arrow-down-bold</v-icon></v-chip>
            <v-chip size="x-small" class="mr-1" color="secondary">{{ prompt.response_tokens }}<v-icon size="14" class="ml-1">mdi-arrow-up-bold</v-icon></v-chip>
            <v-chip size="x-small">{{ prompt.time }}s<v-icon size="14" class="ml-1">mdi-clock</v-icon></v-chip>
        </v-list-item-subtitle>
        <v-divider class="mt-1"></v-divider>
    </v-list-item>

    <DebugToolPromptView ref="promptView" />
</template>

<script>
import DebugToolPromptView from './DebugToolPromptView.vue';

export default {
    name: 'DebugToolPromptLog',
    data() {
        return {
            prompts: [],
            total: 1,
            max_prompts: 50,
        }
    },
    components: {
        DebugToolPromptView,
    },
    inject: [
        'getWebsocket',
        'registerMessageHandler',
        'setWaitingForInput',
    ],
    methods: {
        clearPrompts() {
            this.prompts = [];
            this.total = 1; // restart numbering at #1, matching the initial state
        },
        handleMessage(data) {
            // a freshly loaded scene starts with an empty prompt log
            if (data.type === "system" && data.id === "scene.loaded") {
                this.prompts = [];
                this.total = 1;
                return;
            }
            if (data.type === "prompt_sent") {
                // prepend the newest prompt, then truncate to the configured max_prompts
                this.prompts.unshift({
                    prompt: data.data.prompt,
                    response: data.data.response,
                    kind: data.data.kind,
                    response_tokens: data.data.response_tokens,
                    prompt_tokens: data.data.prompt_tokens,
                    time: parseInt(data.data.time),
                    num: this.total++,
                });
                while (this.prompts.length > this.max_prompts) {
                    this.prompts.pop();
                }
            }
        },
        openPromptView(prompt) {
            this.$refs.promptView.open(prompt);
        },
    },
    created() {
        this.registerMessageHandler(this.handleMessage);
    },
}
</script>
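For orientation, handleMessage above pins down the shape of the prompt_sent payload the backend must emit. A hedged Python sketch of the producing side, where the emit_prompt_sent helper and the websocket_send callable are assumptions for illustration, not talemate's actual server code:

```python
import json

def emit_prompt_sent(websocket_send, *, prompt: str, response: str, kind: str,
                     prompt_tokens: int, response_tokens: int, seconds: float) -> None:
    """Hypothetical producer for the message DebugToolPromptLog.handleMessage
    consumes; field names mirror exactly what the component reads from data.data."""
    websocket_send(json.dumps({
        "type": "prompt_sent",
        "data": {
            "prompt": prompt,
            "response": response,
            "kind": kind,                       # right-aligned label in the list row
            "prompt_tokens": prompt_tokens,     # primary chip, down arrow
            "response_tokens": response_tokens, # secondary chip, up arrow
            "time": seconds,                    # the component floors this via parseInt
        },
    }))
```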