Mirror of https://github.com/open-webui/open-webui.git (synced 2025-12-16 11:57:51 +01:00)
perf: fix cache key generation for model list caching
- Replace Request object with user.id in the cache key for get_all_models
- Request objects are new instances per HTTP request, preventing cache hits
- Cache keys now use user.id, ensuring the cache actually functions
- Affects both the Ollama and OpenAI model list endpoints

Signed-off-by: Sihyeon Jang <sihyeon.jang@navercorp.com>
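Why the old key never hit: the web framework constructs a fresh Request instance for every HTTP call, so a cache keyed on that object mints a brand-new key each time. Below is a minimal, self-contained sketch of the failure mode and the fix; the toy cached decorator, User class, and calls counter are illustrative stand-ins, not the open-webui implementation:

    import asyncio
    import time
    from functools import wraps

    def cached(ttl, key):
        """Toy async TTL cache; `key` maps the call args to a cache key."""
        store = {}
        def decorator(fn):
            @wraps(fn)
            async def wrapper(*args):
                k = key(*args)
                hit = store.get(k)
                if hit is not None and time.monotonic() - hit[1] < ttl:
                    return hit[0]          # cache hit: skip the upstream fetch
                value = await fn(*args)
                store[k] = (value, time.monotonic())
                return value
            return wrapper
        return decorator

    class User:
        def __init__(self, id):
            self.id = id

    calls = 0

    # Stable per-user key, mirroring the new decorator in this commit.
    @cached(ttl=60, key=lambda _, user: f"openai_all_models_{user.id}" if user else "openai_all_models")
    async def get_all_models(request, user):
        global calls
        calls += 1
        return {"data": []}                # stand-in for the upstream fetch

    async def main():
        u = User("u1")
        await get_all_models(object(), u)  # a fresh Request-like object each call
        await get_all_models(object(), u)
        print(calls)                       # 1: the second call was a cache hit

    asyncio.run(main())

Keying on request instead (the old behavior) would use a distinct object, and therefore a distinct key, on every call, so calls would print 2 and the cache would never be consulted.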
@@ -401,7 +401,7 @@ async def get_filtered_models(models, user):
     return filtered_models
 
 
-@cached(ttl=MODELS_CACHE_TTL)
+@cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"openai_all_models_{user.id}" if user else "openai_all_models")
 async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
     log.info("get_all_models()")
 
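Note that the new key lambda deliberately ignores its first argument (the Request), so only the user identity shapes the key, and unauthenticated calls all share a single key. Only the OpenAI hunk is shown above; per the commit message, the Ollama model list endpoint receives the same treatment. A presumable analogue follows, where the ollama_all_models prefix and the surrounding signature are assumptions inferred from the commit message, not part of this diff:

    # Assumed Ollama analogue (hypothetical; the actual hunk is not shown here):
    @cached(ttl=MODELS_CACHE_TTL, key=lambda _, user: f"ollama_all_models_{user.id}" if user else "ollama_all_models")
    async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
        ...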