diff --git a/backend/services/aiConfigService.js b/backend/services/aiConfigService.js
index 4bb018a..b7c41f5 100644
--- a/backend/services/aiConfigService.js
+++ b/backend/services/aiConfigService.js
@@ -34,7 +34,7 @@ class AIConfigService {
     this.defaults = {
       ollama_base_url: process.env.OLLAMA_BASE_URL || 'http://ollama:11434',
       ollama_llm_model: process.env.OLLAMA_MODEL || 'qwen2.5:7b',
-      ollama_embedding_model: process.env.OLLAMA_EMBED_MODEL || 'mxbai-embed-large:latest',
+      ollama_embedding_model: process.env.OLLAMA_EMBED_MODEL || process.env.OLLAMA_EMBEDDINGS_MODEL || 'mxbai-embed-large:latest',
       vector_search_url: process.env.VECTOR_SEARCH_URL || 'http://vector-search:8001',
       embedding_parameters: {
         batch_size: 32,
diff --git a/backend/services/ollamaConfig.js b/backend/services/ollamaConfig.js
index 211a20e..c44df01 100644
--- a/backend/services/ollamaConfig.js
+++ b/backend/services/ollamaConfig.js
@@ -44,7 +44,7 @@ async function _updateSyncCache() {
     syncCache = {
       baseUrl: process.env.OLLAMA_BASE_URL || 'http://ollama:11434',
       defaultModel: process.env.OLLAMA_MODEL || 'qwen2.5:7b',
-      embeddingModel: process.env.OLLAMA_EMBED_MODEL || 'mxbai-embed-large:latest'
+      embeddingModel: process.env.OLLAMA_EMBED_MODEL || process.env.OLLAMA_EMBEDDINGS_MODEL || 'mxbai-embed-large:latest'
     };
   }
 }
@@ -69,7 +69,7 @@ function _getFromSyncCache(key) {
   const defaults = {
     baseUrl: process.env.OLLAMA_BASE_URL || 'http://ollama:11434',
     defaultModel: process.env.OLLAMA_MODEL || 'qwen2.5:7b',
-    embeddingModel: process.env.OLLAMA_EMBED_MODEL || 'mxbai-embed-large:latest'
+    embeddingModel: process.env.OLLAMA_EMBED_MODEL || process.env.OLLAMA_EMBEDDINGS_MODEL || 'mxbai-embed-large:latest'
   };

   return defaults[key] || defaults.baseUrl;
diff --git a/docker-compose.yml b/docker-compose.yml
index 572eff2..d8de156 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -149,6 +149,7 @@ services:
       - OLLAMA_BASE_URL=http://ollama:11434
       - OLLAMA_MODEL=${OLLAMA_MODEL:-qwen2.5:7b}
       - OLLAMA_EMBEDDINGS_MODEL=${OLLAMA_EMBEDDINGS_MODEL:-mxbai-embed-large:latest}
+      - OLLAMA_EMBED_MODEL=${OLLAMA_EMBEDDINGS_MODEL:-mxbai-embed-large:latest}
       # FRONTEND_URL настраивается в коде, не через env
       - VECTOR_SEARCH_URL=http://vector-search:8001
       - LOG_LEVEL=${LOG_LEVEL:-warn}
diff --git a/webssh-agent/docker-compose.prod.yml b/webssh-agent/docker-compose.prod.yml
index a937e0f..b37c1b3 100644
--- a/webssh-agent/docker-compose.prod.yml
+++ b/webssh-agent/docker-compose.prod.yml
@@ -164,6 +164,8 @@ services:
       - OLLAMA_BASE_URL=http://dapp-ollama:11434
       - OLLAMA_MODEL=${OLLAMA_MODEL:-qwen2.5:7b}
       - OLLAMA_EMBEDDINGS_MODEL=${OLLAMA_EMBEDDINGS_MODEL:-mxbai-embed-large:latest}
+      # Backend читает OLLAMA_EMBED_MODEL, не OLLAMA_EMBEDDINGS_MODEL — передаём то же значение
+      - OLLAMA_EMBED_MODEL=${OLLAMA_EMBEDDINGS_MODEL:-mxbai-embed-large:latest}
       # 🆕 Исправленный URL для Vector Search
       - VECTOR_SEARCH_URL=http://dapp-vector-search:8001
       # Команда запуска для production