Files
Ai/config/rag_config.yaml
T
sajad-dev 94355af62b Add LLM configuration and update URL routing
- Introduced LLM configuration in rag_config.yaml and corresponding LLMConfig class in config.py.
- Updated load_rag_config function to parse LLM settings from the configuration file.
- Added new API route for RAG in urls.py to facilitate access to the chat model.
- Modified QdrantVectorStore to use query_points method for improved functionality.
2026-02-27 19:44:49 +03:30

30 lines
808 B
YAML

---
# RAG settings for the CropLogic knowledge base.
# NOTE(review): the pasted copy had lost all indentation, which flattens every
# key to the root and creates duplicate keys (model / base_url / api_key_env
# appear in both embedding and llm) — most parsers silently keep the last
# value. Nesting restored below per the section comments and the consuming
# config loader (EmbeddingConfig / LLMConfig).

embedding:
  provider: "avalai"  # Avalai API (OpenAI-compatible)
  model: "text-embedding-3-small"
  base_url: "https://api.avalai.ir/v1"
  api_key_env: "AVALAI_API_KEY"  # name of the env var holding the key — never the key itself
  batch_size: 32

# Phase one: Qdrant as the vector store
qdrant:
  host: "localhost"  # or "qdrant" when running inside Docker
  port: 6333
  collection_name: "croplogic_kb"
  vector_size: 1536  # matches text-embedding-3-small's output dimension

chunking:
  max_chunk_tokens: 500
  overlap_tokens: 50

# Chat model (LLM) settings — Avalai
llm:
  model: "gpt-4o"
  base_url: "https://api.avalai.ir/v1"
  api_key_env: "AVALAI_API_KEY"
  tone_file: "config/tone.txt"
  knowledge_base_path: "config/knowledge_base"
  user_info_path: "config/user_info"