UPDATE
This commit is contained in:
+99
-4
@@ -1,9 +1,12 @@
|
||||
"""
|
||||
چت RAG برای API چت عمومی — با ارسال کامل داده مزرعه و retrieval تکمیلی از KB.
|
||||
"""
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import mimetypes
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from .api_provider import get_chat_client
|
||||
from .chunker import chunk_text
|
||||
@@ -13,6 +16,95 @@ from .retrieve import search_with_texts
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _coerce_text_content(value: Any) -> str:
|
||||
if value is None:
|
||||
return ""
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
if isinstance(value, list):
|
||||
parts: list[str] = []
|
||||
for item in value:
|
||||
if isinstance(item, dict) and item.get("type") == "text":
|
||||
text_value = item.get("text")
|
||||
if isinstance(text_value, str) and text_value.strip():
|
||||
parts.append(text_value.strip())
|
||||
elif isinstance(item, str) and item.strip():
|
||||
parts.append(item.strip())
|
||||
return "\n".join(parts)
|
||||
return str(value)
|
||||
|
||||
|
||||
def _normalize_image_inputs(images: list[Any] | None) -> list[dict[str, str]]:
|
||||
normalized: list[dict[str, str]] = []
|
||||
for item in images or []:
|
||||
if isinstance(item, str):
|
||||
value = item.strip()
|
||||
if value:
|
||||
normalized.append({"url": value})
|
||||
continue
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
url = item.get("url") or item.get("image_url") or item.get("data_url")
|
||||
if not isinstance(url, str) or not url.strip():
|
||||
continue
|
||||
entry = {"url": url.strip()}
|
||||
detail = item.get("detail")
|
||||
if isinstance(detail, str) and detail.strip():
|
||||
entry["detail"] = detail.strip()
|
||||
normalized.append(entry)
|
||||
return normalized
|
||||
|
||||
|
||||
def _build_content_parts(text: str, images: list[dict[str, str]] | None = None) -> str | list[dict[str, Any]]:
    """Build OpenAI message content: a plain string when there are no images,
    otherwise a list of ``text`` / ``image_url`` content parts."""
    clean_text = (text or "").strip()
    clean_images = _normalize_image_inputs(images)
    if not clean_images:
        return clean_text

    content: list[dict[str, Any]] = []
    if clean_text:
        content.append({"type": "text", "text": clean_text})
    for img in clean_images:
        payload: dict[str, Any] = {"url": img["url"]}
        detail = img.get("detail")
        if detail:
            payload["detail"] = detail
        content.append({"type": "image_url", "image_url": payload})
    return content
|
||||
|
||||
|
||||
def _normalize_history_messages(history: list[dict[str, Any]] | None) -> list[dict[str, Any]]:
    """Sanitize raw chat history into OpenAI-style messages.

    Only ``user``/``assistant`` entries are kept. Text is read from the
    first non-empty of ``content``/``message``/``text``; images (``images``
    or ``image_urls``) are attached to user turns only. Entries carrying
    neither text nor images are dropped.
    """
    normalized: list[dict[str, Any]] = []
    for item in history or []:
        if not isinstance(item, dict):
            continue
        role = str(item.get("role") or "").strip().lower()
        if role not in {"user", "assistant"}:
            continue
        # Fall back through the alternative keys on any falsy value (None,
        # ""), not only when the key is absent: an entry with an explicit
        # ``"content": None`` previously ignored its ``message``/``text``
        # fields. Mirrors the url fallback in _normalize_image_inputs.
        text = _coerce_text_content(
            item.get("content") or item.get("message") or item.get("text")
        ).strip()
        images = _normalize_image_inputs(item.get("images") or item.get("image_urls"))
        if not text and not images:
            continue
        # Assistant turns never carry image parts.
        content = _build_content_parts(text, images if role == "user" else None)
        normalized.append({"role": role, "content": content})
    return normalized
|
||||
|
||||
|
||||
def encode_uploaded_image(uploaded_file: Any) -> dict[str, str]:
    """Read an uploaded file and encode it as a base64 ``data:`` URL entry.

    The MIME type is taken from ``content_type`` when present, otherwise
    guessed from the file name, defaulting to ``application/octet-stream``.

    Raises:
        ValueError: if ``uploaded_file.read()`` does not yield bytes.
    """
    mime = getattr(uploaded_file, "content_type", None)
    if not mime:
        mime = mimetypes.guess_type(getattr(uploaded_file, "name", ""))[0]
    if not mime:
        mime = "application/octet-stream"

    payload = uploaded_file.read()
    if not isinstance(payload, (bytes, bytearray)):
        raise ValueError("Uploaded image payload is invalid.")

    data = base64.b64encode(payload).decode("ascii")
    return {"url": f"data:{mime};base64,{data}", "detail": "auto"}
|
||||
|
||||
|
||||
def _load_tone(config: RAGConfig | None) -> str:
|
||||
"""بارگذاری فایل لحن پیشفرض (chat KB)."""
|
||||
cfg = config or load_rag_config()
|
||||
@@ -214,6 +306,8 @@ def chat_rag_stream(
|
||||
config: RAGConfig | None = None,
|
||||
system_override: str | None = None,
|
||||
farm_details: dict | None = None,
|
||||
history: list[dict[str, Any]] | None = None,
|
||||
images: list[dict[str, str]] | None = None,
|
||||
):
|
||||
"""
|
||||
چت استریمی با سرویس ثابت `chat` و context مستقیم مزرعه.
|
||||
@@ -223,6 +317,8 @@ def chat_rag_stream(
|
||||
farm_uuid: شناسه مزرعه
|
||||
config: تنظیمات RAG
|
||||
system_override: جایگزین system prompt (اختیاری)
|
||||
history: لیست پیام های قبلی کاربر/هوش مصنوعی
|
||||
images: تصاویر مربوط به پیام فعلی کاربر
|
||||
|
||||
Yields:
|
||||
chunk های استریم پاسخ مدل
|
||||
@@ -268,10 +364,9 @@ def chat_rag_stream(
|
||||
else:
|
||||
system_prompt = _build_system_prompt(service, query, context, cfg)
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": system_prompt},
|
||||
{"role": "user", "content": query},
|
||||
]
|
||||
messages = [{"role": "system", "content": system_prompt}]
|
||||
messages.extend(_normalize_history_messages(history))
|
||||
messages.append({"role": "user", "content": _build_content_parts(query, images)})
|
||||
|
||||
logger.info(
|
||||
"Final prompt prepared service_id=%s farm_uuid=%s model=%s messages_count=%s",
|
||||
|
||||
@@ -4,8 +4,15 @@
|
||||
"""
|
||||
from .irrigation import get_irrigation_recommendation
|
||||
from .fertilization import get_fertilization_recommendation
|
||||
from .pest_disease import get_pest_disease_detection, get_pest_disease_risk
|
||||
from .soil_anomaly import get_soil_anomaly_insight
|
||||
from .water_need_prediction import get_water_need_prediction_insight
|
||||
|
||||
# Public service entry points re-exported by this package.
__all__ = [
    "get_irrigation_recommendation",
    "get_fertilization_recommendation",
    "get_pest_disease_detection",
    "get_pest_disease_risk",
    "get_soil_anomaly_insight",
    "get_water_need_prediction_insight",
]
|
||||
|
||||
@@ -54,6 +54,62 @@ def _find_section(sections: list[dict], section_type: str) -> dict | None:
|
||||
return None
|
||||
|
||||
|
||||
def _field_sources(llm_section: dict, fallback_section: dict, merged_section: dict) -> dict[str, str]:
|
||||
sources: dict[str, str] = {}
|
||||
for key, value in merged_section.items():
|
||||
if key == "provenance":
|
||||
continue
|
||||
llm_value = llm_section.get(key)
|
||||
fallback_value = fallback_section.get(key)
|
||||
if key in llm_section and value == llm_value and value != fallback_value:
|
||||
sources[key] = "llm"
|
||||
elif key in fallback_section and value == fallback_value and value != llm_value:
|
||||
sources[key] = "fallback"
|
||||
elif key in llm_section and key in fallback_section and llm_value == fallback_value == value:
|
||||
sources[key] = "shared"
|
||||
elif key in llm_section and key in fallback_section:
|
||||
sources[key] = "merged"
|
||||
else:
|
||||
sources[key] = "fallback" if key in fallback_section else "llm"
|
||||
return sources
|
||||
|
||||
|
||||
def _attach_provenance(section_type: str, llm_section: dict, fallback_section: dict, merged_section: dict) -> dict:
    """Return a copy of ``merged_section`` with a ``provenance`` block added.

    ``fallbackUsed`` is true when any field did not come purely from the LLM.
    """
    annotated = dict(merged_section)
    sources = _field_sources(llm_section, fallback_section, annotated)
    annotated["provenance"] = {
        "sectionType": section_type,
        "llmProvided": bool(llm_section),
        "fallbackUsed": any(origin != "llm" for origin in sources.values()),
        "fieldSources": sources,
    }
    return annotated
|
||||
|
||||
|
||||
def _fallback_with_provenance(fallback: dict, reason: str) -> dict:
|
||||
sections = []
|
||||
for section in fallback.get("sections", []):
|
||||
section_with_provenance = dict(section)
|
||||
section_with_provenance["provenance"] = {
|
||||
"sectionType": section.get("type"),
|
||||
"llmProvided": False,
|
||||
"fallbackUsed": True,
|
||||
"fieldSources": {
|
||||
key: "fallback"
|
||||
for key in section.keys()
|
||||
if key != "provenance"
|
||||
},
|
||||
}
|
||||
sections.append(section_with_provenance)
|
||||
return {
|
||||
"sections": sections,
|
||||
"mergeMetadata": {
|
||||
"source": "fallback_only",
|
||||
"reason": reason,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _build_fertilization_fallback(*, optimized_result: dict | None) -> dict:
|
||||
if optimized_result:
|
||||
recommended = optimized_result["recommended_strategy"]
|
||||
@@ -134,11 +190,11 @@ def _merge_fertilization_response(
|
||||
) -> dict:
|
||||
fallback = _build_fertilization_fallback(optimized_result=optimized_result)
|
||||
if not isinstance(parsed_result, dict):
|
||||
return fallback
|
||||
return _fallback_with_provenance(fallback, "invalid_llm_payload")
|
||||
|
||||
sections = parsed_result.get("sections")
|
||||
if not isinstance(sections, list):
|
||||
return fallback
|
||||
return _fallback_with_provenance(fallback, "missing_sections")
|
||||
|
||||
recommendation = _find_section(sections, "recommendation") or {}
|
||||
list_section = _find_section(sections, "list") or {}
|
||||
@@ -169,7 +225,36 @@ def _merge_fertilization_response(
|
||||
"content": warning_section.get("content") or fallback_warning["content"],
|
||||
}
|
||||
|
||||
return {"sections": [merged_recommendation, merged_list, merged_warning]}
|
||||
merged_recommendation = _attach_provenance(
|
||||
"recommendation",
|
||||
recommendation,
|
||||
fallback_recommendation,
|
||||
merged_recommendation,
|
||||
)
|
||||
merged_list = _attach_provenance(
|
||||
"list",
|
||||
list_section,
|
||||
fallback_list,
|
||||
merged_list,
|
||||
)
|
||||
merged_warning = _attach_provenance(
|
||||
"warning",
|
||||
warning_section,
|
||||
fallback_warning,
|
||||
merged_warning,
|
||||
)
|
||||
|
||||
return {
|
||||
"sections": [merged_recommendation, merged_list, merged_warning],
|
||||
"mergeMetadata": {
|
||||
"source": "llm_with_fallback_merge",
|
||||
"llmSectionsDetected": [section.get("type") for section in sections if isinstance(section, dict)],
|
||||
"fallbackSectionsApplied": [
|
||||
item["type"]
|
||||
for item in (fallback_recommendation, fallback_list, fallback_warning)
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def get_fertilization_recommendation(
|
||||
|
||||
@@ -57,6 +57,62 @@ def _find_section(sections: list[dict], section_type: str) -> dict | None:
|
||||
return None
|
||||
|
||||
|
||||
def _field_sources(llm_section: dict, fallback_section: dict, merged_section: dict) -> dict[str, str]:
|
||||
sources: dict[str, str] = {}
|
||||
for key, value in merged_section.items():
|
||||
if key == "provenance":
|
||||
continue
|
||||
llm_value = llm_section.get(key)
|
||||
fallback_value = fallback_section.get(key)
|
||||
if key in llm_section and value == llm_value and value != fallback_value:
|
||||
sources[key] = "llm"
|
||||
elif key in fallback_section and value == fallback_value and value != llm_value:
|
||||
sources[key] = "fallback"
|
||||
elif key in llm_section and key in fallback_section and llm_value == fallback_value == value:
|
||||
sources[key] = "shared"
|
||||
elif key in llm_section and key in fallback_section:
|
||||
sources[key] = "merged"
|
||||
else:
|
||||
sources[key] = "fallback" if key in fallback_section else "llm"
|
||||
return sources
|
||||
|
||||
|
||||
def _attach_provenance(section_type: str, llm_section: dict, fallback_section: dict, merged_section: dict) -> dict:
    """Copy ``merged_section`` and attach per-field provenance metadata."""
    result = dict(merged_section)
    origins = _field_sources(llm_section, fallback_section, result)
    result["provenance"] = {
        "sectionType": section_type,
        "llmProvided": bool(llm_section),
        # True when at least one field is not purely LLM-sourced.
        "fallbackUsed": any(v != "llm" for v in origins.values()),
        "fieldSources": origins,
    }
    return result
|
||||
|
||||
|
||||
def _fallback_with_provenance(fallback: dict, reason: str) -> dict:
|
||||
sections = []
|
||||
for section in fallback.get("sections", []):
|
||||
section_with_provenance = dict(section)
|
||||
section_with_provenance["provenance"] = {
|
||||
"sectionType": section.get("type"),
|
||||
"llmProvided": False,
|
||||
"fallbackUsed": True,
|
||||
"fieldSources": {
|
||||
key: "fallback"
|
||||
for key in section.keys()
|
||||
if key != "provenance"
|
||||
},
|
||||
}
|
||||
sections.append(section_with_provenance)
|
||||
return {
|
||||
"sections": sections,
|
||||
"mergeMetadata": {
|
||||
"source": "fallback_only",
|
||||
"reason": reason,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _build_irrigation_fallback(
|
||||
*,
|
||||
optimized_result: dict | None,
|
||||
@@ -155,11 +211,11 @@ def _merge_irrigation_response(
|
||||
daily_water_needs=daily_water_needs,
|
||||
)
|
||||
if not isinstance(parsed_result, dict):
|
||||
return fallback
|
||||
return _fallback_with_provenance(fallback, "invalid_llm_payload")
|
||||
|
||||
sections = parsed_result.get("sections")
|
||||
if not isinstance(sections, list):
|
||||
return fallback
|
||||
return _fallback_with_provenance(fallback, "missing_sections")
|
||||
|
||||
recommendation = _find_section(sections, "recommendation") or {}
|
||||
list_section = _find_section(sections, "list") or {}
|
||||
@@ -190,7 +246,36 @@ def _merge_irrigation_response(
|
||||
"content": warning_section.get("content") or fallback_warning["content"],
|
||||
}
|
||||
|
||||
return {"sections": [merged_recommendation, merged_list, merged_warning]}
|
||||
merged_recommendation = _attach_provenance(
|
||||
"recommendation",
|
||||
recommendation,
|
||||
fallback_recommendation,
|
||||
merged_recommendation,
|
||||
)
|
||||
merged_list = _attach_provenance(
|
||||
"list",
|
||||
list_section,
|
||||
fallback_list,
|
||||
merged_list,
|
||||
)
|
||||
merged_warning = _attach_provenance(
|
||||
"warning",
|
||||
warning_section,
|
||||
fallback_warning,
|
||||
merged_warning,
|
||||
)
|
||||
|
||||
return {
|
||||
"sections": [merged_recommendation, merged_list, merged_warning],
|
||||
"mergeMetadata": {
|
||||
"source": "llm_with_fallback_merge",
|
||||
"llmSectionsDetected": [section.get("type") for section in sections if isinstance(section, dict)],
|
||||
"fallbackSectionsApplied": [
|
||||
item["type"]
|
||||
for item in (fallback_recommendation, fallback_list, fallback_warning)
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def _resolve_irrigation_method(
|
||||
|
||||
@@ -0,0 +1,415 @@
|
||||
"""
|
||||
سرویس RAG برای تشخیص تصویری و پیش بینی ریسک آفات و بیماری گیاه.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from farm_data.services import get_farm_details
|
||||
from rag.api_provider import get_chat_client
|
||||
from rag.chat import (
|
||||
_build_content_parts,
|
||||
_complete_audit_log,
|
||||
_create_audit_log,
|
||||
_fail_audit_log,
|
||||
_load_service_tone,
|
||||
build_rag_context,
|
||||
)
|
||||
from rag.config import RAGConfig, get_service_config, load_rag_config
|
||||
from rag.user_data import build_plant_text
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
KB_NAME = "pest_disease"
|
||||
SERVICE_ID = "pest_disease"
|
||||
|
||||
DETECTION_PROMPT = (
|
||||
"شما یک دستیار تخصصی تشخیص آفات و بیماری گیاهی هستی. "
|
||||
"با استفاده از تصویر، اطلاعات مزرعه، و متن های بازیابی شده از پایگاه دانش تحلیل کن. "
|
||||
"پاسخ فقط JSON معتبر باشد و این کلیدها را داشته باشد: "
|
||||
"has_issue, category, confidence, severity, summary, detected_signs, possible_causes, immediate_actions, reasoning. "
|
||||
"category فقط یکی از no_issue, pest, disease, nutrient_stress, abiotic_stress, unknown باشد. "
|
||||
"severity فقط یکی از low, medium, high باشد."
|
||||
)
|
||||
|
||||
RISK_PROMPT = (
|
||||
"شما یک دستیار تخصصی پیش بینی ریسک آفات و بیماری گیاهی هستی. "
|
||||
"با استفاده از داده های مزرعه، آب و هوا، مرحله رشد، و متن های بازیابی شده از پایگاه دانش تحلیل کن. "
|
||||
"پاسخ فقط JSON معتبر باشد و این کلیدها را داشته باشد: "
|
||||
"summary, forecast_window, overall_risk, disease_risk, pest_risk, key_drivers, recommended_actions. "
|
||||
"overall_risk فقط یکی از low, medium, high باشد. "
|
||||
"disease_risk و pest_risk باید آبجکت هایی با کلیدهای score, level, likely_conditions, reasoning باشند و level فقط یکی از low, medium, high باشد."
|
||||
)
|
||||
|
||||
|
||||
def _safe_float(value: Any, default: float = 0.0) -> float:
|
||||
try:
|
||||
if value in (None, ""):
|
||||
return default
|
||||
return float(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _normalize_images(images: list[dict[str, str]] | None) -> list[dict[str, str]]:
|
||||
output: list[dict[str, str]] = []
|
||||
for item in images or []:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
url = item.get("url")
|
||||
if not isinstance(url, str) or not url.strip():
|
||||
continue
|
||||
output.append({"url": url.strip(), "detail": item.get("detail", "auto")})
|
||||
return output
|
||||
|
||||
|
||||
def _clean_json(raw: str) -> dict[str, Any]:
|
||||
cleaned = (raw or "").strip()
|
||||
if cleaned.startswith("```"):
|
||||
cleaned = cleaned.strip("`")
|
||||
if cleaned.startswith("json"):
|
||||
cleaned = cleaned[4:]
|
||||
cleaned = cleaned.strip()
|
||||
if not cleaned:
|
||||
return {}
|
||||
try:
|
||||
return json.loads(cleaned)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
logger.warning("Invalid JSON returned by pest_disease LLM: %s", cleaned[:500])
|
||||
return {}
|
||||
|
||||
|
||||
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details, raising ``ValueError`` for an unknown ``farm_uuid``."""
    details = get_farm_details(farm_uuid)
    if details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return details
|
||||
|
||||
|
||||
def _build_service_client(cfg: RAGConfig):
    """Build a chat client bound to this service's own LLM settings.

    Returns ``(service_config, client, model_name)``.
    """
    service = get_service_config(SERVICE_ID, cfg)
    # Clone the global config, swapping in the service-specific LLM block.
    scoped_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    return service, get_chat_client(scoped_cfg), service.llm.model
|
||||
|
||||
|
||||
def _weather_risk_summary(farm_details: dict[str, Any]) -> dict[str, Any]:
    """Derive heuristic fungal/pest/abiotic risk scores (0-100) from farm data.

    Missing readings fall back to moderate defaults. NOTE(review): the score
    formulas are ad-hoc heuristics; coefficients are kept exactly as tuned.
    """
    weather = farm_details.get("weather") or {}
    soil = (farm_details.get("soil") or {}).get("resolved_metrics") or {}

    def clamp(raw: float) -> float:
        # Round to 2 decimals first, then clamp to the 0..100 range.
        return min(max(round(raw, 2), 0.0), 100.0)

    humidity = _safe_float(weather.get("humidity_mean"), 55.0)
    temp = _safe_float(weather.get("temperature_mean"), 24.0)
    rain = _safe_float(weather.get("precipitation"), 0.0)
    moisture = _safe_float(soil.get("soil_moisture"), _safe_float(soil.get("wv0033"), 35.0))
    ec = _safe_float(soil.get("electrical_conductivity"), 0.0)
    ph = _safe_float(soil.get("soil_ph") or soil.get("phh2o"), 7.0)

    return {
        "humidity_mean": humidity,
        "temperature_mean": temp,
        "precipitation": rain,
        "soil_moisture": moisture,
        "ec": ec,
        "ph": ph,
        "fungal_score": clamp((humidity * 0.45) + (moisture * 0.35) + (rain * 2.5) - 25),
        "pest_score": clamp((temp * 2.2) + max(0.0, 45.0 - moisture) + (ec * 3.0) - 20),
        "abiotic_stress_score": clamp((abs(ph - 6.8) * 18.0) + (ec * 8.0)),
    }
|
||||
|
||||
|
||||
def _risk_level(score: float) -> str:
|
||||
if score >= 70:
|
||||
return "high"
|
||||
if score >= 40:
|
||||
return "medium"
|
||||
return "low"
|
||||
|
||||
|
||||
def _build_risk_fallback(farm_details: dict[str, Any], plant_name: str | None, growth_stage: str | None) -> dict[str, Any]:
    """Build a deterministic risk payload from the heuristic farm scores.

    Used both as structured context fed to the LLM and as the response of
    last resort when the LLM call fails or returns unusable output.
    """
    risk = _weather_risk_summary(farm_details)
    disease_level = _risk_level(risk["fungal_score"])
    pest_level = _risk_level(risk["pest_score"])
    # Overall risk is driven by the worst of the three heuristic scores.
    overall_score = max(risk["fungal_score"], risk["pest_score"], risk["abiotic_stress_score"])
    overall_level = _risk_level(overall_score)
    drivers = []
    # Threshold cut-offs below are heuristic; NOTE(review): confirm against
    # agronomy guidance before re-tuning.
    if risk["humidity_mean"] >= 70:
        drivers.append("رطوبت بالا")
    if risk["soil_moisture"] >= 60:
        drivers.append("رطوبت خاک بالا")
    if risk["temperature_mean"] >= 30:
        drivers.append("دمای بالا")
    if risk["precipitation"] > 2:
        drivers.append("بارش موثر")
    if risk["ec"] > 2.5:
        drivers.append("EC بالا")
    if abs(risk["ph"] - 6.8) > 0.8:
        drivers.append("خروج pH از محدوده مطلوب")
    if not drivers:
        # Always return at least one driver so the UI list is never empty.
        drivers.append("شرایط فعلی مزرعه نسبتا پایدار است")

    return {
        "summary": "برآورد ریسک آفات و بیماری بر اساس داده های فعلی مزرعه ساخته شد.",
        "forecast_window": "24 تا 72 ساعت آینده",
        "overall_risk": overall_level,
        "disease_risk": {
            "score": risk["fungal_score"],
            "level": disease_level,
            "likely_conditions": [
                "فشار قارچی و بیماری برگی" if disease_level != "low" else "ریسک بیماری فعلا پایین است",
            ],
            "reasoning": [
                f"رطوبت میانگین حدود {risk['humidity_mean']} درصد است.",
                f"رطوبت خاک حدود {risk['soil_moisture']} درصد برآورد شده است.",
            ],
        },
        "pest_risk": {
            "score": risk["pest_score"],
            "level": pest_level,
            "likely_conditions": [
                "فشار آفات مکنده یا تنش زا" if pest_level != "low" else "ریسک آفت فعلا پایین است",
            ],
            "reasoning": [
                f"دمای میانگین حدود {risk['temperature_mean']} درجه است.",
                f"EC فعلی حدود {risk['ec']} و pH حدود {risk['ph']} است.",
            ],
        },
        "key_drivers": drivers,
        "recommended_actions": [
            "بازدید مزرعه و بررسی برگ ها و پشت برگ انجام شود.",
            "در صورت مشاهده علائم مشکوک، نمونه برداری تصویری نزدیک تر انجام شود.",
            "رطوبت ماندگار و یکنواختی آبیاری پایش شود.",
        ],
        "farm_context": {
            "plant_name": plant_name,
            "growth_stage": growth_stage,
            "risk_summary": risk,
        },
    }
|
||||
|
||||
|
||||
def _build_detection_fallback(images: list[dict[str, str]], plant_name: str | None) -> dict[str, Any]:
|
||||
return {
|
||||
"has_issue": False,
|
||||
"category": "unknown",
|
||||
"confidence": 0.2,
|
||||
"severity": "low",
|
||||
"summary": "تحلیل خودکار تصویر انجام نشد یا برای نتیجه قطعی داده کافی نبود.",
|
||||
"detected_signs": [],
|
||||
"possible_causes": ["کیفیت یا زاویه تصویر برای تشخیص کافی نیست"],
|
||||
"immediate_actions": [
|
||||
"یک تصویر نزدیک تر از برگ و ساقه ارسال شود.",
|
||||
"در صورت مشاهده گسترش علائم، بازدید میدانی انجام شود.",
|
||||
],
|
||||
"reasoning": [
|
||||
f"تعداد تصاویر دریافتی: {len(images)}",
|
||||
f"نام گیاه: {plant_name or 'نامشخص'}",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
def _build_detection_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    plant_text: str,
    images: list[dict[str, str]],
) -> tuple[str, list[dict[str, Any]]]:
    """Assemble the system prompt and chat messages for image-based detection.

    Prompt order: tone, service system prompt, detection instructions,
    plant info, retrieved RAG context.
    """
    sections: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        sections.append(tone)
    if service.system_prompt:
        sections.append(service.system_prompt)
    sections.append(DETECTION_PROMPT)
    if plant_text:
        sections.append("[اطلاعات گیاه]\n" + plant_text)
    if rag_context:
        sections.append(rag_context)
    system_prompt = "\n\n".join(part for part in sections if part)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": _build_content_parts(query, images)},
    ]
|
||||
|
||||
|
||||
def _build_risk_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
    plant_text: str,
) -> tuple[str, list[dict[str, str]]]:
    """Assemble the system prompt and chat messages for risk prediction.

    The structured risk context is always embedded as pretty-printed JSON.
    """
    sections: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        sections.append(tone)
    if service.system_prompt:
        sections.append(service.system_prompt)
    sections.append(RISK_PROMPT)
    if plant_text:
        sections.append("[اطلاعات گیاه]\n" + plant_text)
    sections.append(
        "[کانتکست ساختاریافته ریسک]\n"
        + json.dumps(structured_context, ensure_ascii=False, indent=2, default=str)
    )
    if rag_context:
        sections.append(rag_context)
    system_prompt = "\n\n".join(part for part in sections if part)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
|
||||
|
||||
|
||||
def get_pest_disease_detection(
    *,
    farm_uuid: str,
    plant_name: str | None = None,
    query: str | None = None,
    images: list[dict[str, str]] | None = None,
) -> dict[str, Any]:
    """Run the image-based pest/disease detection pipeline for a farm.

    Builds a RAG-augmented multimodal prompt from farm details and the
    supplied images, calls the chat LLM, and returns the parsed JSON verdict
    enriched with farm/KB metadata. On LLM failure or unparseable output a
    conservative fallback payload is returned instead.

    Raises:
        ValueError: if no usable image is supplied, or farm_uuid is unknown.
    """
    normalized_images = _normalize_images(images)
    if not normalized_images:
        # At least one valid image is mandatory for visual detection.
        raise ValueError("حداقل یک تصویر برای تشخیص لازم است.")

    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    # Default to the farm's first registered plant when none is given.
    resolved_plant_name = plant_name or (farm_details.get("plants") or [{}])[0].get("name")
    user_query = query or "این تصویر را بررسی کن و بگو آیا گیاه دچار آفت یا بیماری شده است یا نه."
    plant_text = build_plant_text(resolved_plant_name, "") if resolved_plant_name else ""
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_detection_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        plant_text=plant_text or "",
        images=normalized_images,
    )
    # Audit record is created before the call so failures are traceable too.
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Pest disease detection failed for %s: %s", farm_uuid, exc)
        fallback = _build_detection_fallback(normalized_images, resolved_plant_name)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }

    if not parsed:
        parsed = _build_detection_fallback(normalized_images, resolved_plant_name)
    # NOTE(review): has_issue is defaulted before category, so a payload
    # missing both keys gets has_issue=True (category is still None at this
    # point) — confirm this ordering is intentional.
    parsed.setdefault("has_issue", parsed.get("category") not in {"no_issue", "unknown"})
    parsed.setdefault("category", "unknown")
    parsed.setdefault("confidence", 0.4)
    parsed.setdefault("severity", "low")
    parsed.setdefault("detected_signs", [])
    parsed.setdefault("possible_causes", [])
    parsed.setdefault("immediate_actions", [])
    parsed.setdefault("reasoning", [])
    # Attach traceability metadata for the API layer.
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
|
||||
|
||||
|
||||
def get_pest_disease_risk(
    *,
    farm_uuid: str,
    plant_name: str | None = None,
    growth_stage: str | None = None,
    query: str | None = None,
) -> dict[str, Any]:
    """Predict near-term pest/disease risk for a farm via RAG + LLM.

    The heuristic fallback payload doubles as structured context for the
    model; if the LLM call fails or returns invalid JSON, that same payload
    (enriched with metadata) is returned to the caller.

    Raises:
        ValueError: if farm_uuid is unknown.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    resolved_plant_name = plant_name or (farm_details.get("plants") or [{}])[0].get("name")
    # Heuristic baseline: reused as LLM context and as the failure fallback.
    fallback = _build_risk_fallback(farm_details, resolved_plant_name, growth_stage)
    user_query = query or "ریسک آفات و بیماری این مزرعه را برای چند روز آینده پیش بینی کن."
    plant_text = build_plant_text(resolved_plant_name, growth_stage or "") if resolved_plant_name else ""
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_risk_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        structured_context=fallback,
        plant_text=plant_text or "",
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Pest disease risk prediction failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        # NOTE(review): this mutates the same dict that was embedded in the
        # prompt context above — harmless after the call, but worth noting.
        fallback["farm_uuid"] = farm_uuid
        fallback["knowledge_base"] = KB_NAME
        fallback["tone_file"] = service.tone_file
        fallback["raw_response"] = None
        return fallback

    if not parsed:
        parsed = fallback
    # Fill any keys the model omitted from the heuristic baseline.
    parsed.setdefault("summary", fallback["summary"])
    parsed.setdefault("forecast_window", fallback["forecast_window"])
    parsed.setdefault("overall_risk", fallback["overall_risk"])
    parsed.setdefault("disease_risk", fallback["disease_risk"])
    parsed.setdefault("pest_risk", fallback["pest_risk"])
    parsed.setdefault("key_drivers", fallback["key_drivers"])
    parsed.setdefault("recommended_actions", fallback["recommended_actions"])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
|
||||
@@ -0,0 +1,204 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from farm_data.services import get_farm_details
|
||||
from rag.api_provider import get_chat_client
|
||||
from rag.chat import (
|
||||
_complete_audit_log,
|
||||
_create_audit_log,
|
||||
_fail_audit_log,
|
||||
_load_service_tone,
|
||||
build_rag_context,
|
||||
)
|
||||
from rag.config import RAGConfig, get_service_config, load_rag_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
KB_NAME = "soil_anomaly"
|
||||
SERVICE_ID = "soil_anomaly"
|
||||
|
||||
SOIL_ANOMALY_PROMPT = (
|
||||
"شما یک دستیار تخصصی تحلیل ناهنجاری داده های خاک و سنسور مزرعه هستی. "
|
||||
"ورودی شامل داده های ساختاریافته ناهنجاری، اطلاعات مزرعه، و متن های بازیابی شده از پایگاه دانش است. "
|
||||
"فقط JSON معتبر برگردان و فقط این کلیدها را تولید کن: "
|
||||
"summary, explanation, likely_cause, recommended_action, monitoring_priority, confidence. "
|
||||
"monitoring_priority فقط یکی از low, medium, high, urgent باشد. "
|
||||
"confidence عددی بین 0 و 1 باشد. "
|
||||
"اگر ناهنجاری معناداری وجود ندارد، این موضوع را شفاف و بدون اغراق بیان کن."
|
||||
)
|
||||
|
||||
|
||||
def _clean_json(raw: str) -> dict[str, Any]:
|
||||
cleaned = (raw or "").strip()
|
||||
if cleaned.startswith("```"):
|
||||
cleaned = cleaned.strip("`")
|
||||
if cleaned.startswith("json"):
|
||||
cleaned = cleaned[4:]
|
||||
cleaned = cleaned.strip()
|
||||
if not cleaned:
|
||||
return {}
|
||||
try:
|
||||
return json.loads(cleaned)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
logger.warning("Invalid JSON returned by soil_anomaly LLM: %s", cleaned[:500])
|
||||
return {}
|
||||
|
||||
|
||||
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details for ``farm_uuid`` or raise ``ValueError`` if unknown."""
    details = get_farm_details(farm_uuid)
    if details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return details
|
||||
|
||||
|
||||
def _build_service_client(cfg: RAGConfig):
    """Return ``(service_config, chat_client, model_name)`` for this service.

    Overlays the service-specific LLM settings on the shared RAG config so
    the chat client is created with the per-service model/provider.
    """
    service = get_service_config(SERVICE_ID, cfg)
    per_service_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    return service, get_chat_client(per_service_cfg), service.llm.model
|
||||
|
||||
|
||||
def _fallback_from_payload(anomaly_payload: dict[str, Any]) -> dict[str, Any]:
|
||||
interpretation = anomaly_payload.get("interpretation") or {}
|
||||
anomalies = anomaly_payload.get("anomalies") or []
|
||||
top_anomaly = anomalies[0] if anomalies else None
|
||||
|
||||
if top_anomaly is None:
|
||||
return {
|
||||
"summary": "در داده های اخیر ناهنجاری معناداری دیده نشد.",
|
||||
"explanation": interpretation.get("explanation")
|
||||
or "داده های فعلی با الگوی معمول مزرعه سازگار هستند و مورد غیرعادی برجسته ای دیده نمی شود.",
|
||||
"likely_cause": interpretation.get("likely_cause")
|
||||
or "شرایط فعلی مزرعه پایدار است یا داده کافی برای تشخیص رخداد غیرعادی وجود ندارد.",
|
||||
"recommended_action": interpretation.get("recommended_action")
|
||||
or "پایش عادی ادامه یابد و روندها در بازه بعدی دوباره بررسی شوند.",
|
||||
"monitoring_priority": "low",
|
||||
"confidence": 0.55,
|
||||
}
|
||||
|
||||
severity = str(top_anomaly.get("severity") or "medium")
|
||||
priority_map = {
|
||||
"low": "medium",
|
||||
"medium": "high",
|
||||
"high": "urgent",
|
||||
"critical": "urgent",
|
||||
}
|
||||
return {
|
||||
"summary": f"ناهنجاري در شاخص {top_anomaly.get('label', 'نامشخص')} شناسايي شد.",
|
||||
"explanation": interpretation.get("explanation")
|
||||
or f"مقدار {top_anomaly.get('label', 'اين شاخص')} از الگوي آماري معمول مزرعه فاصله گرفته است.",
|
||||
"likely_cause": interpretation.get("likely_cause")
|
||||
or "اين الگو مي تواند ناشي از تغيير شرايط محيطي، آبياري، شوري يا خطاي اندازه گيري سنسور باشد.",
|
||||
"recommended_action": interpretation.get("recommended_action")
|
||||
or "روند اين شاخص و شرايط مزرعه در کوتاه مدت بازبيني و در صورت تداوم، اقدام اصلاحي انجام شود.",
|
||||
"monitoring_priority": priority_map.get(severity, "high"),
|
||||
"confidence": 0.7 if severity in {"high", "critical"} else 0.6,
|
||||
}
|
||||
|
||||
|
||||
def _build_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
) -> tuple[str, list[dict[str, str]]]:
    """Assemble the system prompt and the two-message chat payload.

    Order: service tone, service system prompt, the soil-anomaly JSON
    instructions, the serialized structured context, then any retrieved
    KB context. Empty sections are dropped.
    """
    sections: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        sections.append(tone)
    if service.system_prompt:
        sections.append(service.system_prompt)
    sections.append(SOIL_ANOMALY_PROMPT)
    context_json = json.dumps(structured_context, ensure_ascii=False, indent=2, default=str)
    sections.append("[کانتکست ساختاریافته ناهنجاري خاک]\n" + context_json)
    if rag_context:
        sections.append(rag_context)
    system_prompt = "\n\n".join(part for part in sections if part)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
|
||||
|
||||
|
||||
def get_soil_anomaly_insight(
    *,
    farm_uuid: str,
    anomaly_payload: dict[str, Any],
    query: str | None = None,
) -> dict[str, Any]:
    """Generate an LLM-backed interpretation of soil anomaly data.

    Builds a structured context from ``anomaly_payload``, retrieves KB
    context, calls the service LLM, and merges the parsed JSON answer over
    a deterministic fallback so every expected key is always present.
    On any LLM failure the fallback is returned (with ``raw_response=None``)
    and the audit log records the error.

    Raises ValueError when the farm UUID cannot be resolved.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    fallback = _fallback_from_payload(anomaly_payload)
    user_query = query or "ناهنجاري هاي داده هاي خاک اين مزرعه را تفسير کن و اقدام مناسب پيشنهاد بده."
    structured_context = {
        "farm_uuid": farm_uuid,
        "anomaly_payload": anomaly_payload,
        "fallback_interpretation": fallback,
    }
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        structured_context=structured_context,
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Soil anomaly insight failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }

    # Guard: if the LLM produced valid-but-non-object JSON, _clean_json may
    # (in older revisions) hand back a non-dict; setdefault needs a dict.
    if not parsed or not isinstance(parsed, dict):
        parsed = dict(fallback)
    for key in (
        "summary",
        "explanation",
        "likely_cause",
        "recommended_action",
        "monitoring_priority",
        "confidence",
    ):
        parsed.setdefault(key, fallback[key])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
|
||||
@@ -0,0 +1,192 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from farm_data.services import get_farm_details
|
||||
from rag.api_provider import get_chat_client
|
||||
from rag.chat import (
|
||||
_complete_audit_log,
|
||||
_create_audit_log,
|
||||
_fail_audit_log,
|
||||
_load_service_tone,
|
||||
build_rag_context,
|
||||
)
|
||||
from rag.config import RAGConfig, get_service_config, load_rag_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Knowledge-base and service identifiers used for retrieval and audit logging.
KB_NAME = "water_need_prediction"
SERVICE_ID = "water_need_prediction"

# System prompt (Persian). It instructs the LLM to return strict JSON with
# the keys summary, irrigation_outlook, recommended_action, risk_note and
# confidence (0..1), and to take numbers from the input rather than
# inventing contradictory values.
WATER_NEED_PROMPT = (
    "شما یک دستیار تخصصی تحليل نياز آبي کوتاه مدت مزرعه هستي. "
    "ورودي شامل محاسبات ساختاريافته نياز آبي، اطلاعات مزرعه و متن هاي بازيابي شده از پايگاه دانش است. "
    "فقط JSON معتبر با اين کليدها برگردان: "
    "summary, irrigation_outlook, recommended_action, risk_note, confidence. "
    "confidence عددي بين 0 و 1 باشد. "
    "اعداد اصلي را از داده ورودي بگير و عدد متناقض جديد نساز."
)
|
||||
|
||||
|
||||
def _clean_json(raw: str) -> dict[str, Any]:
|
||||
cleaned = (raw or "").strip()
|
||||
if cleaned.startswith("```"):
|
||||
cleaned = cleaned.strip("`")
|
||||
if cleaned.startswith("json"):
|
||||
cleaned = cleaned[4:]
|
||||
cleaned = cleaned.strip()
|
||||
if not cleaned:
|
||||
return {}
|
||||
try:
|
||||
return json.loads(cleaned)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
logger.warning("Invalid JSON returned by water_need_prediction LLM: %s", cleaned[:500])
|
||||
return {}
|
||||
|
||||
|
||||
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details for ``farm_uuid`` or raise ``ValueError`` if unknown."""
    details = get_farm_details(farm_uuid)
    if details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return details
|
||||
|
||||
|
||||
def _build_service_client(cfg: RAGConfig):
    """Return ``(service_config, chat_client, model_name)`` for this service.

    Overlays the service-specific LLM settings on the shared RAG config so
    the chat client is created with the per-service model/provider.
    """
    service = get_service_config(SERVICE_ID, cfg)
    per_service_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    return service, get_chat_client(per_service_cfg), service.llm.model
|
||||
|
||||
|
||||
def _fallback_from_payload(prediction_payload: dict[str, Any]) -> dict[str, Any]:
|
||||
total = float(prediction_payload.get("totalNext7Days") or 0.0)
|
||||
daily = prediction_payload.get("dailyBreakdown") or []
|
||||
peak_day = max(daily, key=lambda item: float(item.get("gross_irrigation_mm", 0.0) or 0.0), default=None)
|
||||
if total <= 0:
|
||||
return {
|
||||
"summary": "براي چند روز آينده نياز آبي معناداري برآورد نشد.",
|
||||
"irrigation_outlook": "بارش موثر يا شرايط فعلي باعث شده نياز خالص آبياري پايين باشد.",
|
||||
"recommended_action": "پايش رطوبت خاک ادامه يابد و قبل از هر آبياري جديد forecast دوباره بررسي شود.",
|
||||
"risk_note": "اگر forecast تغيير کند يا بارش موثر رخ ندهد، برآورد بايد به روز شود.",
|
||||
"confidence": 0.58,
|
||||
}
|
||||
|
||||
peak_text = ""
|
||||
if peak_day:
|
||||
peak_text = (
|
||||
f" بيشترين فشار آبي در {peak_day.get('forecast_date')} "
|
||||
f"با حدود {peak_day.get('gross_irrigation_mm')} ميلي متر برآورد شده است."
|
||||
)
|
||||
return {
|
||||
"summary": f"جمع نياز آبي 7 روز آينده حدود {round(total, 2)} ميلي متر برآورد شده است.",
|
||||
"irrigation_outlook": "الگوي آبياري بايد در چند روز آينده بر اساس نياز روزانه و بارش موثر تنظيم شود." + peak_text,
|
||||
"recommended_action": "برنامه آبياري کوتاه مدت بر اساس روزهاي اوج نياز تنظيم و صبح زود يا نزديک غروب اجرا شود.",
|
||||
"risk_note": "در صورت تغيير دما، باد يا بارش، مقادير gross irrigation ممکن است تغيير کنند.",
|
||||
"confidence": 0.72,
|
||||
}
|
||||
|
||||
|
||||
def _build_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
) -> tuple[str, list[dict[str, str]]]:
    """Assemble the system prompt and the two-message chat payload.

    Order: service tone, service system prompt, the water-need JSON
    instructions, the serialized structured context, then any retrieved
    KB context. Empty sections are dropped.
    """
    sections: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        sections.append(tone)
    if service.system_prompt:
        sections.append(service.system_prompt)
    sections.append(WATER_NEED_PROMPT)
    context_json = json.dumps(structured_context, ensure_ascii=False, indent=2, default=str)
    sections.append("[کانتکست ساختاريافته نياز آبي]\n" + context_json)
    if rag_context:
        sections.append(rag_context)
    system_prompt = "\n\n".join(part for part in sections if part)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
|
||||
|
||||
|
||||
def get_water_need_prediction_insight(
    *,
    farm_uuid: str,
    prediction_payload: dict[str, Any],
    query: str | None = None,
) -> dict[str, Any]:
    """Generate an LLM-backed interpretation of short-term water needs.

    Builds a structured context from ``prediction_payload``, retrieves KB
    context, calls the service LLM, and merges the parsed JSON answer over
    a deterministic fallback so every expected key is always present.
    On any LLM failure the fallback is returned (with ``raw_response=None``)
    and the audit log records the error.

    Raises ValueError when the farm UUID cannot be resolved.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    fallback = _fallback_from_payload(prediction_payload)
    user_query = query or "نياز آبي کوتاه مدت اين مزرعه را تفسير کن و اقدام عملياتي پيشنهاد بده."
    structured_context = {
        "farm_uuid": farm_uuid,
        "prediction_payload": prediction_payload,
        "fallback_summary": fallback,
    }
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        structured_context=structured_context,
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Water need prediction insight failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }

    # Guard: if the LLM produced valid-but-non-object JSON, _clean_json may
    # (in older revisions) hand back a non-dict; setdefault needs a dict.
    if not parsed or not isinstance(parsed, dict):
        parsed = dict(fallback)
    for key in (
        "summary",
        "irrigation_outlook",
        "recommended_action",
        "risk_note",
        "confidence",
    ):
        parsed.setdefault(key, fallback[key])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
|
||||
@@ -2,7 +2,7 @@ from unittest.mock import patch
|
||||
|
||||
from django.test import SimpleTestCase
|
||||
|
||||
from rag.chat import build_rag_context
|
||||
from rag.chat import _normalize_history_messages, build_rag_context
|
||||
|
||||
|
||||
class ChatContextTests(SimpleTestCase):
|
||||
@@ -37,6 +37,22 @@ class ChatContextTests(SimpleTestCase):
|
||||
self.assertIn("farm chunk 1", sent_texts)
|
||||
self.assertIn("farm chunk 2", sent_texts)
|
||||
|
||||
def test_normalize_history_messages_supports_user_images(self):
|
||||
messages = _normalize_history_messages(
|
||||
[
|
||||
{"role": "user", "content": "این تصویر مزرعه است", "image_urls": ["https://example.com/a.jpg"]},
|
||||
{"role": "assistant", "content": "تصویر دریافت شد."},
|
||||
]
|
||||
)
|
||||
|
||||
self.assertEqual(len(messages), 2)
|
||||
self.assertEqual(messages[0]["role"], "user")
|
||||
self.assertIsInstance(messages[0]["content"], list)
|
||||
self.assertEqual(messages[0]["content"][0]["type"], "text")
|
||||
self.assertEqual(messages[0]["content"][1]["type"], "image_url")
|
||||
self.assertEqual(messages[1]["role"], "assistant")
|
||||
self.assertEqual(messages[1]["content"], "تصویر دریافت شد.")
|
||||
|
||||
@patch("rag.chat.search_with_texts", return_value=[])
|
||||
@patch("rag.chat.chunk_text", return_value=["farm chunk"])
|
||||
def test_build_rag_context_returns_full_farm_when_kb_empty(
|
||||
|
||||
@@ -144,6 +144,10 @@ class RecommendationServiceDefaultsTests(TestCase):
|
||||
"آبیاری قطرهای",
|
||||
)
|
||||
self.assertEqual(result["simulation_optimizer"]["engine"], "crop_simulation_heuristic")
|
||||
self.assertEqual(result["mergeMetadata"]["source"], "llm_with_fallback_merge")
|
||||
self.assertEqual(result["sections"][1]["provenance"]["sectionType"], "list")
|
||||
self.assertEqual(result["sections"][1]["provenance"]["fieldSources"]["title"], "llm")
|
||||
self.assertEqual(result["sections"][0]["provenance"]["fieldSources"]["amount"], "fallback")
|
||||
|
||||
@patch("rag.services.irrigation.calculate_forecast_water_needs", return_value=[])
|
||||
@patch("rag.services.irrigation.resolve_kc", return_value=0.9)
|
||||
@@ -185,6 +189,7 @@ class RecommendationServiceDefaultsTests(TestCase):
|
||||
self.assertEqual(self.farm.irrigation_method_id, sprinkler.id)
|
||||
self.assertEqual(result["selected_irrigation_method"]["id"], sprinkler.id)
|
||||
mock_build_irrigation_method_text.assert_called_once_with("بارانی")
|
||||
self.assertEqual(result["sections"][0]["provenance"]["fieldSources"]["content"], "llm")
|
||||
|
||||
@patch("rag.services.fertilization.build_plant_text", return_value="plant text")
|
||||
@patch("rag.services.fertilization.build_rag_context", return_value="")
|
||||
@@ -212,6 +217,8 @@ class RecommendationServiceDefaultsTests(TestCase):
|
||||
self.assertEqual(result["sections"][0]["fertilizerType"], "20-20-20")
|
||||
mock_build_plant_text.assert_called_once_with("گوجهفرنگی", "رویشی")
|
||||
self.assertEqual(result["simulation_optimizer"]["engine"], "crop_simulation_heuristic")
|
||||
self.assertEqual(result["sections"][2]["provenance"]["fieldSources"]["content"], "llm")
|
||||
self.assertEqual(result["sections"][0]["provenance"]["fieldSources"]["fertilizerType"], "fallback")
|
||||
|
||||
@patch("rag.services.fertilization.build_plant_text", return_value="plant text")
|
||||
@patch("rag.services.fertilization.build_rag_context", return_value="")
|
||||
@@ -238,3 +245,5 @@ class RecommendationServiceDefaultsTests(TestCase):
|
||||
|
||||
self.assertEqual(result["sections"][0]["applicationMethod"], "کودآبیاری")
|
||||
self.assertEqual(result["sections"][2]["type"], "warning")
|
||||
self.assertEqual(result["mergeMetadata"]["source"], "fallback_only")
|
||||
self.assertFalse(result["sections"][0]["provenance"]["llmProvided"])
|
||||
|
||||
+78
-10
@@ -1,6 +1,7 @@
|
||||
"""
|
||||
ویوهای RAG — چت با استریم
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
|
||||
from django.http import StreamingHttpResponse
|
||||
@@ -13,6 +14,7 @@ from drf_spectacular.utils import (
|
||||
)
|
||||
from rest_framework import status
|
||||
from rest_framework import serializers as drf_serializers
|
||||
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
|
||||
from rest_framework.request import Request
|
||||
from rest_framework.response import Response
|
||||
from rest_framework.views import APIView
|
||||
@@ -22,7 +24,7 @@ from config.openapi import (
|
||||
build_message_response_serializer,
|
||||
build_response,
|
||||
)
|
||||
from .chat import chat_rag_stream
|
||||
from .chat import chat_rag_stream, encode_uploaded_image
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -47,11 +49,45 @@ RagFertilizationResponseSerializer = build_envelope_serializer(
|
||||
|
||||
|
||||
class ChatView(APIView):
|
||||
"""
|
||||
چت RAG با استریم.
|
||||
POST با {"query": "متن سوال", "farm_uuid": "شناسه مزرعه"}.
|
||||
همیشه از سرویس ثابت `chat` استفاده میکند و اطلاعات مزرعه را مستقیم به مدل میفرستد.
|
||||
"""
|
||||
parser_classes = [JSONParser, MultiPartParser, FormParser]
|
||||
|
||||
def _parse_history(self, raw_history):
|
||||
if raw_history in (None, "", []):
|
||||
return []
|
||||
if isinstance(raw_history, list):
|
||||
return raw_history
|
||||
if isinstance(raw_history, str):
|
||||
try:
|
||||
parsed = json.loads(raw_history)
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
raise ValueError("history باید JSON معتبر باشد.")
|
||||
if not isinstance(parsed, list):
|
||||
raise ValueError("history باید آرایه باشد.")
|
||||
return parsed
|
||||
raise ValueError("history فرمت پشتیبانی شده ندارد.")
|
||||
|
||||
def _collect_uploaded_images(self, request: Request):
    """Gather image attachments for the current chat message.

    Combines uploaded files (the ``images`` list and a single ``image``
    field, both base64-encoded via ``encode_uploaded_image``) with the
    ``image_urls`` field, which may be a list or a JSON/plain string.
    Returns a list of ``{"url": ..., "detail": ...}`` payloads.
    """
    images = []
    for uploaded in request.FILES.getlist("images"):
        images.append(encode_uploaded_image(uploaded))
    single_image = request.FILES.get("image")
    if single_image is not None:
        images.append(encode_uploaded_image(single_image))
    image_urls = request.data.get("image_urls")
    if isinstance(image_urls, str) and image_urls.strip():
        try:
            image_urls = json.loads(image_urls)
        except (json.JSONDecodeError, ValueError):
            # Not JSON: treat the raw string as a single URL.
            image_urls = [image_urls]
    if isinstance(image_urls, list):
        for item in image_urls:
            if isinstance(item, str) and item.strip():
                images.append({"url": item.strip(), "detail": "auto"})
            elif isinstance(item, dict) and isinstance(item.get("url"), str):
                url = item["url"].strip()
                # Skip blank URLs instead of emitting {"url": ""} entries.
                if url:
                    images.append({"url": url, "detail": item.get("detail", "auto")})
    return images
|
||||
|
||||
@extend_schema(
|
||||
tags=["RAG Chat"],
|
||||
@@ -63,6 +99,14 @@ class ChatView(APIView):
|
||||
"query": drf_serializers.CharField(required=False, help_text="متن سوال کاربر"),
|
||||
"message": drf_serializers.CharField(required=False, help_text="نام قبلی فیلد query"),
|
||||
"farm_uuid": drf_serializers.CharField(help_text="شناسه مزرعه"),
|
||||
"history": drf_serializers.JSONField(required=False, help_text="آرایه پیام های قبلی با role=user/assistant"),
|
||||
"image_urls": drf_serializers.JSONField(required=False, help_text="آرایه URL تصاویر برای پیام فعلی"),
|
||||
"image": drf_serializers.FileField(required=False, help_text="یک تصویر برای پیام فعلی"),
|
||||
"images": drf_serializers.ListField(
|
||||
child=drf_serializers.FileField(),
|
||||
required=False,
|
||||
help_text="چند تصویر برای پیام فعلی",
|
||||
),
|
||||
},
|
||||
),
|
||||
responses={
|
||||
@@ -83,8 +127,13 @@ class ChatView(APIView):
|
||||
OpenApiExample(
|
||||
"نمونه درخواست",
|
||||
value={
|
||||
"farm_uuid": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"farm_uuid": "11111111-1111-1111-1111-111111111111",
|
||||
"query": "وضعیت مزرعه من چطور است؟",
|
||||
"history": [
|
||||
{"role": "user", "content": "رطوبت خاک من پایین بود؟"},
|
||||
{"role": "assistant", "content": "بله، رطوبت خاک کمتر از محدوده مطلوب بود."},
|
||||
],
|
||||
"image_urls": ["https://example.com/farm-photo.jpg"],
|
||||
},
|
||||
request_only=True,
|
||||
),
|
||||
@@ -97,9 +146,19 @@ class ChatView(APIView):
|
||||
data = request.data if request.method == "POST" else request.query_params
|
||||
message = data.get("query", data.get("message"))
|
||||
farm_uuid = data.get("farm_uuid")
|
||||
raw_history = data.get("history")
|
||||
try:
|
||||
images = self._collect_uploaded_images(request)
|
||||
except ValueError as exc:
|
||||
return Response(
|
||||
{"code": 400, "msg": str(exc)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
if message is None and images:
|
||||
message = "لطفا تصویر ارسالی را در کنار اطلاعات مزرعه بررسی کن."
|
||||
if not message or not isinstance(message, str):
|
||||
return Response(
|
||||
{"code": 400, "msg": "پارامتر query الزامی است."},
|
||||
{"code": 400, "msg": "پارامتر query الزامی است، مگر اینکه تصویر ارسال شده باشد."},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
message = str(message).strip()
|
||||
@@ -119,6 +178,13 @@ class ChatView(APIView):
|
||||
{"code": 400, "msg": "farm_uuid نباید خالی باشد."},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
try:
|
||||
history = self._parse_history(raw_history)
|
||||
except ValueError as exc:
|
||||
return Response(
|
||||
{"code": 400, "msg": str(exc)},
|
||||
status=status.HTTP_400_BAD_REQUEST,
|
||||
)
|
||||
cfg = load_rag_config()
|
||||
farm_details = get_farm_details(farm_uuid)
|
||||
if farm_details is None:
|
||||
@@ -134,6 +200,8 @@ class ChatView(APIView):
|
||||
farm_uuid=farm_uuid,
|
||||
config=cfg,
|
||||
farm_details=farm_details,
|
||||
history=history,
|
||||
images=images,
|
||||
):
|
||||
yield chunk
|
||||
except Exception as e:
|
||||
@@ -188,7 +256,7 @@ class IrrigationRecommendationView(APIView):
|
||||
OpenApiExample(
|
||||
"نمونه درخواست",
|
||||
value={
|
||||
"farm_uuid": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"farm_uuid": "11111111-1111-1111-1111-111111111111",
|
||||
"plant_name": "گوجهفرنگی",
|
||||
"growth_stage": "میوهدهی",
|
||||
"irrigation_method_name": "آبیاری قطرهای",
|
||||
@@ -270,7 +338,7 @@ class FertilizationRecommendationView(APIView):
|
||||
OpenApiExample(
|
||||
"نمونه درخواست",
|
||||
value={
|
||||
"farm_uuid": "550e8400-e29b-41d4-a716-446655440000",
|
||||
"farm_uuid": "11111111-1111-1111-1111-111111111111",
|
||||
"plant_name": "گوجهفرنگی",
|
||||
"growth_stage": "رویشی",
|
||||
},
|
||||
|
||||
Reference in New Issue
Block a user