This commit is contained in:
2026-04-25 17:22:41 +03:30
parent 569d520a5c
commit aa24fc22b0
124 changed files with 8491 additions and 2582 deletions
+7
View File
@@ -4,8 +4,15 @@
"""
from .irrigation import get_irrigation_recommendation
from .fertilization import get_fertilization_recommendation
from .pest_disease import get_pest_disease_detection, get_pest_disease_risk
from .soil_anomaly import get_soil_anomaly_insight
from .water_need_prediction import get_water_need_prediction_insight
# Public API of the insights package; keep in sync with the imports above.
__all__ = [
    "get_irrigation_recommendation",
    "get_fertilization_recommendation",
    "get_pest_disease_detection",
    "get_pest_disease_risk",
    "get_soil_anomaly_insight",
    "get_water_need_prediction_insight",
]
+88 -3
View File
@@ -54,6 +54,62 @@ def _find_section(sections: list[dict], section_type: str) -> dict | None:
return None
def _field_sources(llm_section: dict, fallback_section: dict, merged_section: dict) -> dict[str, str]:
sources: dict[str, str] = {}
for key, value in merged_section.items():
if key == "provenance":
continue
llm_value = llm_section.get(key)
fallback_value = fallback_section.get(key)
if key in llm_section and value == llm_value and value != fallback_value:
sources[key] = "llm"
elif key in fallback_section and value == fallback_value and value != llm_value:
sources[key] = "fallback"
elif key in llm_section and key in fallback_section and llm_value == fallback_value == value:
sources[key] = "shared"
elif key in llm_section and key in fallback_section:
sources[key] = "merged"
else:
sources[key] = "fallback" if key in fallback_section else "llm"
return sources
def _attach_provenance(section_type: str, llm_section: dict, fallback_section: dict, merged_section: dict) -> dict:
    """Return a copy of *merged_section* carrying a "provenance" block that
    records which side (LLM vs. fallback) supplied each field."""
    annotated = dict(merged_section)
    per_field = _field_sources(llm_section, fallback_section, annotated)
    fallback_used = any(origin != "llm" for origin in per_field.values())
    annotated["provenance"] = {
        "sectionType": section_type,
        "llmProvided": bool(llm_section),
        "fallbackUsed": fallback_used,
        "fieldSources": per_field,
    }
    return annotated
def _fallback_with_provenance(fallback: dict, reason: str) -> dict:
sections = []
for section in fallback.get("sections", []):
section_with_provenance = dict(section)
section_with_provenance["provenance"] = {
"sectionType": section.get("type"),
"llmProvided": False,
"fallbackUsed": True,
"fieldSources": {
key: "fallback"
for key in section.keys()
if key != "provenance"
},
}
sections.append(section_with_provenance)
return {
"sections": sections,
"mergeMetadata": {
"source": "fallback_only",
"reason": reason,
},
}
def _build_fertilization_fallback(*, optimized_result: dict | None) -> dict:
if optimized_result:
recommended = optimized_result["recommended_strategy"]
@@ -134,11 +190,11 @@ def _merge_fertilization_response(
) -> dict:
fallback = _build_fertilization_fallback(optimized_result=optimized_result)
if not isinstance(parsed_result, dict):
return fallback
return _fallback_with_provenance(fallback, "invalid_llm_payload")
sections = parsed_result.get("sections")
if not isinstance(sections, list):
return fallback
return _fallback_with_provenance(fallback, "missing_sections")
recommendation = _find_section(sections, "recommendation") or {}
list_section = _find_section(sections, "list") or {}
@@ -169,7 +225,36 @@ def _merge_fertilization_response(
"content": warning_section.get("content") or fallback_warning["content"],
}
return {"sections": [merged_recommendation, merged_list, merged_warning]}
merged_recommendation = _attach_provenance(
"recommendation",
recommendation,
fallback_recommendation,
merged_recommendation,
)
merged_list = _attach_provenance(
"list",
list_section,
fallback_list,
merged_list,
)
merged_warning = _attach_provenance(
"warning",
warning_section,
fallback_warning,
merged_warning,
)
return {
"sections": [merged_recommendation, merged_list, merged_warning],
"mergeMetadata": {
"source": "llm_with_fallback_merge",
"llmSectionsDetected": [section.get("type") for section in sections if isinstance(section, dict)],
"fallbackSectionsApplied": [
item["type"]
for item in (fallback_recommendation, fallback_list, fallback_warning)
],
},
}
def get_fertilization_recommendation(
+88 -3
View File
@@ -57,6 +57,62 @@ def _find_section(sections: list[dict], section_type: str) -> dict | None:
return None
def _field_sources(llm_section: dict, fallback_section: dict, merged_section: dict) -> dict[str, str]:
sources: dict[str, str] = {}
for key, value in merged_section.items():
if key == "provenance":
continue
llm_value = llm_section.get(key)
fallback_value = fallback_section.get(key)
if key in llm_section and value == llm_value and value != fallback_value:
sources[key] = "llm"
elif key in fallback_section and value == fallback_value and value != llm_value:
sources[key] = "fallback"
elif key in llm_section and key in fallback_section and llm_value == fallback_value == value:
sources[key] = "shared"
elif key in llm_section and key in fallback_section:
sources[key] = "merged"
else:
sources[key] = "fallback" if key in fallback_section else "llm"
return sources
def _attach_provenance(section_type: str, llm_section: dict, fallback_section: dict, merged_section: dict) -> dict:
    """Copy *merged_section* and add a "provenance" entry describing which
    source (LLM or fallback) contributed each field."""
    result = dict(merged_section)
    sources = _field_sources(llm_section, fallback_section, result)
    result["provenance"] = {
        "sectionType": section_type,
        "llmProvided": bool(llm_section),
        "fallbackUsed": any(src != "llm" for src in sources.values()),
        "fieldSources": sources,
    }
    return result
def _fallback_with_provenance(fallback: dict, reason: str) -> dict:
sections = []
for section in fallback.get("sections", []):
section_with_provenance = dict(section)
section_with_provenance["provenance"] = {
"sectionType": section.get("type"),
"llmProvided": False,
"fallbackUsed": True,
"fieldSources": {
key: "fallback"
for key in section.keys()
if key != "provenance"
},
}
sections.append(section_with_provenance)
return {
"sections": sections,
"mergeMetadata": {
"source": "fallback_only",
"reason": reason,
},
}
def _build_irrigation_fallback(
*,
optimized_result: dict | None,
@@ -155,11 +211,11 @@ def _merge_irrigation_response(
daily_water_needs=daily_water_needs,
)
if not isinstance(parsed_result, dict):
return fallback
return _fallback_with_provenance(fallback, "invalid_llm_payload")
sections = parsed_result.get("sections")
if not isinstance(sections, list):
return fallback
return _fallback_with_provenance(fallback, "missing_sections")
recommendation = _find_section(sections, "recommendation") or {}
list_section = _find_section(sections, "list") or {}
@@ -190,7 +246,36 @@ def _merge_irrigation_response(
"content": warning_section.get("content") or fallback_warning["content"],
}
return {"sections": [merged_recommendation, merged_list, merged_warning]}
merged_recommendation = _attach_provenance(
"recommendation",
recommendation,
fallback_recommendation,
merged_recommendation,
)
merged_list = _attach_provenance(
"list",
list_section,
fallback_list,
merged_list,
)
merged_warning = _attach_provenance(
"warning",
warning_section,
fallback_warning,
merged_warning,
)
return {
"sections": [merged_recommendation, merged_list, merged_warning],
"mergeMetadata": {
"source": "llm_with_fallback_merge",
"llmSectionsDetected": [section.get("type") for section in sections if isinstance(section, dict)],
"fallbackSectionsApplied": [
item["type"]
for item in (fallback_recommendation, fallback_list, fallback_warning)
],
},
}
def _resolve_irrigation_method(
+415
View File
@@ -0,0 +1,415 @@
"""
سرویس RAG برای تشخیص تصویری و پیش بینی ریسک آفات و بیماری گیاه.
"""
from __future__ import annotations
import json
import logging
from typing import Any
from farm_data.services import get_farm_details
from rag.api_provider import get_chat_client
from rag.chat import (
_build_content_parts,
_complete_audit_log,
_create_audit_log,
_fail_audit_log,
_load_service_tone,
build_rag_context,
)
from rag.config import RAGConfig, get_service_config, load_rag_config
from rag.user_data import build_plant_text
logger = logging.getLogger(__name__)
KB_NAME = "pest_disease"
SERVICE_ID = "pest_disease"
DETECTION_PROMPT = (
"شما یک دستیار تخصصی تشخیص آفات و بیماری گیاهی هستی. "
"با استفاده از تصویر، اطلاعات مزرعه، و متن های بازیابی شده از پایگاه دانش تحلیل کن. "
"پاسخ فقط JSON معتبر باشد و این کلیدها را داشته باشد: "
"has_issue, category, confidence, severity, summary, detected_signs, possible_causes, immediate_actions, reasoning. "
"category فقط یکی از no_issue, pest, disease, nutrient_stress, abiotic_stress, unknown باشد. "
"severity فقط یکی از low, medium, high باشد."
)
RISK_PROMPT = (
"شما یک دستیار تخصصی پیش بینی ریسک آفات و بیماری گیاهی هستی. "
"با استفاده از داده های مزرعه، آب و هوا، مرحله رشد، و متن های بازیابی شده از پایگاه دانش تحلیل کن. "
"پاسخ فقط JSON معتبر باشد و این کلیدها را داشته باشد: "
"summary, forecast_window, overall_risk, disease_risk, pest_risk, key_drivers, recommended_actions. "
"overall_risk فقط یکی از low, medium, high باشد. "
"disease_risk و pest_risk باید آبجکت هایی با کلیدهای score, level, likely_conditions, reasoning باشند و level فقط یکی از low, medium, high باشد."
)
def _safe_float(value: Any, default: float = 0.0) -> float:
try:
if value in (None, ""):
return default
return float(value)
except (TypeError, ValueError):
return default
def _normalize_images(images: list[dict[str, str]] | None) -> list[dict[str, str]]:
output: list[dict[str, str]] = []
for item in images or []:
if not isinstance(item, dict):
continue
url = item.get("url")
if not isinstance(url, str) or not url.strip():
continue
output.append({"url": url.strip(), "detail": item.get("detail", "auto")})
return output
def _clean_json(raw: str) -> dict[str, Any]:
cleaned = (raw or "").strip()
if cleaned.startswith("```"):
cleaned = cleaned.strip("`")
if cleaned.startswith("json"):
cleaned = cleaned[4:]
cleaned = cleaned.strip()
if not cleaned:
return {}
try:
return json.loads(cleaned)
except (json.JSONDecodeError, ValueError):
logger.warning("Invalid JSON returned by pest_disease LLM: %s", cleaned[:500])
return {}
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details for *farm_uuid*.

    Raises:
        ValueError: (Persian message) when the farm is unknown.
    """
    farm_details = get_farm_details(farm_uuid)
    if farm_details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return farm_details
def _build_service_client(cfg: RAGConfig):
    """Resolve this service's configuration and build a chat client for it.

    Clones the global RAG config but swaps in the service-specific LLM
    settings, so the client talks to the model configured for SERVICE_ID.
    Returns a (service_config, chat_client, model_name) tuple.
    """
    service = get_service_config(SERVICE_ID, cfg)
    # Same global config, except the LLM section comes from the service.
    service_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    client = get_chat_client(service_cfg)
    return service, client, service.llm.model
def _weather_risk_summary(farm_details: dict[str, Any]) -> dict[str, Any]:
    """Derive heuristic 0-100 risk scores (fungal, pest, abiotic) from the
    farm's weather block and resolved soil metrics, with conservative
    defaults for missing values."""

    def clamp(score: float) -> float:
        # Heuristic scores are bounded to the 0..100 range.
        return min(max(score, 0.0), 100.0)

    weather = farm_details.get("weather") or {}
    soil = (farm_details.get("soil") or {}).get("resolved_metrics") or {}
    humidity = _safe_float(weather.get("humidity_mean"), 55.0)
    temperature = _safe_float(weather.get("temperature_mean"), 24.0)
    precipitation = _safe_float(weather.get("precipitation"), 0.0)
    moisture = _safe_float(soil.get("soil_moisture"), _safe_float(soil.get("wv0033"), 35.0))
    conductivity = _safe_float(soil.get("electrical_conductivity"), 0.0)
    acidity = _safe_float(soil.get("soil_ph") or soil.get("phh2o"), 7.0)
    fungal = clamp(round(humidity * 0.45 + moisture * 0.35 + precipitation * 2.5 - 25, 2))
    pest = clamp(round(temperature * 2.2 + max(0.0, 45.0 - moisture) + conductivity * 3.0 - 20, 2))
    abiotic = clamp(round(abs(acidity - 6.8) * 18.0 + conductivity * 8.0, 2))
    return {
        "humidity_mean": humidity,
        "temperature_mean": temperature,
        "precipitation": precipitation,
        "soil_moisture": moisture,
        "ec": conductivity,
        "ph": acidity,
        "fungal_score": fungal,
        "pest_score": pest,
        "abiotic_stress_score": abiotic,
    }
def _risk_level(score: float) -> str:
if score >= 70:
return "high"
if score >= 40:
return "medium"
return "low"
def _build_risk_fallback(farm_details: dict[str, Any], plant_name: str | None, growth_stage: str | None) -> dict[str, Any]:
    """Build a deterministic pest/disease risk payload from heuristic scores.

    Serves both as the structured context handed to the LLM and as the
    response returned when the LLM call fails or produces unusable JSON.
    """
    risk = _weather_risk_summary(farm_details)
    disease_level = _risk_level(risk["fungal_score"])
    pest_level = _risk_level(risk["pest_score"])
    # Overall risk is driven by the single worst score.
    overall_score = max(risk["fungal_score"], risk["pest_score"], risk["abiotic_stress_score"])
    overall_level = _risk_level(overall_score)
    drivers = []
    # Threshold checks below are fixed heuristics over the summarized metrics.
    if risk["humidity_mean"] >= 70:
        drivers.append("رطوبت بالا")
    if risk["soil_moisture"] >= 60:
        drivers.append("رطوبت خاک بالا")
    if risk["temperature_mean"] >= 30:
        drivers.append("دمای بالا")
    if risk["precipitation"] > 2:
        drivers.append("بارش موثر")
    if risk["ec"] > 2.5:
        drivers.append("EC بالا")
    if abs(risk["ph"] - 6.8) > 0.8:
        drivers.append("خروج pH از محدوده مطلوب")
    if not drivers:
        drivers.append("شرایط فعلی مزرعه نسبتا پایدار است")
    return {
        "summary": "برآورد ریسک آفات و بیماری بر اساس داده های فعلی مزرعه ساخته شد.",
        "forecast_window": "24 تا 72 ساعت آینده",
        "overall_risk": overall_level,
        "disease_risk": {
            "score": risk["fungal_score"],
            "level": disease_level,
            "likely_conditions": [
                "فشار قارچی و بیماری برگی" if disease_level != "low" else "ریسک بیماری فعلا پایین است",
            ],
            "reasoning": [
                f"رطوبت میانگین حدود {risk['humidity_mean']} درصد است.",
                f"رطوبت خاک حدود {risk['soil_moisture']} درصد برآورد شده است.",
            ],
        },
        "pest_risk": {
            "score": risk["pest_score"],
            "level": pest_level,
            "likely_conditions": [
                "فشار آفات مکنده یا تنش زا" if pest_level != "low" else "ریسک آفت فعلا پایین است",
            ],
            "reasoning": [
                f"دمای میانگین حدود {risk['temperature_mean']} درجه است.",
                f"EC فعلی حدود {risk['ec']} و pH حدود {risk['ph']} است.",
            ],
        },
        "key_drivers": drivers,
        "recommended_actions": [
            "بازدید مزرعه و بررسی برگ ها و پشت برگ انجام شود.",
            "در صورت مشاهده علائم مشکوک، نمونه برداری تصویری نزدیک تر انجام شود.",
            "رطوبت ماندگار و یکنواختی آبیاری پایش شود.",
        ],
        # Echo of inputs plus the raw heuristic scores, for transparency.
        "farm_context": {
            "plant_name": plant_name,
            "growth_stage": growth_stage,
            "risk_summary": risk,
        },
    }
def _build_detection_fallback(images: list[dict[str, str]], plant_name: str | None) -> dict[str, Any]:
return {
"has_issue": False,
"category": "unknown",
"confidence": 0.2,
"severity": "low",
"summary": "تحلیل خودکار تصویر انجام نشد یا برای نتیجه قطعی داده کافی نبود.",
"detected_signs": [],
"possible_causes": ["کیفیت یا زاویه تصویر برای تشخیص کافی نیست"],
"immediate_actions": [
"یک تصویر نزدیک تر از برگ و ساقه ارسال شود.",
"در صورت مشاهده گسترش علائم، بازدید میدانی انجام شود.",
],
"reasoning": [
f"تعداد تصاویر دریافتی: {len(images)}",
f"نام گیاه: {plant_name or 'نامشخص'}",
],
}
def _build_detection_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    plant_text: str,
    images: list[dict[str, str]],
) -> tuple[str, list[dict[str, Any]]]:
    """Assemble the system prompt (tone, service prompt, detection task prompt,
    plant info, RAG context) and the multimodal chat messages."""
    parts: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        parts.append(tone)
    if service.system_prompt:
        parts.append(service.system_prompt)
    parts.append(DETECTION_PROMPT)
    if plant_text:
        parts.append("[اطلاعات گیاه]\n" + plant_text)
    if rag_context:
        parts.append(rag_context)
    system_prompt = "\n\n".join(segment for segment in parts if segment)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        # User turn carries the query plus the image parts.
        {"role": "user", "content": _build_content_parts(query, images)},
    ]
def _build_risk_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
    plant_text: str,
) -> tuple[str, list[dict[str, str]]]:
    """Assemble the system prompt and chat messages for risk prediction.

    The system prompt layers: service tone, the service's own system prompt,
    the risk task prompt, optional plant info, the structured risk context
    (JSON-serialized), and finally the retrieved RAG context.
    Returns (system_prompt, messages).
    """
    tone = _load_service_tone(service, cfg)
    system_parts = [tone] if tone else []
    if service.system_prompt:
        system_parts.append(service.system_prompt)
    system_parts.append(RISK_PROMPT)
    if plant_text:
        system_parts.append("[اطلاعات گیاه]\n" + plant_text)
    # default=str keeps non-JSON-native values (dates etc.) serializable.
    system_parts.append("[کانتکست ساختاریافته ریسک]\n" + json.dumps(structured_context, ensure_ascii=False, indent=2, default=str))
    if rag_context:
        system_parts.append(rag_context)
    system_prompt = "\n\n".join(part for part in system_parts if part)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
    return system_prompt, messages
def get_pest_disease_detection(
    *,
    farm_uuid: str,
    plant_name: str | None = None,
    query: str | None = None,
    images: list[dict[str, str]] | None = None,
) -> dict[str, Any]:
    """Run image-based pest/disease detection for a farm via the RAG LLM.

    On any LLM failure the deterministic fallback payload is returned with
    ``raw_response`` set to None; otherwise the parsed LLM JSON is normalized
    (missing keys defaulted) and returned.

    Raises:
        ValueError: when no valid image is supplied or the farm is unknown.
    """
    normalized_images = _normalize_images(images)
    if not normalized_images:
        raise ValueError("حداقل یک تصویر برای تشخیص لازم است.")
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    # Default to the farm's first registered plant when none is given.
    resolved_plant_name = plant_name or (farm_details.get("plants") or [{}])[0].get("name")
    user_query = query or "این تصویر را بررسی کن و بگو آیا گیاه دچار آفت یا بیماری شده است یا نه."
    plant_text = build_plant_text(resolved_plant_name, "") if resolved_plant_name else ""
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_detection_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        plant_text=plant_text or "",
        images=normalized_images,
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Pest disease detection failed for %s: %s", farm_uuid, exc)
        fallback = _build_detection_fallback(normalized_images, resolved_plant_name)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }
    if not parsed:
        parsed = _build_detection_fallback(normalized_images, resolved_plant_name)
    # Normalize "category" BEFORE deriving "has_issue". The previous order
    # derived has_issue from a possibly-missing category (None -> True),
    # which contradicted the category then defaulting to "unknown".
    parsed.setdefault("category", "unknown")
    parsed.setdefault("has_issue", parsed["category"] not in {"no_issue", "unknown"})
    parsed.setdefault("confidence", 0.4)
    parsed.setdefault("severity", "low")
    parsed.setdefault("detected_signs", [])
    parsed.setdefault("possible_causes", [])
    parsed.setdefault("immediate_actions", [])
    parsed.setdefault("reasoning", [])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
def get_pest_disease_risk(
    *,
    farm_uuid: str,
    plant_name: str | None = None,
    growth_stage: str | None = None,
    query: str | None = None,
) -> dict[str, Any]:
    """Predict near-term pest/disease risk for a farm via the RAG LLM.

    A deterministic fallback is computed first and doubles as the structured
    context given to the model; on LLM failure or unusable JSON the fallback
    itself is returned (``raw_response`` None). Missing keys in a successful
    LLM answer are filled from the fallback.

    Raises:
        ValueError: when the farm is unknown.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    # Default to the farm's first registered plant when none is given.
    resolved_plant_name = plant_name or (farm_details.get("plants") or [{}])[0].get("name")
    fallback = _build_risk_fallback(farm_details, resolved_plant_name, growth_stage)
    user_query = query or "ریسک آفات و بیماری این مزرعه را برای چند روز آینده پیش بینی کن."
    plant_text = build_plant_text(resolved_plant_name, growth_stage or "") if resolved_plant_name else ""
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_risk_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        # The fallback payload is the structured context the model reasons over.
        structured_context=fallback,
        plant_text=plant_text or "",
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Pest disease risk prediction failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        fallback["farm_uuid"] = farm_uuid
        fallback["knowledge_base"] = KB_NAME
        fallback["tone_file"] = service.tone_file
        fallback["raw_response"] = None
        return fallback
    if not parsed:
        # NOTE: aliases `fallback`; mutations below also touch it (unused after).
        parsed = fallback
    parsed.setdefault("summary", fallback["summary"])
    parsed.setdefault("forecast_window", fallback["forecast_window"])
    parsed.setdefault("overall_risk", fallback["overall_risk"])
    parsed.setdefault("disease_risk", fallback["disease_risk"])
    parsed.setdefault("pest_risk", fallback["pest_risk"])
    parsed.setdefault("key_drivers", fallback["key_drivers"])
    parsed.setdefault("recommended_actions", fallback["recommended_actions"])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
+204
View File
@@ -0,0 +1,204 @@
from __future__ import annotations
import json
import logging
from typing import Any
from farm_data.services import get_farm_details
from rag.api_provider import get_chat_client
from rag.chat import (
_complete_audit_log,
_create_audit_log,
_fail_audit_log,
_load_service_tone,
build_rag_context,
)
from rag.config import RAGConfig, get_service_config, load_rag_config
logger = logging.getLogger(__name__)
KB_NAME = "soil_anomaly"
SERVICE_ID = "soil_anomaly"
SOIL_ANOMALY_PROMPT = (
"شما یک دستیار تخصصی تحلیل ناهنجاری داده های خاک و سنسور مزرعه هستی. "
"ورودی شامل داده های ساختاریافته ناهنجاری، اطلاعات مزرعه، و متن های بازیابی شده از پایگاه دانش است. "
"فقط JSON معتبر برگردان و فقط این کلیدها را تولید کن: "
"summary, explanation, likely_cause, recommended_action, monitoring_priority, confidence. "
"monitoring_priority فقط یکی از low, medium, high, urgent باشد. "
"confidence عددی بین 0 و 1 باشد. "
"اگر ناهنجاری معناداری وجود ندارد، این موضوع را شفاف و بدون اغراق بیان کن."
)
def _clean_json(raw: str) -> dict[str, Any]:
cleaned = (raw or "").strip()
if cleaned.startswith("```"):
cleaned = cleaned.strip("`")
if cleaned.startswith("json"):
cleaned = cleaned[4:]
cleaned = cleaned.strip()
if not cleaned:
return {}
try:
return json.loads(cleaned)
except (json.JSONDecodeError, ValueError):
logger.warning("Invalid JSON returned by soil_anomaly LLM: %s", cleaned[:500])
return {}
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details for *farm_uuid*.

    Raises:
        ValueError: (Persian message) when the farm is unknown.
    """
    farm_details = get_farm_details(farm_uuid)
    if farm_details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return farm_details
def _build_service_client(cfg: RAGConfig):
    """Resolve this service's configuration and build a chat client for it.

    Clones the global RAG config with the service-specific LLM settings
    swapped in. Returns (service_config, chat_client, model_name).
    """
    service = get_service_config(SERVICE_ID, cfg)
    # Same global config, except the LLM section comes from the service.
    service_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    client = get_chat_client(service_cfg)
    return service, client, service.llm.model
def _fallback_from_payload(anomaly_payload: dict[str, Any]) -> dict[str, Any]:
interpretation = anomaly_payload.get("interpretation") or {}
anomalies = anomaly_payload.get("anomalies") or []
top_anomaly = anomalies[0] if anomalies else None
if top_anomaly is None:
return {
"summary": "در داده های اخیر ناهنجاری معناداری دیده نشد.",
"explanation": interpretation.get("explanation")
or "داده های فعلی با الگوی معمول مزرعه سازگار هستند و مورد غیرعادی برجسته ای دیده نمی شود.",
"likely_cause": interpretation.get("likely_cause")
or "شرایط فعلی مزرعه پایدار است یا داده کافی برای تشخیص رخداد غیرعادی وجود ندارد.",
"recommended_action": interpretation.get("recommended_action")
or "پایش عادی ادامه یابد و روندها در بازه بعدی دوباره بررسی شوند.",
"monitoring_priority": "low",
"confidence": 0.55,
}
severity = str(top_anomaly.get("severity") or "medium")
priority_map = {
"low": "medium",
"medium": "high",
"high": "urgent",
"critical": "urgent",
}
return {
"summary": f"ناهنجاري در شاخص {top_anomaly.get('label', 'نامشخص')} شناسايي شد.",
"explanation": interpretation.get("explanation")
or f"مقدار {top_anomaly.get('label', 'اين شاخص')} از الگوي آماري معمول مزرعه فاصله گرفته است.",
"likely_cause": interpretation.get("likely_cause")
or "اين الگو مي تواند ناشي از تغيير شرايط محيطي، آبياري، شوري يا خطاي اندازه گيري سنسور باشد.",
"recommended_action": interpretation.get("recommended_action")
or "روند اين شاخص و شرايط مزرعه در کوتاه مدت بازبيني و در صورت تداوم، اقدام اصلاحي انجام شود.",
"monitoring_priority": priority_map.get(severity, "high"),
"confidence": 0.7 if severity in {"high", "critical"} else 0.6,
}
def _build_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
) -> tuple[str, list[dict[str, str]]]:
    """Compose the system prompt (tone, service prompt, task prompt,
    JSON-serialized anomaly context, RAG context) and the chat messages."""
    parts: list[str] = []
    tone = _load_service_tone(service, cfg)
    if tone:
        parts.append(tone)
    if service.system_prompt:
        parts.append(service.system_prompt)
    parts.append(SOIL_ANOMALY_PROMPT)
    parts.append(
        "[کانتکست ساختاریافته ناهنجاري خاک]\n"
        + json.dumps(structured_context, ensure_ascii=False, indent=2, default=str)
    )
    if rag_context:
        parts.append(rag_context)
    system_prompt = "\n\n".join(segment for segment in parts if segment)
    return system_prompt, [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
def get_soil_anomaly_insight(
    *,
    farm_uuid: str,
    anomaly_payload: dict[str, Any],
    query: str | None = None,
) -> dict[str, Any]:
    """Interpret detected soil/sensor anomalies for a farm via the RAG LLM.

    A deterministic fallback is computed up front and also embedded in the
    structured context handed to the model; on LLM failure the fallback is
    returned (``raw_response`` None). Missing keys in a successful LLM answer
    are filled from the fallback.

    Raises:
        ValueError: when the farm is unknown.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    fallback = _fallback_from_payload(anomaly_payload)
    user_query = query or "ناهنجاري هاي داده هاي خاک اين مزرعه را تفسير کن و اقدام مناسب پيشنهاد بده."
    structured_context = {
        "farm_uuid": farm_uuid,
        "anomaly_payload": anomaly_payload,
        "fallback_interpretation": fallback,
    }
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        structured_context=structured_context,
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Soil anomaly insight failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }
    if not parsed:
        # NOTE: aliases `fallback`; mutations below also touch it (unused after).
        parsed = fallback
    parsed.setdefault("summary", fallback["summary"])
    parsed.setdefault("explanation", fallback["explanation"])
    parsed.setdefault("likely_cause", fallback["likely_cause"])
    parsed.setdefault("recommended_action", fallback["recommended_action"])
    parsed.setdefault("monitoring_priority", fallback["monitoring_priority"])
    parsed.setdefault("confidence", fallback["confidence"])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed
+192
View File
@@ -0,0 +1,192 @@
from __future__ import annotations
import json
import logging
from typing import Any
from farm_data.services import get_farm_details
from rag.api_provider import get_chat_client
from rag.chat import (
_complete_audit_log,
_create_audit_log,
_fail_audit_log,
_load_service_tone,
build_rag_context,
)
from rag.config import RAGConfig, get_service_config, load_rag_config
logger = logging.getLogger(__name__)
KB_NAME = "water_need_prediction"
SERVICE_ID = "water_need_prediction"
WATER_NEED_PROMPT = (
"شما یک دستیار تخصصی تحليل نياز آبي کوتاه مدت مزرعه هستي. "
"ورودي شامل محاسبات ساختاريافته نياز آبي، اطلاعات مزرعه و متن هاي بازيابي شده از پايگاه دانش است. "
"فقط JSON معتبر با اين کليدها برگردان: "
"summary, irrigation_outlook, recommended_action, risk_note, confidence. "
"confidence عددي بين 0 و 1 باشد. "
"اعداد اصلي را از داده ورودي بگير و عدد متناقض جديد نساز."
)
def _clean_json(raw: str) -> dict[str, Any]:
cleaned = (raw or "").strip()
if cleaned.startswith("```"):
cleaned = cleaned.strip("`")
if cleaned.startswith("json"):
cleaned = cleaned[4:]
cleaned = cleaned.strip()
if not cleaned:
return {}
try:
return json.loads(cleaned)
except (json.JSONDecodeError, ValueError):
logger.warning("Invalid JSON returned by water_need_prediction LLM: %s", cleaned[:500])
return {}
def _load_farm_or_error(farm_uuid: str) -> dict[str, Any]:
    """Fetch farm details for *farm_uuid*.

    Raises:
        ValueError: (Persian message) when the farm is unknown.
    """
    farm_details = get_farm_details(farm_uuid)
    if farm_details is None:
        raise ValueError("farm_uuid نامعتبر است یا اطلاعات مزرعه پیدا نشد.")
    return farm_details
def _build_service_client(cfg: RAGConfig):
    """Resolve this service's configuration and build a chat client for it.

    Clones the global RAG config with the service-specific LLM settings
    swapped in. Returns (service_config, chat_client, model_name).
    """
    service = get_service_config(SERVICE_ID, cfg)
    # Same global config, except the LLM section comes from the service.
    service_cfg = RAGConfig(
        embedding=cfg.embedding,
        qdrant=cfg.qdrant,
        chunking=cfg.chunking,
        llm=service.llm,
        knowledge_bases=cfg.knowledge_bases,
        services=cfg.services,
        chromadb=cfg.chromadb,
    )
    client = get_chat_client(service_cfg)
    return service, client, service.llm.model
def _fallback_from_payload(prediction_payload: dict[str, Any]) -> dict[str, Any]:
total = float(prediction_payload.get("totalNext7Days") or 0.0)
daily = prediction_payload.get("dailyBreakdown") or []
peak_day = max(daily, key=lambda item: float(item.get("gross_irrigation_mm", 0.0) or 0.0), default=None)
if total <= 0:
return {
"summary": "براي چند روز آينده نياز آبي معناداري برآورد نشد.",
"irrigation_outlook": "بارش موثر يا شرايط فعلي باعث شده نياز خالص آبياري پايين باشد.",
"recommended_action": "پايش رطوبت خاک ادامه يابد و قبل از هر آبياري جديد forecast دوباره بررسي شود.",
"risk_note": "اگر forecast تغيير کند يا بارش موثر رخ ندهد، برآورد بايد به روز شود.",
"confidence": 0.58,
}
peak_text = ""
if peak_day:
peak_text = (
f" بيشترين فشار آبي در {peak_day.get('forecast_date')} "
f"با حدود {peak_day.get('gross_irrigation_mm')} ميلي متر برآورد شده است."
)
return {
"summary": f"جمع نياز آبي 7 روز آينده حدود {round(total, 2)} ميلي متر برآورد شده است.",
"irrigation_outlook": "الگوي آبياري بايد در چند روز آينده بر اساس نياز روزانه و بارش موثر تنظيم شود." + peak_text,
"recommended_action": "برنامه آبياري کوتاه مدت بر اساس روزهاي اوج نياز تنظيم و صبح زود يا نزديک غروب اجرا شود.",
"risk_note": "در صورت تغيير دما، باد يا بارش، مقادير gross irrigation ممکن است تغيير کنند.",
"confidence": 0.72,
}
def _build_messages(
    *,
    service: Any,
    cfg: RAGConfig,
    query: str,
    rag_context: str,
    structured_context: dict[str, Any],
) -> tuple[str, list[dict[str, str]]]:
    """Assemble the system prompt and chat messages for water-need insight.

    The system prompt layers: service tone, the service's own system prompt,
    the water-need task prompt, the structured prediction context
    (JSON-serialized), and the retrieved RAG context.
    Returns (system_prompt, messages).
    """
    tone = _load_service_tone(service, cfg)
    system_parts = [tone] if tone else []
    if service.system_prompt:
        system_parts.append(service.system_prompt)
    system_parts.append(WATER_NEED_PROMPT)
    # default=str keeps non-JSON-native values (dates etc.) serializable.
    system_parts.append(
        "[کانتکست ساختاريافته نياز آبي]\n"
        + json.dumps(structured_context, ensure_ascii=False, indent=2, default=str)
    )
    if rag_context:
        system_parts.append(rag_context)
    system_prompt = "\n\n".join(part for part in system_parts if part)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": query},
    ]
    return system_prompt, messages
def get_water_need_prediction_insight(
    *,
    farm_uuid: str,
    prediction_payload: dict[str, Any],
    query: str | None = None,
) -> dict[str, Any]:
    """Produce an LLM interpretation of a short-term water-need prediction.

    A deterministic fallback is computed first and embedded in the structured
    context given to the model; on LLM failure the fallback is returned
    (``raw_response`` None). Missing keys in a successful LLM answer are
    filled from the fallback.

    Raises:
        ValueError: when the farm is unknown.
    """
    cfg = load_rag_config()
    service, client, model = _build_service_client(cfg)
    farm_details = _load_farm_or_error(farm_uuid)
    fallback = _fallback_from_payload(prediction_payload)
    user_query = query or "نياز آبي کوتاه مدت اين مزرعه را تفسير کن و اقدام عملياتي پيشنهاد بده."
    structured_context = {
        "farm_uuid": farm_uuid,
        "prediction_payload": prediction_payload,
        "fallback_summary": fallback,
    }
    rag_context = build_rag_context(
        query=user_query,
        sensor_uuid=farm_uuid,
        config=cfg,
        kb_name=KB_NAME,
        service_id=SERVICE_ID,
        farm_details=farm_details,
    )
    system_prompt, messages = _build_messages(
        service=service,
        cfg=cfg,
        query=user_query,
        rag_context=rag_context,
        structured_context=structured_context,
    )
    audit_log = _create_audit_log(
        farm_uuid=farm_uuid,
        service_id=SERVICE_ID,
        model=model,
        query=user_query,
        system_prompt=system_prompt,
        messages=messages,
    )
    try:
        response = client.chat.completions.create(model=model, messages=messages)
        raw = response.choices[0].message.content.strip()
        parsed = _clean_json(raw)
        _complete_audit_log(audit_log, raw)
    except Exception as exc:
        logger.error("Water need prediction insight failed for %s: %s", farm_uuid, exc)
        _fail_audit_log(audit_log, str(exc), json.dumps(fallback, ensure_ascii=False))
        return {
            **fallback,
            "farm_uuid": farm_uuid,
            "knowledge_base": KB_NAME,
            "tone_file": service.tone_file,
            "raw_response": None,
        }
    if not parsed:
        # NOTE: aliases `fallback`; mutations below also touch it (unused after).
        parsed = fallback
    parsed.setdefault("summary", fallback["summary"])
    parsed.setdefault("irrigation_outlook", fallback["irrigation_outlook"])
    parsed.setdefault("recommended_action", fallback["recommended_action"])
    parsed.setdefault("risk_note", fallback["risk_note"])
    parsed.setdefault("confidence", fallback["confidence"])
    parsed["farm_uuid"] = farm_uuid
    parsed["knowledge_base"] = KB_NAME
    parsed["tone_file"] = service.tone_file
    parsed["raw_response"] = raw
    return parsed