# File: aiagent/backend/scripts/debug_cache_query.py
"""单次执行后打印 cache-query 与 llm-unified 输出,用于排查记忆。"""
import json
import requests
B = "http://127.0.0.1:8037"
AID = "688c2c41-dcd1-4285-b193-6bed00c485c2"
UID = "debug_uid_fresh_99"
MSG = "我叫李小红"
r = requests.post(
B + "/api/v1/auth/login",
data={"username": "admin", "password": "123456"},
headers={"Content-Type": "application/x-www-form-urlencoded"},
timeout=15,
)
h = {"Authorization": f"Bearer {r.json()['access_token']}", "Content-Type": "application/json"}
eid = requests.post(
B + "/api/v1/executions",
headers=h,
json={"agent_id": AID, "input_data": {"query": MSG, "USER_INPUT": MSG, "user_id": UID}},
timeout=30,
).json()["id"]
import time
for _ in range(90):
d = requests.get(f"{B}/api/v1/executions/{eid}", headers=h, timeout=60).json()
if d["status"] not in ("pending", "running"):
break
time.sleep(0.6)
od = d.get("output_data") or {}
nr = od.get("node_results") or {}
cq = nr.get("cache-query") or {}
llm = nr.get("llm-unified") or {}
print("cache-query memory user_profile:", (cq.get("memory") or {}).get("user_profile"))
print("llm output:", (llm.get("output") or "")[:600])