1038 lines
46 KiB
Python
1038 lines
46 KiB
Python
import json
|
||
from pathlib import Path
|
||
import re
|
||
from datetime import datetime
|
||
import unicodedata
|
||
|
||
import httpx
|
||
from sqlalchemy import select
|
||
from sqlalchemy.orm import Session
|
||
|
||
from app.config import get_settings
|
||
from app.db import AuditLogORM, DEFAULT_TOOLS, SecretORM, SettingORM, ToolStateORM
|
||
from app.automation.store import AutomationService
|
||
from app.llm.ollama_client import OllamaClient
|
||
from app.llm.planner import build_prompt_context
|
||
from app.memory.store import MemoryService
|
||
from app.profile.store import UserProfileService
|
||
from app.second_brain.store import SecondBrainService
|
||
from app.models import RuntimeSettings
|
||
from app.static_templates import get_game_template_hint
|
||
from app.telegram.auth import is_authorized
|
||
from app.tools.base import Tool
|
||
from app.tools.registry import build_tools
|
||
|
||
|
||
class WiseClawOrchestrator:
|
||
def __init__(self, session: Session) -> None:
    """Bind the orchestrator and its domain services to one DB session.

    Every service shares the same SQLAlchemy session so that a single
    commit at the end of a handler persists a whole conversational turn.
    """
    self.session = session
    self.profiles = UserProfileService(session)
    self.automations = AutomationService(session)
    self.memory = MemoryService(session)
    self.second_brain = SecondBrainService(session)
|
||
|
||
async def handle_message_payload(self, telegram_user_id: int, text: str) -> dict[str, object]:
    """Adapter around handle_text_message returning a payload dict (no photos)."""
    reply = await self.handle_text_message(telegram_user_id, text)
    return {"text": reply, "photos": []}
|
||
|
||
def get_runtime_settings(self) -> RuntimeSettings:
    """Assemble a RuntimeSettings snapshot from the settings and tool-state tables.

    Raises KeyError if a required setting row is missing from the DB.
    """
    stored: dict[str, str] = {}
    for item in self.session.scalars(select(SettingORM)):
        stored[item.key] = item.value

    tool_states: dict[str, bool] = {}
    for record in self.session.scalars(select(ToolStateORM).order_by(ToolStateORM.name.asc())):
        tool_states[record.name] = record.enabled

    # DB rows override the defaults; tools without a row keep DEFAULT_TOOLS values.
    tool_payload = [
        {"name": name, "enabled": tool_states.get(name, default_enabled)}
        for name, default_enabled in DEFAULT_TOOLS.items()
    ]
    return RuntimeSettings(
        terminal_mode=int(stored["terminal_mode"]),
        search_provider=stored["search_provider"],
        model_provider=stored["model_provider"],
        local_base_url=stored["local_base_url"],
        local_model=stored["local_model"],
        zai_model=stored["zai_model"],
        anythingllm_base_url=stored["anythingllm_base_url"],
        anythingllm_workspace_slug=stored["anythingllm_workspace_slug"],
        tools=tool_payload,
    )
|
||
|
||
async def handle_text_message(self, telegram_user_id: int, text: str) -> str:
    """Main entry point for an inbound Telegram text message.

    Routing priority: authorization check, slash-command handlers, active
    interactive flows (onboarding / automation wizard / note capture), then
    the LLM pipeline.  Every early-return path commits the session itself.
    """
    if not is_authorized(self.session, telegram_user_id):
        return "This Telegram user is not authorized for WiseClaw."

    normalized = text.strip()
    # Each command handler returns None when the text is not its command.
    command_response = self._handle_profile_command(telegram_user_id, normalized)
    if command_response is not None:
        self.session.commit()
        return command_response

    automation_response = self._handle_automation_command(telegram_user_id, normalized)
    if automation_response is not None:
        self.session.commit()
        return automation_response

    second_brain_command = self._handle_second_brain_command(telegram_user_id, normalized)
    if second_brain_command is not None:
        self.session.commit()
        return second_brain_command

    # Interactive flows: when one is active, this message is an answer to it.
    if self.profiles.is_onboarding_active(telegram_user_id):
        if normalized.lower() in {"/iptal", "iptal", "cancel"}:
            self.session.add(
                AuditLogORM(category="profile", message=f"profile:onboarding-cancelled:{telegram_user_id}")
            )
            self.session.commit()
            return "Tanisma akisini durdurdum. Devam etmek istersen /tanisalim yazabilirsin."

        response, completed = self.profiles.answer_onboarding(telegram_user_id, normalized)
        if completed:
            # Persist a profile summary into long-term memory once onboarding finishes.
            summary = self.profiles.profile_memory_summary(telegram_user_id)
            if summary:
                self.memory.add_item(summary)
        self.session.commit()
        return response

    if self.automations.is_wizard_active(telegram_user_id):
        if normalized.lower() in {"/iptal", "iptal", "cancel"}:
            response = self.automations.cancel_wizard(telegram_user_id)
            self.session.commit()
            return response
        response, _completed = self.automations.answer_wizard(telegram_user_id, normalized)
        self.session.commit()
        return response

    if self.second_brain.is_capture_active(telegram_user_id):
        if normalized.lower() in {"/iptal", "iptal", "cancel"}:
            response = self.second_brain.cancel_capture(telegram_user_id)
            self.session.commit()
            return response
        # parents[2] resolves to the repository root relative to this module.
        workspace_root = Path(__file__).resolve().parents[2]
        response = await self.second_brain.save_note_and_sync(telegram_user_id, normalized, workspace_root)
        self.session.commit()
        return response

    # Regular chat: record the user turn, generate, record the assistant turn.
    self.memory.add_item(f"user:{telegram_user_id}:{text}")
    runtime = self.get_runtime_settings()
    try:
        response = await self._generate_response(telegram_user_id, text, runtime)
    except Exception as exc:
        # LLM failures are reported to the user instead of crashing the handler.
        response = self._format_llm_error(exc, runtime.model_provider)

    self.memory.add_item(f"assistant:{response}")
    self.session.add(AuditLogORM(category="telegram", message=f"telegram:{telegram_user_id}:{text}"))
    self.session.commit()
    return response
|
||
|
||
async def _generate_response(self, telegram_user_id: int, text: str, runtime: RuntimeSettings) -> str:
    """Generate a reply, falling back to the local provider when Z.AI fails.

    Whether an exception qualifies for fallback is decided by
    _should_fallback_to_local; anything else is re-raised unchanged.
    """
    try:
        return await self._generate_response_once(telegram_user_id, text, runtime)
    except Exception as exc:
        if not self._should_fallback_to_local(exc, runtime):
            raise
        local_runtime = runtime.model_copy(update={"model_provider": "local"})
        local_reply = await self._generate_response_once(telegram_user_id, text, local_runtime)
        prefix = "Z.AI erisilemedi, bu istek icin otomatik olarak local modele dustum.\n\n"
        return prefix + local_reply
|
||
|
||
async def _generate_response_once(self, telegram_user_id: int, text: str, runtime: RuntimeSettings) -> str:
    """Single generation attempt: build context, pick a client, run the tool loop.

    Raises whatever the LLM client raises; fallback handling lives in
    _generate_response.
    """
    # parents[2] resolves to the repository root relative to this module.
    workspace_root = Path(__file__).resolve().parents[2]
    tools = build_tools(runtime, workspace_root, self.session)
    # Second-brain context is fetched eagerly so it can be embedded in the prompt.
    second_brain_context = await self._prefetch_second_brain_context(text, tools)
    context = build_prompt_context(
        message=text,
        runtime=runtime,
        memory=self.memory.latest_items(limit=5),
        workspace_root=str(workspace_root),
        profile_preferences=self.profiles.build_prompt_profile(telegram_user_id),
        second_brain_context=second_brain_context,
    )
    client = self._build_llm_client(runtime)
    messages: list[dict[str, object]] = [
        {"role": "system", "content": str(context["system"])},
        {"role": "user", "content": text},
    ]
    # Static-app requests bypass the chat loop and use the dedicated generator.
    if self._looks_like_static_app_request(text):
        return await self._handle_static_app_request(client, self._active_model(runtime), text, workspace_root, tools)
    return await self._run_tool_loop(client, self._active_model(runtime), messages, tools)
|
||
|
||
def _handle_profile_command(self, telegram_user_id: int, text: str) -> str | None:
|
||
command = text.split(maxsplit=1)[0].lower()
|
||
if "@" in command:
|
||
command = command.split("@", 1)[0]
|
||
if command in {"/tanışalım", "/tanisalim"}:
|
||
return self.profiles.start_onboarding(telegram_user_id)
|
||
if command in {"/profilim"}:
|
||
return self.profiles.render_profile_summary(telegram_user_id)
|
||
if command in {"/tercihlerim"}:
|
||
return self.profiles.render_preferences_summary(telegram_user_id)
|
||
if command in {"/tanışalım_sifirla", "/tanisalim_sifirla"}:
|
||
return self.profiles.reset_onboarding(telegram_user_id)
|
||
return None
|
||
|
||
def _handle_automation_command(self, telegram_user_id: int, text: str) -> str | None:
|
||
command_parts = text.split(maxsplit=1)
|
||
command = command_parts[0].lower()
|
||
if "@" in command:
|
||
command = command.split("@", 1)[0]
|
||
argument = command_parts[1].strip() if len(command_parts) > 1 else ""
|
||
|
||
if command == "/otomasyon_ekle":
|
||
return self.automations.start_wizard(telegram_user_id)
|
||
if command == "/otomasyonlar":
|
||
return self.automations.render_automation_list(telegram_user_id)
|
||
if command == "/otomasyon_durdur":
|
||
automation_id = self._parse_numeric_argument(argument)
|
||
return (
|
||
self.automations.pause_automation(telegram_user_id, automation_id)
|
||
if automation_id is not None
|
||
else "Kullanim: /otomasyon_durdur <id>"
|
||
)
|
||
if command == "/otomasyon_baslat":
|
||
automation_id = self._parse_numeric_argument(argument)
|
||
return (
|
||
self.automations.resume_automation(telegram_user_id, automation_id)
|
||
if automation_id is not None
|
||
else "Kullanim: /otomasyon_baslat <id>"
|
||
)
|
||
if command == "/otomasyon_sil":
|
||
automation_id = self._parse_numeric_argument(argument)
|
||
return (
|
||
self.automations.delete_automation(telegram_user_id, automation_id)
|
||
if automation_id is not None
|
||
else "Kullanim: /otomasyon_sil <id>"
|
||
)
|
||
return None
|
||
|
||
def _handle_second_brain_command(self, telegram_user_id: int, text: str) -> str | None:
|
||
command = text.split(maxsplit=1)[0].lower()
|
||
if "@" in command:
|
||
command = command.split("@", 1)[0]
|
||
if command == "/notlarima_ekle":
|
||
return self.second_brain.start_capture(telegram_user_id)
|
||
return None
|
||
|
||
def _parse_numeric_argument(self, value: str) -> int | None:
|
||
try:
|
||
return int(value)
|
||
except ValueError:
|
||
return None
|
||
|
||
async def _run_tool_loop(
    self,
    client: OllamaClient,
    model: str,
    messages: list[dict[str, object]],
    tools: dict[str, Tool],
) -> str:
    """Drive the LLM tool-calling loop, with keyword-routed fast paths.

    Fast paths (Apple Notes, second-brain lookup, browser task, Brave search)
    execute the matched tool directly and answer without a model round-trip
    where possible; otherwise the chat loop runs for at most four steps.

    Fixes: removed an unreachable ``if note_request is None`` branch that was
    nested inside ``if note_request is not None`` (dead code); the loop
    counter is unused and is now named ``_step``.
    """
    tool_defs = [tool.definition() for tool in tools.values()]
    user_text = str(messages[-1].get("content", ""))
    normalized_user_text = self._normalize_intent_text(user_text)
    note_request = self._extract_apple_note_request(user_text) if "apple_notes" in tools else None
    preferred_tool = self._preferred_tool_name(user_text, tools)
    second_brain_request = (
        self._looks_like_second_brain_request(normalized_user_text) if "second_brain" in tools else False
    )

    if note_request is not None:
        # Fast path: create the note directly instead of round-tripping the LLM.
        tool_result = await self._execute_tool_call(tools, "apple_notes", note_request)
        self._log_tool_event("apple_notes", note_request, tool_result)
        if tool_result.get("status") != "ok":
            message = str(tool_result.get("message", "Apple Notes command failed."))
            return f"Apple Notes notu olusturamadim: {message}"
        title = str(tool_result.get("title", note_request["title"]))
        return f'Apple Notes uygulamasinda "{title}" baslikli yeni bir not olusturuldu.'

    if second_brain_request:
        # Fast path: answer directly from the AnythingLLM workspace context.
        second_brain_args = {"query": user_text, "mode": "query"}
        tool_result = await self._execute_tool_call(tools, "second_brain", second_brain_args)
        self._log_tool_event("second_brain", second_brain_args, tool_result)
        if tool_result.get("status") != "ok":
            message = str(tool_result.get("message", "AnythingLLM lookup failed."))
            return f"Ikinci beyin baglamini cekemedim: {message}"
        return self._render_second_brain_answer(tool_result)

    if preferred_tool == "browser_use":
        tool_payload: dict[str, object] = {
            "task": user_text,
            "max_steps": 20,
        }
        start_url = self._extract_url(user_text)
        if start_url:
            tool_payload["start_url"] = start_url
        tool_result = await self._execute_tool_call(tools, "browser_use", tool_payload)
        self._log_tool_event("browser_use", tool_payload, tool_result)
        if tool_result.get("status") != "ok":
            message = str(tool_result.get("message", "browser_use failed."))
            return f"Tarayici gorevini tamamlayamadim: {message}"
        final_result = str(tool_result.get("final_result", "")).strip()
        if final_result:
            return final_result
        # No final summary: fall back to the last few extracted snippets.
        extracted = tool_result.get("extracted_content", [])
        if isinstance(extracted, list) and extracted:
            return "\n\n".join(str(item) for item in extracted[-3:])
        return "Tarayici gorevi tamamlandi, ancak ozetlenecek bir sonuc uretilmedi."

    if preferred_tool == "brave_search":
        brave_args = {
            "query": str(messages[-1].get("content", "")),
            "count": 5,
            "mode": "images" if self._looks_like_image_request(user_text) else "web",
        }
        brave_args = self._normalize_brave_search_arguments(user_text, brave_args)
        tool_result = await self._execute_tool_call(
            tools,
            preferred_tool,
            brave_args,
        )
        self._log_tool_event(preferred_tool, brave_args, tool_result)
        if tool_result.get("status") == "error":
            message = str(tool_result.get("message", "Brave Search failed."))
            return f"Web aramasi yapamadim: {message}"
        if brave_args["mode"] == "images":
            images = tool_result.get("images", [])
            # Image answers also run a web search so the summary has text context.
            web_result = await self._execute_tool_call(
                tools,
                preferred_tool,
                self._normalize_brave_search_arguments(
                    user_text,
                    {"query": brave_args["query"], "count": 5, "mode": "web"},
                ),
            )
            self._log_tool_event(preferred_tool, {"query": brave_args["query"], "count": 5, "mode": "web"}, web_result)
            if isinstance(images, list) and images:
                summary = self._build_brave_combo_summary(images, web_result)
                media = self._build_image_media_items(images)
                if media:
                    return self._encode_media_response(summary, media)
                return summary
            if web_result.get("status") == "ok":
                return self._build_brave_combo_summary([], web_result)
        # Hand the prefetched search result to the model as a completed tool call.
        messages.append(
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": "prefetched-brave-search",
                        "type": "function",
                        "function": {
                            "name": preferred_tool,
                            "arguments": json.dumps(
                                {
                                    "query": str(messages[-1].get("content", "")),
                                    "count": 5,
                                }
                            ),
                        },
                    }
                ],
            }
        )
        messages.append(
            {
                "role": "tool",
                "tool_call_id": "prefetched-brave-search",
                "content": json.dumps(tool_result),
            }
        )

    # Bounded tool-calling loop: at most four model round-trips.
    for _step in range(4):
        completion = await client.chat_completion(
            model=model,
            messages=messages,
            tools=tool_defs or None,
        )
        tool_calls = completion["tool_calls"]

        if not tool_calls:
            content = completion["content"].strip()
            return content or "WiseClaw did not return a response."

        messages.append(
            {
                "role": "assistant",
                "content": completion["content"],
                "tool_calls": [
                    {
                        "id": call["id"],
                        "type": "function",
                        "function": {
                            "name": call["name"],
                            "arguments": json.dumps(call["arguments"]),
                        },
                    }
                    for call in tool_calls
                ],
            }
        )

        for call in tool_calls:
            # Brave queries get normalized (e.g. year handling) before execution.
            normalized_arguments = (
                self._normalize_brave_search_arguments(user_text, call["arguments"])
                if call["name"] == "brave_search"
                else call["arguments"]
            )
            result = await self._execute_tool_call(tools, call["name"], normalized_arguments)
            self._log_tool_event(call["name"], normalized_arguments, result)
            messages.append(
                {
                    "role": "tool",
                    "tool_call_id": call["id"],
                    "content": json.dumps(result),
                }
            )

    return "WiseClaw stopped after too many tool steps."
|
||
|
||
async def _execute_tool_call(self, tools: dict[str, Tool], name: str, arguments: dict[str, object]) -> dict[str, object]:
    """Run tool *name* with *arguments*; failures become error dicts, never raise."""
    tool = tools.get(name)
    if tool is None:
        return {"tool": name, "status": "error", "message": f"Tool is not enabled: {name}"}
    try:
        result = await tool.run(arguments)
    except Exception as exc:
        # Surface tool failures to the loop as structured errors.
        return {"tool": name, "status": "error", "message": str(exc)}
    return result
|
||
|
||
def _log_tool_event(self, tool_name: str, arguments: dict[str, object], result: dict[str, object]) -> None:
    """Audit-log a tool invocation; for search/second-brain also log the result."""
    self.session.add(
        AuditLogORM(
            category="tool",
            message=f"tool:{tool_name}:{json.dumps(arguments, ensure_ascii=False)}",
        )
    )
    # Only these two tools produce results worth persisting (truncated).
    if tool_name in ("brave_search", "second_brain"):
        self.session.add(
            AuditLogORM(
                category="tool",
                message=f"tool_result:{tool_name}:{self._truncate_log_payload(result)}",
            )
        )
|
||
|
||
def _truncate_log_payload(self, payload: dict[str, object], limit: int = 4000) -> str:
|
||
serialized = json.dumps(payload, ensure_ascii=False)
|
||
if len(serialized) <= limit:
|
||
return serialized
|
||
return serialized[: limit - 3] + "..."
|
||
|
||
def _looks_like_static_app_request(self, text: str) -> bool:
|
||
normalized = text.lower()
|
||
has_port = bool(re.search(r":\d{2,5}", normalized))
|
||
has_build_intent = any(term in normalized for term in ("yap", "oluştur", "olustur", "create", "build"))
|
||
mentions_stack = "html" in normalized and "css" in normalized and "js" in normalized
|
||
mentions_local_app = any(
|
||
term in normalized
|
||
for term in (
|
||
"localhost",
|
||
"localhos",
|
||
"yerelde",
|
||
"localde",
|
||
"responsive",
|
||
"mobil",
|
||
"mobile",
|
||
"oyun",
|
||
"game",
|
||
"app",
|
||
"uygulama",
|
||
"website",
|
||
"web sitesi",
|
||
"site",
|
||
)
|
||
)
|
||
return has_port and has_build_intent and (mentions_stack or mentions_local_app)
|
||
|
||
async def _handle_static_app_request(
    self,
    client: OllamaClient,
    model: str,
    text: str,
    workspace_root: Path,
    tools: dict[str, Tool],
) -> str:
    """Generate a static HTML/CSS/JS app, write it to disk, and serve it locally.

    Asks the model for a JSON spec (summary + three files), writes the files
    under generated_apps/app-<port> via the files tool, starts a background
    ``python3 -m http.server`` via the terminal tool, then verifies the URL.
    """
    files_tool = tools.get("files")
    terminal_tool = tools.get("terminal")
    if files_tool is None or terminal_tool is None:
        return "Static app workflow requires both files and terminal tools to be enabled."

    # The requested port (":8080" style) names the app directory; 9990 is the default.
    port_match = re.search(r":(\d{2,5})", text)
    port = int(port_match.group(1)) if port_match else 9990
    app_dir = f"generated_apps/app-{port}"
    is_game_request = self._looks_like_game_request(text)
    game_template_hint = get_game_template_hint(text) if is_game_request else ""

    prompt = (
        "Return only valid JSON with this exact shape: "
        '{"summary":"short summary","index_html":"...","style_css":"...","script_js":"..."}.\n'
        "Build a polished responsive static web app that satisfies this request.\n"
        "Requirements:\n"
        "- Use plain HTML, CSS, and JavaScript only.\n"
        "- Make it work well on both mobile and desktop browsers.\n"
        "- If the request is for a game, include complete gameplay, restart behavior, score display, and clear win/lose or game-over states.\n"
        "- For mobile-focused requests, include touch-friendly controls and large tap targets.\n"
        "- Keep the UI visually clean and intentional, not placeholder-like.\n"
        "- Put all markup in index.html, all styles in style.css, and all behavior in script.js.\n"
        "- Ensure the generated files can run directly in a local static server without any build step.\n"
        "- Do not leave TODOs, placeholders, or missing logic.\n"
        f"{self._game_prompt_requirements(text) if is_game_request else ''}"
        f"{game_template_hint}"
        "User request:\n"
        f"{text}\n"
        "Do not include markdown fences."
    )
    raw = await client.chat(
        model=model,
        system_prompt=(
            "You generate complete production-ready static web app files. "
            "Return JSON only. Prefer strong usability, responsive layout, and complete functionality. "
            f"{self._game_system_prompt() if is_game_request else ''}"
        ),
        user_message=prompt,
    )
    # Parse (and if necessary repair) the model's JSON spec.
    spec = await self._extract_json_object(client, model, raw)

    for path, content in (
        (f"{app_dir}/index.html", spec.get("index_html", "")),
        (f"{app_dir}/style.css", spec.get("style_css", "")),
        (f"{app_dir}/script.js", spec.get("script_js", "")),
    ):
        result = await files_tool.run({"action": "write", "path": path, "content": content})
        if result.get("status") != "ok":
            return f"Failed to write app file {path}: {result.get('message', 'unknown error')}"

    serve = await terminal_tool.run(
        {
            "command": f"python3 -m http.server {port} -d {app_dir}",
            "background": True,
            "workdir": ".",
        }
    )
    if serve.get("status") != "ok":
        return f"App files were written, but the local server could not be started: {serve.get('message') or serve.get('reason')}"

    # NOTE(review): the LAN IP below is hardcoded — presumably this host's
    # address; consider deriving it from configuration instead.
    url = f"http://192.168.1.124:{port}/"
    verification = await self._verify_url(url)
    summary = spec.get("summary", "Static web app created.")
    if verification:
        return (
            f"{summary}\n\n"
            f"Files written to `{workspace_root / app_dir}`.\n"
            f"Server started at {url}"
        )
    return (
        f"{summary}\n\n"
        f"Files written to `{workspace_root / app_dir}`.\n"
        f"Server start was requested for {url}, but verification failed. "
        f"Log: {serve.get('log_path', 'n/a')}"
    )
|
||
|
||
def _looks_like_game_request(self, text: str) -> bool:
|
||
lowered = text.lower()
|
||
return any(
|
||
term in lowered
|
||
for term in (
|
||
"snake",
|
||
"oyun",
|
||
"game",
|
||
"arcade",
|
||
"tetris",
|
||
"pong",
|
||
"platformer",
|
||
)
|
||
)
|
||
|
||
def _game_system_prompt(self) -> str:
|
||
return (
|
||
"When the request is for a game, optimize for complete gameplay, reliable controls, and a satisfying first-run experience."
|
||
)
|
||
|
||
def _game_prompt_requirements(self, text: str) -> str:
    """Compose game-specific prompt requirement lines for *text* as one string."""
    lowered = text.lower()
    requirements: list[str] = [
        "- This is a game request. Build a fully playable game, not a mockup.\n",
        "- Prefer a single-screen experience that starts immediately after loading.\n",
        "- Include a visible score and a restart action.\n",
        "- Add both keyboard controls and touch controls when mobile use is implied.\n",
        "- Make touch controls fixed, thumb-friendly, and obvious on small screens.\n",
        "- Prevent accidental page scrolling while using touch controls.\n",
        "- Keep animation smooth and game state deterministic.\n",
    ]
    # Snake gets additional, genre-specific requirements.
    if "snake" in lowered:
        requirements += [
            "- For Snake specifically, use a visible grid or board area.\n",
            "- Support swipe or large directional buttons on mobile.\n",
            "- Prevent immediate 180-degree turns.\n",
            "- Speed should feel fair on mobile and desktop.\n",
            "- Show current score and best-effort game over feedback.\n",
        ]
    requirements += self._game_engine_prompt_requirements(lowered)
    return "".join(requirements)
|
||
|
||
def _game_engine_prompt_requirements(self, lowered: str) -> list[str]:
|
||
if "three.js" in lowered or "threejs" in lowered or "webgl" in lowered or "3d" in lowered:
|
||
return [
|
||
"- Use Three.js via a browser-ready CDN import or module import that works directly in a static server.\n",
|
||
"- Build a true 3D scene with camera, lighting, animation loop, and responsive renderer sizing.\n",
|
||
"- Keep the scene performant on mobile devices by limiting geometry and draw complexity.\n",
|
||
"- Add touch-friendly camera or gameplay controls if the request implies mobile play.\n",
|
||
]
|
||
if "phaser" in lowered:
|
||
return [
|
||
"- Use Phaser from a browser-ready CDN and implement the game with Phaser scenes.\n",
|
||
"- Structure the gameplay with preload/create/update flow and working asset-free primitives if needed.\n",
|
||
"- Ensure the canvas resizes well on mobile and desktop.\n",
|
||
]
|
||
if "pixi" in lowered or "pixijs" in lowered:
|
||
return [
|
||
"- Use PixiJS from a browser-ready CDN and render gameplay through a responsive canvas.\n",
|
||
"- Keep sprite logic and animation self-contained in script.js.\n",
|
||
]
|
||
if "babylon" in lowered or "babylon.js" in lowered:
|
||
return [
|
||
"- Use Babylon.js from a browser-ready CDN with a complete 3D scene and render loop.\n",
|
||
"- Include camera, light, and mobile-safe rendering defaults.\n",
|
||
]
|
||
if "canvas" in lowered:
|
||
return [
|
||
"- Use the HTML canvas as the main gameplay surface.\n",
|
||
"- Keep rendering and game loop code explicit and self-contained in script.js.\n",
|
||
]
|
||
return [
|
||
"- Prefer plain DOM or canvas unless the user explicitly asked for a specific game library.\n",
|
||
]
|
||
|
||
async def _extract_json_object(self, client: OllamaClient, model: str, raw: str) -> dict[str, str]:
    """Extract the outermost JSON object from *raw*, with one model repair pass.

    Returns the parsed object.  Raises ValueError when even the repaired text
    contains no parseable JSON object, or when the payload is not a dict.
    """
    start = raw.find("{")
    end = raw.rfind("}")
    if start == -1 or end == -1 or end <= start:
        # No brace pair at all: ask the model to repair, then retry the scan.
        repaired = await self._repair_json_response(client, model, raw)
        start = repaired.find("{")
        end = repaired.rfind("}")
        if start == -1 or end == -1 or end <= start:
            raise ValueError("Model did not return JSON.")
        raw = repaired
    try:
        payload = json.loads(raw[start : end + 1])
    except json.JSONDecodeError:
        # Braces were present but the slice did not parse; one repair pass.
        repaired = await self._repair_json_response(client, model, raw)
        start = repaired.find("{")
        end = repaired.rfind("}")
        if start == -1 or end == -1 or end <= start:
            raise ValueError("Model returned malformed JSON.")
        payload = json.loads(repaired[start : end + 1])
    if not isinstance(payload, dict):
        raise ValueError("Model JSON payload is not an object.")
    return payload
|
||
|
||
async def _repair_json_response(self, client: OllamaClient, model: str, raw: str) -> str:
    """Ask the model to rewrite *raw* into the expected app-spec JSON shape."""
    instructions = (
        "Repair the following malformed JSON so that it becomes valid JSON with exactly this shape: "
        '{"summary":"short summary","index_html":"...","style_css":"...","script_js":"..."}.\n'
        "Return JSON only. Do not add markdown fences.\n\n"
        f"{raw}"
    )
    return await client.chat(
        model=model,
        system_prompt="You repair malformed JSON. Return valid JSON only.",
        user_message=instructions,
    )
|
||
|
||
async def _verify_url(self, url: str) -> bool:
    """Best-effort GET to confirm *url* responds; False on any HTTP error."""
    try:
        async with httpx.AsyncClient(timeout=3.0) as http_client:
            reply = await http_client.get(url)
    except httpx.HTTPError:
        return False
    return reply.is_success
|
||
|
||
def _preferred_tool_name(self, text: str, tools: dict[str, Tool]) -> str | None:
    """Keyword-route the message to an enabled tool; None lets the model decide.

    Note: *lowered* comes from _normalize_intent_text, which strips combining
    diacritics, so keyword lists must include ASCII-folded forms.  Fix: added
    "not olustur" — the accented "not oluştur" keyword could never match the
    normalized text.
    """
    lowered = self._normalize_intent_text(text)

    def enabled_and_mentions(tool_name: str, keywords: tuple[str, ...]) -> bool:
        # A tool is preferred only when it is enabled AND a keyword matches.
        return tool_name in tools and any(keyword in lowered for keyword in keywords)

    if enabled_and_mentions("brave_search", ("web", "arama", "search", "internet")):
        return "brave_search"
    if "browser_use" in tools and self._looks_like_browser_task(lowered):
        return "browser_use"
    if enabled_and_mentions("apple_notes", ("apple notes", "notlar", "notes", "not oluştur", "not olustur", "yeni not", "note")):
        return "apple_notes"
    if enabled_and_mentions("files", ("dosya", "readme", ".md", ".py", "klasor", "directory")):
        return "files"
    if enabled_and_mentions("terminal", ("terminal", "komut", "command", "dizin", "pwd")):
        return "terminal"
    return None
|
||
|
||
def _looks_like_browser_task(self, lowered: str) -> bool:
|
||
browser_terms = ("site", "browser", "tarayici", "sayfa", ".com", "http://", "https://")
|
||
browser_actions = (
|
||
"tıkla",
|
||
"tikla",
|
||
"click",
|
||
"fill",
|
||
"type",
|
||
"press",
|
||
"listele",
|
||
"list",
|
||
"find",
|
||
"bul",
|
||
"extract",
|
||
"çıkar",
|
||
"cikar",
|
||
"getir",
|
||
"compare",
|
||
"karşılaştır",
|
||
"karsilastir",
|
||
"ilk 10",
|
||
"first 10",
|
||
"son 10",
|
||
"last 10",
|
||
"latest",
|
||
"recent",
|
||
"fiyat",
|
||
"price",
|
||
"marka",
|
||
"model",
|
||
"notify",
|
||
"notification",
|
||
"notifications",
|
||
"bildirim",
|
||
"bildirimler",
|
||
"open",
|
||
"ac",
|
||
"aç",
|
||
"git",
|
||
"go to",
|
||
)
|
||
return any(term in lowered for term in browser_terms) and any(term in lowered for term in browser_actions)
|
||
|
||
def _looks_like_second_brain_request(self, lowered: str) -> bool:
|
||
return any(
|
||
term in lowered
|
||
for term in (
|
||
"ikinci beyn",
|
||
"second brain",
|
||
"anythingllm",
|
||
"notlarım",
|
||
"notlarim",
|
||
"dokümanlarım",
|
||
"dokumanlarim",
|
||
"belgelerim",
|
||
"arşivim",
|
||
"arsivim",
|
||
"workspace'imde",
|
||
"workspaceimde",
|
||
"kaydettiğim",
|
||
"kaydettigim",
|
||
)
|
||
)
|
||
|
||
def _looks_like_image_request(self, text: str) -> bool:
    """True when the message asks for pictures rather than plain web results."""
    lowered = self._normalize_intent_text(text)
    # "görsel" cannot occur in normalized text (diacritics are stripped);
    # kept for parity with the original keyword list — "gorsel" covers it.
    image_terms = ("resim", "gorsel", "görsel", "foto", "image", "images", "picture")
    return any(term in lowered for term in image_terms)
|
||
|
||
def _normalize_intent_text(self, text: str) -> str:
|
||
lowered = text.casefold()
|
||
decomposed = unicodedata.normalize("NFKD", lowered)
|
||
return "".join(char for char in decomposed if not unicodedata.combining(char))
|
||
|
||
async def _prefetch_second_brain_context(self, text: str, tools: dict[str, Tool]) -> str:
    """Fetch AnythingLLM context up front when the message references the second brain.

    Returns an empty string when the tool is disabled or the message does not
    look like a second-brain request; failures come back as a note line.
    """
    if "second_brain" not in tools:
        return ""
    if not self._looks_like_second_brain_request(self._normalize_intent_text(text)):
        return ""
    payload = {"query": text, "mode": "query"}
    result = await self._execute_tool_call(tools, "second_brain", payload)
    self._log_tool_event("second_brain", payload, result)
    if result.get("status") != "ok":
        return f"- Second-brain retrieval failed: {result.get('message', 'Unknown error.')}"
    return self._format_second_brain_context(result)
|
||
|
||
def _format_second_brain_context(self, result: dict[str, object]) -> str:
|
||
lines: list[str] = []
|
||
context = str(result.get("context", "")).strip()
|
||
if context:
|
||
lines.append(context)
|
||
sources = result.get("sources", [])
|
||
if isinstance(sources, list) and sources:
|
||
lines.append("Sources:")
|
||
for item in sources[:5]:
|
||
if not isinstance(item, dict):
|
||
continue
|
||
title = str(item.get("title", "")).strip() or "Untitled source"
|
||
url = str(item.get("url", "")).strip()
|
||
snippet = str(item.get("snippet", "")).strip()
|
||
line = f"- {title}"
|
||
if url:
|
||
line += f" -> {url}"
|
||
lines.append(line)
|
||
if snippet:
|
||
lines.append(f" {snippet[:240]}")
|
||
return "\n".join(lines).strip()
|
||
|
||
def _render_second_brain_answer(self, result: dict[str, object]) -> str:
    """Turn a second-brain tool result into a direct user-facing answer."""
    answer = str(result.get("context", "")).strip()
    if not answer:
        return "Ikinci beyinden bir baglam buldum ama metin cevabi bos geldi."
    return self._cleanup_second_brain_answer(answer)
|
||
|
||
def _cleanup_second_brain_answer(self, text: str) -> str:
    """Condense a raw second-brain context blob into one short user-facing line."""
    cleaned = text.strip()
    # Drop boilerplate lead-ins the workspace prompt tends to produce.
    cleaned = re.sub(r"^patron,\s*notlara göre:\s*", "", cleaned, flags=re.IGNORECASE)
    cleaned = re.sub(r"^notlara göre:\s*", "", cleaned, flags=re.IGNORECASE)
    cleaned = re.sub(r"\n{2,}", "\n", cleaned)
    # Strip bullet markers and drop blank lines.
    lines = [line.strip(" -*") for line in cleaned.splitlines() if line.strip()]
    if not lines:
        return cleaned
    filtered: list[str] = []
    for line in lines:
        lowered = line.casefold()
        # "Ek not" ("additional note") lines are supplementary; skip them.
        if lowered.startswith("ek not"):
            continue
        filtered.append(line)
    if not filtered:
        filtered = lines
    if len(filtered) == 1:
        return filtered[0]
    # Special case: a "tarih" (date) line plus a "yer" (place) line collapse
    # into a single natural Turkish sentence.
    if len(filtered) >= 2 and any("tarih" in item.casefold() for item in filtered) and any("yer" in item.casefold() for item in filtered):
        date_value = ""
        place_value = ""
        for item in filtered:
            lowered = item.casefold()
            if "tarih" in lowered and ":" in item:
                date_value = item.split(":", 1)[1].strip()
            if "yer" in lowered and ":" in item:
                place_value = item.split(":", 1)[1].strip()
        if date_value and place_value:
            return f"{date_value} tarihinde {place_value}'de buluştun."
    # Otherwise keep only the first remaining line as the most salient answer.
    return filtered[0]
|
||
|
||
def _active_model(self, runtime: RuntimeSettings) -> str:
    """Model name for the currently selected provider."""
    if runtime.model_provider == "local":
        return runtime.local_model
    return runtime.zai_model
|
||
|
||
def _build_llm_client(self, runtime: RuntimeSettings) -> OllamaClient:
    """Create the chat client for the active provider (local or Z.AI)."""
    settings = get_settings()
    if runtime.model_provider == "zai":
        # A key stored in the DB wins over the environment-configured one.
        secret = self.session.get(SecretORM, "zai_api_key")
        api_key = secret.value if secret else settings.zai_api_key
        base_url = settings.zai_base_url
    else:
        api_key = ""
        base_url = runtime.local_base_url
    return OllamaClient(base_url=base_url, provider=runtime.model_provider, api_key=api_key)
|
||
|
||
def _extract_url(self, text: str) -> str:
|
||
match = re.search(r"https?://[^\s)]+", text)
|
||
if match:
|
||
return match.group(0)
|
||
domain_match = re.search(r"\b([a-z0-9-]+\.(?:com|net|org|io|ai|dev|app|co|co\.uk))(?:/[^\s]*)?\b", text, re.IGNORECASE)
|
||
if domain_match:
|
||
return f"https://{domain_match.group(0)}"
|
||
return ""
|
||
|
||
def _format_llm_error(self, exc: Exception, provider: str) -> str:
|
||
message = str(exc).strip() or "Unknown LLM error."
|
||
if "rate limit" in message.lower() or "too many requests" in message.lower() or "429" in message:
|
||
if provider == "zai":
|
||
return "Z.AI su anda istek sinirina takildi. Biraz bekleyip tekrar deneyin."
|
||
return "LLM endpointi su anda cok fazla istek aliyor. Biraz bekleyip tekrar deneyin."
|
||
if "authentication failed" in message.lower() or "api key" in message.lower():
|
||
if provider == "zai":
|
||
return "Z.AI API key gecersiz ya da eksik gorunuyor. Admin panelden anahtari kontrol edin."
|
||
return "LLM kimlik dogrulamasi basarisiz oldu."
|
||
if "timed out" in message.lower():
|
||
return "LLM istegi zaman asimina ugradi. Tekrar deneyin."
|
||
return f"WiseClaw configured LLM provider ile konusamadi: {message}"
|
||
|
||
def _normalize_brave_search_arguments(self, user_text: str, arguments: dict[str, object]) -> dict[str, object]:
|
||
normalized = dict(arguments)
|
||
query = str(normalized.get("query", "")).strip()
|
||
if not query:
|
||
return normalized
|
||
|
||
user_lower = user_text.lower()
|
||
user_has_year = bool(re.search(r"\b20\d{2}\b", user_text))
|
||
query_has_year = bool(re.search(r"\b20\d{2}\b", query))
|
||
current_year = str(datetime.now().year)
|
||
current_info_request = any(
|
||
term in user_lower
|
||
for term in ("bugün", "bugun", "today", "güncel", "guncel", "latest", "current", "son")
|
||
)
|
||
|
||
if not user_has_year and query_has_year:
|
||
if current_info_request:
|
||
query = re.sub(r"\b20\d{2}\b", current_year, query)
|
||
else:
|
||
query = re.sub(r"\b20\d{2}\b", "", query)
|
||
query = re.sub(r"\s{2,}", " ", query).strip()
|
||
|
||
normalized["query"] = query
|
||
return normalized
|
||
|
||
def _build_image_media_items(self, images: list[object], limit: int = 3) -> list[dict[str, str]]:
|
||
media: list[dict[str, str]] = []
|
||
for item in images:
|
||
if not isinstance(item, dict):
|
||
continue
|
||
chosen_url = ""
|
||
for candidate in (item.get("properties_url"), item.get("thumbnail"), item.get("url")):
|
||
url = str(candidate or "").strip()
|
||
if url.startswith("http://") or url.startswith("https://"):
|
||
chosen_url = url
|
||
break
|
||
if not chosen_url:
|
||
continue
|
||
title = str(item.get("title", "")).strip() or "Gorsel"
|
||
source = str(item.get("source", "")).strip()
|
||
caption = title
|
||
if source:
|
||
caption = f"{title}\nKaynak: {source}"
|
||
media.append({"url": chosen_url, "caption": caption[:900]})
|
||
if len(media) >= limit:
|
||
break
|
||
return media
|
||
|
||
def _build_brave_combo_summary(self, images: list[object], web_result: dict[str, object]) -> str:
|
||
lines = []
|
||
if images:
|
||
lines.append("Brave gorsel arama sonuclari:")
|
||
for item in images[:5]:
|
||
if not isinstance(item, dict):
|
||
continue
|
||
title = str(item.get("title", "")).strip() or "Gorsel"
|
||
source = str(item.get("source", "")).strip()
|
||
lines.append(f"- {title}" + (f" ({source})" if source else ""))
|
||
web_results = web_result.get("results", []) if isinstance(web_result, dict) else []
|
||
if isinstance(web_results, list) and web_results:
|
||
if lines:
|
||
lines.append("")
|
||
lines.append("Web sonuclari:")
|
||
for item in web_results[:3]:
|
||
if not isinstance(item, dict):
|
||
continue
|
||
title = str(item.get("title", "")).strip() or "Sonuc"
|
||
url = str(item.get("url", "")).strip()
|
||
lines.append(f"- {title}" + (f" -> {url}" if url else ""))
|
||
return "\n".join(lines) if lines else "Arama sonucu bulundu."
|
||
|
||
def _encode_media_response(self, text: str, media: list[dict[str, str]]) -> str:
|
||
return "__WC_MEDIA__" + json.dumps({"text": text, "media": media}, ensure_ascii=False)
|
||
|
||
def _should_fallback_to_local(self, exc: Exception, runtime: RuntimeSettings) -> bool:
|
||
if runtime.model_provider != "zai":
|
||
return False
|
||
message = str(exc).lower()
|
||
if any(token in message for token in ("authentication failed", "api key", "rate limit", "too many requests", "429")):
|
||
return False
|
||
return any(
|
||
token in message
|
||
for token in (
|
||
"nodename nor servname provided",
|
||
"name or service not known",
|
||
"temporary failure in name resolution",
|
||
"connecterror",
|
||
"all connection attempts failed",
|
||
"network is unreachable",
|
||
)
|
||
)
|
||
|
||
    def _extract_apple_note_request(self, text: str) -> dict[str, object] | None:
        """Parse a free-form (mostly Turkish) message into a "create_note" request.

        Uses regex heuristics to extract a title (required), plus optional
        folder, due date, priority, tags and body. Returns a dict with keys
        ``action``/``title``/``body`` (and ``folder`` when one was found),
        or ``None`` when no title could be recognised.
        """
        # Title: prefer a quoted phrase (smart or straight double quotes)…
        match = re.search(r'[“"](.*?)[”"]', text)
        if match:
            title = match.group(1).strip()
        else:
            # …otherwise look for "<title> başlığıyla/başlıklı" phrasing,
            # stopping before a trailing "yeni not" / "not oluştur" command.
            match = re.search(r"(?:başlığıyla|basligiyla|başlıklı|baslikli)\s+(.+?)(?:\s+yeni not|\s+not oluştur|\s+not olustur|$)", text, re.IGNORECASE)
            title = match.group(1).strip() if match else ""

        # A note request without a title is not actionable.
        if not title:
            return None

        # Optional folder: "<name> klasörüne/klasöründe/folder…". Character
        # classes accept both Turkish and ASCII-folded vowels; quotes around
        # the folder name are optional.
        folder = ""
        folder_match = re.search(
            r"[“\"]?([^”\"\n,]+)[”\"]?\s+(?:klas[oö]r[üu]ne|klas[oö]r[üu]nda|folder(?:üne|une|da|de)?|notlar klas[oö]r[üu]ne)",
            text,
            re.IGNORECASE,
        )
        if folder_match:
            folder = folder_match.group(1).strip()

        # Optional due date: a numeric d.m.y-style date or a relative word
        # (yarın/bugün/haftaya/gelecek hafta), after a "son tarih"-style cue.
        due = ""
        due_match = re.search(
            r"(?:son tarih|teslim tarihi|due date|tarih)\s+(?:olarak\s+)?([0-9]{1,2}[./-][0-9]{1,2}[./-][0-9]{2,4}|yarın|yarin|bugün|bugun|haftaya|gelecek hafta)",
            text,
            re.IGNORECASE,
        )
        if due_match:
            due = due_match.group(1).strip()

        # Optional priority level (Turkish or English keywords).
        priority = ""
        priority_match = re.search(
            r"(?:öncelik|oncelik|priority)\s+(?:olarak\s+)?(yüksek|yuksek|orta|düşük|dusuk|high|medium|low)",
            text,
            re.IGNORECASE,
        )
        if priority_match:
            priority = priority_match.group(1).strip()

        # Optional tag list after "etiket(ler)/tag(ler)": split on commas,
        # newlines or semicolons, strip list punctuation, and drop trailing
        # imperative verbs ("olsun", "ekle", …) from each tag.
        tags: list[str] = []
        tags_match = re.search(
            r"(?:etiket(?:ler)?|tag(?:ler)?)(?:\s+olarak)?\s+(.+?)(?:,\s*içine|,\s*icine|\.\s*|$)",
            text,
            re.IGNORECASE,
        )
        if tags_match:
            tag_source = tags_match.group(1)
            tags = [
                re.sub(r"\s+(?:olsun|olsunlar|yaz|ekle)$", "", part.strip(" #,\t ."), flags=re.IGNORECASE)
                for part in re.split(r",|\n|;", tag_source)
                if part.strip(" #,\t")
            ]

        # Optional body: everything after an "içine / içerik olarak /
        # şunları yaz"-style marker (DOTALL so it can span lines), with any
        # trailing command verb ("oluştur", "ekle", …) removed before the
        # bullet/checklist normalisation pass.
        body = ""
        body_match = re.search(
            r"(?:içine|icine|içerik olarak|icerik olarak|gövdesine|govdesine|şunları yaz|sunlari yaz|şunlari ekle|sunlari ekle)\s*(.+)$",
            text,
            re.IGNORECASE | re.DOTALL,
        )
        if body_match:
            raw_body = body_match.group(1).strip()
            raw_body = re.sub(r"\s*(?:oluştur|olustur|ekle|yaz)\.?\s*$", "", raw_body, flags=re.IGNORECASE).strip()
            body = self._normalize_note_body(raw_body, text)

        # Fold due/priority/tags metadata lines into the final body text.
        body = self._compose_note_body(body=body, due=due, priority=priority, tags=tags)

        note_request: dict[str, object] = {
            "action": "create_note",
            "title": title,
            "body": body,
        }
        if folder:
            note_request["folder"] = folder
        return note_request
|
||
|
||
def _normalize_note_body(self, raw_body: str, full_text: str) -> str:
|
||
bullet_source = raw_body.replace("•", ",").replace(";", ",")
|
||
parts = [part.strip(" -\n\r\t") for part in re.split(r",|\n", bullet_source) if part.strip(" -\n\r\t")]
|
||
checklist_hint = any(keyword in full_text.lower() for keyword in ("checklist", "görev listesi", "gorev listesi", "yapılacak", "yapilacak", "todo"))
|
||
if len(parts) >= 2:
|
||
marker = "- [ ]" if checklist_hint else "-"
|
||
return "\n".join(f"{marker} {part}" for part in parts)
|
||
return raw_body.strip()
|
||
|
||
def _compose_note_body(self, body: str, due: str, priority: str, tags: list[str]) -> str:
|
||
meta_lines: list[str] = []
|
||
if due:
|
||
meta_lines.append(f"Son tarih: {due}")
|
||
if priority:
|
||
meta_lines.append(f"Öncelik: {priority}")
|
||
if tags:
|
||
meta_lines.append("Etiketler: " + ", ".join(f"#{tag.lstrip('#')}" for tag in tags))
|
||
|
||
if body and meta_lines:
|
||
return "\n".join(meta_lines) + "\n\n" + body
|
||
if body:
|
||
return body
|
||
if meta_lines:
|
||
return "\n".join(meta_lines)
|
||
return ""
|