diff --git a/.env.example b/.env.example index 6edf13a..cd5a0d0 100644 --- a/.env.example +++ b/.env.example @@ -1,10 +1,16 @@ WISECLAW_ENV=development WISECLAW_DB_URL=sqlite:///./wiseclaw.db -WISECLAW_OLLAMA_BASE_URL=http://127.0.0.1:11434 -WISECLAW_DEFAULT_MODEL=qwen3.5:4b +WISECLAW_MODEL_PROVIDER=local +WISECLAW_LOCAL_BASE_URL=http://127.0.0.1:1234 +WISECLAW_LOCAL_MODEL=qwen3-vl-8b-instruct-mlx@5bit +WISECLAW_ZAI_BASE_URL=https://api.z.ai/api/anthropic +WISECLAW_ZAI_MODEL=glm-5 +WISECLAW_ANYTHINGLLM_BASE_URL=http://127.0.0.1:3001 +WISECLAW_ANYTHINGLLM_WORKSPACE_SLUG=wiseclaw WISECLAW_SEARCH_PROVIDER=brave WISECLAW_TELEGRAM_BOT_TOKEN= WISECLAW_BRAVE_API_KEY= +WISECLAW_ZAI_API_KEY= +WISECLAW_ANYTHINGLLM_API_KEY= WISECLAW_ADMIN_HOST=127.0.0.1 WISECLAW_ADMIN_PORT=8000 - diff --git a/.gitignore b/.gitignore index becdb95..e39a4bd 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,13 @@ build/ .DS_Store .env wiseclaw.db +.codex/ +.playwright-cli/ +.wiseclaw/ +backend/.wiseclaw/ +backend/tmp/ +backend/second_brain.md +generated_apps/ +snake/ +snake-game/ +Yapilacak_Odevler.md diff --git a/README.md b/README.md index 09f312a..ebe7bcd 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,14 @@ # WiseClaw -WiseClaw is a local-first personal assistant for macOS. It runs a FastAPI backend, uses Ollama for local LLM access, exposes a Telegram bot, and includes a React admin panel for settings, logs, and memory management. +WiseClaw is a local-first personal assistant for macOS. It runs a FastAPI backend, supports either a local LM Studio endpoint or the hosted Z.AI API for LLM access, can augment replies with an AnythingLLM-backed second brain, exposes a Telegram bot, and includes a React admin panel for settings, logs, and memory management. 
## Planned capabilities - Telegram chat with whitelist support -- Local Ollama integration for `qwen3.5:4b` +- Global model provider switch for `Local (LM Studio)` or `Z.AI` +- Local LM Studio integration for `qwen3-vl-8b-instruct-mlx@5bit` +- Z.AI integration for `glm-4.7` and `glm-5` +- AnythingLLM second-brain context retrieval via workspace chat - Brave or SearXNG-backed web search - Apple Notes integration via AppleScript - File read/write tools @@ -52,10 +55,10 @@ Then in another shell: ```bash curl http://127.0.0.1:8000/health curl http://127.0.0.1:8000/bootstrap -curl http://127.0.0.1:8000/admin/integrations/ollama +curl http://127.0.0.1:8000/admin/integrations/llm curl http://127.0.0.1:8000/admin/integrations/telegram ``` ## Environment bootstrap -Copy `.env.example` to `.env` and fill in only the values you need for the first boot. Secrets that are changed from the admin panel should be written to Keychain, not back to `.env`. +Copy `.env.example` to `.env` and fill in only the values you need for the first boot. Secrets such as `WISECLAW_ZAI_API_KEY` and `WISECLAW_ANYTHINGLLM_API_KEY` can also be saved later from the admin panel. diff --git a/backend/README.md b/backend/README.md index 39db730..b59065f 100644 --- a/backend/README.md +++ b/backend/README.md @@ -4,7 +4,7 @@ FastAPI service for WiseClaw. 
The backend now includes: - SQLite persistence through SQLAlchemy - runtime/admin settings endpoints -- Ollama integration status endpoint +- LM Studio integration status endpoint - Telegram polling runtime scaffold ## Run locally diff --git a/backend/app/admin/routes.py b/backend/app/admin/routes.py index f01c03c..2609aa4 100644 --- a/backend/app/admin/routes.py +++ b/backend/app/admin/routes.py @@ -3,9 +3,10 @@ from pydantic import BaseModel from sqlalchemy.orm import Session from app.admin.services import AdminService -from app.db import get_session +from app.config import get_settings as get_app_settings +from app.db import SecretORM, get_session from app.llm.ollama_client import OllamaClient -from app.models import MemoryRecord, OllamaStatus, RuntimeSettings, TelegramStatus, UserRecord +from app.models import AutomationRecord, MemoryRecord, OllamaStatus, RuntimeSettings, TelegramStatus, UserProfileRecord, UserRecord router = APIRouter(prefix="/admin", tags=["admin"]) @@ -25,7 +26,7 @@ def get_dashboard(service: AdminService = Depends(get_admin_service)): @router.get("/settings", response_model=RuntimeSettings) -def get_settings(service: AdminService = Depends(get_admin_service)): +def get_runtime_settings(service: AdminService = Depends(get_admin_service)): return service.get_runtime_settings() @@ -44,6 +45,16 @@ def post_user(payload: UserRecord, service: AdminService = Depends(get_admin_ser return service.save_user(payload) +@router.get("/profiles", response_model=list[UserProfileRecord]) +def get_profiles(service: AdminService = Depends(get_admin_service)): + return service.list_user_profiles() + + +@router.get("/automations", response_model=list[AutomationRecord]) +def get_automations(service: AdminService = Depends(get_admin_service)): + return service.list_automations() + + @router.get("/memory", response_model=list[MemoryRecord]) def get_memory(service: AdminService = Depends(get_admin_service)): return service.list_memory() @@ -66,11 +77,18 @@ def 
post_secret(payload: SecretPayload, service: AdminService = Depends(get_admi return {"status": "ok"} +@router.get("/integrations/llm", response_model=OllamaStatus) @router.get("/integrations/ollama", response_model=OllamaStatus) -async def get_ollama_status(service: AdminService = Depends(get_admin_service)): +async def get_llm_status(service: AdminService = Depends(get_admin_service)): runtime = service.get_runtime_settings() - client = OllamaClient(runtime.ollama_base_url) - return await client.status(runtime.default_model) + settings = get_app_settings() + secret = service.session.get(SecretORM, "zai_api_key") if runtime.model_provider == "zai" else None + client = OllamaClient( + base_url=runtime.local_base_url if runtime.model_provider == "local" else settings.zai_base_url, + provider=runtime.model_provider, + api_key=secret.value if secret else settings.zai_api_key, + ) + return await client.status(runtime.local_model if runtime.model_provider == "local" else runtime.zai_model) @router.get("/integrations/telegram", response_model=TelegramStatus) diff --git a/backend/app/admin/services.py b/backend/app/admin/services.py index b9624f7..09fcd1a 100644 --- a/backend/app/admin/services.py +++ b/backend/app/admin/services.py @@ -5,15 +5,18 @@ from sqlalchemy.orm import Session from app.db import ( AuditLogORM, + AutomationORM, AuthorizedUserORM, + DEFAULT_TOOLS, MemoryItemORM, SecretORM, SettingORM, + TelegramUserProfileORM, ToolStateORM, list_recent_logs, ) from app.config import get_settings -from app.models import DashboardSnapshot, MemoryRecord, RuntimeSettings, TelegramStatus, ToolToggle, UserRecord +from app.models import AutomationRecord, DashboardSnapshot, MemoryRecord, RuntimeSettings, TelegramStatus, ToolToggle, UserProfileRecord, UserRecord class AdminService: @@ -24,20 +27,30 @@ class AdminService: settings = { item.key: item.value for item in self.session.scalars(select(SettingORM)) } - tools = 
list(self.session.scalars(select(ToolStateORM).order_by(ToolStateORM.name.asc()))) + tool_records = { + tool.name: tool.enabled for tool in self.session.scalars(select(ToolStateORM).order_by(ToolStateORM.name.asc())) + } return RuntimeSettings( terminal_mode=int(settings["terminal_mode"]), search_provider=settings["search_provider"], - ollama_base_url=settings["ollama_base_url"], - default_model=settings["default_model"], - tools=[ToolToggle(name=tool.name, enabled=tool.enabled) for tool in tools], + model_provider=settings["model_provider"], + local_base_url=settings["local_base_url"], + local_model=settings["local_model"], + zai_model=settings["zai_model"], + anythingllm_base_url=settings["anythingllm_base_url"], + anythingllm_workspace_slug=settings["anythingllm_workspace_slug"], + tools=[ToolToggle(name=name, enabled=tool_records.get(name, enabled)) for name, enabled in DEFAULT_TOOLS.items()], ) def update_runtime_settings(self, payload: RuntimeSettings) -> RuntimeSettings: self._save_setting("terminal_mode", str(payload.terminal_mode)) self._save_setting("search_provider", payload.search_provider) - self._save_setting("ollama_base_url", payload.ollama_base_url) - self._save_setting("default_model", payload.default_model) + self._save_setting("model_provider", payload.model_provider) + self._save_setting("local_base_url", payload.local_base_url) + self._save_setting("local_model", payload.local_model) + self._save_setting("zai_model", payload.zai_model) + self._save_setting("anythingllm_base_url", payload.anythingllm_base_url) + self._save_setting("anythingllm_workspace_slug", payload.anythingllm_workspace_slug) for tool in payload.tools: record = self.session.get(ToolStateORM, tool.name) @@ -92,6 +105,55 @@ class AdminService: self.session.commit() return user + def list_user_profiles(self) -> list[UserProfileRecord]: + stmt = select(TelegramUserProfileORM).order_by(TelegramUserProfileORM.updated_at.desc()) + profiles: list[UserProfileRecord] = [] + for item 
in self.session.scalars(stmt): + profiles.append( + UserProfileRecord( + telegram_user_id=item.telegram_user_id, + display_name=item.display_name, + bio=item.bio, + occupation=item.occupation, + primary_use_cases=self._decode_list(item.primary_use_cases), + answer_priorities=self._decode_list(item.answer_priorities), + tone_preference=item.tone_preference, + response_length=item.response_length, + language_preference=item.language_preference, + workflow_preference=item.workflow_preference, + interests=self._decode_list(item.interests), + approval_preferences=self._decode_list(item.approval_preferences), + avoid_preferences=item.avoid_preferences, + onboarding_completed=item.onboarding_completed, + last_onboarding_step=item.last_onboarding_step, + ) + ) + return profiles + + def list_automations(self) -> list[AutomationRecord]: + stmt = select(AutomationORM).order_by(AutomationORM.created_at.desc(), AutomationORM.id.desc()) + records: list[AutomationRecord] = [] + for item in self.session.scalars(stmt): + records.append( + AutomationRecord( + id=item.id, + telegram_user_id=item.telegram_user_id, + name=item.name, + prompt=item.prompt, + schedule_type=item.schedule_type, # type: ignore[arg-type] + interval_hours=item.interval_hours, + time_of_day=item.time_of_day, + days_of_week=self._decode_list(item.days_of_week), + status=item.status, # type: ignore[arg-type] + last_run_at=item.last_run_at, + next_run_at=item.next_run_at, + last_result=item.last_result, + created_at=item.created_at, + updated_at=item.updated_at, + ) + ) + return records + def list_memory(self) -> list[MemoryRecord]: stmt = select(MemoryItemORM).order_by(MemoryItemORM.created_at.desc(), MemoryItemORM.id.desc()).limit(50) return [ @@ -140,3 +202,14 @@ class AdminService: if configured else "Telegram token is not configured.", ) + + def _decode_list(self, value: str) -> list[str]: + import json + + try: + payload = json.loads(value) + except json.JSONDecodeError: + return [] + if not 
isinstance(payload, list): + return [] + return [str(item).strip() for item in payload if str(item).strip()] diff --git a/docs/architecture.md b/docs/architecture.md index b87fdfa..a509879 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -5,7 +5,7 @@ WiseClaw uses a single FastAPI process with modular tool adapters: - `telegram`: inbound/outbound bot handling and whitelist checks -- `llm`: Ollama client and simple tool-routing planner +- `llm`: LM Studio/OpenAI-compatible client and simple tool-routing planner - `tools`: search, notes, files, terminal, and fetch tools - `memory`: SQLite-backed short-term and long-term state - `admin`: REST API for settings, logs, users, and health @@ -24,7 +24,6 @@ WiseClaw uses a single FastAPI process with modular tool adapters: 1. Add SQLAlchemy models and Alembic migrations. 2. Replace placeholder services with real SQLite persistence. 3. Wire Telegram webhook or polling loop. -4. Add Ollama-driven tool calling. +4. Add LM Studio-driven tool calling. 5. Persist secrets in macOS Keychain. 6. Build audit views and approval flows in the admin panel. - diff --git a/docs/brainstorms/2026-03-21-model-provider-switch-brainstorm.md b/docs/brainstorms/2026-03-21-model-provider-switch-brainstorm.md new file mode 100644 index 0000000..2e13f0d --- /dev/null +++ b/docs/brainstorms/2026-03-21-model-provider-switch-brainstorm.md @@ -0,0 +1,83 @@ +--- +date: 2026-03-21 +topic: model-provider-switch +--- + +# Model Provider Switch + +## What We're Building +WiseClaw admin paneline global bir model sağlayıcı seçimi ekliyoruz. Yönetici ister mevcut yerel LM Studio akışını aktif edecek, ister z.ai sağlayıcısına geçip API key ile `glm-4.7` veya `glm-5` modellerini kullanacak. + +Bu seçim tüm yeni istekler için ortak runtime ayarı olacak. Yani Telegram, admin testleri ve backend orkestrasyonu seçili sağlayıcıya göre aynı LLM istemcisini kullanacak. + +## Why This Approach +En sade ve güvenli çözüm global provider seçimi. 
Per-user ya da per-chat seçim şu aşamada gereksiz karmaşıklık getirir; secret yönetimi, UI, audit ve hata ayıklama zorlaşır. + +z.ai tarafı OpenAI-uyumlu API sunduğu için mevcut istemci mimarisi çok büyük kırılım olmadan genişletilebilir. Bu da LM Studio ile z.ai arasında ortak bir soyutlama kurmayı mantıklı hale getiriyor. + +## Approaches Considered +### Approach A: Tek Global Provider Ayarı +Admin panelde provider seçilir, sadece ilgili alanlar görünür, backend seçili provider'a göre çağrı yapar. + +Pros: +- En basit kullanıcı deneyimi +- Backend davranışı öngörülebilir +- Secret ve runtime yönetimi kolay + +Cons: +- Aynı anda iki farklı provider kullanılamaz +- Deneysel karşılaştırmalar için manuel geçiş gerekir + +Best when: Ürün tek bir aktif model hattı ile çalışacaksa + +### Approach B: Global Provider + Manual Override Alanı +Global seçim korunur ama bazı akışlarda provider/model override edilebilir. + +Pros: +- Daha esnek +- Test ve karşılaştırma kolaylaşır + +Cons: +- UI ve backend karmaşıklığı artar +- Hangi isteğin hangi modelle çalıştığı daha az net olur + +Best when: Kısa vadede A/B model denemesi yapılacaksa + +### Approach C: Ayrı Provider Sekmeleri ve Bağımsız Konfigürasyonlar +Hem local hem z.ai ayarları hep görünür, ama aktif flag ayrı tutulur. + +Pros: +- Tüm ayarlar tek ekranda görünür +- Geçişler hızlı olur + +Cons: +- UI kalabalıklaşır +- İlk sürüm için gereğinden fazla yapı + +Best when: Sık sağlayıcı değişimi bekleniyorsa + +## Recommendation +Approach A. + +İlk sürüm için en doğru yol bu. Admin panelde: +- `Model Provider`: `local` / `zai` +- `local` seçiliyken: base URL + local model +- `zai` seçiliyken: API key + model dropdown (`glm-4.7`, `glm-5`) + +Backend tarafında ortak bir LLM gateway oluşturulmalı. 
Seçili provider'a göre: +- Local: mevcut LM Studio/OpenAI-compatible endpoint +- Z.AI: z.ai OpenAI-compatible endpoint + bearer/api key + +## Key Decisions +- Provider seçimi global olacak: sistem davranışı tek bir aktif modele bağlı kalacak. +- z.ai API key secret olarak saklanacak: normal runtime settings içine düz yazı olarak girmeyecek. +- z.ai model listesi ilk aşamada sabit olacak: `glm-4.7` ve `glm-5`. +- UI conditional olacak: sadece seçili provider'ın alanları gösterilecek. +- Backend provider-aware olacak: mevcut `ollama_base_url/default_model` yaklaşımı daha genel `provider/base_url/model` yapısına genişletilecek. + +## Open Questions +- z.ai için sabit bir base URL kullanıp UI'da göstermeyelim mi, yoksa readonly/default bir alan olarak mı gösterelim? +- `glm-4.7` ve `glm-5` dışında gelecekte serbest model adı girişi de desteklenecek mi? + +## Next Steps +- `/workflows:plan` seviyesinde implementasyon planına geç diff --git a/docs/brainstorms/2026-03-22-telegram-onboarding-brainstorm.md b/docs/brainstorms/2026-03-22-telegram-onboarding-brainstorm.md new file mode 100644 index 0000000..7f2a741 --- /dev/null +++ b/docs/brainstorms/2026-03-22-telegram-onboarding-brainstorm.md @@ -0,0 +1,33 @@ +--- +date: 2026-03-22 +topic: telegram-onboarding +--- + +# Telegram Onboarding + +## What We're Building +WiseClaw'a Telegram üzerinden `/tanışalım` komutu ile başlayan, 12 soruluk kalıcı bir onboarding sohbeti ekliyoruz. Bu akış kullanıcının adı, kullanım amacı, ton tercihi, dil tercihi, yanıt uzunluğu, çalışma biçimi ve sınırları gibi bilgileri toplar. + +Toplanan veriler geçici hafızada değil, SQLite içinde yapılandırılmış bir kullanıcı profili olarak saklanır. Böylece sunucu yeniden başlasa bile WiseClaw aynı kullanıcıyla aynı üslupta konuşmaya devam eder. + +## Why This Approach +Alternatif olarak cevapları yalnızca genel memory tablosuna yazmak mümkündü, ancak bu yaklaşım dağınık, kırılgan ve güncellemesi zor olurdu. 
Ayrı profil + onboarding state modeli daha güvenilir, sorgulanabilir ve kişiselleştirme için daha uygundur. + +## Key Decisions +- `/tanışalım` Telegram komutu olacak: onboarding yalnızca istek üzerine veya ilk temas senaryosunda başlatılacak. +- 12 soru tek tek sorulacak: uzun form yerine sohbet hissi korunacak. +- Her cevap anında kaydedilecek: yarıda kalırsa kaldığı yerden devam edilebilecek. +- Veriler ayrı kullanıcı profili tablosunda tutulacak: kalıcı kişiselleştirme için. +- Prompt'a structured profile enjekte edilecek: ton, dil, uzunluk ve çalışma tercihi her cevapta uygulanacak. +- Kısa profil özeti ayrıca memory'ye yazılabilecek: ama asıl kaynak structured profile olacak. + +## Open Questions +- İlk mesajda onboarding otomatik mi tetiklensin, yoksa sadece `/tanışalım` ile mi başlasın? +- Admin panelde profil düzenleme ilk sürüme dahil edilsin mi, yoksa yalnızca Telegram komutları yeterli mi? + +## Next Steps +- Veri modelini ve onboarding state yapısını ekle +- Telegram command akışını oluştur +- Orchestrator içine onboarding interception ekle +- Prompt kişiselleştirme katmanını bağla +- `/profilim`, `/tercihlerim`, `/tanışalım_sıfırla` yardımcı komutlarını ekle diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 6802111..19ad196 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -2,21 +2,29 @@ import { FormEvent, useEffect, useState } from "react"; import { api } from "./api"; import type { + AutomationRecord, DashboardSnapshot, MemoryRecord, OllamaStatus, RuntimeSettings, TelegramStatus, + UserProfileRecord, UserRecord, } from "./types"; const defaultSettings: RuntimeSettings = { terminal_mode: 3, search_provider: "brave", - ollama_base_url: "http://127.0.0.1:11434", - default_model: "qwen3.5:4b", + model_provider: "local", + local_base_url: "http://127.0.0.1:1234", + local_model: "qwen3-vl-8b-instruct-mlx@5bit", + zai_model: "glm-5", + anythingllm_base_url: "http://127.0.0.1:3001", + anythingllm_workspace_slug: "wiseclaw", 
tools: [ { name: "brave_search", enabled: true }, + { name: "second_brain", enabled: true }, + { name: "browser_use", enabled: true }, { name: "searxng_search", enabled: false }, { name: "web_fetch", enabled: true }, { name: "apple_notes", enabled: true }, @@ -29,12 +37,25 @@ export function App() { const [dashboard, setDashboard] = useState(null); const [settings, setSettings] = useState(defaultSettings); const [users, setUsers] = useState([]); + const [profiles, setProfiles] = useState([]); + const [automations, setAutomations] = useState([]); const [memory, setMemory] = useState([]); const [secretMask, setSecretMask] = useState(""); const [secretValue, setSecretValue] = useState(""); + const [zaiSecretMask, setZaiSecretMask] = useState(""); + const [zaiSecretValue, setZaiSecretValue] = useState(""); + const [anythingSecretMask, setAnythingSecretMask] = useState(""); + const [anythingSecretValue, setAnythingSecretValue] = useState(""); const [ollamaStatus, setOllamaStatus] = useState(null); const [telegramStatus, setTelegramStatus] = useState(null); const [status, setStatus] = useState("Loading WiseClaw admin..."); + const providerLabel = settings.model_provider === "local" ? "Local (LM Studio)" : "Z.AI"; + const searchProviderLabel = settings.search_provider === "brave" ? "Brave" : "SearXNG"; + const llmStatusLabel = settings.model_provider === "local" ? "LM Studio status" : "Z.AI status"; + const llmStatusHint = + settings.model_provider === "local" + ? "Checking local model endpoint..." 
+ : "Checking remote Z.AI endpoint..."; useEffect(() => { void load(); @@ -42,21 +63,29 @@ export function App() { async function load() { try { - const [dashboardData, settingsData, userData, memoryData, secretData, ollamaData, telegramData] = + const [dashboardData, settingsData, userData, profileData, automationData, memoryData, secretData, zaiSecretData, anythingSecretData, ollamaData, telegramData] = await Promise.all([ api.getDashboard(), api.getSettings(), api.getUsers(), + api.getProfiles(), + api.getAutomations(), api.getMemory(), api.getSecretMask("brave_api_key"), + api.getSecretMask("zai_api_key"), + api.getSecretMask("anythingllm_api_key"), api.getOllamaStatus(), api.getTelegramStatus(), ]); setDashboard(dashboardData); setSettings(settingsData); setUsers(userData); + setProfiles(profileData); + setAutomations(automationData); setMemory(memoryData); setSecretMask(secretData.masked); + setZaiSecretMask(zaiSecretData.masked); + setAnythingSecretMask(anythingSecretData.masked); setOllamaStatus(ollamaData); setTelegramStatus(telegramData); setStatus("WiseClaw admin ready."); @@ -84,6 +113,28 @@ export function App() { await load(); } + async function handleZaiSecretSubmit(event: FormEvent) { + event.preventDefault(); + if (!zaiSecretValue.trim()) { + return; + } + await api.saveSecret("zai_api_key", zaiSecretValue.trim()); + setZaiSecretValue(""); + setStatus("Z.AI API key updated."); + await load(); + } + + async function handleAnythingSecretSubmit(event: FormEvent) { + event.preventDefault(); + if (!anythingSecretValue.trim()) { + return; + } + await api.saveSecret("anythingllm_api_key", anythingSecretValue.trim()); + setAnythingSecretValue(""); + setStatus("AnythingLLM API key updated."); + await load(); + } + async function handleAddUser(event: FormEvent) { event.preventDefault(); const form = new FormData(event.currentTarget); @@ -133,7 +184,7 @@ export function App() {
Model - {settings.default_model} + {settings.model_provider === "local" ? settings.local_model : settings.zai_model}
@@ -151,21 +202,21 @@ export function App() {
Search provider - {settings.search_provider} + {searchProviderLabel}
- Ollama - {settings.ollama_base_url} + Provider + {providerLabel}
- Ollama status + {llmStatusLabel}: {ollamaStatus?.reachable ? "Reachable" : "Offline"} -

{ollamaStatus?.message || "Checking..."}

+

{ollamaStatus?.message || llmStatusHint}

- Telegram status + Telegram status: {telegramStatus?.configured ? "Configured" : "Missing token"}

{telegramStatus?.message || "Checking..."}

@@ -196,6 +247,22 @@ export function App() { + + + {settings.model_provider === "local" ? ( + <> + + + + + ) : ( + <> +

Z.AI uses the fixed hosted API endpoint and the API key saved below.

+ + + )} +
{settings.tools.map((tool) => (