feat: admin panelini ve kurulum dokumanlarini genislet

This commit is contained in:
2026-03-22 04:46:02 +03:00
parent 5f4c19a18d
commit 37da564a5f
14 changed files with 728 additions and 49 deletions

View File

@@ -1,10 +1,16 @@
WISECLAW_ENV=development
WISECLAW_DB_URL=sqlite:///./wiseclaw.db
WISECLAW_OLLAMA_BASE_URL=http://127.0.0.1:11434
WISECLAW_DEFAULT_MODEL=qwen3.5:4b
WISECLAW_MODEL_PROVIDER=local
WISECLAW_LOCAL_BASE_URL=http://127.0.0.1:1234
WISECLAW_LOCAL_MODEL=qwen3-vl-8b-instruct-mlx@5bit
WISECLAW_ZAI_BASE_URL=https://api.z.ai/api/anthropic
WISECLAW_ZAI_MODEL=glm-5
WISECLAW_ANYTHINGLLM_BASE_URL=http://127.0.0.1:3001
WISECLAW_ANYTHINGLLM_WORKSPACE_SLUG=wiseclaw
WISECLAW_SEARCH_PROVIDER=brave
WISECLAW_TELEGRAM_BOT_TOKEN=
WISECLAW_BRAVE_API_KEY=
WISECLAW_ZAI_API_KEY=
WISECLAW_ANYTHINGLLM_API_KEY=
WISECLAW_ADMIN_HOST=127.0.0.1
WISECLAW_ADMIN_PORT=8000

10
.gitignore vendored
View File

@@ -13,3 +13,13 @@ build/
.DS_Store
.env
wiseclaw.db
.codex/
.playwright-cli/
.wiseclaw/
backend/.wiseclaw/
backend/tmp/
backend/second_brain.md
generated_apps/
snake/
snake-game/
Yapilacak_Odevler.md

View File

@@ -1,11 +1,14 @@
# WiseClaw
WiseClaw is a local-first personal assistant for macOS. It runs a FastAPI backend, uses Ollama for local LLM access, exposes a Telegram bot, and includes a React admin panel for settings, logs, and memory management.
WiseClaw is a local-first personal assistant for macOS. It runs a FastAPI backend, supports either a local LM Studio endpoint or the hosted Z.AI API for LLM access, can augment replies with an AnythingLLM-backed second brain, exposes a Telegram bot, and includes a React admin panel for settings, logs, and memory management.
## Planned capabilities
- Telegram chat with whitelist support
- Local Ollama integration for `qwen3.5:4b`
- Global model provider switch for `Local (LM Studio)` or `Z.AI`
- Local LM Studio integration for `qwen3-vl-8b-instruct-mlx@5bit`
- Z.AI integration for `glm-4.7` and `glm-5`
- AnythingLLM second-brain context retrieval via workspace chat
- Brave or SearXNG-backed web search
- Apple Notes integration via AppleScript
- File read/write tools
@@ -52,10 +55,10 @@ Then in another shell:
```bash
curl http://127.0.0.1:8000/health
curl http://127.0.0.1:8000/bootstrap
curl http://127.0.0.1:8000/admin/integrations/ollama
curl http://127.0.0.1:8000/admin/integrations/llm
curl http://127.0.0.1:8000/admin/integrations/telegram
```
## Environment bootstrap
Copy `.env.example` to `.env` and fill in only the values you need for the first boot. Secrets that are changed from the admin panel should be written to Keychain, not back to `.env`.
Copy `.env.example` to `.env` and fill in only the values you need for the first boot. Secrets such as `WISECLAW_ZAI_API_KEY` and `WISECLAW_ANYTHINGLLM_API_KEY` can also be saved later from the admin panel.

View File

@@ -4,7 +4,7 @@ FastAPI service for WiseClaw. The backend now includes:
- SQLite persistence through SQLAlchemy
- runtime/admin settings endpoints
- Ollama integration status endpoint
- LM Studio integration status endpoint
- Telegram polling runtime scaffold
## Run locally

View File

@@ -3,9 +3,10 @@ from pydantic import BaseModel
from sqlalchemy.orm import Session
from app.admin.services import AdminService
from app.db import get_session
from app.config import get_settings as get_app_settings
from app.db import SecretORM, get_session
from app.llm.ollama_client import OllamaClient
from app.models import MemoryRecord, OllamaStatus, RuntimeSettings, TelegramStatus, UserRecord
from app.models import AutomationRecord, MemoryRecord, OllamaStatus, RuntimeSettings, TelegramStatus, UserProfileRecord, UserRecord
router = APIRouter(prefix="/admin", tags=["admin"])
@@ -25,7 +26,7 @@ def get_dashboard(service: AdminService = Depends(get_admin_service)):
@router.get("/settings", response_model=RuntimeSettings)
def get_settings(service: AdminService = Depends(get_admin_service)):
def get_runtime_settings(service: AdminService = Depends(get_admin_service)):
return service.get_runtime_settings()
@@ -44,6 +45,16 @@ def post_user(payload: UserRecord, service: AdminService = Depends(get_admin_ser
return service.save_user(payload)
@router.get("/profiles", response_model=list[UserProfileRecord])
def get_profiles(service: AdminService = Depends(get_admin_service)):
    """Return all Telegram onboarding profiles, most recently updated first."""
    return service.list_user_profiles()
@router.get("/automations", response_model=list[AutomationRecord])
def get_automations(service: AdminService = Depends(get_admin_service)):
    """Return all scheduled automations, newest first."""
    return service.list_automations()
@router.get("/memory", response_model=list[MemoryRecord])
def get_memory(service: AdminService = Depends(get_admin_service)):
return service.list_memory()
@@ -66,11 +77,18 @@ def post_secret(payload: SecretPayload, service: AdminService = Depends(get_admi
return {"status": "ok"}
@router.get("/integrations/llm", response_model=OllamaStatus)
@router.get("/integrations/ollama", response_model=OllamaStatus)
async def get_ollama_status(service: AdminService = Depends(get_admin_service)):
async def get_llm_status(service: AdminService = Depends(get_admin_service)):
runtime = service.get_runtime_settings()
client = OllamaClient(runtime.ollama_base_url)
return await client.status(runtime.default_model)
settings = get_app_settings()
secret = service.session.get(SecretORM, "zai_api_key") if runtime.model_provider == "zai" else None
client = OllamaClient(
base_url=runtime.local_base_url if runtime.model_provider == "local" else settings.zai_base_url,
provider=runtime.model_provider,
api_key=secret.value if secret else settings.zai_api_key,
)
return await client.status(runtime.local_model if runtime.model_provider == "local" else runtime.zai_model)
@router.get("/integrations/telegram", response_model=TelegramStatus)

View File

@@ -5,15 +5,18 @@ from sqlalchemy.orm import Session
from app.db import (
AuditLogORM,
AutomationORM,
AuthorizedUserORM,
DEFAULT_TOOLS,
MemoryItemORM,
SecretORM,
SettingORM,
TelegramUserProfileORM,
ToolStateORM,
list_recent_logs,
)
from app.config import get_settings
from app.models import DashboardSnapshot, MemoryRecord, RuntimeSettings, TelegramStatus, ToolToggle, UserRecord
from app.models import AutomationRecord, DashboardSnapshot, MemoryRecord, RuntimeSettings, TelegramStatus, ToolToggle, UserProfileRecord, UserRecord
class AdminService:
@@ -24,20 +27,30 @@ class AdminService:
settings = {
item.key: item.value for item in self.session.scalars(select(SettingORM))
}
tools = list(self.session.scalars(select(ToolStateORM).order_by(ToolStateORM.name.asc())))
tool_records = {
tool.name: tool.enabled for tool in self.session.scalars(select(ToolStateORM).order_by(ToolStateORM.name.asc()))
}
return RuntimeSettings(
terminal_mode=int(settings["terminal_mode"]),
search_provider=settings["search_provider"],
ollama_base_url=settings["ollama_base_url"],
default_model=settings["default_model"],
tools=[ToolToggle(name=tool.name, enabled=tool.enabled) for tool in tools],
model_provider=settings["model_provider"],
local_base_url=settings["local_base_url"],
local_model=settings["local_model"],
zai_model=settings["zai_model"],
anythingllm_base_url=settings["anythingllm_base_url"],
anythingllm_workspace_slug=settings["anythingllm_workspace_slug"],
tools=[ToolToggle(name=name, enabled=tool_records.get(name, enabled)) for name, enabled in DEFAULT_TOOLS.items()],
)
def update_runtime_settings(self, payload: RuntimeSettings) -> RuntimeSettings:
self._save_setting("terminal_mode", str(payload.terminal_mode))
self._save_setting("search_provider", payload.search_provider)
self._save_setting("ollama_base_url", payload.ollama_base_url)
self._save_setting("default_model", payload.default_model)
self._save_setting("model_provider", payload.model_provider)
self._save_setting("local_base_url", payload.local_base_url)
self._save_setting("local_model", payload.local_model)
self._save_setting("zai_model", payload.zai_model)
self._save_setting("anythingllm_base_url", payload.anythingllm_base_url)
self._save_setting("anythingllm_workspace_slug", payload.anythingllm_workspace_slug)
for tool in payload.tools:
record = self.session.get(ToolStateORM, tool.name)
@@ -92,6 +105,55 @@ class AdminService:
self.session.commit()
return user
def list_user_profiles(self) -> list[UserProfileRecord]:
    """Return every Telegram onboarding profile, most recently updated first."""
    rows = self.session.scalars(
        select(TelegramUserProfileORM).order_by(TelegramUserProfileORM.updated_at.desc())
    )
    # JSON-encoded list columns are decoded defensively via _decode_list.
    return [
        UserProfileRecord(
            telegram_user_id=row.telegram_user_id,
            display_name=row.display_name,
            bio=row.bio,
            occupation=row.occupation,
            primary_use_cases=self._decode_list(row.primary_use_cases),
            answer_priorities=self._decode_list(row.answer_priorities),
            tone_preference=row.tone_preference,
            response_length=row.response_length,
            language_preference=row.language_preference,
            workflow_preference=row.workflow_preference,
            interests=self._decode_list(row.interests),
            approval_preferences=self._decode_list(row.approval_preferences),
            avoid_preferences=row.avoid_preferences,
            onboarding_completed=row.onboarding_completed,
            last_onboarding_step=row.last_onboarding_step,
        )
        for row in rows
    ]
def list_automations(self) -> list[AutomationRecord]:
    """Return every automation, newest first (created_at desc, then id desc)."""
    rows = self.session.scalars(
        select(AutomationORM).order_by(AutomationORM.created_at.desc(), AutomationORM.id.desc())
    )
    return [
        AutomationRecord(
            id=row.id,
            telegram_user_id=row.telegram_user_id,
            name=row.name,
            prompt=row.prompt,
            schedule_type=row.schedule_type,  # type: ignore[arg-type]
            interval_hours=row.interval_hours,
            time_of_day=row.time_of_day,
            days_of_week=self._decode_list(row.days_of_week),
            status=row.status,  # type: ignore[arg-type]
            last_run_at=row.last_run_at,
            next_run_at=row.next_run_at,
            last_result=row.last_result,
            created_at=row.created_at,
            updated_at=row.updated_at,
        )
        for row in rows
    ]
def list_memory(self) -> list[MemoryRecord]:
stmt = select(MemoryItemORM).order_by(MemoryItemORM.created_at.desc(), MemoryItemORM.id.desc()).limit(50)
return [
@@ -140,3 +202,14 @@ class AdminService:
if configured
else "Telegram token is not configured.",
)
def _decode_list(self, value: str) -> list[str]:
import json
try:
payload = json.loads(value)
except json.JSONDecodeError:
return []
if not isinstance(payload, list):
return []
return [str(item).strip() for item in payload if str(item).strip()]

View File

@@ -5,7 +5,7 @@
WiseClaw uses a single FastAPI process with modular tool adapters:
- `telegram`: inbound/outbound bot handling and whitelist checks
- `llm`: Ollama client and simple tool-routing planner
- `llm`: LM Studio/OpenAI-compatible client and simple tool-routing planner
- `tools`: search, notes, files, terminal, and fetch tools
- `memory`: SQLite-backed short-term and long-term state
- `admin`: REST API for settings, logs, users, and health
@@ -24,7 +24,6 @@ WiseClaw uses a single FastAPI process with modular tool adapters:
1. Add SQLAlchemy models and Alembic migrations.
2. Replace placeholder services with real SQLite persistence.
3. Wire Telegram webhook or polling loop.
4. Add Ollama-driven tool calling.
4. Add LM Studio-driven tool calling.
5. Persist secrets in macOS Keychain.
6. Build audit views and approval flows in the admin panel.

View File

@@ -0,0 +1,83 @@
---
date: 2026-03-21
topic: model-provider-switch
---
# Model Provider Switch
## What We're Building
WiseClaw admin paneline global bir model sağlayıcı seçimi ekliyoruz. Yönetici ister mevcut yerel LM Studio akışını aktif edecek, ister z.ai sağlayıcısına geçip API key ile `glm-4.7` veya `glm-5` modellerini kullanacak.
Bu seçim tüm yeni istekler için ortak runtime ayarı olacak. Yani Telegram, admin testleri ve backend orkestrasyonu seçili sağlayıcıya göre aynı LLM istemcisini kullanacak.
## Why This Approach
En sade ve güvenli çözüm global provider seçimi. Per-user ya da per-chat seçim şu aşamada gereksiz karmaşıklık getirir; secret yönetimi, UI, audit ve hata ayıklama zorlaşır.
z.ai tarafı OpenAI-uyumlu API sunduğu için mevcut istemci mimarisi çok büyük kırılım olmadan genişletilebilir. Bu da LM Studio ile z.ai arasında ortak bir soyutlama kurmayı mantıklı hale getiriyor.
## Approaches Considered
### Approach A: Tek Global Provider Ayarı
Admin panelde provider seçilir, sadece ilgili alanlar görünür, backend seçili provider'a göre çağrı yapar.
Pros:
- En basit kullanıcı deneyimi
- Backend davranışı öngörülebilir
- Secret ve runtime yönetimi kolay
Cons:
- Aynı anda iki farklı provider kullanılamaz
- Deneysel karşılaştırmalar için manuel geçiş gerekir
Best when: Ürün tek bir aktif model hattı ile çalışacaksa
### Approach B: Global Provider + Manual Override Alanı
Global seçim korunur ama bazı akışlarda provider/model override edilebilir.
Pros:
- Daha esnek
- Test ve karşılaştırma kolaylaşır
Cons:
- UI ve backend karmaşıklığı artar
- Hangi isteğin hangi modelle çalıştığı daha az net olur
Best when: Kısa vadede A/B model denemesi yapılacaksa
### Approach C: Ayrı Provider Sekmeleri ve Bağımsız Konfigürasyonlar
Hem local hem z.ai ayarları hep görünür, ama aktif flag ayrı tutulur.
Pros:
- Tüm ayarlar tek ekranda görünür
- Geçişler hızlı olur
Cons:
- UI kalabalıklaşır
- İlk sürüm için gereğinden fazla yapı
Best when: Sık sağlayıcı değişimi bekleniyorsa
## Recommendation
Approach A.
İlk sürüm için en doğru yol bu. Admin panelde:
- `Model Provider`: `local` / `zai`
- `local` seçiliyken: base URL + local model
- `zai` seçiliyken: API key + model dropdown (`glm-4.7`, `glm-5`)
Backend tarafında ortak bir LLM gateway oluşturulmalı. Seçili provider'a göre:
- Local: mevcut LM Studio/OpenAI-compatible endpoint
- Z.AI: z.ai OpenAI-compatible endpoint + bearer/api key
## Key Decisions
- Provider seçimi global olacak: sistem davranışı tek bir aktif modele bağlı kalacak.
- z.ai API key secret olarak saklanacak: normal runtime settings içine düz yazı olarak girmeyecek.
- z.ai model listesi ilk aşamada sabit olacak: `glm-4.7` ve `glm-5`.
- UI conditional olacak: sadece seçili provider'ın alanları gösterilecek.
- Backend provider-aware olacak: mevcut `ollama_base_url/default_model` yaklaşımı daha genel `provider/base_url/model` yapısına genişletilecek.
## Open Questions
- z.ai için sabit bir base URL kullanıp UI'da göstermeyelim mi, yoksa readonly/default bir alan olarak mı gösterelim?
- `glm-4.7` ve `glm-5` dışında gelecekte serbest model adı girişi de desteklenecek mi?
## Next Steps
- `/workflows:plan` seviyesinde implementasyon planına geç

View File

@@ -0,0 +1,33 @@
---
date: 2026-03-22
topic: telegram-onboarding
---
# Telegram Onboarding
## What We're Building
WiseClaw'a Telegram üzerinden `/tanışalım` komutu ile başlayan, 12 soruluk kalıcı bir onboarding sohbeti ekliyoruz. Bu akış kullanıcının adı, kullanım amacı, ton tercihi, dil tercihi, yanıt uzunluğu, çalışma biçimi ve sınırları gibi bilgileri toplar.
Toplanan veriler geçici hafızada değil, SQLite içinde yapılandırılmış bir kullanıcı profili olarak saklanır. Böylece sunucu yeniden başlasa bile WiseClaw aynı kullanıcıyla aynı üslupta konuşmaya devam eder.
## Why This Approach
Alternatif olarak cevapları yalnızca genel memory tablosuna yazmak mümkündü, ancak bu yaklaşım dağınık, kırılgan ve güncellemesi zor olurdu. Ayrı profil + onboarding state modeli daha güvenilir, sorgulanabilir ve kişiselleştirme için daha uygundur.
## Key Decisions
- `/tanışalım` Telegram komutu olacak: onboarding yalnızca istek üzerine veya ilk temas senaryosunda başlatılacak.
- 12 soru tek tek sorulacak: uzun form yerine sohbet hissi korunacak.
- Her cevap anında kaydedilecek: yarıda kalırsa kaldığı yerden devam edilebilecek.
- Veriler ayrı kullanıcı profili tablosunda tutulacak: kalıcı kişiselleştirme için.
- Prompt'a structured profile enjekte edilecek: ton, dil, uzunluk ve çalışma tercihi her cevapta uygulanacak.
- Kısa profil özeti ayrıca memory'ye yazılabilecek: ama asıl kaynak structured profile olacak.
## Open Questions
- İlk mesajda onboarding otomatik mi tetiklensin, yoksa sadece `/tanışalım` ile mi başlasın?
- Admin panelde profil düzenleme ilk sürüme dahil edilsin mi, yoksa yalnızca Telegram komutları yeterli mi?
## Next Steps
- Veri modelini ve onboarding state yapısını ekle
- Telegram command akışını oluştur
- Orchestrator içine onboarding interception ekle
- Prompt kişiselleştirme katmanını bağla
- `/profilim`, `/tercihlerim`, `/tanışalım_sifirla` yardımcı komutlarını ekle

View File

@@ -2,21 +2,29 @@ import { FormEvent, useEffect, useState } from "react";
import { api } from "./api";
import type {
AutomationRecord,
DashboardSnapshot,
MemoryRecord,
OllamaStatus,
RuntimeSettings,
TelegramStatus,
UserProfileRecord,
UserRecord,
} from "./types";
const defaultSettings: RuntimeSettings = {
terminal_mode: 3,
search_provider: "brave",
ollama_base_url: "http://127.0.0.1:11434",
default_model: "qwen3.5:4b",
model_provider: "local",
local_base_url: "http://127.0.0.1:1234",
local_model: "qwen3-vl-8b-instruct-mlx@5bit",
zai_model: "glm-5",
anythingllm_base_url: "http://127.0.0.1:3001",
anythingllm_workspace_slug: "wiseclaw",
tools: [
{ name: "brave_search", enabled: true },
{ name: "second_brain", enabled: true },
{ name: "browser_use", enabled: true },
{ name: "searxng_search", enabled: false },
{ name: "web_fetch", enabled: true },
{ name: "apple_notes", enabled: true },
@@ -29,12 +37,25 @@ export function App() {
const [dashboard, setDashboard] = useState<DashboardSnapshot | null>(null);
const [settings, setSettings] = useState<RuntimeSettings>(defaultSettings);
const [users, setUsers] = useState<UserRecord[]>([]);
const [profiles, setProfiles] = useState<UserProfileRecord[]>([]);
const [automations, setAutomations] = useState<AutomationRecord[]>([]);
const [memory, setMemory] = useState<MemoryRecord[]>([]);
const [secretMask, setSecretMask] = useState("");
const [secretValue, setSecretValue] = useState("");
const [zaiSecretMask, setZaiSecretMask] = useState("");
const [zaiSecretValue, setZaiSecretValue] = useState("");
const [anythingSecretMask, setAnythingSecretMask] = useState("");
const [anythingSecretValue, setAnythingSecretValue] = useState("");
const [ollamaStatus, setOllamaStatus] = useState<OllamaStatus | null>(null);
const [telegramStatus, setTelegramStatus] = useState<TelegramStatus | null>(null);
const [status, setStatus] = useState("Loading WiseClaw admin...");
const providerLabel = settings.model_provider === "local" ? "Local (LM Studio)" : "Z.AI";
const searchProviderLabel = settings.search_provider === "brave" ? "Brave" : "SearXNG";
const llmStatusLabel = settings.model_provider === "local" ? "LM Studio status" : "Z.AI status";
const llmStatusHint =
settings.model_provider === "local"
? "Checking local model endpoint..."
: "Checking remote Z.AI endpoint...";
useEffect(() => {
void load();
@@ -42,21 +63,29 @@ export function App() {
async function load() {
try {
const [dashboardData, settingsData, userData, memoryData, secretData, ollamaData, telegramData] =
const [dashboardData, settingsData, userData, profileData, automationData, memoryData, secretData, zaiSecretData, anythingSecretData, ollamaData, telegramData] =
await Promise.all([
api.getDashboard(),
api.getSettings(),
api.getUsers(),
api.getProfiles(),
api.getAutomations(),
api.getMemory(),
api.getSecretMask("brave_api_key"),
api.getSecretMask("zai_api_key"),
api.getSecretMask("anythingllm_api_key"),
api.getOllamaStatus(),
api.getTelegramStatus(),
]);
setDashboard(dashboardData);
setSettings(settingsData);
setUsers(userData);
setProfiles(profileData);
setAutomations(automationData);
setMemory(memoryData);
setSecretMask(secretData.masked);
setZaiSecretMask(zaiSecretData.masked);
setAnythingSecretMask(anythingSecretData.masked);
setOllamaStatus(ollamaData);
setTelegramStatus(telegramData);
setStatus("WiseClaw admin ready.");
@@ -84,6 +113,28 @@ export function App() {
await load();
}
// Persist a newly entered Z.AI API key, then refresh admin data so the
// masked key display reflects the stored secret.
async function handleZaiSecretSubmit(event: FormEvent) {
  event.preventDefault();
  // Ignore empty submissions so a blank form cannot overwrite the stored key.
  if (!zaiSecretValue.trim()) {
    return;
  }
  await api.saveSecret("zai_api_key", zaiSecretValue.trim());
  setZaiSecretValue("");
  setStatus("Z.AI API key updated.");
  await load();
}
// Persist a newly entered AnythingLLM API key, then refresh admin data so
// the masked key display reflects the stored secret.
async function handleAnythingSecretSubmit(event: FormEvent) {
  event.preventDefault();
  // Ignore empty submissions so a blank form cannot overwrite the stored key.
  if (!anythingSecretValue.trim()) {
    return;
  }
  await api.saveSecret("anythingllm_api_key", anythingSecretValue.trim());
  setAnythingSecretValue("");
  setStatus("AnythingLLM API key updated.");
  await load();
}
async function handleAddUser(event: FormEvent<HTMLFormElement>) {
event.preventDefault();
const form = new FormData(event.currentTarget);
@@ -133,7 +184,7 @@ export function App() {
</div>
<div>
<span>Model</span>
<strong>{settings.default_model}</strong>
<strong>{settings.model_provider === "local" ? settings.local_model : settings.zai_model}</strong>
</div>
</div>
</aside>
@@ -151,21 +202,21 @@ export function App() {
</div>
<div>
<span>Search provider</span>
<strong>{settings.search_provider}</strong>
<strong>{searchProviderLabel}</strong>
</div>
<div>
<span>Ollama</span>
<strong>{settings.ollama_base_url}</strong>
<span>Provider</span>
<strong>{providerLabel}</strong>
</div>
</div>
<div className="integration-grid">
<div className="integration-card">
<span>Ollama status</span>
<span>{llmStatusLabel}:</span>
<strong>{ollamaStatus?.reachable ? "Reachable" : "Offline"}</strong>
<p>{ollamaStatus?.message || "Checking..."}</p>
<p>{ollamaStatus?.message || llmStatusHint}</p>
</div>
<div className="integration-card">
<span>Telegram status</span>
<span>Telegram status:</span>
<strong>{telegramStatus?.configured ? "Configured" : "Missing token"}</strong>
<p>{telegramStatus?.message || "Checking..."}</p>
</div>
@@ -196,6 +247,22 @@ export function App() {
</select>
</label>
<label>
Model provider
<select
value={settings.model_provider}
onChange={(event) =>
setSettings({
...settings,
model_provider: event.target.value as "local" | "zai",
})
}
>
<option value="local">Local (LM Studio)</option>
<option value="zai">Z.AI</option>
</select>
</label>
<label>
Search provider
<select
@@ -213,21 +280,59 @@ export function App() {
</label>
<label>
Ollama base URL
AnythingLLM base URL
<input
value={settings.ollama_base_url}
onChange={(event) => setSettings({ ...settings, ollama_base_url: event.target.value })}
value={settings.anythingllm_base_url}
onChange={(event) => setSettings({ ...settings, anythingllm_base_url: event.target.value })}
placeholder="http://127.0.0.1:3001"
/>
</label>
<label>
Default model
AnythingLLM workspace slug
<input
value={settings.default_model}
onChange={(event) => setSettings({ ...settings, default_model: event.target.value })}
value={settings.anythingllm_workspace_slug}
onChange={(event) => setSettings({ ...settings, anythingllm_workspace_slug: event.target.value })}
placeholder="wiseclaw"
/>
</label>
{settings.model_provider === "local" ? (
<>
<label>
LM Studio base URL
<input
value={settings.local_base_url}
onChange={(event) => setSettings({ ...settings, local_base_url: event.target.value })}
/>
</label>
<label>
Local model
<input
value={settings.local_model}
onChange={(event) => setSettings({ ...settings, local_model: event.target.value })}
/>
</label>
</>
) : (
<>
<p className="muted">Z.AI uses the fixed hosted API endpoint and the API key saved below.</p>
<label>
Z.AI model
<select
value={settings.zai_model}
onChange={(event) =>
setSettings({ ...settings, zai_model: event.target.value as "glm-4.7" | "glm-5" })
}
>
<option value="glm-4.7">glm-4.7</option>
<option value="glm-5">glm-5</option>
</select>
</label>
</>
)}
<div className="tool-list">
{settings.tools.map((tool) => (
<label key={tool.name} className="checkbox-row">
@@ -250,7 +355,7 @@ export function App() {
</form>
<div className="stack">
<form className="panel" onSubmit={handleSecretSubmit}>
<form className="panel secret-panel" onSubmit={handleSecretSubmit}>
<div className="panel-head">
<h3>Secrets</h3>
<button type="submit">Update</button>
@@ -267,6 +372,40 @@ export function App() {
</label>
</form>
<form className="panel secret-panel" onSubmit={handleZaiSecretSubmit}>
<div className="panel-head">
<h3>Z.AI Secret</h3>
<button type="submit">Update</button>
</div>
<p className="muted">Current Z.AI key: {zaiSecretMask || "not configured"}</p>
<label>
Z.AI API key
<input
type="password"
value={zaiSecretValue}
onChange={(event) => setZaiSecretValue(event.target.value)}
placeholder="Paste a new key"
/>
</label>
</form>
<form className="panel secret-panel" onSubmit={handleAnythingSecretSubmit}>
<div className="panel-head">
<h3>AnythingLLM Secret</h3>
<button type="submit">Update</button>
</div>
<p className="muted">Current AnythingLLM key: {anythingSecretMask || "not configured"}</p>
<label>
AnythingLLM API key
<input
type="password"
value={anythingSecretValue}
onChange={(event) => setAnythingSecretValue(event.target.value)}
placeholder="Paste a new key"
/>
</label>
</form>
<form className="panel" onSubmit={handleAddUser}>
<div className="panel-head">
<h3>Telegram Whitelist</h3>
@@ -297,6 +436,75 @@ export function App() {
</div>
</section>
<section className="grid two-up">
<div className="panel compact-fixed-panel">
<div className="panel-head">
<h3>User Profiles</h3>
</div>
<div className="list compact-scroll-list">
{profiles.length === 0 ? <span className="muted">No onboarding profiles yet.</span> : null}
{profiles.map((profile) => (
<div key={profile.telegram_user_id} className="list-row">
<strong>
{profile.display_name || `User ${profile.telegram_user_id}`} ·{" "}
{profile.onboarding_completed
? "Onboarding complete"
: `Step ${profile.last_onboarding_step + 1}/12`}
</strong>
<div>Telegram ID: {profile.telegram_user_id}</div>
<div>Ton: {profile.tone_preference || "belirtilmedi"}</div>
<div>Dil: {profile.language_preference || "belirtilmedi"}</div>
<div>Cevap uzunluğu: {profile.response_length || "belirtilmedi"}</div>
<div>Çalışma biçimi: {profile.workflow_preference || "belirtilmedi"}</div>
<div>
Kullanım amacı: {profile.primary_use_cases.length ? profile.primary_use_cases.join(", ") : "belirtilmedi"}
</div>
<div>
Öncelikler: {profile.answer_priorities.length ? profile.answer_priorities.join(", ") : "belirtilmedi"}
</div>
<div>
İlgi alanları: {profile.interests.length ? profile.interests.join(", ") : "belirtilmedi"}
</div>
<div>
Onay beklentileri:{" "}
{profile.approval_preferences.length ? profile.approval_preferences.join(", ") : "belirtilmedi"}
</div>
<div>Kaçınılacaklar: {profile.avoid_preferences || "belirtilmedi"}</div>
</div>
))}
</div>
</div>
<div className="panel compact-fixed-panel">
<div className="panel-head">
<h3>Automations</h3>
</div>
<div className="list compact-scroll-list">
{automations.length === 0 ? <span className="muted">No automations yet.</span> : null}
{automations.map((automation) => (
<div key={automation.id} className="list-row automation-row">
<strong>
#{automation.id} {automation.name} · {automation.status}
</strong>
<div>Telegram ID: {automation.telegram_user_id}</div>
<div>Prompt: {automation.prompt}</div>
<div>
Schedule:{" "}
{automation.schedule_type === "hourly"
? `every ${automation.interval_hours || 1} hour(s)`
: automation.schedule_type}
</div>
{automation.time_of_day ? <div>Time: {automation.time_of_day}</div> : null}
{automation.days_of_week.length ? <div>Days: {automation.days_of_week.join(", ")}</div> : null}
<div>Next run: {automation.next_run_at || "not scheduled"}</div>
<div>Last run: {automation.last_run_at || "never"}</div>
<div>Last result: {automation.last_result || "no result yet"}</div>
</div>
))}
</div>
</div>
</section>
<section className="grid two-up">
<div className="panel">
<div className="panel-head">
@@ -305,7 +513,7 @@ export function App() {
Clear
</button>
</div>
<div className="list">
<div className="list scroll-list">
{memory.length === 0 ? <span className="muted">No memory yet.</span> : null}
{memory.map((item, index) => (
<div key={`${item.id}-${index}`} className="list-row">
@@ -320,7 +528,7 @@ export function App() {
<div className="panel-head">
<h3>Recent Logs</h3>
</div>
<div className="list">
<div className="list scroll-list">
{(dashboard?.recent_logs || []).length === 0 ? (
<span className="muted">No recent logs.</span>
) : null}

View File

@@ -1,13 +1,15 @@
import type {
AutomationRecord,
DashboardSnapshot,
MemoryRecord,
OllamaStatus,
RuntimeSettings,
TelegramStatus,
UserProfileRecord,
UserRecord,
} from "./types";
const API_BASE = "http://127.0.0.1:8000";
const API_BASE = `${window.location.protocol}//${window.location.hostname}:8000`;
async function request<T>(path: string, init?: RequestInit): Promise<T> {
const response = await fetch(`${API_BASE}${path}`, {
@@ -33,6 +35,8 @@ export const api = {
body: JSON.stringify(payload),
}),
getUsers: () => request<UserRecord[]>("/admin/users"),
getProfiles: () => request<UserProfileRecord[]>("/admin/profiles"),
getAutomations: () => request<AutomationRecord[]>("/admin/automations"),
addUser: (payload: UserRecord) =>
request<UserRecord>("/admin/users", {
method: "POST",
@@ -49,6 +53,6 @@ export const api = {
method: "POST",
body: JSON.stringify({ key, value }),
}),
getOllamaStatus: () => request<OllamaStatus>("/admin/integrations/ollama"),
getOllamaStatus: () => request<OllamaStatus>("/admin/integrations/llm"),
getTelegramStatus: () => request<TelegramStatus>("/admin/integrations/telegram"),
};

View File

@@ -120,6 +120,7 @@ label {
padding: 2rem;
display: grid;
gap: 1.4rem;
min-width: 0;
}
.panel {
@@ -129,6 +130,22 @@ label {
padding: 1.2rem;
backdrop-filter: blur(10px);
box-shadow: 0 20px 60px rgba(72, 64, 39, 0.08);
min-width: 0;
overflow: hidden;
}
.fixed-log-panel {
display: grid;
grid-template-rows: auto minmax(0, 1fr);
height: calc(80 * 1.4em + 5.5rem);
align-self: start;
}
.compact-fixed-panel {
display: grid;
grid-template-rows: auto minmax(0, 1fr);
height: 600px;
align-self: start;
}
.hero {
@@ -161,6 +178,15 @@ label {
border: 1px solid rgba(31, 92, 102, 0.12);
}
.integration-card span,
.integration-card strong {
display: inline;
}
.integration-card strong {
margin-left: 0.3rem;
}
.integration-card p {
margin-bottom: 0;
color: #4f5b57;
@@ -170,11 +196,36 @@ label {
display: grid;
grid-template-columns: repeat(2, minmax(0, 1fr));
gap: 1.4rem;
min-width: 0;
align-items: start;
}
.stack {
display: grid;
gap: 1.4rem;
min-width: 0;
}
.secret-panel {
padding-top: 0.64rem;
padding-bottom: 0.64rem;
}
.secret-panel .panel-head {
margin-bottom: 0.24rem;
}
.secret-panel label {
gap: 0.2rem;
}
.secret-panel .muted {
margin-bottom: 0.1rem;
}
.secret-panel form,
.secret-panel {
gap: 0.36rem;
}
.panel-head {
@@ -190,6 +241,7 @@ label {
form {
display: grid;
gap: 0.9rem;
min-width: 0;
}
.checkbox-row {
@@ -216,11 +268,103 @@ form {
}
.list-row {
padding: 0.8rem 0.9rem;
padding: 0.65rem 0.75rem;
border-radius: 18px;
background: rgba(31, 36, 33, 0.05);
font-family: "IBM Plex Mono", "SF Mono", monospace;
font-size: 0.9rem;
font-size: 0.84rem;
line-height: 1.28;
min-width: 0;
max-width: 100%;
overflow-wrap: anywhere;
word-break: break-word;
white-space: pre-wrap;
}
.list {
min-width: 0;
}
.list .list-row strong {
display: block;
margin-bottom: 0.18rem;
}
.automation-row {
height: 250px;
overflow-y: auto;
align-content: start;
scrollbar-width: thin;
scrollbar-color: rgba(31, 92, 102, 0.72) rgba(233, 196, 106, 0.2);
}
.scroll-list {
height: calc(80 * 1.4em);
max-height: calc(80 * 1.4em);
overflow-y: auto;
overflow-x: hidden;
align-content: start;
padding-right: 0.35rem;
scrollbar-width: thin;
scrollbar-color: rgba(31, 92, 102, 0.72) rgba(233, 196, 106, 0.2);
}
.compact-scroll-list {
min-height: 0;
overflow-y: auto;
overflow-x: hidden;
padding-right: 0.35rem;
scrollbar-width: thin;
scrollbar-color: rgba(31, 92, 102, 0.72) rgba(233, 196, 106, 0.2);
}
.scroll-list::-webkit-scrollbar {
width: 12px;
}
.compact-scroll-list::-webkit-scrollbar {
width: 12px;
}
.scroll-list::-webkit-scrollbar-track {
background: rgba(233, 196, 106, 0.18);
border-radius: 999px;
}
.compact-scroll-list::-webkit-scrollbar-track {
background: rgba(233, 196, 106, 0.18);
border-radius: 999px;
}
.scroll-list::-webkit-scrollbar-thumb {
background: linear-gradient(180deg, rgba(31, 122, 140, 0.88), rgba(31, 92, 102, 0.72));
border-radius: 999px;
border: 2px solid rgba(255, 250, 242, 0.9);
}
.compact-scroll-list::-webkit-scrollbar-thumb {
background: linear-gradient(180deg, rgba(31, 122, 140, 0.88), rgba(31, 92, 102, 0.72));
border-radius: 999px;
border: 2px solid rgba(255, 250, 242, 0.9);
}
.automation-row::-webkit-scrollbar {
width: 10px;
}
.automation-row::-webkit-scrollbar-track {
background: rgba(233, 196, 106, 0.18);
border-radius: 999px;
}
.automation-row::-webkit-scrollbar-thumb {
background: linear-gradient(180deg, rgba(31, 122, 140, 0.88), rgba(31, 92, 102, 0.72));
border-radius: 999px;
border: 2px solid rgba(255, 250, 242, 0.9);
}
.scroll-list::-webkit-scrollbar-thumb:hover {
background: linear-gradient(180deg, rgba(31, 122, 140, 1), rgba(31, 92, 102, 0.88));
}
@media (max-width: 960px) {

View File

@@ -6,8 +6,12 @@ export type ToolToggle = {
export type RuntimeSettings = {
terminal_mode: 1 | 2 | 3;
search_provider: "brave" | "searxng";
ollama_base_url: string;
default_model: string;
model_provider: "local" | "zai";
local_base_url: string;
local_model: string;
zai_model: "glm-4.7" | "glm-5";
anythingllm_base_url: string;
anythingllm_workspace_slug: string;
tools: ToolToggle[];
};
@@ -25,6 +29,41 @@ export type UserRecord = {
is_active: boolean;
};
/**
 * Structured Telegram onboarding profile returned by GET /admin/profiles.
 * List fields arrive pre-decoded as string arrays; optional fields are
 * null/undefined when the corresponding onboarding step was not answered.
 */
export type UserProfileRecord = {
  telegram_user_id: number;
  display_name?: string | null;
  bio?: string | null;
  occupation?: string | null;
  primary_use_cases: string[];
  answer_priorities: string[];
  tone_preference?: string | null;
  response_length?: string | null;
  language_preference?: string | null;
  workflow_preference?: string | null;
  interests: string[];
  approval_preferences: string[];
  avoid_preferences?: string | null;
  // True once all onboarding questions have been answered.
  onboarding_completed: boolean;
  // Zero-based index of the last completed onboarding step.
  last_onboarding_step: number;
};
/**
 * Scheduled automation returned by GET /admin/automations.
 * interval_hours applies to "hourly" schedules; time_of_day/days_of_week
 * apply to the calendar-based schedule types. Timestamps are serialized
 * strings from the backend.
 */
export type AutomationRecord = {
  id: number;
  telegram_user_id: number;
  name: string;
  prompt: string;
  schedule_type: "daily" | "weekdays" | "weekly" | "hourly";
  interval_hours?: number | null;
  time_of_day?: string | null;
  days_of_week: string[];
  status: "active" | "paused";
  last_run_at?: string | null;
  next_run_at?: string | null;
  last_result?: string | null;
  created_at: string;
  updated_at: string;
};
export type MemoryRecord = {
id: number;
content: string;
@@ -34,6 +73,7 @@ export type MemoryRecord = {
export type OllamaStatus = {
reachable: boolean;
provider: "local" | "zai";
base_url: string;
model: string;
installed_models: string[];

58
restart.sh Executable file
View File

@@ -0,0 +1,58 @@
#!/bin/zsh
# Restart helper for the WiseClaw backend: stops any previously launched
# uvicorn instance, relaunches it in the background with .env loaded, and
# waits for the /health endpoint before reporting success.
set -euo pipefail

ROOT_DIR="$(cd "$(dirname "$0")" && pwd)"
BACKEND_DIR="$ROOT_DIR/backend"
LOG_DIR="$ROOT_DIR/.wiseclaw/logs"
PID_FILE="$ROOT_DIR/.wiseclaw/backend.pid"
LOG_FILE="$LOG_DIR/backend.log"
HEALTH_URL="http://127.0.0.1:8000/health"

mkdir -p "$LOG_DIR"

# Kill the previously recorded PID if it is still alive, remove the stale
# PID file, then sweep any stray uvicorn process matching the exact launch
# command line (best-effort; failures are ignored).
stop_existing() {
  if [[ -f "$PID_FILE" ]]; then
    local old_pid
    old_pid="$(cat "$PID_FILE" 2>/dev/null || true)"
    if [[ -n "${old_pid:-}" ]] && kill -0 "$old_pid" 2>/dev/null; then
      kill "$old_pid" 2>/dev/null || true
      sleep 1
    fi
    rm -f "$PID_FILE"
  fi
  pkill -f "uvicorn app.main:app --host 0.0.0.0 --port 8000" >/dev/null 2>&1 || true
}

# Launch uvicorn in a background subshell: cd into the backend dir, source
# .env with allexport so its variables reach the server, redirect all output
# to the log file, and record the background job's PID.
start_backend() {
  (
    cd "$BACKEND_DIR"
    exec /bin/zsh -lc 'set -a; source .env >/dev/null 2>&1; exec .venv312/bin/python -m uvicorn app.main:app --host 0.0.0.0 --port 8000'
  ) >"$LOG_FILE" 2>&1 &
  echo $! > "$PID_FILE"
}

# Poll the health endpoint once per second for up to 20 attempts; return 0
# as soon as it responds, 1 if it never becomes reachable.
wait_for_health() {
  local attempt
  for attempt in {1..20}; do
    if curl -fsS "$HEALTH_URL" >/dev/null 2>&1; then
      return 0
    fi
    sleep 1
  done
  return 1
}

stop_existing
start_backend

if wait_for_health; then
  echo "WiseClaw backend restarted."
  echo "PID: $(cat "$PID_FILE")"
  echo "Log: $LOG_FILE"
  exit 0
fi

echo "WiseClaw backend failed to start. Check log: $LOG_FILE" >&2
exit 1