import { useState, useEffect, useRef, useCallback } from "react";
import { getSessionId } from "../services/websocket";
import { getSessionTokenStatus, getSessionMessages, chatWithAI, getUserConfig, getSession, cancelSession } from "../services/apiService";
/**
 * Custom hook driving the Swarm Control chat page.
 *
 * Owns the chat transcript, streaming status updates from the AI, token-usage
 * polling, session lifecycle (create / switch / cancel / `/new` reset), and
 * LLM-configuration validation.
 *
 * @param {Object}   props
 * @param {Object}   [props.pageContainerRef]    Ref to the page container.
 *                                               NOTE(review): currently unused
 *                                               in this hook — kept for
 *                                               interface compatibility.
 * @param {Function} [props.onNewSessionCreated] Invoked with the new session id
 *                                               whenever a fresh session is
 *                                               created (initial setup or `/new`).
 * @returns {Object} Chat state and handlers consumed by the page component.
 */
const useSwarmControl = ({ pageContainerRef, onNewSessionCreated }) => {
  const [chatHistory, setChatHistory] = useState([]);
  const [isProcessing, setIsProcessing] = useState(false);
  const [errorMessage, setErrorMessage] = useState("");
  const [showErrorModal, setShowErrorModal] = useState(false);
  const [isConfigured, setIsConfigured] = useState(true);
  const [missingConfigs, setMissingConfigs] = useState([]);
  const [sessionId, setSessionId] = useState(null);
  const [tokenUsage, setTokenUsage] = useState({ token_count: 0, token_limit: 0, percentage: 0 });
  const [userConfigData, setUserConfigData] = useState(null);
  const [localActiveLLM, setLocalActiveLLM] = useState('');

  // Mirror of `sessionId` for use inside async callbacks without re-creating
  // them on every session change.
  const sessionIdRef = useRef(null);
  // Guards the setup effect so it runs only once (e.g. under StrictMode
  // double-invocation in development).
  const initialized = useRef(false);

  /** Refresh token usage for the current session; failures are non-fatal. */
  const fetchTokenUsage = useCallback(async () => {
    if (!sessionIdRef.current) return;
    try {
      const usage = await getSessionTokenStatus(sessionIdRef.current);
      setTokenUsage(usage);
    } catch (err) {
      console.warn("Failed to fetch token usage", err);
    }
  }, []);

  /**
   * Load the persisted message history for a session and replace the local
   * transcript with it. Failures are logged but do not break the page.
   * @param {string} sid - Session id to load.
   */
  const fetchSessionHistory = useCallback(async (sid) => {
    try {
      const messagesData = await getSessionMessages(sid);
      if (messagesData && messagesData.messages) {
        const formattedHistory = messagesData.messages.map((msg) => ({
          isUser: msg.sender === "user",
          isPureAnswer: true,
          text: msg.content,
          reasoning: msg.reasoning_content,
          timestamp: msg.created_at
        }));
        setChatHistory(formattedHistory);
      }
    } catch (err) {
      console.warn("Failed to load chat history", err);
    }
  }, []);

  // One-time setup: load user config, resolve (or create) the session,
  // determine the active LLM provider, validate configuration, then load
  // history and token usage.
  useEffect(() => {
    if (initialized.current) return;
    initialized.current = true;
    const setup = async () => {
      try {
        let configData = null;
        let provider = "gemini"; // fallback provider when config is unavailable
        try {
          configData = await getUserConfig();
          setUserConfigData(configData);
          if (configData.effective?.llm?.active_provider) {
            provider = configData.effective.llm.active_provider;
          }
        } catch (e) {
          console.warn("Could not load user config", e);
        }

        // Detect whether getSessionId will mint a brand-new session (no id
        // persisted yet) so we can notify the parent afterwards.
        let wasNewSession = false;
        if (!localStorage.getItem("sessionId_swarm_control")) {
          wasNewSession = true;
        }
        const sid = await getSessionId("swarm_control", provider);
        setSessionId(sid);
        sessionIdRef.current = sid;
        if (wasNewSession && onNewSessionCreated) {
          onNewSessionCreated(sid);
        }

        // Prefer the provider recorded on the session itself over the
        // config-level default, when available.
        let llm = provider;
        try {
          const sessionInfo = await getSession(sid);
          if (sessionInfo && sessionInfo.provider_name) {
            llm = sessionInfo.provider_name;
          }
        } catch (e) {
          console.warn("Could not check session provider", e);
        }
        setLocalActiveLLM(llm);

        // Config check: at least one LLM provider must have a usable API key.
        const eff = configData?.effective || {};
        const missing = [];
        const llmProviders = eff.llm?.providers || {};
        const hasLLMKey = Object.values(llmProviders).some(p => p.api_key && p.api_key !== 'None');
        if (!hasLLMKey) missing.push("Language Model (LLM) API Key");
        if (missing.length > 0) {
          setIsConfigured(false);
          setMissingConfigs(missing);
        } else {
          setIsConfigured(true);
          setMissingConfigs([]);
        }

        await fetchSessionHistory(sid);
        await fetchTokenUsage();
      } catch (error) {
        console.error("Setup failed:", error);
      }
    };
    setup();
  }, [fetchSessionHistory, fetchTokenUsage]);

  /**
   * Send a user message to the AI and stream the response into the transcript.
   * The special command `/new` resets the conversation with a fresh session.
   * @param {string} text - Raw user input.
   */
  const handleSendChat = useCallback(async (text) => {
    const command = text.trim().toLowerCase();

    if (!isConfigured && command !== "/new") {
      setErrorMessage("Swarm Control requires a valid LLM configuration. Please visit Settings to set up your API keys.");
      setShowErrorModal(true);
      return;
    }

    if (command === "/new") {
      // FIX: wrapped in try/catch — a failing getSessionId previously caused
      // an unhandled promise rejection instead of showing the error modal.
      try {
        setChatHistory([]);
        // Clearing the stored id makes getSessionId mint a fresh session
        // (presumably it re-persists the new id — see services/websocket).
        localStorage.removeItem("sessionId_swarm_control");
        const prefProvider = userConfigData?.effective?.llm?.active_provider || "gemini";
        const newSid = await getSessionId("swarm_control", prefProvider);
        if (onNewSessionCreated) {
          onNewSessionCreated(newSid);
        }
        setLocalActiveLLM(prefProvider);
        setSessionId(newSid);
        sessionIdRef.current = newSid;
        fetchTokenUsage();
      } catch (error) {
        setErrorMessage(error.message);
        setShowErrorModal(true);
      }
      return;
    }

    setIsProcessing(true);
    setChatHistory((prev) => [...prev, { isUser: true, text, timestamp: new Date().toISOString() }]);
    try {
      let reasoningStartTime = null;
      let reasoningDuration = 0;
      // Add a placeholder message for the AI starting with 'Generating' status
      setChatHistory((prev) => [...prev, {
        isUser: false,
        text: "",
        reasoning: "",
        status: "Generating", // Initially show 'Generating'
        provider: localActiveLLM,
        timestamp: new Date().toISOString()
      }]);

      // Stream events mutate a copy of the last (placeholder) message:
      //   reasoning   -> accumulate reasoning text, show "Analyzing & Planning"
      //   content     -> accumulate answer text; first content chunk finalizes
      //                  the "Thought for Ns" label (or clears the status)
      //   status      -> server-driven status line
      //   tool_start / tool_result -> tool-call progress labels
      await chatWithAI(sessionIdRef.current, text, localActiveLLM || "gemini", (event) => {
        setChatHistory((prev) => {
          const newHistory = [...prev];
          const lastMsg = { ...newHistory[newHistory.length - 1] };
          if (event.type === "reasoning") {
            if (!reasoningStartTime) reasoningStartTime = Date.now();
            lastMsg.reasoning += event.content;
            // Only update status to planning if we are in the brain's strategy phase.
            if (!lastMsg.status || (lastMsg.status === "Generating" || lastMsg.status === "Analyzing & Planning")) {
              lastMsg.status = "Analyzing & Planning";
            }
          } else if (event.type === "content") {
            if (reasoningStartTime && !lastMsg.thoughtDone) {
              reasoningDuration = Math.round((Date.now() - reasoningStartTime) / 1000);
              lastMsg.status = reasoningDuration > 0 ? `Thought for ${reasoningDuration}s` : null;
              lastMsg.thoughtDone = true;
            } else if (!reasoningStartTime && (lastMsg.status === "Generating" || lastMsg.status === "Analyzing & Planning")) {
              lastMsg.status = null;
            }
            lastMsg.text += event.content;
          } else if (event.type === "status") {
            lastMsg.status = event.content;
          } else if (event.type === "tool_start") {
            lastMsg.status = `Calling tool: ${event.name}`;
          } else if (event.type === "tool_result") {
            lastMsg.status = `Tool ${event.name} returned.`;
          }
          newHistory[newHistory.length - 1] = lastMsg;
          return newHistory;
        });
      });
      fetchTokenUsage();
    } catch (error) {
      setErrorMessage(error.message);
      setShowErrorModal(true);
    } finally {
      setIsProcessing(false);
    }
    // FIX: userConfigData and onNewSessionCreated were missing from the
    // dependency array even though the `/new` branch reads both — a stale
    // closure would have used outdated values.
  }, [isConfigured, userConfigData, localActiveLLM, fetchTokenUsage, onNewSessionCreated]);

  /** Ask the backend to cancel the in-flight request for the current session. */
  const handleCancelChat = useCallback(async () => {
    if (!sessionIdRef.current) return;
    try {
      await cancelSession(sessionIdRef.current);
      // We don't set isProcessing false here immediately if we want to wait for the stream
      // to actually close, but the user wants immediate feedback.
      // RagPipeline already checks session.is_cancelled at the start of each turn.
    } catch (err) {
      console.warn("Failed to cancel session", err);
    }
  }, []);

  /**
   * Switch the page to another existing session: persist the id, clear the
   * transcript, then reload provider, history and token usage for it.
   * @param {string} targetSessionId - Id of the session to activate.
   */
  const handleSwitchSession = useCallback(async (targetSessionId) => {
    localStorage.setItem("sessionId_swarm_control", targetSessionId);
    setSessionId(targetSessionId);
    sessionIdRef.current = targetSessionId;
    setChatHistory([]);
    try {
      const sessionInfo = await getSession(targetSessionId);
      if (sessionInfo && sessionInfo.provider_name) {
        setLocalActiveLLM(sessionInfo.provider_name);
      }
      await fetchSessionHistory(targetSessionId);
      await fetchTokenUsage();
    } catch (error) {
      console.error("Failed to switch session:", error);
    }
  }, [fetchSessionHistory, fetchTokenUsage]);

  return {
    chatHistory,
    isProcessing,
    errorMessage,
    showErrorModal,
    tokenUsage,
    isConfigured,
    missingConfigs,
    handleSendChat,
    handleCancelChat,
    setShowErrorModal,
    handleSwitchSession,
    sessionId,
    userConfigData,
    localActiveLLM,
    setLocalActiveLLM
  };
};
export default useSwarmControl;