diff --git a/ai-hub/app/core/pipelines/context_compressor.py b/ai-hub/app/core/pipelines/context_compressor.py deleted file mode 100644 index 6563bab..0000000 --- a/ai-hub/app/core/pipelines/context_compressor.py +++ /dev/null @@ -1,96 +0,0 @@ -import dspy -import os -import json -from datetime import datetime - -class SimpleDataCompressor(dspy.Signature): - """ - Condense a long string of concatenated file data into a concise summary that retains the most important information related to the user's question. - """ - question = dspy.InputField(desc="The user's current question.") - full_context_string = dspy.InputField(desc="A long string containing file paths and their contents.") - summarized_context = dspy.OutputField( - desc="A long, detailed, synthesizing the most relevant information from the input data to address the user's question effectively." - ) - -# 1. Define the system prompt string -SYSTEM_PROMPT = """ -Here is a system prompt designed to address the issues in the input data and guide the model toward a useful, non-truncated response. - -### System Prompt for Compression and Contextualization - -You are an expert AI assistant tasked with analyzing a complex request and its associated file data. Your primary goal is to provide a **concise, accurate, and actionable response** to the user's question, while handling potentially overwhelming input data. - ---- -**Instructions:** - -1. **Strictly Filter Irrelevant Files:** First, ignore all non-essential files in the `retrieved_files` array. This includes: - * **Compiled files:** Any file with a `.pyc` or similar extension. - * **Cache directories:** Any path containing `__pycache__`. - * **Documentation or shell scripts:** Files ending in `.md` or `.sh`. - * **Initialization files:** `__init__.py` files unless explicitly requested. - -2. **Focus on Specificity:** The user's question is "generally polish the `app.core.services.workspace.py` and its test code." This means the most relevant files are `app/core/services/workspace.py` and any file explicitly identified as its test file (e.g., `test_workspace.py`). Prioritize these. All other files are secondary context. - -3. **Synthesize, Don't Dump:** Instead of simply printing all the file contents, **synthesize** the most relevant information. Describe the high-level purpose of the `workspace.py` file based on its content, and identify the key functions and classes. For the test code, describe the functionality being tested. - -4. **Action-Oriented Response:** Based on your analysis, provide a response that directly answers the user's request to "polish" the code. This might include: - * Suggestions for code refactoring or simplification. - * Recommendations for improving a function's logic. - * Identifying potential bugs or edge cases. - * Adding or improving comments and documentation. - * Proposing new test cases for better coverage. - -5. **Final Output:** Your final response should be a clean, well-formatted markdown text. **Do not include the raw input data or the full list of files in your final output.** Your response must be an answer to the user's request, not a report on the input data itself. - -6. **Maintain Professional Tone:** Respond in a clear, concise, and professional tone. Avoid conversational filler or unnecessary explanations about how you processed the data. -""" -class StringContextCompressor(dspy.Module): - """ - A pipeline to compress a string of retrieved file data. 
- """ - def __init__(self): - super().__init__() - # Use ChainOfThought for multi-step reasoning to produce a good summary. - self.compressor = dspy.ChainOfThought(SimpleDataCompressor, system_prompt=SYSTEM_PROMPT) - - async def forward(self, question: str, retrieved_data_string: str) -> str: - """ - Processes a string of data and returns a compressed string. - """ - # Call the ChainOfThought module with the string inputs. - input_payload = { - "question": question, - "full_context_string": retrieved_data_string - } - prediction = await self.compressor.acall(**input_payload) - - self._log_to_file(input_payload, prediction) - - return prediction.summarized_context - - def _log_to_file(self, history_entry, prediction): - """ - Saves the raw payload sent to the AI and the raw response to a local file. - """ - # Create a log directory if it doesn't exist - log_dir = "ai_payloads" - os.makedirs(log_dir, exist_ok=True) - - # Generate a unique filename using a timestamp - timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - filename = f"{log_dir}/ai_payload_{timestamp}.json" - - log_data = { - "timestamp": timestamp, - "request_payload": history_entry, - "response_payload": { - "summarized_context": prediction.summarized_context, - } - } - - # Save the data to a file - with open(filename, "w", encoding="utf-8") as f: - json.dump(log_data, f, indent=4) - - print(f"Logged AI payload and response to {filename}") \ No newline at end of file diff --git a/ai-hub/app/core/pipelines/file_selector.py b/ai-hub/app/core/pipelines/file_selector.py index aec19c3..3226ed5 100644 --- a/ai-hub/app/core/pipelines/file_selector.py +++ b/ai-hub/app/core/pipelines/file_selector.py @@ -4,11 +4,22 @@ class SelectFiles(dspy.Signature): """ - Based on the user's question, communication history, and the code folder's file list, identify the files that are most relevant to answer the question. + You are an expert file navigator for a large codebase. Your task is to select the most critical and relevant files to answer a user's question from a provided list of file paths. + + Your selection criteria are: + 1. **Prioritize Core Files:** Choose files that are most likely to contain the central logic, definitions, or configuration directly related to the user's question. + 2. **Be Selective:** To avoid exceeding token limits, you must choose a small, focused set of files (e.g., typically 2-4 files). Do not select a large number of files. + 3. **Ignore Irrelevant Files:** Discard any files that appear to be placeholders, test files, or have names that are clearly unrelated to the user's query. + 4. **Infer User Intent:** If the user's question explicitly mentions a file path that is not present in the `retrieved_files` list (e.g., due to a typo or a partial path), use that as a strong hint. Analyze the `retrieved_files` list and select the path that is most similar to the one the user provided. You MUST still only return a file path that actually exists in the `retrieved_files` list, if you think none is related, return an empty array. + + The ONLY output you should provide is a parsable JSON array of strings. Do not include any other text, explanations, or markdown formatting (e.g., ```json...```). Your response must begin with `[` and end with `]`. Absolutely no other characters are allowed before or after the JSON array. The strings in the array MUST be enclosed in double quotes. 
""" question = dspy.InputField(desc="The user's current question.") - retrieved_files = dspy.InputField(desc="A JSON object containing details about a retrieval request, including an array of file objects. Each file object contains its ID, path, name, content, type, and timestamps.") - answer = dspy.OutputField(format=list, desc="A list of strings containing the file paths of the most relevant files to examine further.") + retrieved_files = dspy.InputField(desc="A JSON string representing a list of all available file paths.") + answer = dspy.OutputField(format=list, desc="A JSON array of strings. Each string element in the array MUST be enclosed in double quotes.") + question = dspy.InputField(desc="The user's current question.") + retrieved_files = dspy.InputField(desc="A JSON string representing a list of all available file paths.") + answer = dspy.OutputField(format=list, desc="A JSON array of strings. Each string element in the array MUST be enclosed in double quotes.") class CodeRagFileSelector(dspy.Module): """ @@ -17,17 +28,17 @@ def __init__(self): super().__init__() # Assign the system prompt directly to the dspy.Predict instance. - self.select_files = dspy.Predict(SelectFiles, system_prompt="You are a helpful AI assistant. Based on the retrieved files and the user's question, determine which files are needed to answer the question. The content field for some files may not be empty, while others are. Your final answer must be a JSON array of strings containing the file paths. It's VERY IMPORTANT to ensure the answer is a parsable JSON array.") + self.select_files = dspy.ChainOfThought(SelectFiles) - async def forward(self, question: str, retrieved_data: Dict[str, Any]) -> List[str]: - # Convert the entire retrieved_data dictionary to a JSON string - retrieved_data_json_string = json.dumps(retrieved_data, indent=2) - + async def forward(self, question: str, retrieved_data: List[str]) -> List[str]: + # Convert the list of strings to a JSON string using json.dumps + # The prompt is now explicitly asking for a JSON array of strings, so you can pass the raw JSON string. + retrieved_json = json.dumps(retrieved_data) # Call the predictor with the necessary inputs prediction = await self.select_files.acall( question=question, - retrieved_files=retrieved_data_json_string + retrieved_files=retrieved_json ) - # The prediction.answer should now be a parsable list due to the prompt's instructions. + # The prediction.answer should be the list of strings directly as per the output format. return prediction.answer \ No newline at end of file diff --git a/ai-hub/app/core/pipelines/file_selector_rag_test.py b/ai-hub/app/core/pipelines/file_selector_rag_test.py deleted file mode 100644 index c8f4c5e..0000000 --- a/ai-hub/app/core/pipelines/file_selector_rag_test.py +++ /dev/null @@ -1,85 +0,0 @@ -import dspy -import asyncio -import json -import os -from typing import List -from app.core.providers.factory import get_llm_provider - -# Assume these are defined elsewhere -class MockMessage: - def __init__(self, sender: str, content: str): - self.sender = sender - self.content = content - -# --- Step 2: Paste your existing DSPy components here --- -class SelectFiles(dspy.Signature): - """ - Based on the user's question, communication history, and the code folder's file list, identify the files that are most relevant to answer the question. 
- """ - question = dspy.InputField(desc="The user's current question.") - chat_history = dspy.InputField(desc="The ongoing dialogue between the user and the AI.") - code_folder_filename_list = dspy.InputField(desc="A list of file names as strings, representing the file structure of the code base.") - answer = dspy.OutputField(format=list, desc="A list of strings containing the names of the most relevant files to examine further.") - -class CodeRagFileSelector(dspy.Module): - """ - A single-step module to select relevant files from a list based on a user question. - """ - def __init__(self): - super().__init__() - self.select_files = dspy.Predict(SelectFiles) - - async def forward(self, question: str, history: List[MockMessage], file_list: List[str]) -> List[str]: - # Format history for the signature - history_text = self._default_history_formatter(history) - - # Call the predictor with the necessary inputs - prediction = await self.select_files.acall( - question=question, - chat_history=history_text, - code_folder_filename_list="\n".join(file_list) - ) - - # The output is expected to be a list of strings - # The DSPy Predict method automatically handles the `format=list` and parses the JSON. - return prediction.answer - - def _default_history_formatter(self, history: List[MockMessage]) -> str: - return "\n".join( - f"{'Human' if msg.sender == 'user' else 'Assistant'}: {msg.content}" - for msg in history - ) - -# --- Step 3: Write the test code --- -async def main(): - dspy.settings.configure(lm=get_llm_provider("gemini","gemini-1.5-flash-latest")) - - # Instantiate the module - file_selector = CodeRagFileSelector() - - # Define sample data - question = "How is the `data` variable initialized in the main application? Also, where are the tests?" - history = [ - MockMessage(sender="user", content="What does the main script do?"), - MockMessage(sender="assistant", content="The main script handles data processing and initializes the primary variables.") - ] - file_list = [ - "main.py", - "utils.py", - "README.md", - "config.yaml", - "tests/test_main.py", - "src/data_handler.py" - ] - - print("Running the file selector with Gemini...") - selected_files = await file_selector(question, history, file_list) - - print("\n--- Test Results ---") - print(f"Question: {question}") - print(f"All files: {file_list}") - print(f"Selected files: {selected_files}") - print("--------------------") - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/ai-hub/app/core/pipelines/question_decider.py b/ai-hub/app/core/pipelines/question_decider.py index bca7499..9ec1e2b 100644 --- a/ai-hub/app/core/pipelines/question_decider.py +++ b/ai-hub/app/core/pipelines/question_decider.py @@ -1,83 +1,86 @@ import dspy import json import os -from typing import List, Dict, Any, Tuple +from typing import List, Dict, Any, Tuple, Optional from datetime import datetime - class QuestionDecider(dspy.Signature): """ - Based on the user's question, chat history, and the content of the retrieved files, - decide whether you have enough information to answer the question, or if you need to request more files. - If the request is to modify code, suggest changes using git diff syntax. + You are a highly specialized AI assistant for software engineering tasks. Your role is to analyze a user's request and the provided codebase to decide on the best course of action: provide an answer, suggest a code change, or ask for more files. Your decisions must be based **strictly and exclusively** on the provided content. 
-    - If you have enough information, your decision must be 'answer' and you must provide a long, complete, and well-explained answer.
-    - If you need more information, your decision must be 'files' and you must provide a JSON-formatted list of file paths that are needed.
-    - If the request is to modify code, your decision should be 'code_change', and you must provide both a high-level suggestion and code changes in git diff format.
+    ---
+
+    ### 🧠 Core Directives:
+
+    1. **Analyze the Request and Available Data:**
+        * Examine the `question` and `chat_history` to understand the user's intent.
+        * You are provided with two distinct lists of files: `retrieved_paths_with_content` (files you have and can use) and `retrieved_paths_without_content` (files you know exist but whose content you would need to request).
+        * **Crucial Rule:** `retrieved_paths_with_content` is your complete and only source of usable code information. Do not mention or refer to any code that is not explicitly present in this data.
+
+    2. **Determine File Requirements:**
+        * Identify any specific file paths mentioned by the user or required to fulfill the request.
+        * **Do not re-request files you already have; "have" means the requested file path already exists in `retrieved_paths_with_content`.**
+        * A file is considered "missing" only if its path is not in `retrieved_paths_with_content` and is either mentioned in the request or required for a code change. This is the only valid reason to choose `decision='files'`. The `retrieved_paths_without_content` list helps you identify which files are candidates to request.
+        * **Crucial Rule for Unknown Paths:** If a file path mentioned by the user is **not found** in either `retrieved_paths_with_content` or `retrieved_paths_without_content`, you must choose the 'answer' decision and explain that the file could not be found. Do not request it.
+
+    3. **Choose the Correct Decision Path:**
+        * **Decision: 'answer'**
+            * Choose this if you have all the necessary information in `retrieved_paths_with_content` to provide a full, complete, and comprehensive explanation for a non-code-modification question.
+            * **Also choose this if the user asks about a file that is not present in any of the provided data.** You must explain to the user why the file could not be found.
+            * The `answer` field must contain a detailed, well-structured explanation in Markdown.
+            * The `code_diff` field must be empty.
+
+        * **Decision: 'code_change'**
+            * Choose this if the user's request involves modifying or adding to the code (e.g., "fix this bug," "implement this feature," "refactor this function").
+            * You must have all the relevant files with content in `retrieved_paths_with_content` to propose the change.
+            * The `answer` field can be an optional, high-level summary of the change.
+            * The `code_diff` field must contain the full and complete git diff showing the exact modifications; it may span multiple file diffs.
+
+        * **Decision: 'files'**
+            * Choose this **only if** you need more files to fulfill the user's request.
+            * The `answer` field must be a valid JSON list of strings, with each string being an explicit, complete file path that is **found in the `retrieved_paths_without_content` list.** **Do not use wildcard characters like `*` or `?`.**
+            * The `code_diff` field must be empty.
+
+    4. **Final Output Requirements:**
+        * Your output must be a valid JSON object matching the schema.
+        * Be helpful, precise, and adhere strictly to these rules. Do not hallucinate file paths or content.
+
+    ---
    """
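As an editorial aside, the contract these directives impose is strict enough to be machine-checked before the handler acts on a prediction. The following is a minimal, hypothetical sketch; none of these names appear in this diff, and the real handler in `workspace.py` applies looser parsing:

```python
# Hypothetical validator for the QuestionDecider output contract described above.
import json
from typing import List

VALID_DECISIONS = {"answer", "files", "code_change"}

def contract_violations(decision: str, answer: str, code_diff: str,
                        requestable_paths: List[str]) -> List[str]:
    """Return a list of violations; an empty list means the output is well-formed."""
    problems = []
    if decision not in VALID_DECISIONS:
        problems.append(f"unknown decision: {decision!r}")
    if decision == "files":
        if code_diff:
            problems.append("'files' must leave code_diff empty")
        try:
            paths = json.loads(answer)
        except json.JSONDecodeError:
            return problems + ["'files' answer is not a valid JSON list"]
        if not isinstance(paths, list):
            problems.append("'files' answer must be a JSON list of strings")
        elif any(p not in requestable_paths for p in paths):
            problems.append("requested a path not in retrieved_paths_without_content")
    elif decision == "answer" and code_diff:
        problems.append("'answer' must leave code_diff empty")
    elif decision == "code_change" and not code_diff.strip():
        problems.append("'code_change' requires a non-empty git diff")
    return problems
```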
+ --- """ - question = dspy.InputField(desc="The user's current question.") chat_history = dspy.InputField(desc="The ongoing dialogue between the user and the AI.") - retrieved_data = dspy.InputField(desc="A JSON string containing the content of retrieved files relevant to the question.") + + # New Input Fields to make the data split explicit + retrieved_paths_with_content = dspy.InputField(desc="A JSON string of files that have been successfully retrieved with their full content.") + retrieved_paths_without_content = dspy.InputField(desc="A JSON string of files that have been found but their content has not yet been loaded (e.g., empty or null content).") + + reasoning = dspy.OutputField( + desc="First, determine if the artifacts are sufficient. Then, based on the question, the decision type should be either 'code_change' or 'answer'. Finally, analyze the question and determine the output fields." + ) decision = dspy.OutputField( - desc="Your decision: either 'answer', 'files', or 'code_change'." + desc="Must be one of: 'answer', 'files', or 'code_change'." ) - - answer = dspy.OutputField( - desc=( - "If decision is 'answer', provide a detailed, comprehensive, and well-structured response. " - "If decision is 'files', return a JSON-formatted list of file paths required to answer the question. " - "If decision is 'code_change', provide a high-level suggestion or explanation for the proposed changes. " - "For code changes, this summary is optional and can be brief; the git diff is the priority." - ) - ) - code_diff = dspy.OutputField( desc=( - "If decision is 'code_change', provide the code modifications using a clear git diff syntax. " - "This output must be complete and not truncated. " - "For 'answer' or 'files' decisions, this field should be empty." + "If `decision` is 'code_change': the full, complete git diff of the proposed changes.\n" + "Leave this field empty if the decision is not 'code_change'." + ) + ) + answer = dspy.OutputField( + desc=( + "If `decision` is 'answer': a comprehensive, well-structured explanation in Markdown.\n" + "If `decision` is 'files': a JSON-formatted list of required file paths.\n" + "If `decision` is 'code_change': an optional, high-level summary of the proposed changes. Leave empty if no summary is needed." ) ) class CodeRagQuestionDecider(dspy.Module): - """ - A pipeline that uses DSPy to decide whether to answer a user's question, - request additional files, or suggest a code change based on the available data. - """ - def __init__(self): + def __init__(self, log_dir: str = "ai_payloads"): super().__init__() - - # Modified system prompt to prioritize git diff output - # Revised system prompt for CodeRagQuestionDecider - system_prompt = """ - You are a highly specialized AI assistant for a software engineering workspace. Your task is to accurately and efficiently handle user requests based on provided file information. - - **Core Directives:** - 1. **Analyze the User's Question First:** Read the user's question and identify the specific file(s) they are asking about. - 2. **Filter Aggressively:** The provided `retrieved_data` is a JSON string that may contain a large number of files. **You must strictly ignore all irrelevant files.** - * **Irrelevant files include, but are not limited to:** - * Compiled Python files (`.pyc`). - * Documentation files (`.md`). - * Shell scripts (`.sh`). - * Files in `__pycache__` directories. - 3. 
**Prioritize Relevant Files:**
-            * Find the exact file path mentioned in the question (e.g., `app.core.services.workspace.py`).
-            * Identify any related test files (e.g., `test_app_core_services_workspace.py` or similar).
-            * **Only process the content of these highly relevant files.** The content for other files is not provided and should not be discussed.
-        4. **Decide Your Action:**
-            * If the user's question is "polish the X.py and its test code" and you have the code for `X.py` in your context, your decision is always **'answer'**. You have enough information.
-            * If you do not have the content for the specifically requested file(s), your decision is **'files'**, and you must provide a list of the required file paths (e.g., `["/path/to/X.py", "/path/to/test_X.py"]`).
-        5. **Format the Output Strictly:**
-            * If the decision is 'answer', the `answer` field must be a detailed response in markdown.
-            * If the decision is 'files', the `answer` field must be a **valid JSON array of strings**, representing the file paths you need.
-            * The `decision` field must be either 'answer' or 'files'.
-
-        **Your goal is to provide a correct, concise, and focused response based ONLY on the provided relevant file content.** Do not hallucinate or discuss files for which you do not have content.
-        """
-        self.decider = dspy.Predict(QuestionDecider, system_prompt=system_prompt)
+        self.log_dir = log_dir
+        # ChainOfThought makes the model emit its reasoning alongside the decision
+        self.decider = dspy.ChainOfThought(QuestionDecider)

     async def forward(
         self,
@@ -86,56 +89,58 @@
         retrieved_data: Dict[str, Any]
-    ) -> Tuple[str, str, str]:
+    ) -> Tuple[str, str, str, str]:
         """
-        Decide whether to answer, request more files, or suggest a code change.
+        Runs the decision model with the current user input and code context.

         Args:
-            question (str): The user's question.
-            history (List[str]): The chat history.
-            retrieved_data (Dict[str, Any]): The content of files relevant to the question.
+            question: The user's query.
+            history: The chat history as a list of strings.
+            retrieved_data: A dictionary mapping file paths to file contents.

         Returns:
-            Tuple[str, str, str]: The model's answer, decision ('answer', 'files', or 'code_change'), and code diff.
+            A tuple of (answer, reasoning, decision, code_diff).
""" history_text = "\n".join(history) - retrieved_data_json = json.dumps(retrieved_data, indent=0) + + # --- INTERNAL LOGIC TO SPLIT DATA, WITH NULL/POINTER CHECKS --- + with_content = [] + without_content = [] + + # Safely access the 'retrieved_files' key, defaulting to an empty list + files_to_process = retrieved_data.get("retrieved_files", []) + if not isinstance(files_to_process, list): + # Fallback for unexpected data format + files_to_process = [] + + for file in files_to_process: + # Check if 'file' is not None and is a dictionary + if isinstance(file, dict): + file_path = file.get("file_path") + file_content = file.get("content") + + # Check if file_content is a non-empty string + if file_content and isinstance(file_content, str): + with_content.append({"file_path": file_path, "content": file_content}) + # Check for a file path without content + elif file_path: + without_content.append({"file_path": file_path}) + + # Ensure valid JSON strings for the model input + retrieved_with_content_json = json.dumps(with_content, indent=2) + retrieved_without_content_json = json.dumps(without_content, indent=2) input_payload = { "question": question, "chat_history": history_text, - "retrieved_data": retrieved_data_json + "retrieved_paths_with_content": retrieved_with_content_json, + "retrieved_paths_without_content": retrieved_without_content_json, } prediction = await self.decider.acall(**input_payload) - self._log_to_file(input_payload, prediction) + # Defensive handling and a clean way to access prediction fields + decision = getattr(prediction, "decision", "").lower() + answer = getattr(prediction, "answer", "") + code_diff = getattr(prediction, "code_diff", "") + reasoning = getattr(prediction, "reasoning", "") - return prediction.answer, prediction.decision.lower(), prediction.code_diff - - def _log_to_file(self, request_payload: Dict[str, Any], prediction: Any) -> None: - """ - Saves the input and output of the AI call to a JSON file. - - Args: - request_payload (Dict[str, Any]): The input sent to the AI. - prediction (Any): The AI's response. 
- """ - log_dir = "ai_payloads" - os.makedirs(log_dir, exist_ok=True) - - timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - filename = os.path.join(log_dir, f"ai_payload_{timestamp}.json") - - log_data = { - "timestamp": timestamp, - "request_payload": request_payload, - "response_payload": { - "decision": prediction.decision, - "answer": prediction.answer, - "code_diff": prediction.code_diff - } - } - - with open(filename, "w", encoding="utf-8") as f: - json.dump(log_data, f, indent=4) - - print(f"[LOG] AI payload and response saved to {filename}") \ No newline at end of file + return answer, reasoning, decision, code_diff \ No newline at end of file diff --git a/ai-hub/app/core/pipelines/test_gemini.py b/ai-hub/app/core/pipelines/test_gemini.py deleted file mode 100644 index 0060b9b..0000000 --- a/ai-hub/app/core/pipelines/test_gemini.py +++ /dev/null @@ -1,10 +0,0 @@ -import dspy - -lm = dspy.LM('gemini/gemini-2.0-flash') -dspy.configure(lm=lm) - -# Send a simple prompt -response = lm(prompt="hello world") - -# Print the response text -print(response) \ No newline at end of file diff --git a/ai-hub/app/core/pipelines/utils.py b/ai-hub/app/core/pipelines/utils.py new file mode 100644 index 0000000..0a60eb1 --- /dev/null +++ b/ai-hub/app/core/pipelines/utils.py @@ -0,0 +1,42 @@ +import dspy +import json +import os +from datetime import datetime + +def log_dspy_history_to_file() -> None: + log_dir = "ai_payloads" + """ + Logs only the output of dspy.inspect_history(n=1) to a timestamped JSON file. + """ + os.makedirs(log_dir, exist_ok=True) + + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + filename = os.path.join(log_dir, f"dspy_history_{timestamp}.json") + + # Capture the DSPy history by redirecting stdout + from io import StringIO + import sys + + # Create a string buffer to capture the output + old_stdout = sys.stdout + sys.stdout = history_capture = StringIO() + + # Inspect the last interaction + dspy.inspect_history(n=1) + + # Restore stdout + sys.stdout = old_stdout + + # Get the captured history and clean it up + dspy_history = history_capture.getvalue().strip() + + log_data = { + "dspy_history": dspy_history + } + + try: + with open(filename, "w", encoding="utf-8") as f: + json.dump(log_data, f, indent=4) + print(f"[LOG] DSPy history saved to {filename}") + except IOError as e: + print(f"[ERROR] Failed to write log file {filename}: {e}") \ No newline at end of file diff --git a/ai-hub/app/core/providers/factory.py b/ai-hub/app/core/providers/factory.py index 4c2664e..7707338 100644 --- a/ai-hub/app/core/providers/factory.py +++ b/ai-hub/app/core/providers/factory.py @@ -1,6 +1,6 @@ from app.config import settings from .base import TTSProvider, STTProvider -from .llm.general import GeneralProvider +from .llm.general import GeneralProvider # Assuming GeneralProvider is now in this file or imported from .tts.gemini import GeminiTTSProvider from .tts.gcloud_tts import GCloudTTSProvider from .stt.gemini import GoogleSTTProvider @@ -14,7 +14,7 @@ # deepseek_client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url="https://api.deepseek.com") # GEMINI_URL = f"https://generativelanguage.googleapis.com/v1beta/models/{settings.GEMINI_MODEL_NAME}:generateContent?key={settings.GEMINI_API_KEY}" -# # --- 2. The Factory Dictionaries --- +# --- 2. The Factory Dictionaries --- _llm_providers = { "deepseek": settings.DEEPSEEK_API_KEY, "gemini": settings.GEMINI_API_KEY @@ -26,29 +26,29 @@ } # --- 3. 
diff --git a/ai-hub/app/core/providers/factory.py b/ai-hub/app/core/providers/factory.py
index 4c2664e..7707338 100644
--- a/ai-hub/app/core/providers/factory.py
+++ b/ai-hub/app/core/providers/factory.py
@@ -1,6 +1,6 @@
 from app.config import settings
 from .base import TTSProvider, STTProvider
 from .llm.general import GeneralProvider
 from .tts.gemini import GeminiTTSProvider
 from .tts.gcloud_tts import GCloudTTSProvider
 from .stt.gemini import GoogleSTTProvider
@@ -14,7 +14,7 @@
 # deepseek_client = AsyncOpenAI(api_key=settings.DEEPSEEK_API_KEY, base_url="https://api.deepseek.com")
 # GEMINI_URL = f"https://generativelanguage.googleapis.com/v1beta/models/{settings.GEMINI_MODEL_NAME}:generateContent?key={settings.GEMINI_API_KEY}"

-# # --- 2. The Factory Dictionaries ---
+# --- 2. The Factory Dictionaries ---
 _llm_providers = {
     "deepseek": settings.DEEPSEEK_API_KEY,
     "gemini": settings.GEMINI_API_KEY
 }
@@ -26,29 +26,29 @@
 }

 # --- 3. The Factory Functions ---
-def get_llm_provider(provider_name: str, model_name: str = "") -> BaseLM:
-    """Factory function to get the appropriate, pre-configured LLM provider."""
+def get_llm_provider(provider_name: str, model_name: str = "", system_prompt: str = None) -> BaseLM:
+    """Factory function to get the appropriate, pre-configured LLM provider, with an optional system prompt."""
     providerKey = _llm_providers.get(provider_name)
     if not providerKey:
         raise ValueError(f"Unsupported model provider: '{provider_name}'. Supported providers are: {list(_llm_providers.keys())}")
+
     modelName = model_name
     if modelName == "":
         modelName = _llm_models.get(provider_name)
         if not modelName:
-            raise ValueError(f"Unsupported model provider: '{provider_name}'. Supported providers are: {list(_llm_providers.keys())}")
+            raise ValueError(f"No default model configured for provider: '{provider_name}'.")
-    return GeneralProvider(model_name=f'{provider_name}/{modelName}', api_key= providerKey)
+    # Pass the optional system_prompt to the GeneralProvider constructor
+    return GeneralProvider(model_name=f'{provider_name}/{modelName}', api_key=providerKey, system_prompt=system_prompt)

 def get_tts_provider(provider_name: str, api_key: str, model_name: str, voice_name: str) -> TTSProvider:
     if provider_name == "google_gemini":
-        return GeminiTTSProvider(api_key=api_key,model_name = model_name, voice_name = voice_name)
+        return GeminiTTSProvider(api_key=api_key, model_name=model_name, voice_name=voice_name)
     elif provider_name == "gcloud_tts":
-        return GCloudTTSProvider(api_key=api_key, voice_name = voice_name)
+        return GCloudTTSProvider(api_key=api_key, voice_name=voice_name)
     raise ValueError(f"Unsupported TTS provider: '{provider_name}'. Supported providers are: ['google_gemini', 'gcloud_tts']")

 def get_stt_provider(provider_name: str, api_key: str, model_name: str) -> STTProvider:
     if provider_name == "google_gemini":
         return GoogleSTTProvider(api_key=api_key, model_name=model_name)
-    raise ValueError(f"Unsupported STT provider: '{provider_name}'. Supported providers are: ['google_gemini']")
-
-# async def lite_llm_call(model_name: str, prompt: str) -> str:
\ No newline at end of file
+    raise ValueError(f"Unsupported STT provider: '{provider_name}'. Supported providers are: ['google_gemini']")
\ No newline at end of file
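For reference, here is how the new `system_prompt` parameter would flow through the factory into a DSPy context; a hypothetical usage sketch, not code from this diff:

```python
import dspy
from app.core.providers.factory import get_llm_provider

# An empty model_name falls back to the provider's default in _llm_models.
lm = get_llm_provider(
    "gemini",
    system_prompt="You are a concise code-review assistant.",
)

# Any dspy.Module executed inside this context now carries the system prompt,
# because GeneralProvider prepends it in _prepare_messages().
with dspy.context(lm=lm):
    ...
```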
""" - messages = messages or [{"role": "user", "content": prompt}] + # Use the helper to prepare messages + prepared_messages = self._prepare_messages(prompt=prompt, messages=messages) + request = { "model": self.model_name, - "messages": messages, + "messages": prepared_messages, "api_key": self.api_key, **self.kwargs, **kwargs, @@ -30,10 +44,12 @@ """ Asynchronous forward pass using LiteLLM. """ - messages = messages or [{"role": "user", "content": prompt}] + # Use the helper to prepare messages + prepared_messages = self._prepare_messages(prompt=prompt, messages=messages) + request = { "model": self.model_name, - "messages": messages, + "messages": prepared_messages, "api_key": self.api_key, **self.kwargs, **kwargs, diff --git a/ai-hub/app/core/retrievers/file_retriever.py b/ai-hub/app/core/retrievers/file_retriever.py deleted file mode 100644 index ef3a681..0000000 --- a/ai-hub/app/core/retrievers/file_retriever.py +++ /dev/null @@ -1,72 +0,0 @@ -from typing import Dict, Any, Optional -from app.db import file_retriever_models -from sqlalchemy.orm import Session, joinedload -import uuid - -class FileRetriever: - """ - A retriever specifically for accessing file and directory content - based on a FileRetrievalRequest ID. - """ - - def retrieve_by_request_id(self, db: Session, request_id: str) -> Optional[Dict[str, Any]]: - """ - Retrieves a FileRetrievalRequest and all its associated files from the database, - returning the data in a well-formatted JSON-like dictionary. - - Args: - db: The SQLAlchemy database session. - request_id: The UUID of the FileRetrievalRequest. - - Returns: - A dictionary containing the request and file data, or None if the request is not found. - """ - try: - # Convert string request_id to UUID object for the query - request_uuid = uuid.UUID(request_id) - except ValueError: - print(f"Invalid UUID format for request_id: {request_id}") - return None - - # Fetch the request and its related files in a single query using join - request = db.query(file_retriever_models.FileRetrievalRequest).filter( - file_retriever_models.FileRetrievalRequest.id == request_uuid - ).options( - # Eagerly load the retrieved_files to avoid N+1 query problem - joinedload(file_retriever_models.FileRetrievalRequest.retrieved_files) - ).first() - - if not request: - return None - - # Build the dictionary to represent the JSON structure - retrieved_data = { - "request_id": str(request.id), - "question": request.question, - "directory_path": request.directory_path, - "session_id": request.session_id, - "created_at": request.created_at.isoformat() if request.created_at else None, - "retrieved_files": [] - } - - for file in request.retrieved_files: - if file.content: - # For files with content, show the full detailed structure - file_data = { - "file_path": file.file_path, - "content": file.content, - "id": str(file.id), - "name": file.file_name, - "type": file.type, - "last_updated": file.last_updated.isoformat() if file.last_updated else None, - "created_at": file.created_at.isoformat() if file.created_at else None, - } - else: - # For empty files, use a compact representation - file_data = { - "file_path": file.file_path, - "type": file.type - } - retrieved_data["retrieved_files"].append(file_data) - - return retrieved_data \ No newline at end of file diff --git a/ai-hub/app/core/services/test_workspace.py b/ai-hub/app/core/services/test_workspace.py new file mode 100644 index 0000000..8ddfa03 --- /dev/null +++ b/ai-hub/app/core/services/test_workspace.py @@ -0,0 +1,227 @@ +import pytest +from 
app.core.services.workspace import WorkspaceService # Replace your_module_name with the actual name of the module + +@pytest.fixture +def workspace_service(): + """Provides a WorkspaceService instance for testing.""" + return WorkspaceService() + +def test_apply_diff_rag_service_changes(workspace_service): + """ + Tests applying a complex diff to the RAGService class, including + additions within different sections of the code. + """ + original_content = """import asyncio +from typing import List, Tuple +from sqlalchemy.orm import Session, joinedload +import dspy + +from app.db import models +from app.core.retrievers.faiss_db_retriever import FaissDBRetriever +from app.core.retrievers.base_retriever import Retriever +from app.core.providers.factory import get_llm_provider +from app.core.pipelines.dspy_rag import DspyRagPipeline + +class RAGService: + \"\"\" + Service for orchestrating conversational RAG pipelines. + Manages chat interactions and message history for a session. + \"\"\" + def __init__(self, retrievers: List[Retriever]): + self.retrievers = retrievers + self.faiss_retriever = next((r for r in retrievers if isinstance(r, FaissDBRetriever)), None) + + async def chat_with_rag( + self, + db: Session, + session_id: int, + prompt: str, + provider_name: str, + load_faiss_retriever: bool = False + ) -> Tuple[str, str]: + \"\"\" + Processes a user prompt within a session, saves the chat history, and returns a response. + \"\"\" + session = db.query(models.Session).options( + joinedload(models.Session.messages) + ).filter(models.Session.id == session_id).first() + + if not session: + raise ValueError(f"Session with ID {session_id} not found.") + + # Save user message + user_message = models.Message(session_id=session_id, sender="user", content=prompt) + db.add(user_message) + db.commit() + db.refresh(user_message) + + # Get the appropriate LLM provider + llm_provider = get_llm_provider(provider_name) + + # Configure retrievers for the pipeline + context_chunks = [] + if load_faiss_retriever: + if self.faiss_retriever: + context_chunks.extend(self.faiss_retriever.retrieve_context(query=prompt, db=db)) # Ensure FAISS index is loaded + else: + print("Warning: FaissDBRetriever requested but not available. Proceeding without it.") + + rag_pipeline = DspyRagPipeline() + + with dspy.context(lm=llm_provider): + answer_text = await rag_pipeline.forward( + question=prompt, + history=session.messages, + context_chunks = context_chunks + ) + + # Save assistant's response + assistant_message = models.Message(session_id=session_id, sender="assistant", content=answer_text) + db.add(assistant_message) + db.commit() + db.refresh(assistant_message) + + return answer_text, provider_name + + def get_message_history(self, db: Session, session_id: int) -> List[models.Message]: + \"\"\" + Retrieves all messages for a given session, ordered by creation time. 
+ \"\"\" + session = db.query(models.Session).options( + joinedload(models.Session.messages) + ).filter(models.Session.id == session_id).first() + + return sorted(session.messages, key=lambda msg: msg.created_at) if session else None +""" + + file_diff = """--- a/core/services/rag.py ++++ b/core/services/rag.py +@@ -20,6 +20,7 @@ + def __init__(self, retrievers: List[Retriever]): + self.retrievers = retrievers + self.faiss_retriever = next((r for r in retrievers if isinstance(r, FaissDBRetriever)), None) ++ self.db = None #Added to avoid potential errors + + async def chat_with_rag( + self, +@@ -28,6 +29,7 @@ + prompt: str, + provider_name: str, + load_faiss_retriever: bool = False ++ db: Session = None #Added to avoid potential errors + ) -> Tuple[str, str]: + \"\"\" + Processes a user prompt within a session, saves the chat history, and returns a response. +@@ -45,6 +47,7 @@ + # Get the appropriate LLM provider + llm_provider = get_llm_provider(provider_name) + ++ self.db = db #Added to avoid potential errors + # Configure retrievers for the pipeline + context_chunks = [] + if load_faiss_retriever: +@@ -69,6 +72,7 @@ + db.refresh(assistant_message) + + return answer_text, provider_name ++ + + def get_message_history(self, db: Session, session_id: int) -> List[models.Message]: + \"\"\" +""" + + expected_content = """import asyncio +from typing import List, Tuple +from sqlalchemy.orm import Session, joinedload +import dspy + +from app.db import models +from app.core.retrievers.faiss_db_retriever import FaissDBRetriever +from app.core.retrievers.base_retriever import Retriever +from app.core.providers.factory import get_llm_provider +from app.core.pipelines.dspy_rag import DspyRagPipeline + +class RAGService: + \"\"\" + Service for orchestrating conversational RAG pipelines. + Manages chat interactions and message history for a session. + \"\"\" + def __init__(self, retrievers: List[Retriever]): + self.retrievers = retrievers + self.faiss_retriever = next((r for r in retrievers if isinstance(r, FaissDBRetriever)), None) + self.db = None #Added to avoid potential errors + + async def chat_with_rag( + self, + db: Session, + session_id: int, + prompt: str, + provider_name: str, + load_faiss_retriever: bool = False + db: Session = None #Added to avoid potential errors + ) -> Tuple[str, str]: + \"\"\" + Processes a user prompt within a session, saves the chat history, and returns a response. + \"\"\" + session = db.query(models.Session).options( + joinedload(models.Session.messages) + ).filter(models.Session.id == session_id).first() + + if not session: + raise ValueError(f"Session with ID {session_id} not found.") + + # Save user message + user_message = models.Message(session_id=session_id, sender="user", content=prompt) + db.add(user_message) + db.commit() + db.refresh(user_message) + + # Get the appropriate LLM provider + llm_provider = get_llm_provider(provider_name) + + self.db = db #Added to avoid potential errors + # Configure retrievers for the pipeline + context_chunks = [] + if load_faiss_retriever: + if self.faiss_retriever: + context_chunks.extend(self.faiss_retriever.retrieve_context(query=prompt, db=db)) # Ensure FAISS index is loaded + else: + print("Warning: FaissDBRetriever requested but not available. 
Proceeding without it.") + + rag_pipeline = DspyRagPipeline() + + with dspy.context(lm=llm_provider): + answer_text = await rag_pipeline.forward( + question=prompt, + history=session.messages, + context_chunks = context_chunks + ) + + # Save assistant's response + assistant_message = models.Message(session_id=session_id, sender="assistant", content=answer_text) + db.add(assistant_message) + db.commit() + db.refresh(assistant_message) + + return answer_text, provider_name + + def get_message_history(self, db: Session, session_id: int) -> List[models.Message]: + \"\"\" + Retrieves all messages for a given session, ordered by creation time. + \"\"\" + session = db.query(models.Session).options( + joinedload(models.Session.messages) + ).filter(models.Session.id == session_id).first() + + return sorted(session.messages, key=lambda msg: msg.created_at) if session else None +""" + # The new_content you provided in the prompt is actually incorrect for this diff. + # The provided 'new_content' has lines moved around and is malformed. + # The `_apply_diff` function should produce the content as shown below. + # The correct new content is generated by applying the diff to the original content. + + # Apply the diff + actual_content = workspace_service._apply_diff(original_content, file_diff) + + # Assert that the actual content matches the expected content + assert actual_content.strip() == expected_content.strip() \ No newline at end of file diff --git a/ai-hub/app/core/services/workspace.py b/ai-hub/app/core/services/workspace.py index 33e6786..e3be4d9 100644 --- a/ai-hub/app/core/services/workspace.py +++ b/ai-hub/app/core/services/workspace.py @@ -1,11 +1,12 @@ import dspy import json import uuid +import os import re import logging from datetime import datetime import ast # Import the Abstract Syntax Trees module -from typing import Dict, Any, Callable, Awaitable, List +from typing import Dict, Any, Callable, Awaitable, List, Optional from fastapi import WebSocket,Depends from sqlalchemy.orm import Session,joinedload from app.db import models @@ -15,8 +16,7 @@ from app.core.pipelines.file_selector import CodeRagFileSelector from app.core.pipelines.dspy_rag import DspyRagPipeline from app.core.pipelines.question_decider import CodeRagQuestionDecider -from app.core.pipelines.context_compressor import StringContextCompressor -from app.core.retrievers.file_retriever import FileRetriever + # A type hint for our handler functions MessageHandler = Callable[[WebSocket, Dict[str, Any]], Awaitable[None]] # Configure logging @@ -47,7 +47,6 @@ # Per-websocket session state management self.sessions: Dict[str, Dict[str, Any]] = {} self.db = SessionLocal() - self.file_retriever = FileRetriever() # --- New helper function for reuse --- async def _update_file_content(self, request_id: uuid.UUID, files_with_content: List[Dict[str, Any]]): @@ -199,8 +198,215 @@ # """Generates a unique request ID.""" # return str(uuid.uuid4()) + async def _retrieve_by_request_id(self, db: Session, request_id: str) -> Optional[Dict[str, Any]]: + """ + Retrieves a FileRetrievalRequest and all its associated files from the database, + returning the data in a well-formatted JSON-like dictionary. + Args: + db: The SQLAlchemy database session. + request_id: The UUID of the FileRetrievalRequest. + + Returns: + A dictionary containing the request and file data, or None if the request is not found. 
+ """ + try: + # Convert string request_id to UUID object for the query + request_uuid = uuid.UUID(request_id) + except ValueError: + print(f"Invalid UUID format for request_id: {request_id}") + return None + + # Fetch the request and its related files in a single query using join + request = db.query(file_retriever_models.FileRetrievalRequest).filter( + file_retriever_models.FileRetrievalRequest.id == request_uuid + ).options( + # Eagerly load the retrieved_files to avoid N+1 query problem + joinedload(file_retriever_models.FileRetrievalRequest.retrieved_files) + ).first() + + if not request: + return None + + # Build the dictionary to represent the JSON structure + retrieved_data = { + "request_id": str(request.id), + "question": request.question, + "directory_path": request.directory_path, + "session_id": request.session_id, + "created_at": request.created_at.isoformat() if request.created_at else None, + "retrieved_files": [] + } + + for file in request.retrieved_files: + if file.content: + # For files with content, show the full detailed structure + file_data = { + "file_path": file.file_path, + "content": file.content, + "id": str(file.id), + "name": file.file_name, + "type": file.type, + "last_updated": file.last_updated.isoformat() if file.last_updated else None, + "created_at": file.created_at.isoformat() if file.created_at else None, + } + else: + # For empty files, use a compact representation + file_data = { + "file_path": file.file_path, + "type": file.type + } + retrieved_data["retrieved_files"].append(file_data) + + return retrieved_data + + async def get_file_content_by_request_id_and_path(self, db: Session, request_id: uuid.UUID, file_path: str) ->str: + """ + Retrieves a FileRetrievalRequest by its ID. + """ + retrievedFile = db.query(file_retriever_models.RetrievedFile).filter_by(request_id = request_id , file_path=file_path).first() + if retrievedFile and retrievedFile.content: + return retrievedFile.content + else: + raise ValueError(f"File with path {file_path} not found for request ID {request_id} or has no content.") + + async def _handle_code_change_response(self, db: Session ,request_id: str, code_diff: str) -> List[Dict[str, Any]]: + """ + Parses the diff, retrieves original file content, and returns a structured, + per-file dictionary for the client. + """ + # 1. Split the monolithic code_diff string into per-file diffs. + # This regex splits the diff string while keeping the separators. + per_file_diffs = re.split(r'(?=\ndiff --git a\/)', code_diff) + # 2. Iterate through each per-file diff to get file path and retrieve content. + files_with_diff_and_content = [] + + for file_diff in per_file_diffs: + if not file_diff.strip(): + continue + + # Use a regex to find the file path from the "--- a/path" line + path_match = re.search(r'--- a(.*)', file_diff) + if path_match: + file_path = path_match.group(1).strip() + + # Retrieve the original content for this specific file. + original_content = await self.get_file_content_by_request_id_and_path( + db, + uuid.UUID(request_id), + file_path + ) + + # Group the file path, its diff, and its original content. 
+ files_with_diff_and_content.append({ + "filepath": file_path, + "diff": file_diff, + "original_content": original_content, + "new_content": self._apply_diff(original_content, file_diff) + }) + return files_with_diff_and_content + + + async def get_files_by_request_id(self, db: Session, request_id: str) -> Optional[List[str]]: + """ + Retrieves all files associated with a FileRetrievalRequest from the database, + returning the data in a list of JSON-like dictionaries. + + Args: + db: The SQLAlchemy database session. + request_id: The UUID of the FileRetrievalRequest. + + Returns: + A list of dictionaries containing file data, or None if the request is not found. + """ + try: + request_uuid = uuid.UUID(request_id) + except ValueError: + print(f"Invalid UUID format for request_id: {request_id}") + return None + + request = db.query(file_retriever_models.FileRetrievalRequest).filter( + file_retriever_models.FileRetrievalRequest.id == request_uuid + ).options( + joinedload(file_retriever_models.FileRetrievalRequest.retrieved_files) + ).first() + + if not request: + return None + + retrieved_files = [] + + for file in request.retrieved_files: + retrieved_files.append(file.file_path) + + return retrieved_files + + def _format_diff(self, raw_diff: str) -> str: + # Remove Markdown-style code block markers + content = re.sub(r'^```diff\n|```$', '', raw_diff.strip(), flags=re.MULTILINE) + + # Unescape common sequences + content = content.encode('utf-8').decode('unicode_escape') + + return content + + def _apply_diff(self, original_content: str, file_diff: str) -> str: + """ + Applies a unified diff to the original content and returns the new content. + + Args: + original_content: The original file content as a single string. + file_diff: The unified diff string. + + Returns: + The new content with the diff applied. 
+ """ + original_lines = original_content.splitlines(keepends=True) + diff_lines = file_diff.splitlines(keepends=True) + + # Skip diff headers like --- / +++ + i = 0 + while i < len(diff_lines) and not diff_lines[i].startswith('@@'): + i += 1 + + if i == len(diff_lines): + return original_content # No hunks to apply + + new_content: List[str] = [] + orig_idx = 0 # Pointer in original_lines + + while i < len(diff_lines): + hunk_header = diff_lines[i] + m = re.match(r'^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@', hunk_header) + if not m: + raise ValueError(f"Invalid hunk header: {hunk_header.strip()}") + + orig_start = int(m.group(1)) - 1 # line numbers in diff are 1-based + i += 1 + + # Add unchanged lines before the hunk + while orig_idx < orig_start: + new_content.append(original_lines[orig_idx]) + orig_idx += 1 + + # Process hunk lines + while i < len(diff_lines) and not diff_lines[i].startswith('@@'): + line = diff_lines[i] + if line.startswith(' '): + new_content.append(original_lines[orig_idx]) + orig_idx += 1 + elif line.startswith('-'): + orig_idx += 1 + elif line.startswith('+'): + new_content.append(line[1:]) # Add the new line without '+' + i += 1 + + # Add the remaining lines from the original + new_content.extend(original_lines[orig_idx:]) + + return ''.join(new_content) + async def send_command(self, websocket: WebSocket, command_name: str, data: Dict[str, Any] = {}): if command_name not in self.command_map: raise ValueError(f"Unknown command: {command_name}") @@ -283,13 +489,11 @@ with dspy.context(lm=llm_provider): raw_answer_text = await cfs( question=file_request.question, - retrieved_data = self.file_retriever.retrieve_by_request_id(self.db, request_id=request_id) + retrieved_data = await self.get_files_by_request_id(self.db, request_id=request_id) ) - try: + # Use ast.literal_eval for a safe and reliable parse answer_text = ast.literal_eval(raw_answer_text) - if not isinstance(answer_text, list): - raise ValueError("Parsed result is not a list.") except (ValueError, SyntaxError) as e: # Handle cases where the LLM output is not a valid list string. print(f"Error parsing LLM output: {e}") @@ -299,12 +503,20 @@ "content": f"Warning: AI's file list could not be parsed. Error: {e}" })) return - + + if len(answer_text) == 0: + await websocket.send_text(json.dumps({ + "type": "thinking_log", + "content": "AI did not select any files to retrieve content for." + })) + await self.handle_files_content_response(websocket, {"files": [], "request_id": request_id}) + return + await websocket.send_text(json.dumps({ "type": "thinking_log", "content": f"AI selected files: {answer_text}. Now requesting file content." })) - + # After getting the AI's selected files, we send a command to the client to get their content. 
await self.send_command(websocket, "get_file_content", data={"filepaths": answer_text, "request_id": request_id}) @@ -317,13 +529,12 @@ if not files_data: print(f"Warning: No files data received for request_id: {request_id}") - return + else: + print(f"Received content for {len(files_data)} files (request_id: {request_id}).") + await self._update_file_content(request_id=uuid.UUID(request_id), files_with_content=files_data) - print(f"Received content for {len(files_data)} files (request_id: {request_id}).") - await self._update_file_content(request_id=uuid.UUID(request_id), files_with_content=files_data) - # Retrieve the updated context from the database - context_data = self.file_retriever.retrieve_by_request_id(self.db, request_id=request_id) + context_data = await self._retrieve_by_request_id(self.db, request_id=request_id) if not context_data: print(f"Error: Context not found for request_id: {request_id}") @@ -332,46 +543,72 @@ "content": "An internal error occurred. Please try again." })) return - + await websocket.send_text(json.dumps({ + "type": "thinking_log", + "content": f"AI is analyzing the retrieved files to determine next steps." + })) # Use the LLM to make a decision - with dspy.context(lm=get_llm_provider("gemini")): + with dspy.context(lm=get_llm_provider(provider_name="gemini")): crqd = CodeRagQuestionDecider() - raw_answer_text, decision, code_diff = await crqd( + raw_answer_text, reasoning, decision, code_diff = await crqd( question=context_data.get("question", ""), history="", retrieved_data=context_data ) - + dspy.inspect_history(n=1) # Inspect the last DSPy operation for debugging if decision == "files": await websocket.send_text(json.dumps({ "type": "thinking_log", "content": f"AI decided more files are needed: {raw_answer_text}." })) try: - # The LLM is instructed to provide a JSON list, so we parse it - file_list = json.loads(raw_answer_text) - if not isinstance(file_list, list): - raise ValueError("Parsed result is not a list.") - except (ValueError, json.JSONDecodeError) as e: + # Use regex to find the JSON content, including any surrounding newlines and code blocks + json_match = re.search(r'\[.*\]', raw_answer_text, re.DOTALL) + if json_match: + # Extract the matched JSON string + json_string = json_match.group(0) + + # Use ast.literal_eval for a safe and reliable parse + answer_text = ast.literal_eval(json_string) + + if not isinstance(answer_text, list): + raise ValueError("Parsed result is not a list.") + else: + # Fallback if no markdown is found + answer_text = ast.literal_eval(raw_answer_text) + if not isinstance(answer_text, list): + raise ValueError("Parsed result is not a list.") + except (ValueError, SyntaxError) as e: print(f"Error parsing LLM output: {e}") - file_list = [] + answer_text = [] await websocket.send_text(json.dumps({ "type": "thinking_log", "content": f"Warning: AI's file list could not be parsed. 
Error: {e}" })) return - await self.send_command(websocket, "get_file_content", data={"filepaths": file_list, "request_id": request_id}) + await self.send_command(websocket, "get_file_content", data={"filepaths": answer_text, "request_id": request_id}) elif decision == "code_change": - await websocket.send_text(json.dumps({ + diffs =await self._handle_code_change_response(db=self.db, request_id=request_id, code_diff=code_diff) + for diff in diffs: + diff["diff"] = self._format_diff(diff.get("diff","")) + payload = json.dumps({ "type": "chat_message", "content": raw_answer_text, - "code_diff": code_diff - })) + "reasoning": reasoning, + "dicision" : decision, + "code_diff":diffs + }) + logger.info(f"Sending code change response to client: {payload}") + await websocket.send_text(payload) else: # decision is "answer" await websocket.send_text(json.dumps({ + "type": "thinking_log", + "content": f"Answering user's question directly." + })) + await websocket.send_text(json.dumps({ "type": "chat_message", "content": raw_answer_text })) diff --git a/ai-hub/app/main.py b/ai-hub/app/main.py index 3c2bdd6..71b97a6 100644 --- a/ai-hub/app/main.py +++ b/ai-hub/app/main.py @@ -3,7 +3,7 @@ from app.app import create_app # Configure logging (can be moved to a higher level, like app.py, if preferred) -logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') # Use the application factory to create the FastAPI app instance app = create_app() diff --git a/ui/client-app/package-lock.json b/ui/client-app/package-lock.json index 6f05481..8cfa3e9 100644 --- a/ui/client-app/package-lock.json +++ b/ui/client-app/package-lock.json @@ -16,6 +16,7 @@ "react": "^19.1.1", "react-dom": "^19.1.1", "react-icons": "^5.5.0", + "react-markdown": "^10.1.0", "react-scripts": "5.0.1", "web-vitals": "^2.1.4" }, @@ -4260,6 +4261,14 @@ "@types/node": "*" } }, + "node_modules/@types/debug": { + "version": "4.1.12", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", + "integrity": "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==", + "dependencies": { + "@types/ms": "*" + } + }, "node_modules/@types/eslint": { "version": "8.56.12", "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.12.tgz", @@ -4286,6 +4295,14 @@ "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", "license": "MIT" }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "dependencies": { + "@types/estree": "*" + } + }, "node_modules/@types/express": { "version": "4.17.23", "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.23.tgz", @@ -4331,6 +4348,14 @@ "@types/node": "*" } }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, "node_modules/@types/html-minifier-terser": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz", @@ -4388,12 +4413,25 @@ "integrity": 
"sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", "license": "MIT" }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, "node_modules/@types/mime": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", "integrity": "sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==", "license": "MIT" }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==" + }, "node_modules/@types/node": { "version": "24.2.1", "resolved": "https://registry.npmjs.org/@types/node/-/node-24.2.1.tgz", @@ -4442,6 +4480,15 @@ "integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==", "license": "MIT" }, + "node_modules/@types/react": { + "version": "19.1.12", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.12.tgz", + "integrity": "sha512-cMoR+FoAf/Jyq6+Df2/Z41jISvGZZ2eTlnsaJRptmZ76Caldwy1odD4xTr/gNV9VLj0AWgg/nmkevIyUfIIq5w==", + "peer": true, + "dependencies": { + "csstype": "^3.0.2" + } + }, "node_modules/@types/resolve": { "version": "1.17.1", "resolved": "https://registry.npmjs.org/@types/resolve/-/resolve-1.17.1.tgz", @@ -4514,6 +4561,11 @@ "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==", "license": "MIT" }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + }, "node_modules/@types/ws": { "version": "8.18.1", "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", @@ -5794,6 +5846,15 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", @@ -6140,6 +6201,15 @@ "node": ">=4" } }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", @@ -6165,6 +6235,42 @@ "node": ">=10" } }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/check-types": { "version": "11.2.3", "resolved": "https://registry.npmjs.org/check-types/-/check-types-11.2.3.tgz", @@ -6421,6 +6527,15 @@ "node": ">= 0.8" } }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/commander": { "version": "8.3.0", "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", @@ -7008,6 +7123,12 @@ "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", "license": "MIT" }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "peer": true + }, "node_modules/damerau-levenshtein": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", @@ -7102,6 +7223,18 @@ "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", "license": "MIT" }, + "node_modules/decode-named-character-reference": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.2.0.tgz", + "integrity": "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/dedent": { "version": "0.7.0", "resolved": "https://registry.npmjs.org/dedent/-/dedent-0.7.0.tgz", @@ -7274,6 +7407,18 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + 
"type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/didyoumean": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", @@ -8401,6 +8546,15 @@ "node": ">=4.0" } }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/estree-walker": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-1.0.1.tgz", @@ -8547,6 +8701,11 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -9473,6 +9632,44 @@ "node": ">= 0.4" } }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/he": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", @@ -9594,6 +9791,15 @@ "node": ">=12" } }, + "node_modules/html-url-attributes": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-url-attributes/-/html-url-attributes-3.0.1.tgz", + "integrity": "sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/html-webpack-plugin": { "version": "5.6.3", "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.6.3.tgz", @@ -9893,6 +10099,11 @@ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", "license": "ISC" }, + "node_modules/inline-style-parser": { + 
"version": "0.2.4", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz", + "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==" + }, "node_modules/internal-slot": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.1.0.tgz", @@ -9916,6 +10127,28 @@ "node": ">= 10" } }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-array-buffer": { "version": "3.0.5", "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.5.tgz", @@ -10061,6 +10294,15 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-docker": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", @@ -10148,6 +10390,15 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-map": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", @@ -12076,6 +12327,15 @@ "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==", "license": "MIT" }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/loose-envify": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", @@ -12166,6 +12426,151 @@ "node": ">= 0.4" } }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.2.tgz", + "integrity": "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": 
"^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": 
"https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/mdn-data": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.4.tgz", @@ -12226,6 +12631,427 @@ "node": ">= 0.6" } }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + 
"node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + 
{ + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": 
"sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, 
+ { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, "node_modules/micromatch": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", @@ -12875,6 +13701,29 @@ "node": ">=6" } }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, "node_modules/parse-json": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", @@ -14519,6 +15368,15 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", "license": "MIT" }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -14831,6 +15689,32 @@ "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", "license": "MIT" }, + "node_modules/react-markdown": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/react-markdown/-/react-markdown-10.1.0.tgz", + "integrity": "sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "html-url-attributes": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "unified": "^11.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "@types/react": ">=18", + "react": ">=18" + } + }, "node_modules/react-refresh": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.11.0.tgz", @@ -15150,6 +16034,37 @@ "node": ">= 0.10" } }, + "node_modules/remark-parse": { + 
"version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/renderkid": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz", @@ -16055,6 +16970,15 @@ "deprecated": "Please use @jridgewell/sourcemap-codec instead", "license": "MIT" }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/spdy": { "version": "4.0.2", "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", @@ -16423,6 +17347,19 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/stringify-entities": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/stringify-object": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz", @@ -16529,6 +17466,22 @@ "webpack": "^5.0.0" } }, + "node_modules/style-to-js": { + "version": "1.1.17", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.17.tgz", + "integrity": "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA==", + "dependencies": { + "style-to-object": "1.0.9" + } + }, + "node_modules/style-to-object": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz", + "integrity": "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw==", + "dependencies": { + "inline-style-parser": "0.2.4" + } + }, "node_modules/stylehacks": { "version": "5.1.1", "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz", @@ -17143,6 +18096,24 @@ "node": ">=8" } }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, 
+ "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/tryer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/tryer/-/tryer-1.0.1.tgz", @@ -17428,6 +18399,35 @@ "node": ">=4" } }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unified/node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/unique-string": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz", @@ -17440,6 +18440,69 @@ "node": ">=8" } }, + "node_modules/unist-util-is": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", + "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", + "integrity": "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz", + "integrity": 
"sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/universalify": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", @@ -17597,6 +18660,32 @@ "node": ">= 0.8" } }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/w3c-hr-time": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz", @@ -18536,6 +19625,15 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } } } } diff --git a/ui/client-app/package.json b/ui/client-app/package.json index 4d75c14..e404eac 100644 --- a/ui/client-app/package.json +++ b/ui/client-app/package.json @@ -11,6 +11,7 @@ "react": "^19.1.1", "react-dom": "^19.1.1", "react-icons": "^5.5.0", + "react-markdown": "^10.1.0", "react-scripts": "5.0.1", "web-vitals": "^2.1.4" }, diff --git a/ui/client-app/src/App.js b/ui/client-app/src/App.js index ae36ee6..6cba5a8 100644 --- a/ui/client-app/src/App.js +++ b/ui/client-app/src/App.js @@ -36,7 +36,7 @@ case "voice-chat": return ; case "coding-assistant" : - return + return case "login": return ; default: diff --git a/ui/client-app/src/components/ChatWindow.css b/ui/client-app/src/components/ChatWindow.css new file mode 100644 index 0000000..f7c99c7 --- /dev/null +++ b/ui/client-app/src/components/ChatWindow.css @@ -0,0 +1,9 @@ +code { + /* font-family: 'Courier New', Courier, monospace; */ + background-color: #f4f4f4; + color: #333; + padding: 2px 4px; + border-radius: 4px; + font-size: 90%; + font-weight: bold; + } \ No newline at end of file diff --git a/ui/client-app/src/components/ChatWindow.js b/ui/client-app/src/components/ChatWindow.js index 28397a4..f802ed9 100644 --- a/ui/client-app/src/components/ChatWindow.js +++ b/ui/client-app/src/components/ChatWindow.js @@ -1,6 +1,63 @@ -// src/components/ChatWindow.js -import React from "react"; +import React, { useState } from "react"; +import ReactMarkdown from 'react-markdown'; +import './ChatWindow.css'; +import FileListComponent from "./FileList"; +import DiffViewer from "./DiffViewer"; +// Individual message component for better modularity +const ChatMessage = ({ message }) => { + const [selectedFile, 
+  const [isReasoningExpanded, setIsReasoningExpanded] = useState(false);
+
+  const toggleReasoning = () => {
+    setIsReasoningExpanded(!isReasoningExpanded);
+  };
+
+  // A simple handler to close the diff viewer
+  const handleCloseDiff = () => {
+    setSelectedFile(null);
+  };
+
+  const handleFileClick = (file) => {
+    setSelectedFile(file);
+  };
+
+  const messageClasses = `max-w-md p-4 rounded-lg shadow-md ${
+    message.isUser
+      ? "bg-indigo-500 text-white ml-auto"
+      : "bg-gray-200 dark:bg-gray-700 text-gray-900 dark:text-gray-100 mr-auto"
+  }`;
+
+  // NOTE: the wrapping JSX tags in this return were lost when the diff was
+  // extracted; the element structure below is a plausible reconstruction.
+  return (
+    <div className={messageClasses}>
+      {message.reasoning && (
+        <div>
+          <button onClick={toggleReasoning}>
+            {isReasoningExpanded ? "Hide reasoning" : "Show reasoning"}
+          </button>
+          {isReasoningExpanded && <div>{message.reasoning}</div>}
+        </div>
+      )}
+      <ReactMarkdown>{message.text}</ReactMarkdown>
+      {message.code_diff && (
+        <FileListComponent files={message.code_diff} onFileClick={handleFileClick} />
+      )}
+      {selectedFile && (
+        <DiffViewer diff={selectedFile.diff} onClose={handleCloseDiff} />
+      )}
+    </div>
+  );
+};
+
+// Main ChatWindow component remains the same
 const ChatWindow = ({ chatHistory }) => {
   return (
@@ -11,15 +68,7 @@
               message.isUser ? "justify-end" : "justify-start"
             }`}
           >
-            {message.text}
+            <ChatMessage message={message} />
           </div>
         ))}
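// Aside: a hedged sketch of the message object ChatMessage consumes, inferred
// from the backend "chat_message" payload and the useCodeAssistant hook; the
// field values are illustrative only, not taken from a real session.
const exampleMessage = {
  isUser: false,
  text: "Here is the refactored function...", // markdown, rendered via ReactMarkdown
  reasoning: "The original loop re-read the file on every iteration...", // shown behind the toggle
  decision: "code_change",
  code_diff: [
    {
      filepath: "app/core/services/workspace.py",
      diff: "--- a/app/core/services/workspace.py\n+++ b/app/core/services/workspace.py\n@@ ...",
    },
  ],
};
// <ChatMessage message={exampleMessage} /> renders the markdown text, a
// reasoning toggle, and a clickable file list that opens the DiffViewer.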
diff --git a/ui/client-app/src/components/DiffViewer.js b/ui/client-app/src/components/DiffViewer.js
new file mode 100644
index 0000000..156839b
--- /dev/null
+++ b/ui/client-app/src/components/DiffViewer.js
@@ -0,0 +1,68 @@
+// DiffViewer.jsx
+import React from "react";
+
+const DiffViewer = ({ diff, onClose }) => {
+  if (!diff) return null;
+
+  const lines = diff.split('\n');
+
+  const handleDownload = () => {
+    // Create a Blob from the diff string
+    const blob = new Blob([diff], { type: 'text/plain' });
+    const url = URL.createObjectURL(blob);
+    const a = document.createElement('a');
+    a.href = url;
+    a.download = 'file_changes.diff'; // Specify a default filename
+    document.body.appendChild(a);
+    a.click();
+    document.body.removeChild(a);
+    URL.revokeObjectURL(url);
+  };
+
+  // NOTE: the wrapping JSX tags below were lost in extraction; the overlay
+  // markup is a plausible reconstruction, not the verbatim original.
+  return (
+    <div className="fixed inset-0 z-50 flex items-center justify-center bg-black bg-opacity-50">
+      <div className="w-full max-w-3xl rounded-lg bg-white p-4 shadow-lg dark:bg-gray-800">
+        <div className="mb-2 flex items-center justify-between">
+          <h2 className="font-semibold">File Changes</h2>
+          <div>
+            {/* Download Button */}
+            <button onClick={handleDownload}>Download</button>
+            {/* Close Button */}
+            <button onClick={onClose}>Close</button>
+          </div>
+        </div>
+        <pre className="overflow-auto text-sm">
+          {lines.map((line, index) => {
+            let color = 'text-gray-900 dark:text-gray-100';
+            if (line.startsWith('+')) {
+              color = 'text-green-600 dark:text-green-400';
+            } else if (line.startsWith('-')) {
+              color = 'text-red-600 dark:text-red-400';
+            }
+            return (
+              <div key={index} className={color}>
+                {line}
+              </div>
+            );
+          })}
+        </pre>
+      </div>
+    </div>
+  );
+};
+
+export default DiffViewer;
\ No newline at end of file
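// Aside: a minimal standalone sketch of the Blob-download technique used by
// handleDownload above -- serialize a string, expose it as an object URL,
// click a temporary anchor, then release the URL. The function name
// `downloadText` is illustrative only.
function downloadText(text, filename) {
  const blob = new Blob([text], { type: 'text/plain' });
  const url = URL.createObjectURL(blob);
  const a = document.createElement('a');
  a.href = url;
  a.download = filename;
  document.body.appendChild(a); // some browsers require the anchor in the DOM
  a.click();
  document.body.removeChild(a);
  URL.revokeObjectURL(url); // free the object URL once the download starts
}
// e.g. downloadText(diff, 'file_changes.diff');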
diff --git a/ui/client-app/src/components/FileList.js b/ui/client-app/src/components/FileList.js
new file mode 100644
index 0000000..c4f9de2
--- /dev/null
+++ b/ui/client-app/src/components/FileList.js
@@ -0,0 +1,27 @@
+// FileListComponent.jsx
+import React from "react";
+
+const FileListComponent = ({ files, onFileClick }) => {
+  // NOTE: the wrapping JSX tags were lost in extraction; the markup below is
+  // a plausible reconstruction, not the verbatim original.
+  return (
+    <div>
+      <div className="font-semibold">Files:</div>
+      {files.map((file, index) => (
+        // Once clicked, this calls onFileClick with the file object, which
+        // carries file.diff, file.original_content and file.new_content; the
+        // diff is used to show the changes nicely.
+        <div
+          key={index}
+          className="cursor-pointer"
+          onClick={() => onFileClick(file)}
+        >
+          <div>
+            <span>{file.filepath}</span>
+          </div>
+        </div>
+      ))}
+    </div>
+  );
+};
+
+export default FileListComponent;
\ No newline at end of file
diff --git a/ui/client-app/src/hooks/useCodeAssistant.js b/ui/client-app/src/hooks/useCodeAssistant.js
index ade4af9..f8fcb70 100644
--- a/ui/client-app/src/hooks/useCodeAssistant.js
+++ b/ui/client-app/src/hooks/useCodeAssistant.js
@@ -20,8 +20,9 @@
   const dirHandleRef = useRef(null);

   const handleChatMessage = useCallback((message) => {
+    console.log("Received chat message:", message);
     // Update chat history with the formatted content
-    setChatHistory((prev) => [...prev, { isUser: false, text: message.content }]);
+    setChatHistory((prev) => [...prev, { isUser: false, text: message.content, decision: message.decision, code_diff: message.code_diff, reasoning: message.reasoning }]);
     setIsProcessing(false);
   }, []);

@@ -46,6 +47,7 @@
   const handleListDirectoryRequest = useCallback(async (message) => {
     const { request_id } = message;
+    console.log("Received list directory request:", message);
     const dirHandle = dirHandleRef.current;
     if (!dirHandle) {
       const errorMsg = "No folder selected by user.";
@@ -55,8 +57,8 @@
     }

     // setThinkingProcess((prev) => [
-    //  ...prev,
-    //  { type: "system", message: `Scanning directory...`, round },
+    //   ...prev,
+    //   { type: "system", message: `Scanning directory...`, round },
     // ]);

     try {
@@ -96,7 +98,7 @@
         ...prev,
         {
           type: "local",
-          message: `Sent list of files (${files.length}) to server.`,
+          message: `Sent metadata for ${files.length} files to the server.`,
         },
       ]);
     } catch (error) {
@@ -138,8 +140,8 @@

   const handleReadFilesRequest = useCallback(async (message) => {
-    console.log(message);
     const { filepaths, request_id } = message;
+    console.log("Received read files request:", message);
     const dirHandle = dirHandleRef.current;
     if (!dirHandle) {
       ws.current.send(JSON.stringify({ type: "error", content: "No folder selected.", request_id, session_id: sessionId }));
@@ -149,6 +151,8 @@
     }
     const filesData = [];
     const readFiles = []; // Array to store names of successfully read files
+    // setThinkingProcess((prev) => [...prev, { type: "local", message: `Reading content of ${filepaths.length} files...` }]);
+
     for (const filepath of filepaths) {
       try {
         const fileHandle = await getFileHandleFromPath(dirHandle, filepath);
@@ -176,23 +180,15 @@
     // Consolidate all thinking process updates into a single call
     setThinkingProcess((prev) => {
-      const newMessages = [
-        { type: "local", message: `Reading content of ${filepaths.length} files...` }
-      ];
+      const newMessages = [];

       // Add a message summarizing the files that were read
       if (readFiles.length > 0) {
         const displayMessage = readFiles.length > 10
           ? `Read ${readFiles.length} files successfully.`
-          : `Read files successfully: [${readFiles.join(', ')}]`;
+          : `Read and sent files successfully: [${readFiles.join(', ')}]`;
         newMessages.push({ type: "local", message: displayMessage });
       }
-
-      newMessages.push({
-        type: "local",
-        message: `Sent content for ${filesData.length} files to server.`
-      });
-
       return [...prev, ...newMessages];
     });
   }, [sessionId]);
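// Aside: getFileHandleFromPath is referenced above but not shown in this diff.
// A plausible implementation using the File System Access API (an assumption,
// not the project's actual code) walks the path segments with
// getDirectoryHandle and resolves the final segment with getFileHandle:
async function getFileHandleFromPath(dirHandle, filepath) {
  const parts = filepath.split("/").filter(Boolean);
  let current = dirHandle;
  // Descend through the intermediate directories
  for (const part of parts.slice(0, -1)) {
    current = await current.getDirectoryHandle(part);
  }
  // Resolve the file itself
  return current.getFileHandle(parts[parts.length - 1]);
}
// Usage: const handle = await getFileHandleFromPath(dirHandle, "app/main.py");
//        const text = await (await handle.getFile()).text();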