# cortex-hub / ai-hub / app / core / pipelines / code_changer.py
import json
import os
from typing import List, Dict, Any, Tuple, Optional, Callable

# Prompt sent to the LLM for a single-file code change. Placeholders in
# single braces are filled by str.format(); literal braces in the JSON
# example are escaped as {{ }}.
PROMPT_TEMPLATE = """
### 🧠 Core Directives

You are a code generation assistant specialized in producing **one precise and complete code change** per instruction. Your output must be a strict JSON object containing:

- `reasoning`: A concise explanation of the change.
- `content`: The **full content of the file** (or an empty string for deletions).

---

### 1. Input Structure

- `overall_plan`: {overall_plan}
- `instruction`: {instruction}
- `filepath`: {filepath}
- `original_files`: {original_files}
- `updated_files`: {updated_files}

-----

### 2. 💻 Code Generation Rules

Please provide **one complete and functional code file** per request, for the specified `filepath`. You must output the **entire, modified file**.

* **Identical Code Sections:** Use the `#[unchanged_section]|<file_path>|<start_line>|<end_line>` syntax for large, sequential blocks of code that are not being modified.
* **Complete File Output:** Always provide the **full file contents** in the `content` block. Do not use placeholders like `...`.
* **Imports:** Ensure all required imports are included.

---

### 3. Output Format

Return exactly one JSON object:
{{
  "reasoning": "Brief explanation.",
  "content": "Full file content"
}}
"""

class CodeRagCodeChanger:
    """Pipeline step that asks an LLM for one complete code change.

    Formats ``PROMPT_TEMPLATE`` with the planning context, requests a
    JSON-object completion from the provider, and parses the reply into
    a ``(content, reasoning)`` pair.
    """

    def __init__(self):
        # Stateless; kept for uniform pipeline construction.
        pass

    async def forward(
        self,
        overall_plan: str,
        instruction: str,
        filepath: str,
        original_files: List[Dict[str, Any]],
        updated_files: List[Dict[str, Any]],
        llm_provider=None,
    ) -> Tuple[str, str]:
        """Generate the full updated content for ``filepath``.

        Args:
            overall_plan: High-level plan the change belongs to.
            instruction: The single change instruction to apply.
            filepath: Path of the file being (re)generated.
            original_files: File snapshots before the change.
            updated_files: Files already updated by earlier steps.
            llm_provider: Object exposing
                ``acompletion(prompt=..., response_format=...)``; required.

        Returns:
            ``(content, reasoning)``. On a malformed LLM reply, content is
            ``""`` and reasoning carries a parse-failure message.

        Raises:
            ValueError: If ``llm_provider`` is not supplied.
        """
        if llm_provider is None:
            raise ValueError("LLM Provider is required.")

        prompt = PROMPT_TEMPLATE.format(
            overall_plan=overall_plan,
            instruction=instruction,
            filepath=filepath,
            original_files=json.dumps(original_files),
            updated_files=json.dumps(updated_files),
        )

        response = await llm_provider.acompletion(
            prompt=prompt, response_format={"type": "json_object"}
        )
        content = response.choices[0].message.content

        try:
            # content may be None (TypeError) or invalid JSON (JSONDecodeError);
            # both are reported via the error-return path rather than raised.
            data = json.loads(content)
        except (json.JSONDecodeError, TypeError):
            return "", f"Failed to parse JSON: {content}"

        if not isinstance(data, dict):
            # Valid JSON but not the expected object (e.g. a bare list/string).
            return "", f"Failed to parse JSON: {content}"

        return data.get("content", ""), data.get("reasoning", "")