# main.py

import logging
import os

from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from openai import OpenAI
from pydantic import BaseModel

# --- 1. Application Setup ---
# Load variables from a local .env file before anything reads the environment.
load_dotenv()

# Service metadata surfaced in the generated OpenAPI / Swagger docs.
_API_TITLE = "AI Model Hub Service"
_API_DESCRIPTION = "A central hub to route requests to various LLMs."
_API_VERSION = "0.1.2"

app = FastAPI(
    title=_API_TITLE,
    description=_API_DESCRIPTION,
    version=_API_VERSION,
)

# --- 2. Pydantic Models for Request/Response ---
class ChatRequest(BaseModel):
    """Request body accepted by the ``/chat`` endpoint."""

    # Raw user prompt, forwarded verbatim to the model.
    prompt: str

# --- 3. Configure DeepSeek API ---
# Fail fast at import time if the key is absent: a half-configured service
# that only errors on its first request is much harder to diagnose.
if not (api_key := os.getenv("DEEPSEEK_API_KEY")):
    raise ValueError("DEEPSEEK_API_KEY not found in environment variables. Please set it in the .env file.")

# DeepSeek exposes an OpenAI-compatible API, so the official OpenAI client
# works against it once base_url points at DeepSeek's endpoint.
client = OpenAI(api_key=api_key, base_url="https://api.deepseek.com")

# --- 4. API Endpoint Definition ---
@app.post("/chat")
def chat_handler(request: ChatRequest):
    """
    Accept a user prompt and return a response from the DeepSeek model.

    Declared as a plain ``def`` (not ``async def``) on purpose: the OpenAI
    SDK call below is blocking, and FastAPI runs sync handlers in its
    threadpool, so the event loop is not stalled while waiting on the
    upstream API. (The previous ``async def`` version blocked the loop.)

    Args:
        request: Parsed JSON body containing the user's ``prompt``.

    Returns:
        dict: ``{"response": <model reply text>}``.

    Raises:
        HTTPException: 500 when the upstream call fails for any reason.
    """
    logger = logging.getLogger(__name__)
    try:
        chat_completion = client.chat.completions.create(
            model="deepseek-chat",
            # A system message steers the model toward the intended behavior.
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": request.prompt},
            ],
            stream=False,  # single JSON response; no server-sent events
        )
        response_text = chat_completion.choices[0].message.content
        return {"response": response_text}
    except Exception:
        # Top-level boundary: log the full traceback server-side and return
        # an opaque 500 so upstream error details are not leaked to clients.
        logger.exception("DeepSeek chat completion failed")
        raise HTTPException(status_code=500, detail="Failed to get response from the model.")

@app.get("/")
def read_root():
    """Health-check endpoint confirming the server is up and serving."""
    return {"status": "AI Model Hub is running!"}