import logging
from openai import AsyncOpenAI  # async client so API calls don't block the event loop
from typing import final
from app.core.providers.base import LLMProvider
from app.config import settings

@final
class DeepSeekProvider(LLMProvider):
    """Provider for the DeepSeek API."""
    def __init__(self, model_name: str, client: AsyncOpenAI) -> None:
        self.model = model_name
        self._client = client

    async def generate_response(self, prompt: str) -> str:
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]
        try:
            # AsyncOpenAI returns a coroutine here, so the call must be awaited.
            chat_completion = await self._client.chat.completions.create(
                model=self.model, messages=messages
            )
            # message.content is Optional in the SDK; fall back to "" to honor the str return type.
            return chat_completion.choices[0].message.content or ""
        except Exception:
            logging.exception("DeepSeek provider error")
            raise
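

# A minimal usage sketch (not part of the provider itself): build the shared
# AsyncOpenAI client against DeepSeek's OpenAI-compatible endpoint and inject it.
# The settings attribute name (deepseek_api_key) and the "deepseek-chat" model
# are assumptions; adjust them to match app.config.settings and your deployment.
async def _example_usage() -> None:
    client = AsyncOpenAI(
        api_key=settings.deepseek_api_key,    # assumed field on settings
        base_url="https://api.deepseek.com",  # DeepSeek's OpenAI-compatible base URL
    )
    provider = DeepSeekProvider(model_name="deepseek-chat", client=client)
    reply = await provider.generate_response("Say hello in one sentence.")
    print(reply)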