from fastapi import FastAPI
import uvicorn
from pydantic import BaseModel
#from .agent_utils import run_agent_with_streaming  # Correct import

################
####
from .pydantic_ai_agent import pydantic_ai_agent, PydanticAIDeps
from .db import init_collection
from openai import AsyncOpenAI
import os
from dotenv import load_dotenv

# Load environment variables from a local .env file (e.g. OPENAI_API_KEY).
load_dotenv()
# Shared async OpenAI client; passed to the agent via PydanticAIDeps on each request.
openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Chroma collection initialized once at import time so every request reuses it.
chroma_collection = init_collection()

async def run_agent_with_streaming(user_input: str):
    """Run the agent on *user_input* and return the fully streamed reply as plain text."""
    deps = PydanticAIDeps(collection=chroma_collection, openai_client=openai_client)

    # Stream the model's answer and collect the delta chunks; joining a list
    # avoids repeated string concatenation while producing the same result.
    pieces: list[str] = []
    async with pydantic_ai_agent.run_stream(
        user_input,
        deps=deps,
        message_history=[],  # stateless: no prior conversation context
    ) as result:
        async for delta in result.stream_text(delta=True):
            pieces.append(delta)
    return "".join(pieces)
####
################



class QuestionRequest(BaseModel):
    """Request body for POST /ask."""
    question: str  # natural-language question forwarded to the agent

# FastAPI application instance; route handlers below are registered on it.
api = FastAPI()

@api.get("/")
def read_root():
    """Health-check endpoint: confirms the service is up."""
    payload = {"message": "API is running!"}
    return payload

# Agent question-answering endpoint.
@api.post("/ask")
async def ask_question(request: QuestionRequest):
    """Run the agent on the submitted question and return the question/answer pair."""
    answer = await run_agent_with_streaming(request.question)
    return {"question": request.question, "answer": answer}

if __name__ == "__main__":
    # Dev entry point: serve the app directly, bound on all interfaces at port 8505.
    uvicorn.run(api, host="0.0.0.0", port=8505, log_level="info")
