import asyncio
import os
from typing import AsyncIterable, Awaitable

import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from pydantic import BaseModel

# Two ways to load env variables:
# 1. Load env variables from a .env file in the working directory.
load_dotenv()

# 2. Manually set env variables.
# NOTE(review): the fallback below installs an EMPTY OPENAI_API_KEY placeholder;
# fill in a real key here or provide one via the .env file above.
if "OPENAI_API_KEY" not in os.environ:
    os.environ["OPENAI_API_KEY"] = ""
asyncdefwrap_done(fn: Awaitable, event: asyncio.Event): """Wrap an awaitable with a event to signal when it's done or an exception is raised.""" try: await fn except Exception as e: # TODO: handle exception print(f"Caught exception: {e}") finally: # Signal the aiter to stop. event.set()
    # NOTE(review): these lines are the interior of a streaming generator whose
    # enclosing `async def` header is not visible in this chunk; `model`,
    # `callback`, and `message` are presumably bound just above it (a streaming
    # ChatOpenAI model wired to an AsyncIteratorCallbackHandler and the request
    # message) -- confirm against the full file.
    # Begin a task that runs in the background.
    task = asyncio.create_task(wrap_done(
        model.agenerate(messages=[[HumanMessage(content=message)]]),
        callback.done),
    )

    # Relay each streamed token to the client as it arrives.
    async for token in callback.aiter():
        # Use server-sent-events to stream the response
        yield f"data: {token}\n\n"

    # Re-await the background task so its completion (and any swallowed
    # exception handling in wrap_done) is observed before the generator ends.
    await task
class StreamRequest(BaseModel):
    """Schema of the request body accepted by the streaming endpoint."""

    # The prompt text to forward to the chat model.
    message: str