I am using Django and LangChain with OpenAI to generate responses to my prompts. I was trying to enable streaming using Server-Sent Events (SSE) in my API function. When I run my code, it does stream the OpenAI output in the terminal, but it returns the output as a whole to the client once the streaming has ended.
I tried using StreamingHttpResponse, but without success. If anyone could point out the mistake in this code, that would be really helpful.
Here is my code:
@api_view(["GET", "POST"])
def sse_view(request):
    """Answer a list of questions about a PDF and stream the answers via SSE.

    Expects a POST body containing:
      - ``url``: location of the PDF document(s) to index,
      - ``questions``: list of questions to answer,
      - ``promptName``: name of the custom prompt template to use.

    Returns a ``StreamingHttpResponse`` with content type ``text/event-stream``
    that emits one SSE ``data:`` event per answered question.
    """
    if request.method != "POST":
        return Response({"message": "Use the POST method for the response"})

    url = request.data.get("url")
    questions = request.data.get("questions")
    prompt = request.data.get("promptName")
    if not url or not questions:
        return Response({"message": "Please provide valid URL and questions"})

    # Process the documents received from the user.
    try:
        doc_store = process_url(url)
        if not doc_store:
            return Response({"message": "PDF document not loaded"})
    except Exception:
        # NOTE(review): consider logging the exception here — the original
        # bound it to an unused `e` and discarded the details.
        return Response({"message": "Error loading PDF document"})

    custom_prompt_template = set_custom_prompt(url, prompt)

    # Load the PDFs and split them into overlapping chunks for retrieval.
    loader = DirectoryLoader(DATA_PATH, glob="*.pdf", loader_cls=PyPDFLoader)
    documents = loader.load()
    text_splitter = CustomRecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=30)
    texts = text_splitter.split_documents(documents)

    # Create OpenAI embeddings and persist a FAISS vector store.
    embeddings = OpenAIEmbeddings(chunk_size=16, openai_api_key=openai_gpt_key)
    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)

    search_kwargs = {
        "k": 30,
        "fetch_k": 100,
        "maximal_marginal_relevance": True,
        "distance_metric": "cos",
    }
    retriever = db.as_retriever(search_kwargs=search_kwargs)

    def openai_response_generator():
        """Yield one SSE event per question as each answer completes."""
        llm = ChatOpenAI(
            model_name="gpt-3.5-turbo-16k",
            streaming=True,
            # StreamingStdOutCallbackHandler only streams tokens to the
            # *server's* terminal — which is why you see streaming there.
            # The qa() call below blocks until the full answer is ready, so
            # the HTTP client only gets whole answers.  For token-by-token
            # streaming to the client, use a callback handler that pushes
            # tokens onto a queue which this generator drains.
            callbacks=[StreamingStdOutCallbackHandler()],
            temperature=0,
            openai_api_key=openai_gpt_key,
        )
        for question in questions:
            qa = RetrievalQA.from_chain_type(
                llm=llm,
                chain_type="stuff",
                retriever=retriever,
                return_source_documents=True,
                chain_type_kwargs={"prompt": custom_prompt_template},
            )
            res = qa({"query": question})
            # SSE events MUST end with a blank line ("\n\n").  A single "\n"
            # leaves the event unterminated, so clients buffer everything
            # until the connection closes — the bug reported above.
            yield f"data: {res['result']}\n\n"

    response = StreamingHttpResponse(
        openai_response_generator(), content_type="text/event-stream"
    )
    # Discourage intermediary buffering (e.g. nginx) of the event stream.
    response["Cache-Control"] = "no-cache"
    response["X-Accel-Buffering"] = "no"
    return response