import os

from langchain import hub
from langchain.llms import OpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import PromptTemplate
# --- Model configuration ------------------------------------------------------
# BUG FIX: the original referenced undefined names `modal`, `api_key`, `api_url`
# (NameError on import). Connection settings now come from the environment so
# no secret lives in source; the model falls back to a completion-style default.
model_name = os.environ.get("OPENAI_MODEL_NAME", "gpt-3.5-turbo-instruct")
api_key = os.environ.get("OPENAI_API_KEY")
api_url = os.environ.get("OPENAI_API_BASE")

# Completion-style LLM client; temperature 0.0 for deterministic answers.
llm = OpenAI(
    model_name=model_name,
    openai_api_key=api_key,
    openai_api_base=api_url,
    temperature=0.0,
)

# Structured fields the model must emit.
# BUG FIX: the two ResponseSchema expressions were bare statements; they are
# now collected into the `response_schemas` list the parser factory requires.
response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(
        name="source",
        description="source used to answer the user's question, should be a website.",
    ),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()

# Prompt template with the parser's formatting instructions pre-filled as a
# partial variable, leaving only {input} for the caller.
# BUG FIX: the PromptTemplate(...) call was missing its closing parenthesis
# (SyntaxError as originally written).
prompt = PromptTemplate(
    template="answer the users question as best as possible.\n{format_instructions}\n{input}",
    input_variables=["input"],
    partial_variables={"format_instructions": format_instructions},
)

# LCEL pipeline: prompt -> LLM -> parsed dict (keys: "answer", "source").
qachain = prompt | llm | output_parser

question_1 = qachain.invoke({"input": "What is Lu Xun's real name?"})
print("执行结果:" + str(question_1))
print("输出类型为:" + str(type(question_1)))
# Use .get so a malformed model response (missing "source") prints None
# instead of raising KeyError.
print("参考来源为:" + str(question_1.get("source")))