import asyncio
import os

from together import AsyncTogether, Together

os.environ["TOGETHER_API_KEY"] = "your_api_key_here"

# Synchronous client for the final aggregation call, async client for the
# parallel calls to the reference models.
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))
async_client = AsyncTogether(api_key=os.environ.get("TOGETHER_API_KEY"))
user_prompt = "What is Karma Yoga as per Bhagavad Gita, Vyadha Gita, Yoga Vasistham and Tripura Rahasya?"
"Qwen/Qwen2-72B-Instruct",
"Qwen/Qwen1.5-72B-Chat",
"mistralai/Mixtral-8x22B-Instruct-v0.1",
"databricks/dbrx-instruct",
aggregator_model = "mistralai/Mixtral-8x22B-Instruct-v0.1"
aggregator_system_prompt = """You have been provided with a set of responses from various open-source models to the latest user query. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.
Responses from models:"""
async def run_llm(model):
    """Run a single LLM call with a reference model."""
    response = await async_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_prompt}],
    )
    print(f"Response from {model}: {response.choices[0].message.content}\n")
    return response.choices[0].message.content
# Query all reference models concurrently (top-level await assumes a notebook/IPython session).
results = await asyncio.gather(*[run_llm(model) for model in reference_models])
# Have the aggregator model synthesize the reference answers and stream the result.
finalStream = client.chat.completions.create(
    model=aggregator_model,
    messages=[
        {"role": "system", "content": aggregator_system_prompt},
        {"role": "user", "content": ",".join(str(element) for element in results)},
    ],
    stream=True,
)
for chunk in finalStream:
    print(chunk.choices[0].delta.content or "", end="", flush=True)
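
# A minimal sketch, assuming you want to run the same flow in a standalone .py
# script rather than a notebook (where top-level await is available): wrap the
# async steps in a coroutine and drive it with asyncio.run(). The main() name
# and the non-streaming final call are illustrative, not part of the original
# example; reuse run_llm, reference_models, and the aggregator settings above.
async def main():
    results = await asyncio.gather(*[run_llm(model) for model in reference_models])
    final = client.chat.completions.create(
        model=aggregator_model,
        messages=[
            {"role": "system", "content": aggregator_system_prompt},
            {"role": "user", "content": ",".join(str(r) for r in results)},
        ],
    )
    print(final.choices[0].message.content)

if __name__ == "__main__":
    asyncio.run(main())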