Skip to content

Instantly share code, notes, and snippets.

@Impulsleistung
Created September 18, 2024 06:48
Show Gist options
  • Save Impulsleistung/75d600ded66b85c4babae4900d444eb8 to your computer and use it in GitHub Desktop.
Usage of llama-3.1-sonar-small-128k-online
import os
import sys
from dotenv import load_dotenv
from langchain_community.chat_models import ChatPerplexity
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
# Load environment variables
# Pull environment variables (notably PPLX_API_KEY) from a local .env file.
load_dotenv()

# Read the API key once so the client construction below stays declarative.
_pplx_api_key = os.getenv("PPLX_API_KEY")

# Shared Perplexity chat client; temperature=0 keeps responses deterministic.
chat = ChatPerplexity(
    model="llama-3.1-sonar-small-128k-online",
    temperature=0,
    pplx_api_key=_pplx_api_key,
)
def evaluate_text(file_path, prompt):
    """Send *prompt* plus the contents of *file_path* to the Perplexity model.

    Parameters
    ----------
    file_path : str
        Path to a UTF-8 text file whose contents are appended after the prompt.
    prompt : str
        The instruction/question for the model.

    Returns
    -------
    str
        The model's response parsed to a plain string.

    Raises
    ------
    OSError
        If the file cannot be opened or read.
    """
    # Read the whole file up front; the content is interpolated into the
    # human message below rather than streamed.
    with open(file_path, "r", encoding="utf-8") as file:
        file_content = file.read()

    # Two-message chat template: a fixed system role, then the user's prompt
    # followed by the file body separated by a blank line.
    prompt_template = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant."),
            ("human", "{prompt}\n\n{file_content}"),
        ]
    )

    # LCEL pipeline: template -> model (module-level `chat`) -> string parser.
    chain = prompt_template | chat | StrOutputParser()
    return chain.invoke({"prompt": prompt, "file_content": file_content})
if __name__ == "__main__":
    # Exactly two CLI arguments are required: the prompt and the file path.
    if len(sys.argv) != 3:
        print("Usage: python script_name.py <prompt> <path_to_text_file>")
        sys.exit(1)

    prompt = sys.argv[1]
    file_path = sys.argv[2]

    # Make the API call and print the model's evaluation to stdout.
    evaluation = evaluate_text(file_path, prompt)
    print(evaluation)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment