from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser

# prompt_template is assumed to be defined earlier in the notebook/session
# (e.g. a quiz-generation prompt); it is not part of this snippet.
chat_prompt = ChatPromptTemplate.from_messages([("human", prompt_template)])
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
output_parser = StrOutputParser()

chain = chat_prompt | llm | output_parser
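# Usage sketch (not part of the original gist): the composed chain is run with
# .invoke(); the input keys must match the placeholders in prompt_template
# (an empty dict works if the template has no placeholders).
# result = chain.invoke({})
# print(result)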
def assistant_chain(
    system_message,
    human_template="{question}",
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    output_parser=StrOutputParser()
):
    """Build a chat chain from a system message and a human message template."""
    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("human", human_template),
    ])
    return chat_prompt | llm | output_parser
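# Usage sketch (the system message below is an illustrative assumption, not
# taken from the gist):
# science_assistant = assistant_chain("You are a quiz generator for science topics.")
# print(science_assistant.invoke({"question": "Generate a quiz about science."}))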
def eval_expected_words(
    system_message,
    question,
    expected_words,
    human_template="{question}",
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    output_parser=StrOutputParser()
):
    """Ask the assistant a question and assert that at least one expected word appears in the answer."""
    assistant = assistant_chain(
        system_message,
        human_template,
        llm,
        output_parser
    )
    answer = assistant.invoke({"question": question})
    print(answer)

    assert any(word in answer.lower() for word in expected_words), \
        f"Expected the assistant's answer to include one of {expected_words}, but it did not"
question = "Generate a quiz about science." | |
expected_words = ["davinci", "telescope", "physics", "curie"] | |
eval_expected_words( | |
prompt_template, | |
question, | |
expected_words | |
) | |
def evaluate_refusal(
    system_message,
    question,
    decline_response,
    human_template="{question}",
    llm=ChatOpenAI(model="gpt-3.5-turbo", temperature=0),
    output_parser=StrOutputParser()
):
    """Ask the assistant an out-of-scope question and assert that it declines as expected."""
    # Note: arguments are passed in the order assistant_chain expects
    # (system_message first, then human_template).
    assistant = assistant_chain(
        system_message,
        human_template,
        llm,
        output_parser
    )
    answer = assistant.invoke({"question": question})
    print(answer)

    assert decline_response.lower() in answer.lower(), \
        f"Expected the bot to decline with '{decline_response}', got: {answer}"
question = "Generate a quiz about Rome." | |
decline_response = "I'm sorry" | |
evaluate_refusal( | |
prompt_template, | |
question, | |
decline_response | |
) |