Skip to content

Instantly share code, notes, and snippets.

@aelaguiz
Created September 3, 2024 11:48
Show Gist options
  • Save aelaguiz/b89a9dbeca3d45846c7bef7fc818ac07 to your computer and use it in GitHub Desktop.
Save aelaguiz/b89a9dbeca3d45846c7bef7fc818ac07 to your computer and use it in GitHub Desktop.
Prompt logging callback handler for CrewAI / LangChain
class PromptLoggingHandler(BaseCallbackHandler):
    """Callback handler that logs LLM prompts, responses, and errors to a file.

    NOTE(review): assumes ``BaseCallbackHandler`` is LangChain's callback base
    class — confirm the import in the surrounding project. Attach an instance
    via the ``callbacks=[...]`` parameter of a LangChain chat model.
    """

    def __init__(self):
        # Original bug: logging.basicConfig configures the *root* logger and is
        # a silent no-op if logging was already configured anywhere in the
        # process. Keep the call for backward compatibility (first instance
        # still sets up the log file), but emit through a dedicated module
        # logger, which propagates to the root handlers.
        logging.basicConfig(filename='llm_interactions.log', level=logging.INFO,
                            format='%(asctime)s - %(message)s')
        self._logger = logging.getLogger(__name__)

    def on_llm_start(self, serialized, prompts, **kwargs):
        """Log each prompt about to be sent to the LLM, numbered from 1."""
        self._logger.info("Prompt sent:")
        for i, prompt in enumerate(prompts, 1):
            # Lazy %-args: the message is only formatted if the record is emitted.
            self._logger.info("Prompt %d:\n%s\n", i, prompt)

    def on_llm_end(self, response, **kwargs):
        """Log the text of the first generation in the LLM response."""
        # Original indexed response.generations[0][0] unconditionally, which
        # raises IndexError on an empty generations list; fall back to repr()
        # so a logging hook never crashes the surrounding LLM call.
        try:
            formatted_response = response.generations[0][0].text
        except (IndexError, AttributeError):
            formatted_response = repr(response)
        self._logger.info("Response received:\n%s", formatted_response)

    def on_llm_error(self, error, **kwargs):
        """Log any exception raised during the LLM call."""
        self._logger.error("LLM error: %s", error)
# use like this
# NOTE(review): ChatOpenAI and its `callbacks=` parameter come from the
# LangChain OpenAI integration — confirm the import in the hosting project.
# The handler logs every prompt/response for this model to llm_interactions.log.
gpt_4o = ChatOpenAI(model="gpt-4o", temperature=0.7, max_tokens=4096, callbacks=[PromptLoggingHandler()])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment