@garyblankenship
Created May 6, 2024 03:12
Test a list of Ollama models (plus remote models via Mods) against every prompt in a prompts.txt file, saving each response to its own file.
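A minimal prompts.txt holds one prompt per line; the prompts below are hypothetical examples, and the script filename test_models.sh is an assumption (the gist does not name the file):

What is the capital of France?
Explain big-O notation in one short paragraph.

Run the script with the default system prompt, or pass your own as the first argument:

bash test_models.sh "You are a terse assistant. Answer in one sentence."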
#!/bin/bash
# Use the first command-line argument as the prompt, or fall back to a default
PROMPT="${1:-You are an extremely helpful assistant going above and beyond to provide the highest quality response.}"
# Inform if default prompt is used
[ "$#" -eq 0 ] && echo "Prompt not provided, using default: $PROMPT"
# Check for the prompts file, one prompt per line
if [ ! -f "prompts.txt" ]; then
  echo "Prompts file (prompts.txt) not found."
  exit 1
fi
# Run every prompt in prompts.txt through each model and save the responses
process_questions() {
  local command=$1         # e.g. "ollama run" or "mods -rq -m"; left unquoted below so it word-splits into command + flags
  local models=("${@:2}")
  for MODEL in "${models[@]}"; do
    echo "========================================"
    echo "Loading model: $MODEL"
    echo "========================================"
    while IFS= read -r line; do
      # Hash the prompt so each (model, prompt) pair gets a stable output filename
      hash=$(echo "$line" | md5sum | awk '{print $1}')
      OUTPUT_FILE="response_${MODEL//:/_}_${hash}.txt"
      # Skip prompts this model has already answered in a previous run
      if [ ! -f "$OUTPUT_FILE" ]; then
        echo "$line"
        echo "----------------------------------------"
        echo "$line" | $command "$MODEL" "$PROMPT" > "$OUTPUT_FILE"
      fi
    done < "prompts.txt"
    echo ""
  done
}
# Define local models for processing with Ollama
LOCAL_MODELS=(
"llama3:70b-instruct-q2_K"
"llama3:70b-instruct-q5_K_M"
"dolphin-mixtral:8x7b-v2.7-q8_0"
"command-r-plus:104b-q2_K"
"command-r:35b-v0.1-q5_K_M"
"qwen15-32b-chat:latest"
)
process_questions "ollama run" "${LOCAL_MODELS[@]}"
# Define remote models for processing with Mods; these are shorthand names/aliases from the author's Mods configuration
REMOTE_MODELS=(
"35t"
"4t"
"gemini_pro"
"groq3big"
"groqmix"
)
process_questions "mods -rq -m" "${REMOTE_MODELS[@]}"