Skip to content

Instantly share code, notes, and snippets.

Model AGIEval GPT4All TruthfulQA Bigbench
recoilme-gemma-2-9B-v0.4 45.94 Error: File does not exist 59.03 Error: File does not exist

AGIEval

Task Version Metric Value Stderr
agieval_aqua_rat 0 acc 26.77 ± 2.78
acc_norm 25.98 ± 2.76
agieval_logiqa_en 0 acc 41.32 ± 1.93
Model ARC HellaSwag MMLU TruthfulQA Winogrande GSM8K
recoilme-gemma-2-9B-v0.4 Error: File does not exist Error: File does not exist Error: File does not exist Error: File does not exist Error: File does not exist Error: File does not exist

ARC

Average: Error: File does not exist%

HellaSwag

Model AGIEval GPT4All TruthfulQA Bigbench
recoilme-gemma-2-9B-v0.3 40.71 Error: File does not exist 58.62 Error: File does not exist

AGIEval

Task Version Metric Value Stderr
agieval_aqua_rat 0 acc 22.44 ± 2.62
acc_norm 24.41 ± 2.70
agieval_logiqa_en 0 acc 36.56 ± 1.89
Model AGIEval GPT4All TruthfulQA Bigbench
Gemma-2-Ataraxy-Gemmasutra-9B-slerp 40.91 Error: File does not exist 60.1 Error: File does not exist

AGIEval

Task Version Metric Value Stderr
agieval_aqua_rat 0 acc 21.26 ± 2.57
acc_norm 23.23 ± 2.65
agieval_logiqa_en 0 acc 38.56 ± 1.91
@recoilme
recoilme / joy-caption-pre-alpha.py
Created August 15, 2024 13:06
joy-caption-pre-alpha
#import spaces
#import gradio as gr
from huggingface_hub import InferenceClient
from torch import nn
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from pathlib import Path
import torch
import torch.amp.autocast_mode
from PIL import Image
import os, time
from diffusers import StableDiffusionXLPipeline
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl_2p
from sd_embed.embedding_funcs import group_tokens_and_weights
from sd_embed.prompt_parser import parse_prompt_attention
from diffusers import DiffusionPipeline,EulerAncestralDiscreteScheduler
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.utils import make_image_grid
import torch, gc
azureuser@aiartlab-1:~$ python3 -m pip freeze
absl-py==2.1.0
accelerate==0.25.0
aiohttp==3.9.5
aiosignal==1.3.1
altair==4.2.2
async-timeout==4.0.3
attrs==21.2.0
Automat==20.2.0
Babel==2.8.0
from diffusers import DiffusionPipeline,EulerAncestralDiscreteScheduler
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
import torch
# Path to a locally stored diffusers checkpoint directory.
MODEL_PATH = "models/colorfulxl"
# Load the pipeline with all text encoders/tokenizers disabled — presumably
# prompt embeddings are produced externally (the surrounding imports pull in
# sd_embed's get_weighted_text_embeddings_sdxl helpers) — TODO confirm.
# float16 weights are requested, so this is intended for GPU inference.
# NOTE(review): this call is truncated in the pasted source — the closing
# parenthesis and any remaining keyword arguments are missing. Recover the
# full snippet from the original gist before running this code.
pipe = DiffusionPipeline.from_pretrained(
MODEL_PATH,
text_encoder=None, tokenizer=None,
text_encoder_2=None, tokenizer_2=None,
torch_dtype=torch.float16,
from diffusers import StableDiffusionXLPipeline
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl
from sd_embed.embedding_funcs import get_weighted_text_embeddings_sdxl_2p
from sd_embed.prompt_parser import parse_prompt_attention
from diffusers import DiffusionPipeline,EulerAncestralDiscreteScheduler
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.utils import make_image_grid
import torch
MODEL_PATH = "models/colorfulxl"
from diffusers import DiffusionPipeline,EulerAncestralDiscreteScheduler
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
import torch
MODEL_PATH = "models/colorfulxl"
pipe = DiffusionPipeline.from_pretrained(
MODEL_PATH,
text_encoder=None, tokenizer=None,
text_encoder_2=None, tokenizer_2=None,
torch_dtype=torch.float16,