@secemp9
Created August 17, 2024 12:28
Flux inference: memory-efficient FLUX.1-schnell image generation with 4-bit quantization
import numpy as np
import torch
from diffusers import DiffusionPipeline
from transformers import BitsAndBytesConfig
# Set up device
device = "cuda" if torch.cuda.is_available() else "cpu"
# Configure 4-bit quantization
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)
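# nf4 ("NormalFloat4") is a 4-bit data type tuned for normally distributed
# weights; double quantization also compresses the quantization constants
# themselves, saving additional memory on top of the 4-bit weights.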
# Load the model with 4-bit quantization
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.float16,
    use_safetensors=True,
    low_cpu_mem_usage=True,
    quantization_config=quantization_config,
)
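# Note (an assumption about library behavior, not stated in the original):
# how quantization_config is handled here varies by diffusers version. Older
# releases may forward it only to the transformers-backed text encoders,
# while newer ones expose a dedicated PipelineQuantizationConfig for
# quantizing the whole pipeline in one pass.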
# Enable memory-efficient attention
pipe.enable_attention_slicing(slice_size="auto")
# Enable sequential CPU offloading
pipe.enable_sequential_cpu_offload()
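# Alternatively, pipe.enable_model_cpu_offload() offloads whole submodules
# instead of individual layers: faster than sequential offload, but it needs
# more free VRAM at any given moment.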
# Set up constants
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
def infer(prompt, seed=42, width=1024, height=1024, num_inference_steps=4):
    # Seed the generator so results are reproducible
    generator = torch.Generator(device=device).manual_seed(seed)
    # Generate the image; FLUX.1-schnell is guidance-distilled, so
    # guidance_scale stays at 0.0 and only a few steps are needed
    with torch.inference_mode():
        image = pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=0.0,
        ).images[0]
    return image, seed
# Example usage
prompt = "a tiny astronaut hatching from an egg on the moon"
result_image, used_seed = infer(prompt)
# If you want to save the image
result_image.save("output_image.png")
print(f"Image generated with seed: {used_seed}")