First, here is my SimpleTuner LoRA config, so you can get started with it. I used an 80GB A100 (thanks @bghira!).
LoRA repository:
```python
import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast

from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FluxLoraLoaderMixin
from diffusers.models.autoencoders import AutoencoderKL
```
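The imports above open a custom Flux pipeline built on diffusers. If you only want to sample from a trained LoRA, the stock `FluxPipeline` is enough; here is a minimal sketch, assuming the adapter was exported in diffusers format (the local directory and prompt are placeholders, not the actual repository):

```python
import torch
from diffusers import FluxPipeline

# Load the base Flux model in bfloat16 (fits comfortably on an 80GB A100).
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Attach the trained LoRA; "lora-output" stands in for wherever the weights were saved.
pipe.load_lora_weights("lora-output")

image = pipe(
    "a painting of a dreamscape",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("sample.png")
```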
```python
from PIL import Image
import stable_inference
import numpy as np
from einops import repeat

# Interpolate video between two conditionings
FOLDER = 'test/painting'
MAX_STRENGTH = 0.5  # Strength at maximum in the middle of the interpolation
SEED = 9001
SECONDS = 10
```
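The comment on `MAX_STRENGTH` means the denoising strength ramps up to its peak at the midpoint of the clip and falls back toward the ends. A plain NumPy sketch of one such schedule; the 30 fps frame rate and the triangular shape are assumptions for illustration, not necessarily what stable_inference computes internally:

```python
import numpy as np

SECONDS = 10        # values from the config above
MAX_STRENGTH = 0.5
FPS = 30            # assumed frame rate

num_frames = SECONDS * FPS

# Triangular schedule: 0 at either end, MAX_STRENGTH at the middle frame.
t = np.linspace(0.0, 1.0, num_frames)
strengths = MAX_STRENGTH * (1.0 - np.abs(2.0 * t - 1.0))
```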
```python
from PIL import Image
import stable_inference
import numpy as np
from einops import repeat

'''
Interpolate between two images with a prompt of what you expect the midstate to be.
Alter the stuff below here to whatever you need it to be.
'''
```
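Interpolating between two images is usually done in latent or embedding space rather than pixel space, and spherical linear interpolation (slerp) is the common choice because intermediate points keep roughly the same norm as the endpoints. A generic sketch of the standard formula, not necessarily stable_inference's exact implementation:

```python
import torch

def slerp(v0: torch.Tensor, v1: torch.Tensor, t: float, eps: float = 1e-7) -> torch.Tensor:
    """Spherically interpolate between two tensors at fraction t in [0, 1]."""
    v0_unit = v0 / (v0.norm() + eps)
    v1_unit = v1 / (v1.norm() + eps)
    dot = torch.clamp((v0_unit * v1_unit).sum(), -1.0, 1.0)
    theta = torch.acos(dot)
    if theta.abs() < eps:
        # Nearly parallel vectors: plain lerp is numerically safer.
        return (1.0 - t) * v0 + t * v1
    return (torch.sin((1.0 - t) * theta) * v0 + torch.sin(t * theta) * v1) / torch.sin(theta)
```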
```python
from PIL import Image
import stable_inference
import torch
from einops import repeat

def image_grid(imgs, rows, cols):
    # Tile a list of equally sized PIL images into a single rows x cols sheet.
    assert len(imgs) == rows * cols
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
```
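Calling `image_grid(images, rows=2, cols=2)` on four generated frames, for example, tiles them into a single 2x2 contact sheet for quick side-by-side comparison.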
```python
import argparse
from pathlib import Path
import sys

import torch

THRESHOLD_STRENGTH = 2.
DEFAULT_OUT_NAME = 'output.ckpt'

parser = argparse.ArgumentParser(description='Create a compressed dreambooth patch or patch weights')
parser.add_argument('mode', type=str, help='"compress" or "inflate"')
```
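The idea behind a dreambooth patch is that a fine-tuned checkpoint differs from its base model mostly by small per-weight deltas: "compress" keeps only the deltas large enough to matter so the patch file compresses well, and "inflate" adds a saved patch back onto the base weights. A rough sketch of both directions, continuing from the constants and imports above and assuming plain `state_dict`-style checkpoints; the thresholding rule and file layout are illustrative guesses, not the original script:

```python
def compress(base_path, tuned_path, out_path=DEFAULT_OUT_NAME):
    # Keep only per-weight deltas whose magnitude exceeds THRESHOLD_STRENGTH * std.
    base = torch.load(base_path, map_location='cpu')['state_dict']
    tuned = torch.load(tuned_path, map_location='cpu')['state_dict']
    patch = {}
    for key, base_w in base.items():
        delta = tuned[key].float() - base_w.float()
        mask = delta.abs() >= THRESHOLD_STRENGTH * delta.std()
        patch[key] = (delta * mask).to(torch.float16)
    torch.save({'state_dict': patch}, out_path)


def inflate(base_path, patch_path, out_path=DEFAULT_OUT_NAME):
    # Add a saved patch back onto the base checkpoint to recover the tuned weights.
    base = torch.load(base_path, map_location='cpu')['state_dict']
    patch = torch.load(patch_path, map_location='cpu')['state_dict']
    for key, delta in patch.items():
        base[key] = base[key] + delta.to(base[key].dtype)
    torch.save({'state_dict': base}, out_path)
```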