- Think of clip nodes as containers!
- You can plug an arbitrary composite node into them, making each clip an "instance" of that node
A simple drop shadow effect, assuming you have a solid color and transparent clip such as text.
# Imports for a Flux text-to-image pipeline snippet (model, VAE helpers,
# text encoders/tokenizers) plus torch and gc for memory management.
from diffusers import FluxPipeline, AutoencoderKL
from diffusers.image_processor import VaeImageProcessor
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
import torch
import gc
def flush():
    """Release Python garbage and return cached CUDA memory to the driver.

    Safe to call on CPU-only builds: ``empty_cache`` is a no-op when CUDA
    has not been initialized.
    """
    gc.collect()
    torch.cuda.empty_cache()
def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
    """Fragment of a diffusion-model forward pass.

    NOTE(review): the original lines carried ' | |' extraction residue,
    removed here; the body appears truncated after the batch-size assert,
    so only the visible statements are kept — confirm against the full file.
    """
    # Broadcast the timestep tensor across the batch dimension.
    timesteps = timesteps.expand(x.shape[0])
    hs = []  # skip-connection activations; filled later in the (not-shown) body
    # Sinusoidal timestep embedding at the model's base channel width.
    t_emb = get_timestep_embedding(timesteps, self.model_channels)  # , repeat_only=False)
    t_emb = t_emb.to(x.dtype)
    emb = self.time_embed(t_emb)
    # y carries extra conditioning; it must align with the input batch.
    assert x.shape[0] == y.shape[0], f"batch size mismatch: {x.shape[0]} != {y.shape[0]}"
# Hugging Face Hub login (prompts interactively for an access token).
from huggingface_hub import login

login()

# Imports for loading the text encoder in 8-bit quantized form.
from transformers import T5EncoderModel
from diffusers import DiffusionPipeline
import datetime
# Ran it with the following packages installed:
#   accelerate    0.18.0
#   diffusers     0.16.0.dev0
#   torch         2.0.0+cu118
#   torchvision   0.15.0+cu118
#   transformers  4.28.1
#   xformers      0.0.18
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from PIL import Image
import stable_inference
import numpy as np
from einops import repeat
# Interpolate video between two conditionings.
FOLDER = 'test/painting'       # output directory for rendered frames
MAX_STRENGTH = 0.5             # strength at the midpoint of the interpolation
SEED = 9001                    # RNG seed for reproducible generations
SECONDS = 10                   # length of the interpolated video
# Blender add-on metadata; the original fragment was missing the closing brace.
bl_info = {
    "name": "Kuchi Paku Light",
    "author": "ds54e",
    "version": (1, 1, 1),
    "blender": (2, 80, 0),  # minimum supported Blender version
    "location": "View3D > Sidebar > KPL",
    "description": "Generate Kuchi-Paku animations from the sound sequences in the VSE",
    "warning": "",
    "doc_url": "",
    "category": "Animation",
}
#!/bin/bash
# This script generates a wheel for OpenTimelineIO in the dist folder.
# We need it as a patch to install otio 0.12.1 in Windows Blender, because it requires compilation.
# The wheel should be distributed with code that depends on otio 0.12.1 and installed using Blender's python -m pip.
# Requirements: python (3.7.*) and Visual Studio 2017+.
CURRENT_DIR=$(dirname "$0")   # $(...) over backticks; quote $0 against spaces in the path
ROOT_DIR=$CURRENT_DIR/
# Standard-library imports first, then third-party (PEP 8 grouping).
import json
import os
import shutil
import subprocess
import sys

import requests
import opentimelineio as otio
Blender 2.80 introduces a new feature, collections, which (in my opinion) is very important for various purposes such as grouping objects, selectively enabling/disabling them in the viewport, etc.
Accordingly, the code for adding an object to the active scene also needs to be updated for 2.80. From the documentation (https://wiki.blender.org/wiki/Reference/Release_Notes/2.80/Python_API/Scene_and_Object_API),