forked from justinpinkney/stable-diffusion
* simple datasets
* add conversion script
* finish fine tune example
* update readme
1 parent 704f564 · commit f1293f9 · showing 11 changed files with 942 additions and 15 deletions.
.gitignore:
@@ -1,6 +1,5 @@
logs/
dump/
im-examples/
outputs/
flagged/
*.egg-info
Fine-tuning config (new YAML file):
@@ -0,0 +1,133 @@
model:
  base_learning_rate: 1.0e-04
  target: ldm.models.diffusion.ddpm.LatentDiffusion
  params:
    linear_start: 0.00085
    linear_end: 0.0120
    num_timesteps_cond: 1
    log_every_t: 200
    timesteps: 1000
    first_stage_key: "image"
    cond_stage_key: "txt"
    image_size: 64
    channels: 4
    cond_stage_trainable: false # Note: different from the one we trained before
    conditioning_key: crossattn
    scale_factor: 0.18215

    scheduler_config: # 10000 warmup steps
      target: ldm.lr_scheduler.LambdaLinearScheduler
      params:
        warm_up_steps: [ 1 ] # NOTE for resuming. use 10000 if starting from scratch
        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
        f_start: [ 1.e-6 ]
        f_max: [ 1. ]
        f_min: [ 1. ]

    unet_config:
      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
      params:
        image_size: 32 # unused
        in_channels: 4
        out_channels: 4
        model_channels: 320
        attention_resolutions: [ 4, 2, 1 ]
        num_res_blocks: 2
        channel_mult: [ 1, 2, 4, 4 ]
        num_heads: 8
        use_spatial_transformer: True
        transformer_depth: 1
        context_dim: 768
        use_checkpoint: True
        legacy: False

    first_stage_config:
      target: ldm.models.autoencoder.AutoencoderKL
      ckpt_path: "models/first_stage_models/kl-f8/model.ckpt"
      params:
        embed_dim: 4
        monitor: val/rec_loss
        ddconfig:
          double_z: true
          z_channels: 4
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 2
          - 4
          - 4
          num_res_blocks: 2
          attn_resolutions: []
          dropout: 0.0
        lossconfig:
          target: torch.nn.Identity

    cond_stage_config:
      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 4
    num_workers: 4
    num_val_workers: 0 # Avoid a weird val dataloader issue
    train:
      target: ldm.data.simple.hf_dataset
      params:
        name: lambdalabs/pokemon-blip-captions
        image_transforms:
        - target: torchvision.transforms.Resize
          params:
            size: 512
            interpolation: 3
        - target: torchvision.transforms.RandomCrop
          params:
            size: 512
        - target: torchvision.transforms.RandomHorizontalFlip
    validation:
      target: ldm.data.simple.TextOnly
      params:
        captions:
        - "A pokemon with green eyes, large wings, and a hat"
        - "A cute bunny rabbit"
        - "Yoda"
        - "An epic landscape photo of a mountain"
        output_size: 512
        n_gpus: 2 # small hack to make sure we see all our samples

lightning:
  find_unused_parameters: False

  modelcheckpoint:
    params:
      every_n_train_steps: 2000
      save_top_k: -1
      monitor: null

  callbacks:
    image_logger:
      target: main.ImageLogger
      params:
        batch_frequency: 2000
        max_images: 4
        increase_log_steps: False
        log_first_step: True
        log_all_val: True
        log_images_kwargs:
          use_ema_scope: True
          inpaint: False
          plot_progressive_rows: False
          plot_diffusion_rows: False
          N: 4
          unconditional_guidance_scale: 3.0
          unconditional_guidance_label: [""]

  trainer:
    benchmark: True
    num_sanity_val_steps: 0
    accumulate_grad_batches: 1
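For orientation, here is a minimal sketch (not part of the commit) of how a config like this is consumed. In the LDM codebase, main.py — referenced above via target: main.DataModuleFromConfig — loads the YAML and builds each target:/params: block with ldm.util.instantiate_from_config. The config path below is hypothetical.

# Sketch only, assuming the config above is saved at configs/pokemon_finetune.yaml
from omegaconf import OmegaConf
from ldm.util import instantiate_from_config

config = OmegaConf.load("configs/pokemon_finetune.yaml")  # hypothetical path

# Any block with a `target:` key names a class or factory; `params:` become its kwargs.
model = instantiate_from_config(config.model)                     # LatentDiffusion
train_ds = instantiate_from_config(config.data.params.train)      # ldm.data.simple.hf_dataset(...)
val_ds = instantiate_from_config(config.data.params.validation)   # ldm.data.simple.TextOnly(...)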
ldm/data/simple.py (new file):
@@ -0,0 +1,101 @@
import torch
from torch.utils.data import Dataset
from pathlib import Path
import json
from PIL import Image
from torchvision import transforms
from einops import rearrange
from ldm.util import instantiate_from_config
from datasets import load_dataset


class FolderData(Dataset):
    """Images from a folder tree, with captions read from a JSON file."""
    def __init__(self, root_dir, caption_file, image_transforms, ext="jpg") -> None:
        self.root_dir = Path(root_dir)
        with open(caption_file, "rt") as f:
            captions = json.load(f)
        self.captions = captions

        self.paths = list(self.root_dir.rglob(f"*.{ext}"))
        image_transforms = [instantiate_from_config(tt) for tt in image_transforms]
        # Scale to [-1, 1] and move channels last, as the LDM training loop expects
        image_transforms.extend([transforms.ToTensor(),
                                 transforms.Lambda(lambda x: rearrange(x * 2. - 1., 'c h w -> h w c'))])
        self.tform = transforms.Compose(image_transforms)

        # assert all(['full/' + str(x.name) in self.captions for x in self.paths])

    def __len__(self):
        return len(self.captions.keys())

    def __getitem__(self, index):
        chosen = list(self.captions.keys())[index]
        im = Image.open(self.root_dir / chosen)
        im = self.process_im(im)
        caption = self.captions[chosen]
        if caption is None:
            caption = "old book illustration"
        return {"jpg": im, "txt": caption}

    def process_im(self, im):
        im = im.convert("RGB")
        return self.tform(im)


def hf_dataset(
    name,
    image_transforms=None,
    image_column="image",
    text_column="text",
    split='train',
    image_key='image',
    caption_key='txt',
):
    """Make huggingface dataset with appropriate list of transforms applied."""
    ds = load_dataset(name, split=split)
    # Default to None rather than a mutable [], so repeated calls don't accumulate transforms
    image_transforms = [instantiate_from_config(tt) for tt in (image_transforms or [])]
    image_transforms.extend([transforms.ToTensor(),
                             transforms.Lambda(lambda x: rearrange(x * 2. - 1., 'c h w -> h w c'))])
    tform = transforms.Compose(image_transforms)

    assert image_column in ds.column_names, f"Didn't find column {image_column} in {ds.column_names}"
    assert text_column in ds.column_names, f"Didn't find column {text_column} in {ds.column_names}"

    def pre_process(examples):
        # Applied lazily, batch-wise, whenever rows are accessed
        processed = {}
        processed[image_key] = [tform(im) for im in examples[image_column]]
        processed[caption_key] = examples[text_column]
        return processed

    ds.set_transform(pre_process)
    return ds


class TextOnly(Dataset):
    def __init__(self, captions, output_size, image_key="image", caption_key="txt", n_gpus=1):
        """Returns only captions with dummy images"""
        self.output_size = output_size
        self.image_key = image_key
        self.caption_key = caption_key
        if isinstance(captions, Path):
            self.captions = self._load_caption_file(captions)
        else:
            self.captions = captions

        if n_gpus > 1:
            # hack to make sure that all the captions appear on each gpu
            self.captions = [x for x in self.captions for _ in range(n_gpus)]

    def __len__(self):
        return len(self.captions)

    def __getitem__(self, index):
        dummy_im = torch.zeros(3, self.output_size, self.output_size)
        dummy_im = rearrange(dummy_im * 2. - 1., 'c h w -> h w c')
        return {self.image_key: dummy_im, self.caption_key: self.captions[index]}

    def _load_caption_file(self, filename):
        with open(filename, 'rt') as f:
            captions = f.readlines()
        return [x.strip('\n') for x in captions]
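A quick usage sketch for the two helpers above (the dataset name is taken from the config; the printed shapes assume the default keys and are illustrative):

# Sketch only: exercising hf_dataset and TextOnly outside the training loop.
from ldm.data.simple import hf_dataset, TextOnly

ds = hf_dataset("lambdalabs/pokemon-blip-captions")  # defaults: image_key='image', caption_key='txt'
sample = ds[0]
print(sample["txt"])          # caption string
print(sample["image"].shape)  # (H, W, 3) tensor, pixel values rescaled to [-1, 1]

val = TextOnly(captions=["A cute bunny rabbit", "Yoda"], output_size=512, n_gpus=2)
print(len(val))               # 4: each caption is repeated once per GPU
print(val[0]["image"].shape)  # torch.Size([512, 512, 3]), a dummy all-(-1) image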