# trainrealfill: RealFill fine-tuning script built on diffusers
import argparse
import copy
import itertools
import logging
import math
import os
import shutil
from pathlib import Path
from torchvision import transforms
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from PIL import Image
from PIL.ImageOps import exif_transpose
from torch.utils.data import Dataset
import torchvision.transforms.v2 as transforms_v2
from tqdm.auto import tqdm
from transformers import AutoTokenizer, CLIPTextModel
from peft import LoraConfig, PeftModel, get_peft_model

import diffusers
from diffusers import (
AutoencoderKL,
DDPMScheduler,
StableDiffusionInpaintPipeline,
UNet2DConditionModel,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.20.1")
logger = get_logger(__name__)
# NOTE: fragment of the random-mask generator; `times`, `min_size`, and `max_size` are
# assumed to come from the surrounding masking helper.
for _ in range(times):
    width = np.random.randint(int(min_size), int(max_size))
    height = np.random.randint(int(min_size), int(max_size))
def save_model_card(
    repo_id: str,
    images=None,
    base_model: str = None,
    repo_folder=None,
):
    img_str = ""
    for i, image in enumerate(images):
        image.save(os.path.join(repo_folder, f"image_{i}.png"))
        img_str += f"![img_{i}](./image_{i}.png)\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
prompt: "a photo of sks"
tags:
- stable-diffusion-inpainting
- stable-diffusion-inpainting-diffusers
- text-to-image
- diffusers
- realfill
inference: true
---
"""
model_card = f"""
# RealFill - {repo_id}
This is a RealFill model derived from {base_model}. The weights were trained using [RealFill](https://realfill.github.io/).
You can find some example images below. \n
{img_str}
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
@torch.no_grad()
def log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
epoch,
):
    logger.info(f"Running validation... \nGenerating {args.num_validation_images} images")

    # create pipeline, reusing the fine-tuned unet and text encoder
    # (NOTE: passing the unwrapped training modules here is an assumption; the vae is reloaded in float32)
    pipeline = StableDiffusionInpaintPipeline.from_pretrained(
        args.pretrained_model_name_or_path,
        tokenizer=tokenizer,
        text_encoder=accelerator.unwrap_model(text_encoder),
        unet=accelerator.unwrap_model(unet),
        revision=args.revision,
        torch_dtype=weight_dtype,
    )
pipeline = pipeline.to(accelerator.device)
pipeline.set_progress_bar_config(disable=True)
# run inference
    generator = None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)

    # Load the target image and its mask.
    # NOTE: assumes the RealFill data layout with `target/target.png` and `target/mask.png`
    # inside `args.train_data_dir`.
    target_dir = Path(args.train_data_dir) / "target"
    image, mask_image = Image.open(target_dir / "target.png"), Image.open(target_dir / "mask.png")

    if image.mode != "RGB":
        image = image.convert("RGB")

    images = []
    for _ in range(args.num_validation_images):
        # Keep `image` untouched so every call inpaints the original target.
        result = pipeline(
            prompt="a photo of sks", image=image, mask_image=mask_image,
            num_inference_steps=200, guidance_scale=1, generator=generator
        ).images[0]
        images.append(result)
del pipeline
torch.cuda.empty_cache()
return images
def parse_args(input_args=None):
    parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from
huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
required=True,
help="A folder containing the training data of images.",
)
parser.add_argument(
"--num_validation_images",
type=int,
default=4,
help="Number of images that should be generated during validation with
`validation_conditioning`.",
)
parser.add_argument(
"--validation_steps",
type=int,
default=100,
        help=(
            "Run RealFill validation every X steps. RealFill validation consists of generating"
            " `args.num_validation_images` images with the conditioning `args.validation_conditioning`."
        ),
)
parser.add_argument(
"--output_dir",
type=str,
default="realfill-model",
help="The output directory where the model predictions and checkpoints will
be written.",
)
parser.add_argument("--seed", type=int, default=None, help="A seed for
reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
        help=(
            "The resolution for input images; all the images in the train/validation dataset will be resized to this"
            " resolution."
        ),
)
parser.add_argument(
"--train_batch_size", type=int, default=4, help="Batch size (per device)
for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=1)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides
num_train_epochs.",
)
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
        help=(
            "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
            " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
            " training using `--resume_from_checkpoint`."
        ),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
        help=(
            "Whether training should be resumed from a previous checkpoint. Use a path saved by"
            ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
        ),
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a
backward/update pass.",
)
parser.add_argument(
"--gradient_checkpointing",
action="store_true",
help="Whether or not to use gradient checkpointing to save memory at the
expense of slower backward pass.",
)
parser.add_argument(
"--unet_learning_rate",
type=float,
default=2e-4,
help="Learning rate to use for unet.",
)
parser.add_argument(
"--text_encoder_learning_rate",
type=float,
default=4e-5,
help="Learning rate to use for text encoder.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation
steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
        help=(
            'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
            ' "constant", "constant_with_warmup"]'
        ),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the
warmup in the lr scheduler."
)
parser.add_argument(
"--lr_num_cycles",
type=int,
default=1,
help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
)
parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor
of the polynomial scheduler.")
parser.add_argument(
"--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit
Adam from bitsandbytes."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1
parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2
parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2,
help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon
value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max
gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not
to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to
use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local
`output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
        help=(
            "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
            " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
        ),
)
parser.add_argument(
"--allow_tf32",
action="store_true",
        help=(
            "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
            " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
        ),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
            ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
        ),
)
parser.add_argument(
"--wandb_key",
type=str,
default=None,
help=("If report to option is set to wandb, api-key for wandb used for
login to wandb "),
)
parser.add_argument(
"--wandb_project_name",
type=str,
default=None,
help=("If report to option is set to wandb, project name in wandb for log
tracking "),
)
parser.add_argument(
"--mixed_precision",
type=str,
default=None,
choices=["no", "fp16", "bf16"],
        help=(
            "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
            " 1.10 and an Nvidia Ampere GPU. Default to the value of the accelerate config of the current system or"
            " the flag passed with the `accelerate.launch` command. Use this argument to override the accelerate"
            " config."
        ),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed
training: local_rank")
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true",
help="Whether or not to use xformers."
)
parser.add_argument(
"--set_grads_to_none",
action="store_true",
        help=(
            "Save more memory by setting grads to None instead of zero. Be aware that this changes certain"
            " behaviors, so disable this argument if it causes any problems. More info:"
            " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
        ),
)
parser.add_argument(
"--lora_rank",
type=int,
default=16,
help=("The dimension of the LoRA update matrices."),
)
parser.add_argument(
"--lora_alpha",
type=int,
default=27,
help=("The alpha constant of the LoRA update matrices."),
)
parser.add_argument(
"--lora_dropout",
type=float,
default=0.0,
help="The dropout rate of the LoRA update matrices.",
)
parser.add_argument(
"--lora_bias",
type=str,
default="none",
help="The bias type of the Lora update matrices. Must be 'none', 'all' or
'lora_only'.",
)
    if input_args is not None:
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()

    return args
class RealFillDataset(Dataset):
"""
A dataset to prepare the training and conditioning images and
the masks with the dummy prompt for fine-tuning the model.
It pre-processes the images, masks and tokenizes the prompts.
"""
def __init__(
self,
train_data_root,
tokenizer,
size=512,
):
        self.size = size
        self.tokenizer = tokenizer

        # NOTE: assumes the RealFill data layout: reference images under `ref/` and the
        # target image under `target/target.png` inside `train_data_root`.
        self.ref_data_root = Path(train_data_root) / "ref"
        self.target_image = Path(train_data_root) / "target" / "target.png"

        self.train_images_path = list(self.ref_data_root.iterdir()) + [self.target_image]
        self.num_train_images = len(self.train_images_path)
self.train_prompt = "a photo of sks"
self.transform = transforms_v2.Compose(
[
transforms_v2.RandomResize(size, int(1.125 * size)),
transforms_v2.RandomCrop(size),
transforms_v2.ToTensor(),
transforms_v2.ConvertImageDtype(),
transforms_v2.Normalize([0.5], [0.5]),
]
)
    def __len__(self):
        return self.num_train_images

    def __getitem__(self, index):
        example = {}
        image = Image.open(self.train_images_path[index])
        image = exif_transpose(image)
        # NOTE: the remaining preprocessing (masks, weightings, conditioning image, and
        # tokenizing the prompt into example["prompt_ids"]) is assumed to populate `example`.
        return example
def collate_fn(examples):
    input_ids = [example["prompt_ids"] for example in examples]
    images = [example["images"] for example in examples]
    masks = [example["masks"] for example in examples]
    weightings = [example["weightings"] for example in examples]
    conditioning_images = [example["conditioning_images"] for example in examples]

    # Prompt ids are token tensors; concatenate them along the batch dimension.
    input_ids = torch.cat(input_ids, dim=0)

    images = torch.stack(images)
    images = images.to(memory_format=torch.contiguous_format).float()

    masks = torch.stack(masks)
    masks = masks.to(memory_format=torch.contiguous_format).float()

    weightings = torch.stack(weightings)
    weightings = weightings.to(memory_format=torch.contiguous_format).float()

    conditioning_images = torch.stack(conditioning_images)
    conditioning_images = conditioning_images.to(memory_format=torch.contiguous_format).float()
batch = {
"input_ids": input_ids,
"images": images,
"masks": masks,
"weightings": weightings,
"conditioning_images": conditioning_images,
}
return batch
def main(args):
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.report_to,
project_dir=logging_dir,
)
if args.report_to == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for
logging during training.")
import wandb
wandb.login(key=args.wandb_key)
wandb.init(project=args.wandb_project_name)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
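    # If a seed was passed, fix it now so data sampling and mask generation are reproducible.
    # NOTE: a minimal sketch; `set_seed` is already imported from accelerate.utils above.
    if args.seed is not None:
        set_seed(args.seed)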
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name,
exist_ok=True, token=args.hub_token
).repo_id
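    # NOTE: a sketch of the model-loading step, assuming the standard Stable Diffusion
    # inpainting checkpoint layout (tokenizer / text_encoder / vae / unet subfolders).
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
    else:
        tokenizer = AutoTokenizer.from_pretrained(
            args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False
        )

    noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
    text_encoder = CLIPTextModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
    )
    vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
    unet = UNet2DConditionModel.from_pretrained(
        args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
    )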
unet_config = LoraConfig(
r=args.lora_rank,
lora_alpha=args.lora_alpha,
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
lora_dropout=args.lora_dropout,
bias=args.lora_bias,
)
unet = get_peft_model(unet, unet_config)
text_encoder_config = LoraConfig(
r=args.lora_rank,
lora_alpha=args.lora_alpha,
target_modules=["k_proj", "q_proj", "v_proj", "out_proj"],
lora_dropout=args.lora_dropout,
bias=args.lora_bias,
)
text_encoder = get_peft_model(text_encoder, text_encoder_config)
vae.requires_grad_(False)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
            xformers_version = version.parse(xformers.__version__)
            if xformers_version == version.parse("0.0.16"):
                logger.warn(
                    "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during"
                    " training, please update xFormers to at least 0.0.17. See"
                    " https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
                )
            unet.enable_xformers_memory_efficient_attention()
        else:
            raise ValueError("xformers is not available. Make sure it is installed correctly")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
            # Body of the (registered below) load-state hook: reload the base model plus its
            # LoRA adapter from `input_dir` and copy the weights into the in-memory model.
            load_model = model_cls.from_pretrained(args.pretrained_model_name_or_path, subfolder=sub_dir)
            load_model = PeftModel.from_pretrained(load_model, input_dir, subfolder=sub_dir)

            model.load_state_dict(load_model.state_dict())
            del load_model
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
if args.scale_lr:
args.unet_learning_rate = (
args.unet_learning_rate * args.gradient_accumulation_steps *
args.train_batch_size * accelerator.num_processes
)
args.text_encoder_learning_rate = (
args.text_encoder_learning_rate * args.gradient_accumulation_steps *
args.train_batch_size * accelerator.num_processes
)
    # Use 8-bit Adam for lower memory usage, or to fine-tune the model on 16GB GPUs
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
            raise ImportError(
                "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
            )
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
# Optimizer creation
optimizer = optimizer_class(
[
{"params": unet.parameters(), "lr": args.unet_learning_rate},
{"params": text_encoder.parameters(), "lr":
args.text_encoder_learning_rate}
],
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
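    # NOTE: a sketch of the dataset construction, using the RealFillDataset defined above.
    train_dataset = RealFillDataset(
        train_data_root=args.train_data_dir,
        tokenizer=tokenizer,
        size=args.resolution,
    )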
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.train_batch_size,
shuffle=True,
collate_fn=collate_fn,
num_workers=1,
)
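    # NOTE: a sketch of the step math; if --max_train_steps is not given it is derived from the
    # number of epochs, and the override is remembered for the recalculation after prepare().
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True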
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
num_cycles=args.lr_num_cycles,
power=args.lr_power,
)
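    # NOTE: a sketch of the Accelerate preparation step; the trainable models, optimizer,
    # dataloader, and LR scheduler are wrapped so device placement and DDP are handled for us.
    unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        unet, text_encoder, optimizer, train_dataloader, lr_scheduler
    )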
    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder
    # and non-lora unet) to half-precision, as these weights are only used for inference;
    # keeping them in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
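    # NOTE: a sketch of the device placement, assuming only the frozen VAE needs an explicit cast;
    # the LoRA-wrapped unet and text encoder were already handled by accelerator.prepare above.
    vae.to(accelerator.device, dtype=weight_dtype)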
    # We need to recalculate our total training steps as the size of the training dataloader
    # may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers are initialized automatically on the main process.
if accelerator.is_main_process:
tracker_config = vars(copy.deepcopy(args))
accelerator.init_trackers("realfill", config=tracker_config)
# Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
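    logger.info("***** Running training *****")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    # NOTE: a sketch of the checkpoint lookup, assuming checkpoints are saved as
    # "checkpoint-<step>" directories inside args.output_dir (see the checkpointing code below).
    global_step = 0
    first_epoch = 0

    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1] if len(dirs) > 0 else None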
if path is None:
            accelerator.print(
                f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
            )
args.resume_from_checkpoint = None
initial_global_step = 0
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
initial_global_step = global_step
first_epoch = global_step // num_update_steps_per_epoch
else:
initial_global_step = 0
progress_bar = tqdm(
range(0, args.max_train_steps),
initial=initial_global_step,
desc="Steps",
# Only show the progress bar once on each machine.
disable=not accelerator.is_local_main_process,
)
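    # NOTE: a sketch of the training-loop skeleton, assuming the usual Accelerate pattern:
    # iterate over epochs and batches, accumulate gradients, and encode the target and
    # conditioning images into VAE latents.
    for epoch in range(first_epoch, args.num_train_epochs):
        unet.train()
        text_encoder.train()

        for step, batch in enumerate(train_dataloader):
            with accelerator.accumulate(unet, text_encoder):
                # Convert images to latent space
                latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
                latents = latents * vae.config.scaling_factor

                conditionings = vae.encode(batch["conditioning_images"].to(dtype=weight_dtype)).latent_dist.sample()
                conditionings = conditionings * vae.config.scaling_factor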
                # Downsample mask and weighting so that they match the latents
                masks, size = batch["masks"].to(dtype=weight_dtype), latents.shape[2:]
                masks = F.interpolate(masks, size=size)

                weightings = batch["weightings"].to(dtype=weight_dtype)
                weightings = F.interpolate(weightings, size=size)
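                # NOTE: a sketch of the diffusion objective, assuming the standard epsilon-prediction
                # loss weighted by the RealFill weighting map.
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]
                timesteps = torch.randint(
                    0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
                ).long()

                # Add noise according to the schedule, then build the inpainting UNet input from the
                # noisy latents, the downsampled mask, and the conditioning latents.
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
                inputs = torch.cat([noisy_latents, masks, conditionings], dim=1)

                # Text conditioning and noise prediction
                encoder_hidden_states = text_encoder(batch["input_ids"])[0]
                model_pred = unet(inputs, timesteps, encoder_hidden_states).sample

                loss = (weightings * F.mse_loss(model_pred.float(), noise.float(), reduction="none")).mean()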
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = itertools.chain(
unet.parameters(), text_encoder.parameters()
)
accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad(set_to_none=args.set_grads_to_none)
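            # NOTE: a sketch of the step accounting, assuming the standard Accelerate pattern:
            # count a global step only when an optimizer step actually happened.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1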
                if accelerator.is_main_process:
                    if global_step % args.checkpointing_steps == 0:
                        # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
                        if args.checkpoints_total_limit is not None:
                            checkpoints = os.listdir(args.output_dir)
                            checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
                            checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))

                            # Keep at most `checkpoints_total_limit - 1` checkpoints before saving the new one,
                            # removing the oldest first (NOTE: the pruning and save below are an assumed sketch).
                            if len(checkpoints) >= args.checkpoints_total_limit:
                                num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
                                removing_checkpoints = checkpoints[0:num_to_remove]

                                logger.info(
                                    f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
                                )
                                logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")

                                for removing_checkpoint in removing_checkpoints:
                                    shutil.rmtree(os.path.join(args.output_dir, removing_checkpoint))

                        save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
                        accelerator.save_state(save_path)
                        logger.info(f"Saved state to {save_path}")
if global_step % args.validation_steps == 0:
unet.eval()
text_encoder.eval()
log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
global_step,
)
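    # After the training loop: merge the LoRA adapters back into the base inpainting pipeline
    # before saving. NOTE: a sketch, assuming PEFT's merge_and_unload() on the wrapped modules.
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        pipeline = StableDiffusionInpaintPipeline.from_pretrained(
            args.pretrained_model_name_or_path,
            unet=accelerator.unwrap_model(unet).merge_and_unload(),
            text_encoder=accelerator.unwrap_model(text_encoder).merge_and_unload(),
            revision=args.revision,
        )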
pipeline.save_pretrained(args.output_dir)
# Final inference
images = log_validation(
text_encoder,
tokenizer,
unet,
args,
accelerator,
weight_dtype,
global_step,
)
if args.push_to_hub:
save_model_card(
repo_id,
images=images,
base_model=args.pretrained_model_name_or_path,
repo_folder=args.output_dir,
)
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)