[WIP][tests] add precomputation tests #234

Open · wants to merge 23 commits into base: main
11 changes: 9 additions & 2 deletions finetrainers/models/cogvideox/lora.py
@@ -3,7 +3,7 @@
import torch
from diffusers import AutoencoderKLCogVideoX, CogVideoXDDIMScheduler, CogVideoXPipeline, CogVideoXTransformer3DModel
from PIL import Image
from transformers import T5EncoderModel, T5Tokenizer
from transformers import AutoTokenizer, T5EncoderModel, T5Tokenizer

from .utils import prepare_rotary_positional_embeddings

@@ -15,7 +15,14 @@ def load_condition_models(
cache_dir: Optional[str] = None,
**kwargs,
):
tokenizer = T5Tokenizer.from_pretrained(model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir)
try:
tokenizer = T5Tokenizer.from_pretrained(
model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
)
except Exception:  # dummy T5 tokenizer checkpoints cannot be loaded by T5Tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
)
Comment on lines +18 to +25 (Collaborator Author):
Not proud of the change, but T5Tokenizer cannot be used with a dummy T5 tokenizer checkpoint.

text_encoder = T5EncoderModel.from_pretrained(
model_id, subfolder="text_encoder", torch_dtype=text_encoder_dtype, revision=revision, cache_dir=cache_dir
)
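The same try/except fallback appears again in `finetrainers/models/ltx_video/lora.py` below. If the pattern sticks, it could be factored into a small shared helper; a minimal sketch (the helper name is hypothetical and not part of the PR, and the "fast-tokenizer-only files" explanation is an assumption based on the review comment):

```python
from transformers import AutoTokenizer, T5Tokenizer


def load_t5_tokenizer_with_fallback(model_id, revision=None, cache_dir=None):
    # Hypothetical shared helper: prefer the slow T5Tokenizer, but fall back to
    # AutoTokenizer for checkpoints that T5Tokenizer cannot load, such as the
    # dummy test repos mentioned in the review comment (which presumably ship
    # only fast-tokenizer files).
    try:
        return T5Tokenizer.from_pretrained(
            model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
        )
    except Exception:
        return AutoTokenizer.from_pretrained(
            model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
        )
```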
11 changes: 9 additions & 2 deletions finetrainers/models/ltx_video/lora.py
@@ -5,7 +5,7 @@
from accelerate.logging import get_logger
from diffusers import AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel
from PIL import Image
from transformers import T5EncoderModel, T5Tokenizer
from transformers import AutoTokenizer, T5EncoderModel, T5Tokenizer


logger = get_logger("finetrainers") # pylint: disable=invalid-name
@@ -18,7 +18,14 @@ def load_condition_models(
cache_dir: Optional[str] = None,
**kwargs,
) -> Dict[str, nn.Module]:
tokenizer = T5Tokenizer.from_pretrained(model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir)
try:
tokenizer = T5Tokenizer.from_pretrained(
model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
)
except Exception:  # dummy T5 tokenizer checkpoints cannot be loaded by T5Tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_id, subfolder="tokenizer", revision=revision, cache_dir=cache_dir
)
text_encoder = T5EncoderModel.from_pretrained(
model_id, subfolder="text_encoder", torch_dtype=text_encoder_dtype, revision=revision, cache_dir=cache_dir
)
16 changes: 11 additions & 5 deletions finetrainers/trainer.py
@@ -54,7 +54,13 @@
)
from .utils.file_utils import string_to_filename
from .utils.hub_utils import save_model_card
from .utils.memory_utils import free_memory, get_memory_statistics, make_contiguous
from .utils.memory_utils import (
free_memory,
get_memory_statistics,
make_contiguous,
reset_memory_stats,
synchronize_device,
)
from .utils.model_utils import resolve_vae_cls_from_ckpt_path
from .utils.optimizer_utils import get_optimizer
from .utils.torch_utils import align_device_and_dtype, expand_tensor_dims, unwrap_model
@@ -259,7 +265,7 @@ def collate_fn(batch):

memory_statistics = get_memory_statistics()
logger.info(f"Memory after precomputing conditions: {json.dumps(memory_statistics, indent=4)}")
torch.cuda.reset_peak_memory_stats(accelerator.device)
reset_memory_stats(accelerator.device)

# Precompute latents
with self.state.accelerator.main_process_first():
@@ -307,7 +313,7 @@ def collate_fn(batch):

memory_statistics = get_memory_statistics()
logger.info(f"Memory after precomputing latents: {json.dumps(memory_statistics, indent=4)}")
torch.cuda.reset_peak_memory_stats(accelerator.device)
reset_memory_stats(accelerator.device)

# Update dataloader to use precomputed conditions and latents
self.dataloader = torch.utils.data.DataLoader(
@@ -997,7 +1003,7 @@ def validate(self, step: int, final_validation: bool = False) -> None:
free_memory()
memory_statistics = get_memory_statistics()
logger.info(f"Memory after validation end: {json.dumps(memory_statistics, indent=4)}")
torch.cuda.reset_peak_memory_stats(accelerator.device)
reset_memory_stats(accelerator.device)

if not final_validation:
self.transformer.train()
@@ -1120,7 +1126,7 @@ def _delete_components(self) -> None:
self.vae = None
self.scheduler = None
free_memory()
torch.cuda.synchronize(self.state.accelerator.device)
synchronize_device(self.state.accelerator.device)

def _get_and_prepare_pipeline_for_validation(self, final_validation: bool = False) -> DiffusionPipeline:
accelerator = self.state.accelerator
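The trainer changes above repeat one pattern around each heavy phase (precomputing conditions, precomputing latents, validation): snapshot memory statistics, log them, then reset the peak counters through the new device-agnostic helper. A condensed sketch of that pattern; the `log_and_reset_memory` helper is hypothetical, only the imported functions come from the diff:

```python
import json

# Absolute form of the `.utils.memory_utils` import used in trainer.py.
from finetrainers.utils.memory_utils import get_memory_statistics, reset_memory_stats


def log_and_reset_memory(logger, device, phase: str) -> None:
    # Hypothetical helper condensing the pattern repeated after precomputing
    # conditions, precomputing latents, and validation in trainer.py.
    memory_statistics = get_memory_statistics()
    logger.info(f"Memory after {phase}: {json.dumps(memory_statistics, indent=4)}")
    reset_memory_stats(device)
```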
45 changes: 32 additions & 13 deletions finetrainers/utils/memory_utils.py
@@ -9,29 +9,33 @@


def get_memory_statistics(precision: int = 3) -> Dict[str, Any]:
memory_allocated = None
memory_reserved = None
max_memory_allocated = None
max_memory_reserved = None
memory_stats = {
"memory_allocated": None,
"memory_reserved": None,
"max_memory_allocated": None,
"max_memory_reserved": None,
}

if torch.cuda.is_available():
device = torch.cuda.current_device()
memory_allocated = torch.cuda.memory_allocated(device)
memory_reserved = torch.cuda.memory_reserved(device)
max_memory_allocated = torch.cuda.max_memory_allocated(device)
max_memory_reserved = torch.cuda.max_memory_reserved(device)
memory_stats.update(
{
"memory_allocated": torch.cuda.memory_allocated(device),
"memory_reserved": torch.cuda.memory_reserved(device),
"max_memory_allocated": torch.cuda.max_memory_allocated(device),
"max_memory_reserved": torch.cuda.max_memory_reserved(device),
}
)

elif torch.backends.mps.is_available():
memory_allocated = torch.mps.current_allocated_memory()
memory_stats["memory_allocated"] = torch.mps.current_allocated_memory()

else:
logger.warning("No CUDA, MPS, or ROCm device found. Memory statistics are not available.")

return {
"memory_allocated": round(bytes_to_gigabytes(memory_allocated), ndigits=precision),
"memory_reserved": round(bytes_to_gigabytes(memory_reserved), ndigits=precision),
"max_memory_allocated": round(bytes_to_gigabytes(max_memory_allocated), ndigits=precision),
"max_memory_reserved": round(bytes_to_gigabytes(max_memory_reserved), ndigits=precision),
key: (round(bytes_to_gigabytes(value), ndigits=precision) if value is not None else None)
for key, value in memory_stats.items()
}


@@ -49,6 +53,21 @@ def free_memory() -> None:
# TODO(aryan): handle non-cuda devices


def reset_memory_stats(device: torch.device):
# TODO: handle for non-cuda devices
if torch.cuda.is_available():
torch.cuda.reset_peak_memory_stats(device)
else:
logger.warning("No CUDA, device found. Nothing to reset memory of.")


def synchronize_device(device: torch.device):
if torch.cuda.is_available():
torch.cuda.synchronize(device)
else:
logger.warning("No CUDA, device found. Nothing to synchronize.")


def make_contiguous(x: Union[torch.Tensor, Dict[str, torch.Tensor]]) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
if isinstance(x, torch.Tensor):
return x.contiguous()
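Both new helpers carry a TODO for non-CUDA devices. For reference, a hedged sketch of how `synchronize_device` might cover MPS as well, assuming `torch.mps.synchronize()` is available in the installed PyTorch version; the PR itself only handles CUDA:

```python
import logging

import torch

logger = logging.getLogger(__name__)  # stand-in for the module's logger


def synchronize_device(device: torch.device) -> None:
    # Possible extension of the helper above; not part of this PR.
    if torch.cuda.is_available():
        torch.cuda.synchronize(device)
    elif torch.backends.mps.is_available():
        # torch.mps.synchronize() takes no device argument; MPS exposes a single device.
        torch.mps.synchronize()
    else:
        logger.warning("No CUDA or MPS device found. Nothing to synchronize.")
```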
Empty file added tests/trainers/__init__.py
Empty file.
Empty file.
44 changes: 44 additions & 0 deletions tests/trainers/cogvideox/test_cogvideox.py
@@ -0,0 +1,44 @@
import sys
import unittest
from pathlib import Path


current_file = Path(__file__).resolve()
root_dir = current_file.parents[3]
sys.path.append(str(root_dir))

from finetrainers import Args # noqa
from ..test_trainers_common import TrainerTestMixin, parse_resolution_bucket # noqa


class CogVideoXTester(unittest.TestCase, TrainerTestMixin):
MODEL_NAME = "cogvideox"
EXPECTED_PRECOMPUTATION_LATENT_KEYS = {"latents"}
EXPECTED_PRECOMPUTATION_CONDITION_KEYS = {"prompt_embeds"}

def get_training_args(self):
args = Args()
args.model_name = self.MODEL_NAME
args.training_type = "lora"
args.pretrained_model_name_or_path = "finetrainers/dummy-cogvideox"
args.data_root = "" # will be set from the tester method.
args.video_resolution_buckets = [parse_resolution_bucket("9x16x16")]
args.precompute_conditions = True
args.validation_prompts = []
args.validation_heights = []
args.validation_widths = []
return args

@property
def latent_output_shape(self):
return (8, 3, 2, 2)

@property
def condition_output_shape(self):
return (226, 32)

def populate_shapes(self):
for k in self.EXPECTED_PRECOMPUTATION_LATENT_KEYS:
self.EXPECTED_LATENT_SHAPES[k] = self.latent_output_shape
for k in self.EXPECTED_PRECOMPUTATION_CONDITION_KEYS:
self.EXPECTED_CONDITION_SHAPES[k] = self.condition_output_shape
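All three testers pass `parse_resolution_bucket("9x16x16")`, but `test_trainers_common.py` is not part of this diff; presumably the helper turns a `frames x height x width` string into an integer tuple. A minimal sketch under that assumption:

```python
from typing import Tuple


def parse_resolution_bucket(resolution_bucket: str) -> Tuple[int, ...]:
    # Assumed behaviour: "9x16x16" -> (9, 16, 16), i.e. (frames, height, width).
    return tuple(int(dim) for dim in resolution_bucket.split("x"))


assert parse_resolution_bucket("9x16x16") == (9, 16, 16)
```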
Empty file.
51 changes: 51 additions & 0 deletions tests/trainers/hunyaun_video/test_hunyaun_video.py
@@ -0,0 +1,51 @@
import sys
import unittest
from pathlib import Path


current_file = Path(__file__).resolve()
root_dir = current_file.parents[3]
sys.path.append(str(root_dir))

from finetrainers import Args # noqa
from ..test_trainers_common import TrainerTestMixin, parse_resolution_bucket # noqa


class HunyuanVideoTester(unittest.TestCase, TrainerTestMixin):
MODEL_NAME = "hunyuan_video"
EXPECTED_PRECOMPUTATION_LATENT_KEYS = {"latents"}
EXPECTED_PRECOMPUTATION_CONDITION_KEYS = {
"guidance",
"pooled_prompt_embeds",
"prompt_attention_mask",
"prompt_embeds",
}

def get_training_args(self):
args = Args()
args.model_name = self.MODEL_NAME
args.training_type = "lora"
args.pretrained_model_name_or_path = "finetrainers/dummy-hunyaunvideo"
args.data_root = "" # will be set from the tester method.
args.video_resolution_buckets = [parse_resolution_bucket("9x16x16")]
args.precompute_conditions = True
args.validation_prompts = []
args.validation_heights = []
args.validation_widths = []
return args

@property
def latent_output_shape(self):
# only tensor object shapes
return (8, 3, 2, 2)

@property
def condition_output_shape(self):
# only tensor object shapes
return (), (8,), (256,), (256, 16)

def populate_shapes(self):
for i, k in enumerate(sorted(self.EXPECTED_PRECOMPUTATION_LATENT_KEYS)):
self.EXPECTED_LATENT_SHAPES[k] = self.latent_output_shape
for i, k in enumerate(sorted(self.EXPECTED_PRECOMPUTATION_CONDITION_KEYS)):
self.EXPECTED_CONDITION_SHAPES[k] = self.condition_output_shape[i]
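Since the pairing relies on Python's sorted order of the key set, `populate_shapes` above resolves to the following expected shapes (derived from the two properties, shown only for readability):

```python
# HunyuanVideoTester after populate_shapes(), condition keys in sorted order:
EXPECTED_LATENT_SHAPES = {"latents": (8, 3, 2, 2)}
EXPECTED_CONDITION_SHAPES = {
    "guidance": (),                  # scalar
    "pooled_prompt_embeds": (8,),
    "prompt_attention_mask": (256,),
    "prompt_embeds": (256, 16),
}
```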
Empty file.
50 changes: 50 additions & 0 deletions tests/trainers/ltx_video/test_ltx_video.py
@@ -0,0 +1,50 @@
import sys
import unittest
from pathlib import Path


current_file = Path(__file__).resolve()
root_dir = current_file.parents[3]
sys.path.append(str(root_dir))

from finetrainers import Args # noqa
from ..test_trainers_common import TrainerTestMixin, parse_resolution_bucket # noqa


class LTXVideoTester(unittest.TestCase, TrainerTestMixin):
MODEL_NAME = "ltx_video"
EXPECTED_PRECOMPUTATION_LATENT_KEYS = {"height", "latents", "latents_mean", "latents_std", "num_frames", "width"}
EXPECTED_PRECOMPUTATION_CONDITION_KEYS = {"prompt_attention_mask", "prompt_embeds"}

def get_training_args(self):
args = Args()
args.model_name = self.MODEL_NAME
args.training_type = "lora"
args.pretrained_model_name_or_path = "finetrainers/dummy-ltxvideo"
args.data_root = "" # will be set from the tester method.
args.video_resolution_buckets = [parse_resolution_bucket("9x16x16")]
args.precompute_conditions = True
args.validation_prompts = []
args.validation_heights = []
args.validation_widths = []
return args

@property
def latent_output_shape(self):
# only tensor object shapes
return (16, 3, 4, 4), (), ()

@property
def condition_output_shape(self):
# only tensor object shapes
return (128,), (128, 32)

def populate_shapes(self):
i = 0
for k in sorted(self.EXPECTED_PRECOMPUTATION_LATENT_KEYS):
if k in ["height", "num_frames", "width"]:
continue
self.EXPECTED_LATENT_SHAPES[k] = self.latent_output_shape[i]
i += 1
for i, k in enumerate(sorted(self.EXPECTED_PRECOMPUTATION_CONDITION_KEYS)):
self.EXPECTED_CONDITION_SHAPES[k] = self.condition_output_shape[i]