from diffusers.configuration_utils import ConfigMixin, register_to_config |
|
from diffusers.models.activations import FP32SiLU, get_activation |
|
from diffusers.models.embeddings import Timesteps, PixArtAlphaTextProjection |
|
from diffusers.models.modeling_utils import ModelMixin |
|
from diffusers.models.transformers.transformer_flux import (
    AdaLayerNormContinuous,
    CombinedTimestepGuidanceTextProjEmbeddings,
    CombinedTimestepTextProjEmbeddings,
    EmbedND,
    FluxSingleTransformerBlock,
    FluxTransformerBlock,
)
|
from diffusers.models.modeling_outputs import Transformer2DModelOutput |
|
from diffusers.utils import logging |
|
import torch |
|
from torch import nn |
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
class FluxTransformer2DModel(ModelMixin, ConfigMixin): |
|
""" |
|
The Transformer model introduced in Flux. |
|
|
|
Reference: https://blackforestlabs.ai/announcing-black-forest-labs/ |
|
|
|
Parameters: |
|
        patch_size (`int`, defaults to 1): Patch size to turn the input data into small patches.

        in_channels (`int`, *optional*, defaults to 64): The number of channels in the input.

        num_layers (`int`, *optional*, defaults to 19): The number of layers of MMDiT blocks to use.

        num_single_layers (`int`, *optional*, defaults to 38): The number of layers of single DiT blocks to use.

        attention_head_dim (`int`, *optional*, defaults to 128): The number of channels in each head.

        num_attention_heads (`int`, *optional*, defaults to 24): The number of heads to use for multi-head attention.

        joint_attention_dim (`int`, *optional*, defaults to 4096): The number of `encoder_hidden_states` dimensions to use.

        pooled_projection_dim (`int`, defaults to 768): Number of dimensions to use when projecting the `pooled_projections`.

        guidance_embeds (`bool`, defaults to False): Whether to use guidance embeddings.

        axes_dims_rope (`Tuple[int]`, defaults to `(16, 56, 56)`): The per-axis dimensions of the rotary positional embedding.

        device (*optional*): Device on which the submodules are created.
|
""" |
|
|
|
_supports_gradient_checkpointing = True |
|
_no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"] |
|
|
|
@register_to_config |
|
def __init__( |
|
self, |
|
patch_size: int = 1, |
|
in_channels: int = 64, |
|
num_layers: int = 19, |
|
num_single_layers: int = 38, |
|
attention_head_dim: int = 128, |
|
num_attention_heads: int = 24, |
|
joint_attention_dim: int = 4096, |
|
pooled_projection_dim: int = 768, |
|
guidance_embeds: bool = False, |
|
axes_dims_rope=(16, 56, 56), |
|
device=None |
|
): |
|
super().__init__() |
|
self.out_channels = in_channels |
|
self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim |
|
|
|
|
|
self.pos_embed = EmbedND(dim=self.inner_dim, theta=10000, axes_dim=axes_dims_rope).to(device) |
|
|
|
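        # Guidance-distilled checkpoints embed the guidance scale together with the
        # timestep and pooled text projection; otherwise only timestep + text are used.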
text_time_guidance_cls = ( |
|
CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings |
|
) |
|
self.time_text_embed = text_time_guidance_cls( |
|
embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim |
|
).to(device) |
|
|
|
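        # Project the text (context) tokens and the packed latent (image) tokens to the
        # transformer width.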
self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim).to(device) |
|
self.x_embedder = nn.Linear(self.config.in_channels, self.inner_dim).to(device) |
|
|
|
self.transformer_blocks = nn.ModuleList( |
|
[ |
|
FluxTransformerBlock( |
|
dim=self.inner_dim, |
|
num_attention_heads=self.config.num_attention_heads, |
|
attention_head_dim=self.config.attention_head_dim, |
|
).to(device) |
|
for i in range(self.config.num_layers) |
|
] |
|
) |
|
|
|
self.single_transformer_blocks = nn.ModuleList( |
|
[ |
|
FluxSingleTransformerBlock( |
|
dim=self.inner_dim, |
|
num_attention_heads=self.config.num_attention_heads, |
|
attention_head_dim=self.config.attention_head_dim, |
|
).to(device) |
|
for i in range(self.config.num_single_layers) |
|
] |
|
) |
|
|
|
self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6).to(device) |
|
self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True).to(device) |
|
|
|
        # PuLID identity-injection state. `pul_id` (the identity embedding), `pulid_ca`
        # (the per-injection cross-attention adapters) and the injection intervals are
        # expected to be assigned by the PuLID pipeline before identity injection is
        # used in `forward`.
        self.pul_id = None

        self.pul_id_weight = 1.0

        self.pulid_ca = None

        self.pulid_double_interval = None

        self.pulid_single_interval = None
|
|
|
self.gradient_checkpointing = False |
|
|
|
@property |
|
|
|
def attn_processors(self): |
|
r""" |
|
Returns: |
|
            `dict` of attention processors: A dictionary containing all attention processors used in the model,

            indexed by their weight names.
|
""" |
|
|
|
processors = {} |
|
|
|
def fn_recursive_add_processors(name: str, module: nn.Module, processors): |
|
if hasattr(module, "get_processor"): |
|
processors[f"{name}.processor"] = module.get_processor() |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) |
|
|
|
return processors |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_add_processors(name, module, processors) |
|
|
|
return processors |
|
|
|
|
|
def set_attn_processor(self, processor): |
|
r""" |
|
Sets the attention processor to use to compute attention. |
|
|
|
Parameters: |
|
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): |
|
The instantiated processor class or a dictionary of processor classes that will be set as the processor |
|
for **all** `Attention` layers. |
|
|
|
If `processor` is a dict, the key needs to define the path to the corresponding cross attention |
|
processor. This is strongly recommended when setting trainable attention processors. |
|
|
|
""" |
|
count = len(self.attn_processors.keys()) |
|
|
|
if isinstance(processor, dict) and len(processor) != count: |
|
raise ValueError( |
|
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" |
|
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." |
|
) |
|
|
|
def fn_recursive_attn_processor(name: str, module: nn.Module, processor): |
|
if hasattr(module, "set_processor"): |
|
if not isinstance(processor, dict): |
|
module.set_processor(processor) |
|
else: |
|
module.set_processor(processor.pop(f"{name}.processor")) |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_attn_processor(name, module, processor) |
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
if hasattr(module, "gradient_checkpointing"): |
|
module.gradient_checkpointing = value |
|
|
|
def forward( |
|
self, |
|
hidden_states: torch.Tensor, |
|
encoder_hidden_states: torch.Tensor = None, |
|
pooled_projections: torch.Tensor = None, |
|
timestep: torch.LongTensor = None, |
|
img_ids: torch.Tensor = None, |
|
txt_ids: torch.Tensor = None, |
|
guidance: torch.Tensor = None, |
|
joint_attention_kwargs = None, |
|
controlnet_block_samples=None, |
|
controlnet_single_block_samples=None, |
|
return_dict: bool = True |
|
): |
|
""" |
|
The [`FluxTransformer2DModel`] forward method. |
|
|
|
Args: |
|
hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`): |
|
Input `hidden_states`. |
|
encoder_hidden_states (`torch.FloatTensor` of shape `(batch size, sequence_len, embed_dims)`): |
|
Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. |
|
pooled_projections (`torch.FloatTensor` of shape `(batch_size, projection_dim)`): Embeddings projected |
|
from the embeddings of input conditions. |
|
            timestep (`torch.LongTensor`):

                Used to indicate denoising step.

            img_ids (`torch.Tensor`):

                Positional ids of the image (latent) tokens, used to build the rotary positional embeddings.

            txt_ids (`torch.Tensor`):

                Positional ids of the text tokens, used to build the rotary positional embeddings.

            guidance (`torch.Tensor`, *optional*):

                Guidance scale values, only used when the model is configured with `guidance_embeds=True`.

            controlnet_block_samples (`list` of `torch.Tensor`, *optional*):

                ControlNet residuals for the double-stream blocks; accepted for interface compatibility but not

                applied in this implementation.

            controlnet_single_block_samples (`list` of `torch.Tensor`, *optional*):

                ControlNet residuals for the single-stream blocks; accepted for interface compatibility but not

                applied in this implementation.
|
joint_attention_kwargs (`dict`, *optional*): |
|
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under |
|
`self.processor` in |
|
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). |
|
return_dict (`bool`, *optional*, defaults to `True`): |
|
Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain |
|
tuple. |
|
|
|
Returns: |
|
If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a |
|
`tuple` where the first element is the sample tensor. |
|
""" |
|
hidden_states = self.x_embedder(hidden_states) |
|
|
|
timestep = timestep.to(hidden_states.dtype) * 1000 |
|
if guidance is not None: |
|
guidance = guidance.to(hidden_states.dtype) * 1000 |
|
else: |
|
guidance = None |
|
temb = ( |
|
self.time_text_embed(timestep, pooled_projections) |
|
if guidance is None |
|
else self.time_text_embed(timestep, guidance, pooled_projections) |
|
) |
|
encoder_hidden_states = self.context_embedder(encoder_hidden_states) |
|
|
|
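        # Rotary positional embeddings are built from the concatenated text and image ids.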
ids = torch.cat((txt_ids, img_ids), dim=1) |
|
image_rotary_emb = self.pos_embed(ids) |
|
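        # `ca_index` walks through the PuLID cross-attention adapters in `self.pulid_ca`.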
ca_index = 0 |
|
|
|
for index_block, block in enumerate(self.transformer_blocks): |
|
encoder_hidden_states, hidden_states = block( |
|
hidden_states=hidden_states, |
|
encoder_hidden_states=encoder_hidden_states, |
|
temb=temb, |
|
image_rotary_emb=image_rotary_emb, |
|
) |
|
|
|
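            # Inject PuLID identity features into the image stream every
            # `pulid_double_interval` double-stream blocks.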
            if self.pul_id is not None and index_block % self.pulid_double_interval == 0:
|
weighted = self.pul_id_weight * self.pulid_ca[ca_index](self.pul_id, hidden_states.to(self.pul_id.dtype)) |
|
hidden_states = hidden_states + weighted.to(hidden_states.dtype) |
|
ca_index += 1 |
|
|
|
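        # The single-stream blocks operate on the concatenated text + image sequence.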
hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) |
|
|
|
for index_block, block in enumerate(self.single_transformer_blocks): |
|
hidden_states = block( |
|
hidden_states=hidden_states, |
|
temb=temb, |
|
image_rotary_emb=image_rotary_emb, |
|
) |
|
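            # Inject PuLID identity features into the image tokens every
            # `pulid_single_interval` single-stream blocks; the text tokens are left untouched.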
            if self.pul_id is not None and index_block % self.pulid_single_interval == 0:
|
                encoder_hidden_states, real_ = (
                    hidden_states[:, : encoder_hidden_states.shape[1], ...],
                    hidden_states[:, encoder_hidden_states.shape[1] :, ...],
                )
|
weighted = self.pul_id_weight * self.pulid_ca[ca_index](self.pul_id, real_.to(self.pul_id.dtype)) |
|
real_ = real_ + weighted.to(real_.dtype) |
|
hidden_states = torch.cat([encoder_hidden_states, real_], dim=1) |
|
ca_index += 1 |
|
|
|
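        # Drop the text tokens; only the image tokens are decoded into the output.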
hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] |
|
|
|
hidden_states = self.norm_out(hidden_states, temb) |
|
output = self.proj_out(hidden_states) |
|
|
|
if not return_dict: |
|
return (output,) |
|
|
|
return Transformer2DModelOutput(sample=output) |
|
|
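if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module): it exercises a tiny
    # configuration of FluxTransformer2DModel with random inputs and no PuLID identity,
    # assuming a diffusers release in which `EmbedND` and the Flux blocks are exposed from
    # `transformer_flux` and `txt_ids`/`img_ids` are 3D tensors of shape (batch, seq, 3).
    # `axes_dims_rope` must sum to `attention_head_dim`.
    model = FluxTransformer2DModel(
        patch_size=1,
        in_channels=4,
        num_layers=1,
        num_single_layers=1,
        attention_head_dim=8,
        num_attention_heads=2,
        joint_attention_dim=32,
        pooled_projection_dim=16,
        guidance_embeds=False,
        axes_dims_rope=(2, 2, 4),
    )
    batch, img_seq, txt_seq = 1, 8, 6
    out = model(
        hidden_states=torch.randn(batch, img_seq, 4),
        encoder_hidden_states=torch.randn(batch, txt_seq, 32),
        pooled_projections=torch.randn(batch, 16),
        timestep=torch.full((batch,), 0.5),
        img_ids=torch.zeros(batch, img_seq, 3),
        txt_ids=torch.zeros(batch, txt_seq, 3),
    )
    print(out.sample.shape)  # expected: torch.Size([1, 8, 4])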