Skip to content

Commit a2c99e6

Browse files
committed
fix IP2V -node
1 parent e4b5266 commit a2c99e6

File tree

3 files changed

+229
-18
lines changed

3 files changed

+229
-18
lines changed

hyvideo/diffusion/pipelines/pipeline_hunyuan_video.py

Lines changed: 19 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -251,6 +251,7 @@ def prepare_latents(
251251
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
252252
)
253253
noise = randn_tensor(shape, generator=generator, device=device, dtype=self.base_dtype)
254+
254255
if freenoise:
255256
logger.info("Applying FreeNoise")
256257
# code and comments from AnimateDiff-Evolved by Kosinkadink (https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved)
@@ -285,28 +286,26 @@ def prepare_latents(
285286
# apply shuffled indexes
286287
#print("place_idx:", place_idx, "delta:", delta, "list_idx:", list_idx)
287288
noise[:, :, place_idx:place_idx + delta, :, :] = noise[:, :, list_idx, :, :]
289+
288290
i2v_mask = None
291+
if official_i2v:
292+
# Create mask
293+
i2v_mask = torch.zeros(shape[0], 1, shape[2], shape[3], shape[4], device=device)
294+
i2v_mask[:, :, 0, ...] = 1.0
295+
289296
if image_cond_latents is not None:
290297
if image_cond_latents.shape[2] == 1:
291298
padding = torch.zeros(shape, device=device)
292299
padding[:, :, 0:1, :, :] = image_cond_latents
293300
image_cond_latents = padding
294-
if official_i2v:
295-
# Create mask
296-
i2v_mask = torch.zeros(shape[0], 1, shape[2], shape[3], shape[4], device=device)
297-
i2v_mask[:, :, 0, ...] = 1.0
298-
t = torch.tensor([0.999]).to(device=device)
299-
latents = noise * t + image_cond_latents * (1 - t)
300-
latents = latents.to(dtype=self.base_dtype)
301-
elif latents is None:
302-
print("No latents provided, generating noise and using it as latents")
303-
latents = noise
304-
elif denoise_strength < 1.0:
305-
latents = latents.to(device)
301+
302+
if denoise_strength < 1.0:
303+
if official_i2v:
304+
latents = torch.cat((latents[:,:,0].unsqueeze(2), latents), dim=2)
306305
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, denoise_strength, device)
307306
latent_timestep = timesteps[:1]
308-
frames_needed = noise.shape[1]
309-
current_frames = latents.shape[1]
307+
frames_needed = noise.shape[2]
308+
current_frames = latents.shape[2]
310309

311310
if frames_needed > current_frames:
312311
repeat_factor = frames_needed - current_frames
@@ -316,8 +315,13 @@ def prepare_latents(
316315
elif frames_needed < current_frames:
317316
latents = latents[:, :frames_needed, :, :, :]
318317
latents = latents * (1 - latent_timestep / 1000) + latent_timestep / 1000 * noise
318+
print("latents shape:", latents.shape)
319+
elif official_i2v:
320+
t = torch.tensor([0.999]).to(device=device)
321+
latents = noise * t + image_cond_latents * (1 - t)
322+
latents = latents.to(dtype=self.base_dtype)
319323
else:
320-
latents = latents.to(device)
324+
latents = noise
321325

322326
# Check existence to make it compatible with FlowMatchEulerDiscreteScheduler
323327
if hasattr(self.scheduler, "init_noise_sigma"):

hyvideo/text_encoder/__init__.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
import torch.nn as nn
77
from transformers import CLIPTextModel, CLIPTokenizer, AutoTokenizer, AutoModel, AutoProcessor, CLIPImageProcessor #LlavaForConditionalGeneration
88
from .modeling_llava import LlavaForConditionalGeneration
9+
from .processing_llava import LlavaProcessor
910
from transformers.utils import ModelOutput
1011

1112
from ..constants import TEXT_ENCODER_PATH, TOKENIZER_PATH
@@ -166,8 +167,11 @@ def __init__(
166167
elif "llm" in text_encoder_type or "glm" in text_encoder_type or "vlm" in text_encoder_type:
167168
self.output_key = output_key or "last_hidden_state"
168169
if "glm" in text_encoder_type or "vlm" in text_encoder_type:
169-
#self.processor = AutoProcessor.from_pretrained(text_encoder_path, device=device)
170-
self.processor = CLIPImageProcessor.from_pretrained(text_encoder_path, use_fast=False)
170+
self.processor_ip2v = LlavaProcessor.from_pretrained(text_encoder_path, device=device)
171+
self.processor_ip2v.patch_size = None
172+
self.processor_ip2v.vision_feature_select_strategy = None
173+
174+
self.processor = CLIPImageProcessor.from_pretrained(text_encoder_path, device=device)
171175
self.processor.patch_size = None
172176
self.processor.vision_feature_select_strategy = None
173177
else:
@@ -259,7 +263,7 @@ def text2tokens(self, text, prompt_template, image1=None, image2=None, clip_text
259263
raw_images.append(image1.squeeze(0)*255)
260264
if image2 is not None:
261265
raw_images.append(image2.squeeze(0)*255)
262-
text_tokens = self.processor(
266+
text_tokens = self.processor_ip2v(
263267
raw_images,
264268
text,
265269
**kwargs,
Lines changed: 203 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,203 @@
1+
# coding=utf-8
2+
# Copyright 2023 The HuggingFace Inc. team.
3+
#
4+
# Licensed under the Apache License, Version 2.0 (the "License");
5+
# you may not use this file except in compliance with the License.
6+
# You may obtain a copy of the License at
7+
#
8+
# http://www.apache.org/licenses/LICENSE-2.0
9+
#
10+
# Unless required by applicable law or agreed to in writing, software
11+
# distributed under the License is distributed on an "AS IS" BASIS,
12+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
# See the License for the specific language governing permissions and
14+
# limitations under the License.
15+
"""
16+
Processor class for Llava.
17+
"""
18+
19+
from typing import List, Union
20+
21+
from transformers.feature_extraction_utils import BatchFeature
22+
from transformers.image_utils import ImageInput, get_image_size, to_numpy_array
23+
from transformers.processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, _validate_images_text_input_order
24+
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
25+
from transformers.utils import logging
26+
27+
28+
logger = logging.get_logger(__name__)
29+
30+
31+
class LlavaProcessorKwargs(ProcessingKwargs, total=False):
32+
_defaults = {
33+
"text_kwargs": {
34+
"padding": False,
35+
},
36+
"images_kwargs": {},
37+
}
38+
39+
40+
class LlavaProcessor(ProcessorMixin):
41+
r"""
42+
Constructs a Llava processor which wraps a Llava image processor and a Llava tokenizer into a single processor.
43+
44+
[`LlavaProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
45+
[`~LlavaProcessor.__call__`] and [`~LlavaProcessor.decode`] for more information.
46+
47+
Args:
48+
image_processor ([`CLIPImageProcessor`], *optional*):
49+
The image processor is a required input.
50+
tokenizer ([`LlamaTokenizerFast`], *optional*):
51+
The tokenizer is a required input.
52+
patch_size (`int`, *optional*):
53+
Patch size from the vision tower.
54+
vision_feature_select_strategy (`str`, *optional*):
55+
The feature selection strategy used to select the vision feature from the vision backbone.
56+
Should be the same as in the model's config
57+
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
58+
in a chat into a tokenizable string.
59+
image_token (`str`, *optional*, defaults to `"<image>"`):
60+
Special token used to denote image location.
61+
num_additional_image_tokens (`int`, *optional*, defaults to 0):
62+
Number of additional tokens added to the image embeddings, such as CLS (+1). If the backbone has no CLS or other
63+
extra tokens appended, no need to set this arg.
64+
"""
65+
66+
attributes = ["image_processor", "tokenizer"]
67+
valid_kwargs = [
68+
"chat_template",
69+
"patch_size",
70+
"vision_feature_select_strategy",
71+
"image_token",
72+
"num_additional_image_tokens",
73+
]
74+
image_processor_class = "AutoImageProcessor"
75+
tokenizer_class = "AutoTokenizer"
76+
77+
def __init__(
78+
self,
79+
image_processor=None,
80+
tokenizer=None,
81+
patch_size=None,
82+
vision_feature_select_strategy=None,
83+
chat_template=None,
84+
image_token="<image>", # set the default and let users change if they have peculiar special tokens in rare cases
85+
num_additional_image_tokens=0,
86+
**kwargs,
87+
):
88+
self.patch_size = patch_size
89+
self.num_additional_image_tokens = num_additional_image_tokens
90+
self.vision_feature_select_strategy = vision_feature_select_strategy
91+
self.image_token = tokenizer.image_token if hasattr(tokenizer, "image_token") else image_token
92+
super().__init__(image_processor, tokenizer, chat_template=chat_template)
93+
94+
def __call__(
95+
self,
96+
images: ImageInput = None,
97+
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
98+
audio=None,
99+
videos=None,
100+
**kwargs: Unpack[LlavaProcessorKwargs],
101+
) -> BatchFeature:
102+
"""
103+
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
104+
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
105+
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
106+
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
107+
of the above two methods for more information.
108+
109+
Args:
110+
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
111+
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
112+
tensor. Both channels-first and channels-last formats are supported.
113+
text (`str`, `List[str]`, `List[List[str]]`):
114+
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
115+
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
116+
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
117+
return_tensors (`str` or [`~utils.TensorType`], *optional*):
118+
If set, will return tensors of a particular framework. Acceptable values are:
119+
- `'tf'`: Return TensorFlow `tf.constant` objects.
120+
- `'pt'`: Return PyTorch `torch.Tensor` objects.
121+
- `'np'`: Return NumPy `np.ndarray` objects.
122+
- `'jax'`: Return JAX `jnp.ndarray` objects.
123+
124+
Returns:
125+
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
126+
127+
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
128+
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
129+
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
130+
`None`).
131+
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
132+
"""
133+
if images is None and text is None:
134+
raise ValueError("You have to specify at least one of `images` or `text`.")
135+
136+
# check if images and text inputs are reversed for BC
137+
images, text = _validate_images_text_input_order(images, text)
138+
139+
output_kwargs = self._merge_kwargs(
140+
LlavaProcessorKwargs,
141+
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
142+
**kwargs,
143+
)
144+
if images is not None:
145+
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
146+
else:
147+
image_inputs = {}
148+
149+
if isinstance(text, str):
150+
text = [text]
151+
elif not isinstance(text, list) and not isinstance(text[0], str):
152+
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
153+
154+
# try to expand inputs in processing if we have the necessary parts
155+
prompt_strings = text
156+
if image_inputs.get("pixel_values") is not None:
157+
if self.patch_size is not None and self.vision_feature_select_strategy is not None:
158+
# Replace the image token with the expanded image token sequence
159+
pixel_values = image_inputs["pixel_values"]
160+
height, width = get_image_size(to_numpy_array(pixel_values[0]))
161+
num_image_tokens = (height // self.patch_size) * (
162+
width // self.patch_size
163+
) + self.num_additional_image_tokens
164+
if self.vision_feature_select_strategy == "default":
165+
num_image_tokens -= self.num_additional_image_tokens
166+
167+
prompt_strings = []
168+
for sample in text:
169+
sample = sample.replace(self.image_token, self.image_token * num_image_tokens)
170+
prompt_strings.append(sample)
171+
else:
172+
logger.warning_once(
173+
"Expanding inputs for image tokens in LLaVa should be done in processing. "
174+
"Please add `patch_size` and `vision_feature_select_strategy` to the model's processing config or set directly "
175+
"with `processor.patch_size = {{patch_size}}` and `processor.vision_feature_select_strategy = {{vision_feature_select_strategy}}`. "
176+
"Using processors without these attributes in the config is deprecated and will throw an error in v4.50."
177+
)
178+
179+
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
180+
return BatchFeature(data={**text_inputs, **image_inputs})
181+
182+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.batch_decode with CLIP->Llama
183+
def batch_decode(self, *args, **kwargs):
184+
"""
185+
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
186+
refer to the docstring of this method for more information.
187+
"""
188+
return self.tokenizer.batch_decode(*args, **kwargs)
189+
190+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.decode with CLIP->Llama
191+
def decode(self, *args, **kwargs):
192+
"""
193+
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
194+
the docstring of this method for more information.
195+
"""
196+
return self.tokenizer.decode(*args, **kwargs)
197+
198+
@property
199+
# Copied from transformers.models.clip.processing_clip.CLIPProcessor.model_input_names
200+
def model_input_names(self):
201+
tokenizer_input_names = self.tokenizer.model_input_names
202+
image_processor_input_names = self.image_processor.model_input_names
203+
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

0 commit comments

Comments
 (0)