6 changes: 4 additions & 2 deletions src/diffusers/pipelines/allegro/pipeline_allegro.py
@@ -198,6 +198,10 @@ def __init__(
scheduler: KarrasDiffusionSchedulers,
):
super().__init__()
self._guidance_scale = 7.5
self._num_timesteps = 0
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -843,8 +847,6 @@ def __call__(
negative_prompt_attention_mask,
)
self._guidance_scale = guidance_scale
self._current_timestep = None
self._interrupt = False

# 2. Default height and width to transformer
if prompt is not None and isinstance(prompt, str):
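Across this file and the ones below the pattern is the same: the per-call state that __call__ used to create lazily (_guidance_scale, _num_timesteps, _current_timestep, _interrupt, and friends) is now seeded with defaults in __init__, and the constant re-assignments in __call__ are dropped. A minimal sketch of the idea, using an illustrative toy class rather than the real DiffusionPipeline subclasses (the class name and the 7.5 default are assumptions made only for the example):

class TinyPipeline:
    """Illustrative only -- mimics how these pipelines now seed per-call state."""

    def __init__(self):
        # Defaults exist from construction time onward, so the read-only
        # properties below can be queried before the first generation run.
        self._guidance_scale = 7.5
        self._num_timesteps = 0
        self._current_timestep = None
        self._interrupt = False

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def interrupt(self):
        return self._interrupt

    def __call__(self, guidance_scale: float = 7.5) -> float:
        # Parameter-dependent state is still refreshed on every call.
        self._guidance_scale = guidance_scale
        return self._guidance_scale

pipe = TinyPipeline()
print(pipe.guidance_scale)  # 7.5 -- no AttributeError before the pipeline has ever run
print(pipe.interrupt)       # False

The real pipelines expose similar read-only properties over these private attributes, which is what makes the construction-time defaults useful.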
6 changes: 5 additions & 1 deletion src/diffusers/pipelines/animatediff/pipeline_animatediff.py
@@ -136,6 +136,11 @@ def __init__(
image_encoder: CLIPVisionModelWithProjection = None,
):
super().__init__()
self._guidance_scale = 7.5
self._clip_skip = None
self._cross_attention_kwargs = None
self._num_timesteps = 0
self._interrupt = False
if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

@@ -714,7 +719,6 @@ def __call__(
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, (str, dict)):
(changes to another AnimateDiff pipeline)
@@ -172,6 +172,11 @@ def __init__(
image_encoder: Optional[CLIPVisionModelWithProjection] = None,
):
super().__init__()
self._guidance_scale = 7.5
self._clip_skip = None
self._cross_attention_kwargs = None
self._num_timesteps = 0
self._interrupt = False
if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

@@ -877,7 +882,6 @@ def __call__(
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, (str, dict)):
(changes to another AnimateDiff pipeline)
@@ -299,6 +299,13 @@ def __init__(
force_zeros_for_empty_prompt: bool = True,
):
super().__init__()
self._guidance_scale = 5.0
self._guidance_rescale = 0.0
self._clip_skip = None
self._cross_attention_kwargs = None
self._denoising_end = None
self._num_timesteps = 0
self._interrupt = False

if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
@@ -1083,7 +1090,6 @@ def __call__(
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._denoising_end = denoising_end
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
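This AnimateDiff variant also gains construction-time defaults for _guidance_rescale and _denoising_end. For context, guidance_rescale drives the noise-rescaling trick from "Common Diffusion Noise Schedules and Sample Steps Are Flawed"; the sketch below follows the form of the helper these pipelines use for it, though the exact function name and reduction handling here should be treated as an assumption:

import torch

def rescale_noise_cfg(noise_cfg: torch.Tensor, noise_pred_text: torch.Tensor, guidance_rescale: float = 0.0) -> torch.Tensor:
    # Pull the classifier-free-guidance output back towards the standard
    # deviation of the text-conditioned prediction to reduce overexposure.
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    rescaled = noise_cfg * (std_text / std_cfg)
    # guidance_rescale = 0.0 -- the default now set in __init__ -- returns
    # noise_cfg unchanged.
    return guidance_rescale * rescaled + (1 - guidance_rescale) * noise_cfg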
(changes to another AnimateDiff pipeline)
@@ -184,6 +184,10 @@ def __init__(
image_encoder: CLIPVisionModelWithProjection = None,
):
super().__init__()
self._guidance_scale = 7.5
self._clip_skip = None
self._cross_attention_kwargs = None
self._num_timesteps = 0
if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

(changes to another AnimateDiff pipeline)
@@ -239,6 +239,11 @@ def __init__(
image_encoder: CLIPVisionModelWithProjection = None,
):
super().__init__()
self._guidance_scale = 7.5
self._clip_skip = None
self._cross_attention_kwargs = None
self._num_timesteps = 0
self._interrupt = False
if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

@@ -883,7 +888,6 @@ def __call__(
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, (str, dict)):
(changes to another AnimateDiff pipeline)
@@ -262,6 +262,11 @@ def __init__(
image_encoder: CLIPVisionModelWithProjection = None,
):
super().__init__()
self._guidance_scale = 7.5
self._clip_skip = None
self._cross_attention_kwargs = None
self._num_timesteps = 0
self._interrupt = False
if isinstance(unet, UNet2DConditionModel):
unet = UNetMotionModel.from_unet2d(unet, motion_adapter)

@@ -1086,7 +1091,6 @@ def __call__(
self._guidance_scale = guidance_scale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False

# 2. Define call parameters
if prompt is not None and isinstance(prompt, (str, dict)):
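The _interrupt flag that most of these __init__ hunks now seed with False is what the denoising loops consult to support early stopping from a step-end callback. A hedged sketch of that usage (the callback signature follows the usual callback_on_step_end convention; the function name and the step threshold are made up for the example):

def stop_after_ten_steps(pipeline, step_index, timestep, callback_kwargs):
    # Flipping the private flag makes the denoising loop skip its remaining
    # iterations; the loop checks pipeline.interrupt at the top of each step.
    if step_index >= 10:
        pipeline._interrupt = True
    return callback_kwargs

# e.g. pipe(prompt="...", callback_on_step_end=stop_after_ten_steps)

With the default moved into __init__, pipe.interrupt also reads as False on a freshly constructed pipeline instead of raising AttributeError.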
3 changes: 3 additions & 0 deletions src/diffusers/pipelines/aura_flow/pipeline_aura_flow.py
@@ -154,6 +154,9 @@ def __init__(
scheduler: FlowMatchEulerDiscreteScheduler,
):
super().__init__()
self._guidance_scale = 3.5
self._attention_kwargs = None
self._num_timesteps = 0

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
6 changes: 6 additions & 0 deletions src/diffusers/pipelines/bria/pipeline_bria.py
@@ -123,6 +123,12 @@ def __init__(
image_encoder: CLIPVisionModelWithProjection = None,
feature_extractor: CLIPImageProcessor = None,
):
super().__init__()
self._guidance_scale = 5.0
self._attention_kwargs = None
self._interrupt = False
self._num_timesteps = 0

self.register_modules(
vae=vae,
text_encoder=text_encoder,
5 changes: 5 additions & 0 deletions src/diffusers/pipelines/bria_fibo/pipeline_bria_fibo.py
@@ -107,6 +107,11 @@ def __init__(
scheduler=scheduler,
)

self._guidance_scale = 5.0
self._joint_attention_kwargs = None
self._interrupt = False
self._num_timesteps = 0

self.vae_scale_factor = 16
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
self.default_sample_size = 64
5 changes: 5 additions & 0 deletions src/diffusers/pipelines/chroma/pipeline_chroma.py
@@ -190,6 +190,11 @@ def __init__(
feature_extractor: CLIPImageProcessor = None,
):
super().__init__()
self._guidance_scale = 5.0
self._joint_attention_kwargs = None
self._current_timestep = None
self._interrupt = False
self._num_timesteps = 0

self.register_modules(
vae=vae,
5 changes: 5 additions & 0 deletions src/diffusers/pipelines/chroma/pipeline_chroma_img2img.py
@@ -202,6 +202,11 @@ def __init__(
feature_extractor: CLIPImageProcessor = None,
):
super().__init__()
self._guidance_scale = 5.0
self._joint_attention_kwargs = None
self._current_timestep = None
self._interrupt = False
self._num_timesteps = 0

self.register_modules(
vae=vae,
5 changes: 5 additions & 0 deletions src/diffusers/pipelines/chronoedit/pipeline_chronoedit.py
@@ -167,6 +167,11 @@ def __init__(
):
super().__init__()

self._guidance_scale = 5.0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False
self._num_timesteps = 0
self.register_modules(
vae=vae,
text_encoder=text_encoder,
9 changes: 6 additions & 3 deletions src/diffusers/pipelines/cogvideo/pipeline_cogvideox.py
@@ -185,6 +185,11 @@ def __init__(
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
):
super().__init__()
self._guidance_scale = 6
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -631,8 +636,6 @@ def __call__(
)
self._guidance_scale = guidance_scale
self._attention_kwargs = attention_kwargs
self._current_timestep = None
self._interrupt = False

# 2. Default call parameters
if prompt is not None and isinstance(prompt, str):
@@ -737,7 +740,7 @@ def __call__(

# perform guidance
if use_dynamic_cfg:
self._guidance_scale = 1 + guidance_scale * (
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
)
if do_classifier_free_guidance:
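The last hunk in this file is the dynamic-CFG line inside the denoising loop. Restated as a standalone function (a sketch; dynamic_cfg is not a real diffusers helper, and the base term of 1 matches the upstream formula), the cosine term ramps the effective scale between 1 and 1 + guidance_scale as the normalized progress term goes from 0 to 1:

import math

def dynamic_cfg(guidance_scale: float, t: float, num_inference_steps: int) -> float:
    # Same expression as in the loop above, as a pure function of the current
    # scheduler timestep value t.
    progress = (num_inference_steps - t) / num_inference_steps
    return 1 + guidance_scale * (1 - math.cos(math.pi * progress ** 5.0)) / 2

print(dynamic_cfg(6.0, t=50, num_inference_steps=50))  # 1.0 -- cosine term is zero
print(dynamic_cfg(6.0, t=0, num_inference_steps=50))   # 7.0 -- full 1 + guidance_scale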
(changes to another CogVideoX pipeline)
@@ -192,6 +192,11 @@ def __init__(
scheduler: KarrasDiffusionSchedulers,
):
super().__init__()
self._guidance_scale = 6
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -684,8 +689,6 @@ def __call__(
)
self._guidance_scale = guidance_scale
self._attention_kwargs = attention_kwargs
self._current_timestep = None
self._interrupt = False

# 2. Default call parameters
if prompt is not None and isinstance(prompt, str):
@@ -803,7 +806,7 @@ def __call__(

# perform guidance
if use_dynamic_cfg:
self._guidance_scale = 1 + guidance_scale * (
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
)
if do_classifier_free_guidance:
(changes to another CogVideoX pipeline)
@@ -198,6 +198,11 @@ def __init__(
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
):
super().__init__()
self._guidance_scale = 6
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer,
@@ -728,9 +733,7 @@ def __call__(
negative_prompt_embeds=negative_prompt_embeds,
)
self._guidance_scale = guidance_scale
self._current_timestep = None
self._attention_kwargs = attention_kwargs
self._interrupt = False

# 2. Default call parameters
if prompt is not None and isinstance(prompt, str):
@@ -847,7 +850,7 @@ def __call__(

# perform guidance
if use_dynamic_cfg:
self._guidance_scale = 1 + guidance_scale * (
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
)
if do_classifier_free_guidance:
(changes to another CogVideoX pipeline)
@@ -207,6 +207,11 @@ def __init__(
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
):
super().__init__()
self._guidance_scale = 6
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -704,8 +709,6 @@ def __call__(
)
self._guidance_scale = guidance_scale
self._attention_kwargs = attention_kwargs
self._current_timestep = None
self._interrupt = False

# 2. Default call parameters
if prompt is not None and isinstance(prompt, str):
@@ -818,7 +821,7 @@ def __call__(

# perform guidance
if use_dynamic_cfg:
self._guidance_scale = 1 + guidance_scale * (
(1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
)
if do_classifier_free_guidance:
4 changes: 3 additions & 1 deletion src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py
@@ -156,6 +156,9 @@ def __init__(
scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
):
super().__init__()
self._guidance_scale = 5.0
self._num_timesteps = 0
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -534,7 +537,6 @@ def __call__(
negative_prompt_embeds,
)
self._guidance_scale = guidance_scale
self._interrupt = False

# 2. Default call parameters
if prompt is not None and isinstance(prompt, str):
7 changes: 5 additions & 2 deletions src/diffusers/pipelines/cogview4/pipeline_cogview4.py
@@ -168,6 +168,11 @@ def __init__(
scheduler: FlowMatchEulerDiscreteScheduler,
):
super().__init__()
self._guidance_scale = 5.0
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -535,8 +540,6 @@ def __call__(
)
self._guidance_scale = guidance_scale
self._attention_kwargs = attention_kwargs
self._current_timestep = None
self._interrupt = False

# Default call parameters
if prompt is not None and isinstance(prompt, str):
7 changes: 5 additions & 2 deletions src/diffusers/pipelines/cogview4/pipeline_cogview4_control.py
@@ -170,6 +170,11 @@ def __init__(
scheduler: FlowMatchEulerDiscreteScheduler,
):
super().__init__()
self._guidance_scale = 5.0
self._num_timesteps = 0
self._attention_kwargs = None
self._current_timestep = None
self._interrupt = False

self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
@@ -567,8 +572,6 @@ def __call__(
)
self._guidance_scale = guidance_scale
self._attention_kwargs = attention_kwargs
self._current_timestep = None
self._interrupt = False

# Default call parameters
if prompt is not None and isinstance(prompt, str):