36 changes: 36 additions & 0 deletions bria-ai/fibo-bedrock/.gitignore
@@ -0,0 +1,36 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python

# Virtual Environment
venv/
env/
ENV/
.venv/

# Ignore all markdown files (including fibo-bedrock-pipe/README.md)
*.md
!/README.md

# Ignore all Python files except specific ones
*.py
!fibo-bedrock-pipe/*.py
!use_fibo.py

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Generated outputs (the leading * makes these match by extension)
*.json
!fibo-bedrock-pipe/*.json
*.png

34 changes: 34 additions & 0 deletions bria-ai/fibo-bedrock/README.md
@@ -0,0 +1,34 @@
---
license: cc-by-nc-4.0
---

# Bria Fibo using Amazon Bedrock

This is a modification of Bria's pipeline block that converts a text prompt into a structured JSON object using Amazon Bedrock. See `use_fibo.py` for usage details.

## Usage

```python
# Create a prompt to generate an initial image
# You can also enable verbose logging per-call by adding verbose=True
output = vlm_pipe(
    prompt="A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl's gaze is curious and full of charm, giving it a whimsical, storybook-like personality.",
    # verbose=True
)
json_prompt_generate = output.values["json_prompt"]

...

results_generate = pipe(
    prompt=json_prompt_generate, num_inference_steps=50, guidance_scale=5, negative_prompt=negative_prompt
)
```
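
The `negative_prompt` used above comes from `get_default_negative_prompt` in `use_fibo.py`, which returns a JSON-style negative prompt string when the generated prompt's `style_medium` is photographic.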


## Inputs

- `prompt`: A string prompt to convert to a JSON object.

## Outputs

- `json_prompt`: A JSON string representing the structured prompt (parse with `json.loads` if you need a dict).
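
## Loading the block

A minimal sketch of constructing the pipeline from this folder, mirroring `use_fibo.py` (assumes AWS credentials for Bedrock are already configured in your environment):

```python
from diffusers.modular_pipelines import ModularPipeline

from bedrock_prompt_to_json import BriaFiboBedrockPromptToJson

# region_name and verbose are the constructor arguments used in use_fibo.py
blocks = BriaFiboBedrockPromptToJson(region_name="us-east-1", verbose=True)
vlm_pipe = ModularPipeline(blocks=blocks)
```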
979 changes: 979 additions & 0 deletions bria-ai/fibo-bedrock/fibo-bedrock-pipe/bedrock_prompt_to_json.py

Large diffs are not rendered by default.

7 changes: 7 additions & 0 deletions bria-ai/fibo-bedrock/fibo-bedrock-pipe/config.json
@@ -0,0 +1,7 @@
{
  "_class_name": "BriaFiboBedrockPromptToJson",
  "_diffusers_version": "0.35.0.dev0",
  "auto_map": {
    "ModularPipelineBlocks": "bedrock_prompt_to_json.BriaFiboBedrockPromptToJson"
  }
}
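
The `auto_map` entry is what lets diffusers resolve the custom block when this folder is loaded remotely. A hedged sketch of that load path, assuming the standard `ModularPipelineBlocks.from_pretrained` remote-code mechanism (the repo id below is illustrative, not a published repo):

```python
from diffusers.modular_pipelines import ModularPipelineBlocks

# trust_remote_code is required because the block class lives in this repo,
# not in diffusers itself; auto_map tells diffusers which file and class to load.
blocks = ModularPipelineBlocks.from_pretrained(
    "briaai/fibo-bedrock-pipe",  # hypothetical repo id for illustration
    trust_remote_code=True,
)
```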
37 changes: 37 additions & 0 deletions bria-ai/fibo-bedrock/fibo-bedrock-pipe/modular_config.json
@@ -0,0 +1,37 @@
{
  "_class_name": "BriaFiboBedrockPromptToJson",
  "_diffusers_version": "0.36.0.dev0",
  "auto_map": {
    "ModularPipelineBlocks": "bedrock_prompt_to_json.BriaFiboBedrockPromptToJson"
  },
  "requirements": [
    ["google.generativeai", "0.8.4"],
    ["google-genai", "1.46.0"],
    ["transformers", "4.57.1"],
    ["pydantic", "2.12.3"],
    ["boltons", "25.0.0"],
    ["ujson", "5.11.0"],
    ["Pillow", "10.1.0"]
  ]
}
15 changes: 15 additions & 0 deletions bria-ai/fibo-bedrock/set_models.sh
@@ -0,0 +1,15 @@
#!/bin/bash
# Set Bedrock model environment variables

# BASE_MODEL: For text-only operations (Generate, RefineA)
# Use efficient Nova Lite for fast, cost-effective text generation
export BASE_MODEL="us.amazon.nova-lite-v1:0"

# VLM_MODEL: For vision operations (Caption, RefineB, InspireA)
export VLM_MODEL="us.amazon.nova-2-lite-v1:0"

echo "Environment variables set:"
echo " BASE_MODEL=$BASE_MODEL"
echo " VLM_MODEL=$VLM_MODEL"
echo ""
echo "Usage: source set_models.sh"
112 changes: 112 additions & 0 deletions bria-ai/fibo-bedrock/use_fibo.py
@@ -0,0 +1,112 @@
import json
import os
import sys
import importlib

import torch
from diffusers import BriaFiboPipeline
from diffusers.modular_pipelines import ModularPipeline

# Add the local fibo-bedrock-pipe to the Python path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'fibo-bedrock-pipe'))

# Force reload of the module to avoid caching issues
if 'bedrock_prompt_to_json' in sys.modules:
    importlib.reload(sys.modules['bedrock_prompt_to_json'])

# Import the local Bedrock-based pipeline
from bedrock_prompt_to_json import BriaFiboBedrockPromptToJson

# -------------------------------
# Load the VLM pipeline (using local Bedrock version)
# -------------------------------
torch.set_grad_enabled(False)

# Create the pipeline using the local Bedrock implementation
# Model IDs are read from environment variables:
# - BASE_MODEL: For text-only operations (default: us.amazon.nova-lite-v1:0)
# - VLM_MODEL: For vision operations (default: us.anthropic.claude-3-5-sonnet-20241022-v2:0)
# NOTE: assumes AWS credentials are configured in the shell so the Bedrock client can authenticate
# Set verbose=True to enable detailed logging of all operations
blocks = BriaFiboBedrockPromptToJson(region_name="us-east-1", verbose=True)

# Initialize the ModularPipeline with the local blocks
vlm_pipe = ModularPipeline(blocks=blocks)

# NOTE: Bedrock VLM uses API calls (no local models, no GPU usage)
# All inference happens on AWS servers via boto3 client
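#
# For reference, a single Bedrock call made by the block looks roughly like the
# sketch below (illustrative only; the real request construction lives in
# bedrock_prompt_to_json.py):
#
#   import boto3
#   client = boto3.client("bedrock-runtime", region_name="us-east-1")
#   response = client.converse(
#       modelId=os.environ.get("BASE_MODEL", "us.amazon.nova-lite-v1:0"),
#       messages=[{"role": "user", "content": [{"text": "..."}]}],
#   )
#   text = response["output"]["message"]["content"][0]["text"]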


# Load the FIBO pipeline with aggressive memory optimizations
# If you get a 401 error, you need to:
# 1. Get a HuggingFace token from https://huggingface.co/settings/tokens
# 2. Request access to the model at https://huggingface.co/briaai/FIBO
# 3. Either login with: huggingface-cli login
# Or uncomment the token parameter below and add your token
pipe = BriaFiboPipeline.from_pretrained(
    "briaai/FIBO",
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    low_cpu_mem_usage=True,  # Reduce CPU memory during loading
    # token="hf_YOUR_TOKEN_HERE",  # Uncomment and add your token if needed
)

# Use sequential CPU offload for T4 GPU (15GB VRAM)
# This keeps only the active component on GPU, rest on CPU
# Slower but fits in memory
pipe.enable_sequential_cpu_offload()

# Additional memory optimizations (only use methods that BriaFiboPipeline supports)
try:
    pipe.enable_attention_slicing(1)  # Most aggressive slicing (slice_size=1)
except AttributeError:
    pass  # Not supported by this pipeline

try:
    pipe.enable_vae_slicing()  # Reduces VAE memory usage
except AttributeError:
    pass  # Not supported by this pipeline

try:
    pipe.enable_vae_tiling()  # Process VAE in tiles for large images
except AttributeError:
    pass  # Not supported by this pipeline

# -------------------------------
# Run Prompt to JSON
# -------------------------------

# Create a prompt to generate an initial image
# You can also enable verbose logging per-call by adding verbose=True
output = vlm_pipe(
    prompt="A hyper-detailed, ultra-fluffy owl sitting in the trees at night, looking directly at the camera with wide, adorable, expressive eyes. Its feathers are soft and voluminous, catching the cool moonlight with subtle silver highlights. The owl's gaze is curious and full of charm, giving it a whimsical, storybook-like personality.",
    # verbose=True  # Uncomment to enable verbose logging for this call only
)
json_prompt_generate = output.values["json_prompt"]

def get_default_negative_prompt(existing_json: dict) -> str:
    """Return a JSON-style negative prompt that steers photographic prompts away from illustration styles."""
    negative_prompt = ""
    style_medium = existing_json.get("style_medium", "").lower()
    if style_medium in ["photograph", "photography", "photo"]:
        negative_prompt = """{'style_medium':'digital illustration','artistic_style':'non-realistic'}"""
    return negative_prompt


negative_prompt = get_default_negative_prompt(json.loads(json_prompt_generate))

print("\nSaving JSON prompt to file...")
with open("image_generate_json_prompt.json", "w") as f:
    f.write(json_prompt_generate)
print("JSON prompt saved to: image_generate_json_prompt.json")

# -------------------------------
# Run Image Generation
# -------------------------------
# Generate the image from the structured json prompt
print("\nGenerating image from JSON prompt...")
results_generate = pipe(
    prompt=json_prompt_generate, num_inference_steps=10, guidance_scale=5, negative_prompt=negative_prompt
)
results_generate.images[0].save("image_generate.png")
print("Image saved to: image_generate.png")