# from https://discuss.huggingface.co/t/generating-and-saving-multiple-images-using-img2img-pipeline/30929
from diffusers import StableDiffusionImg2ImgPipeline, EulerDiscreteScheduler
from pathlib import Path
from PIL import Image
import torch
import re
import requests


def slugify(text):
    # strip characters that are neither word characters nor whitespace, then
    # collapse runs of whitespace into hyphens to build a filename-safe slug
    text = re.sub(r"[^\w\s]", "", text)
    text = re.sub(r"\s+", "-", text)
    return text


model_id = "stabilityai/stable-diffusion-2"

images_url = ["https://s3.amazonaws.com/moonup/production/uploads/1675140495576-noauth.png",
              "https://s3.amazonaws.com/moonup/production/uploads/1675032939263-noauth.png",
              "https://s3.amazonaws.com/moonup/production/uploads/1673856328001-noauth.png"]

# download the reference images and resize them to the model's native 768x768 resolution
init_images = [Image.open(requests.get(url, stream=True).raw).convert("RGB").resize((768, 768)) for url in images_url]

prompts = ["beautiful colorful flower",
           "green city future mountain 3d sunrise skyscrapers",
           "rainbow beach, palm trees, neon, miami"]

negative_prompts = ["blurry, dark photo, blue",
                    "blurry, dark photo, blue",
                    "blurry, dark photo, blue"]

device = "cuda" if torch.cuda.is_available() else "cpu"
# half precision is only reliable on GPU; fall back to float32 on CPU
dtype = torch.float16 if device == "cuda" else torch.float32

# use the Euler scheduler instead of the pipeline's default
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=dtype)
pipe = pipe.to(device)
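
# Optional: if VRAM is tight, attention slicing trades a little speed for a lower
# peak memory footprint (uncomment to enable).
# pipe.enable_attention_slicing()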

DIR_NAME = "./images/"
dirpath = Path(DIR_NAME)
# create the output directory if it doesn't already exist
dirpath.mkdir(parents=True, exist_ok=True)

# generation settings
steps = 20
scale = 9
num_images_per_prompt = 1

# draw a random seed and use a seeded generator so the run is reproducible
seed = torch.randint(0, 1000000, (1,)).item()
generator = torch.Generator(device=device).manual_seed(seed)

output = pipe(prompts, negative_prompt=negative_prompts, image=init_images, num_inference_steps=steps,
              guidance_scale=scale, num_images_per_prompt=num_images_per_prompt, generator=generator)

# save each image under a slug of its prompt
# (this simple prompt/image pairing assumes num_images_per_prompt == 1)
for idx, (image, prompt) in enumerate(zip(output.images, prompts * num_images_per_prompt)):
    image_name = f"{slugify(prompt)}-{idx}.png"
    image_path = dirpath / image_name
    image.save(image_path)
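
# Optionally record the seed next to the outputs so a run can be reproduced later, e.g.:
# (dirpath / "seed.txt").write_text(str(seed))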