From c5e8a74815e8a1c5714418ee509c29c908bea4c8 Mon Sep 17 00:00:00 2001
From: Victor Giers
Date: Thu, 7 May 2026 12:02:43 +0200
Subject: [PATCH] auto-git: [change] generate_equirect.py

---
 generate_equirect.py | 137 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

diff --git a/generate_equirect.py b/generate_equirect.py
index f69b3ca..2fe8ff7 100644
--- a/generate_equirect.py
+++ b/generate_equirect.py
@@ -324,6 +324,143 @@ def run_realesrgan(
     return out_path
 
 
+def postprocess_image(
+    prompt: str,
+    input_path: str,
+    output_path: str,
+    tempdir: str,
+    upscale: str = "none",
+    steps: int = 25,
+    guidance: float = 4.5,
+    width: int = 1024,
+    height: int = 512,
+    seam_inpaint: bool = False,
+) -> str:
+    device = select_device()
+    enable_upscale = bool(upscale and upscale != "none")
+    progress_cb = make_progress_cb(enable_upscale, seam_inpaint)
+
+    with Image.open(input_path) as input_img:
+        image = input_img.convert("RGB")
+
+    seamless_path = os.path.join(tempdir, os.path.basename(output_path))
+    if seam_inpaint:
+        shift_amt = width // 2
+        mask_w = width // 8
+        shifted = shift_image(image, shift_amt)
+        mask = create_mask(width, height, mask_w)
+
+        print("→ Loading seam inpaint model…")
+        inpaint_pipe = StableDiffusionInpaintPipeline.from_pretrained(
+            INPAINT_MODEL,
+            torch_dtype=torch.float32,
+            safety_checker=None,
+            requires_safety_checker=False,
+        ).to(device)
+        configure_pipeline_memory(inpaint_pipe)
+
+        print("→ Inpainting seam for seamless tiling…")
+        progress_cb("inpaint", 0, steps)
+        inpainted = inpaint_pipe(
+            prompt=prompt,
+            image=shifted,
+            mask_image=mask,
+            num_inference_steps=steps,
+            guidance_scale=guidance,
+            width=width,
+            height=height,
+            callback_steps=1,
+            callback=lambda step, timestep, kwargs: progress_cb("inpaint", step + 1, steps),
+        ).images[0]
+
+        del inpaint_pipe, shifted, mask
+        clear_torch_cache(device)
+
+        inpainted = unshift_image(inpainted, shift_amt)
+        inpainted.save(seamless_path)
+        print(f"→ Crafted seamless image: {seamless_path}")
+        final_source = inpainted
+    else:
+        image.save(seamless_path)
+        print(f"→ Using raw output (seam inpaint disabled): {seamless_path}")
+        final_source = image
+
+    final_path = seamless_path
+
+    if upscale and upscale != "none":
+        try:
+            if upscale is True or upscale == "topaz":
+                final_path = run_topaz(seamless_path, tempdir)
+            elif upscale == "realesrgan":
+                final_path = run_realesrgan(
+                    final_source,
+                    tempdir,
+                    scale=REALESRGAN_SCALE,
+                    model_path=REALESRGAN_MODEL,
+                    progress_cb=progress_cb
+                )
+            else:
+                raise ValueError(f"Unknown upscale option '{upscale}'")
+        except Exception as e:  # noqa: BLE001
+            print(f"Upscaling failed ({upscale}); keeping seamless image: {e}")
+
+    with Image.open(final_path) as final_img:
+        final_img.load()
+    save_png_with_prompt(final_img, output_path, prompt)
+    try:
+        with Image.open(output_path) as _im:
+            print(f"→ Final image written to {output_path} [{_im.size[0]}x{_im.size[1]}]")
+    except Exception:
+        print(f"→ Final image written to {output_path}")
+    return output_path
+
+
+def restart_for_postprocess(
+    prompt: str,
+    input_path: str,
+    output_path: str,
+    tempdir: str,
+    work_dir: str,
+    upscale: str,
+    steps: int,
+    guidance: float,
+    width: int,
+    height: int,
+    seam_inpaint: bool,
+) -> None:
+    clear_torch_cache(select_device())
+    script = os.path.abspath(__file__)
+    args = [
+        sys.executable,
+        script,
+        "--prompt",
+        prompt,
+        "--postprocess-input",
+        input_path,
+        "--postprocess-output",
+        output_path,
+        "--postprocess-tempdir",
+        tempdir,
+        "--work-dir",
+        work_dir,
+        "--upscale",
+        upscale or "none",
+        "--steps",
+        str(steps),
+        "--guidance",
+        str(guidance),
+        "--width",
+        str(width),
+        "--height",
+        str(height),
+    ]
+    if seam_inpaint:
+        args.append("--seam-inpaint")
+
+    print("→ Restarting Python for post-processing to release generation model memory…", flush=True)
+    os.execv(sys.executable, args)
+
+
 def generate(
     prompt: str,
     output_path: str,