auto-git:

[add] scripts/.DS_Store
 [add] scripts/README.md
 [add] scripts/extract_texture_filename_from_3ds.py
 [add] scripts/generate_3d_glb.py
 [add] scripts/generate_json.py
 [add] scripts/image_from_json.py
 [add] scripts/naming.py
 [add] scripts/openai_image_gen.py
 [add] scripts/remesh_bake_batch.py
 [add] server/public/assets/images/.DS_Store
 [add] server/public/assets/models/spirits/.DS_Store
 [change] server/public/assets/.DS_Store
 [change] server/public/assets/models/.DS_Store
This commit is contained in:
Victor Giers
2025-12-04 08:39:04 +01:00
parent 30c2d45648
commit 0961bbccbf
13 changed files with 757 additions and 0 deletions

BIN
scripts/.DS_Store vendored Normal file

Binary file not shown.

9
scripts/README.md Normal file
View File

@@ -0,0 +1,9 @@
# Scripts Overview
- `extract_texture_filename_from_3ds.py`: Parses a `.3ds` binary and lists referenced texture filenames.
- `generate_3d_glb.py`: For each PNG in `images/`, calls the `tencent/hunyuan3d-2` model via `synexa`, downloads `textured_mesh.glb`, and saves it locally.
- `generate_json.py`: Loads `wesen.json`, fuzzy-maps names to a hardcoded model list, and writes `spirit_list_out.json` with `Model URL` fields (German console messages).
- `image_from_json.py`: For each entry in a JSON list, asks an OpenAI chat model for an image prompt, then calls the Image API to generate/download images (configurable CLI).
- `naming.py`: Matches `.webp` images in `webp/` to entries in `spirit_list.json` (or their model filenames), adds `Image URL` fields, and writes `spirit_list_with_images.json`.
- `openai_image_gen.py`: Simple CLI wrapper around the OpenAI Image API to generate and download images from a prompt.
- `remesh_bake_batch.py`: Blender automation: imports a GLB, QuadRemesher remeshes it, auto-UVs, bakes diffuse/normal maps, exports a remeshed GLB plus PNG bake outputs.

View File

@@ -0,0 +1,61 @@
import struct
import sys
def extract_3ds_texture_paths(three_ds_path):
    """
    Reads a .3ds file and returns a list of referenced texture filenames.

    A .3ds file is a tree of chunks: a 6-byte header (u16 id, u32 total
    length, both little-endian) followed by data and/or sub-chunks.
    Texture names live in MAT_MAPNAME (0xA300) chunks nested inside
    0x4D4D -> 0x3D3D -> 0xAFFF -> 0xA200, so container chunks must be
    descended into rather than skipped. (Skipping the first chunk, 0x4D4D,
    would seek past the entire file and find nothing.)

    Args:
        three_ds_path (str): Path to the .3ds file.

    Returns:
        List[str]: Texture filenames referenced in the .3ds file.
    """
    # Chunk ids whose payload is a sequence of sub-chunks: keep reading
    # inside them instead of seeking past their data.
    container_ids = {
        0x4D4D,  # main chunk (spans the whole file)
        0x3D3D,  # 3D editor chunk
        0xAFFF,  # material block
        0xA200,  # texture map 1
    }
    paths = []
    with open(three_ds_path, 'rb') as f:
        while True:
            header = f.read(6)
            if len(header) < 6:
                break
            chunk_id, chunk_len = struct.unpack('<HI', header)
            data_len = chunk_len - 6
            if chunk_id == 0xA300:  # Mapping Filename (ASCIIZ string)
                name_bytes = b''
                consumed = 0
                # Read until null terminator, but never past this chunk.
                while consumed < data_len:
                    c = f.read(1)
                    consumed += 1
                    if not c or c == b'\x00':
                        break
                    name_bytes += c
                try:
                    name = name_bytes.decode('ascii')
                except UnicodeDecodeError:
                    name = name_bytes.decode('latin-1')
                paths.append(name)
                # Skip any leftover bytes in this chunk.
                if data_len > consumed:
                    f.seek(data_len - consumed, 1)
            elif chunk_id in container_ids:
                # Descend: the next read picks up the first sub-chunk.
                continue
            else:
                # Leaf chunk we don't care about: skip its data.
                f.seek(data_len, 1)
    return paths
def main():
    """CLI entry point: print the texture names referenced by a .3ds file."""
    args = sys.argv
    if len(args) != 2:
        print(f"Usage: python {args[0]} <path/to/model.3ds>")
        sys.exit(1)
    textures = extract_3ds_texture_paths(args[1])
    if not textures:
        print("No texture filenames found in the .3ds file.")
        return
    print("Referenced textures:")
    for tex in textures:
        print(f"- {tex}")


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,71 @@
import os
import requests
import synexa
# Configuration
INPUT_DIR = "images"  # local folder with your .png files
BASE_URL = "https://www.victorgiers.com/shinto"  # public base URL where the input images are hosted
MODEL_NAME = "tencent/hunyuan3d-2"  # image-to-3D model invoked via synexa
TIMEOUT = 180  # seconds; used for the model run wait and the GLB download
def process_image_file(filename: str):
    """Generate a textured GLB for one hosted PNG via the hunyuan3d-2 model.

    Skips non-PNG files. Builds the public URL for the image, runs the
    model through synexa, locates the ``textured_mesh.glb`` output, and
    downloads it next to the script as ``<basename>.glb``. Errors are
    reported per-file and never raised.

    Note: the original file contained literal "(unknown)" placeholders in
    several f-strings (image URL and log messages); these were clearly
    meant to interpolate ``filename`` and have been restored.
    """
    # 1. Build the full URL for the input image
    base_name, ext = os.path.splitext(filename)
    if ext.lower() != ".png":
        return  # skip non-png files
    image_url = f"{BASE_URL}/{filename}"
    output_filename = f"{base_name}.glb"
    print(f"\n→ Processing {filename}")
    # 2. Run the model with extended timeout
    try:
        response_list = synexa.run(
            MODEL_NAME,
            input={
                "seed": 1234,
                "image": image_url,
                "steps": 5,
                "caption": "",
                "shape_only": False,
                "guidance_scale": 5.5,
                "multiple_views": [],
                "check_box_rembg": True,
                "octree_resolution": "256"
            },
            wait=TIMEOUT
        )
    except Exception as e:
        print(f" ⚠️ Model run failed for {filename}: {e}")
        return
    # 3. Find the textured_mesh.glb URL among the returned file objects
    textured_url = None
    for fo in response_list:
        url = getattr(fo, "url", "")
        if url.endswith("textured_mesh.glb"):
            textured_url = url
            break
    if not textured_url:
        print(f" ⚠️ No textured_mesh.glb found in response for {filename}")
        return
    # 4. Download and save
    print(f" ↓ Downloading textured mesh → {output_filename}")
    try:
        dl = requests.get(textured_url, timeout=TIMEOUT)
        dl.raise_for_status()
        with open(output_filename, "wb") as out_file:
            out_file.write(dl.content)
        print(f" ✅ Saved {output_filename}")
    except Exception as e:
        print(f" ⚠️ Download failed for {filename}: {e}")
def main():
    # Feed every entry of INPUT_DIR to the processor; non-PNG files are
    # filtered out inside process_image_file. Adjust INPUT_DIR to a full
    # path if the script is not run from its own directory.
    for entry in os.listdir(INPUT_DIR):
        process_image_file(entry)


if __name__ == "__main__":
    main()

100
scripts/generate_json.py Normal file
View File

@@ -0,0 +1,100 @@
import json
import re
from difflib import get_close_matches
# Hardcoded inventory of GLB files available under /assets/models/spirits/.
# Fuzzy name matching (see find_best_model) resolves spirit names against
# these basenames; numeric suffixes denote alternate versions of a model.
MODEL_FILES = [
    "Ebisu.glb","Enenra.glb","Enenra2.glb","Oboroguruma.glb","Oiwa.glb","Okiku.glb","Okomeki.001.glb",
    "Okuninushi.glb","Oni.glb","Onryo.glb","Oyamatsumi.001.glb","Raijin.glb","Rokurokubi.glb","Ryujin.glb",
    "Sarutahiko_Okami.glb","Shinigami.001.glb","Shuten_Doji.glb","Sojobo.glb","Sojobo2.glb","Susanoo.glb",
    "Takeminakata.glb","Takeminakata2.001.glb","Tanuki.glb","Tengu.glb","Tenjin.glb","Tsukumogami.glb",
    "Tsukuyomi_No_Mikoto.glb","Tsurube_Otoshi.glb","Tsurube_Otoshi2.glb","Tsurube_Otoshi3.glb","Tsurube_Otoshi4.glb",
    "Ubume.glb","Yama_Uba.glb","Yama_Uba2.glb","Yamata_No_Orichi.glb","Yamawaro.glb","Yatagarasu2.glb",
    "Yuki_Onna.glb","Yurei.glb","Abe_No_Seimei.glb","Abura_Akago.glb","Abura_Sumashi.glb","Abura_Sumashi2.glb",
    "Aka_Manto.glb","Akaname.glb","Akateko2.glb","Akkorokamui.glb","Akuchu.glb","Amabie2.glb","Amanojaku.glb",
    "Amaterasu.glb","Ame_No_Uzume.001.glb","Amenominakanushi.glb","Aoandon.001.glb","Aoandon2.001.glb",
    "Ashiari_Yashiki.glb","Ashinaga_Tenaga2.glb","Azukiarai.glb","Azukibabaa.glb","Azukihakari.glb",
    "Bake_Kujira.glb","Bake_Kujira2.glb","Bake_Kujira3.glb","Bakezori.glb","Baku.glb","Basan.glb",
    "Benzaiten.glb","Betobeto_San.glb","Bishamonten.glb","Biwa_Bokuboku.glb","Chochin_Obake.glb","Daidarabotchi.glb",
    "Daikokuten+Text.glb","Daikokuten.glb","Fujin.glb","Funayurei.glb","Furaribi.glb","Futakuchi_Onna.glb",
    "Gaki.glb","Gashadokuro.glb","Hachiman.glb","Hiderigami.001.glb","Hitotsume_Kozo.glb","Hoko.glb",
    "Inari_Okami.glb","Ittan_Momen2.glb","Izanagi_No_Mikoto.glb","Izanami_No_Mikoto.glb","Jikininki.glb",
    "Jorogumo3.glb","Kamaitachi.glb","Kamikiri.glb","Kappa.glb","Karakasa_Obake.glb","Karakasa_Obake2.glb",
    "Kitsune.glb","Kodama.glb","Kudan.glb","Mizushi.glb","Mokumokuren.glb","Mujina.glb","Nekomata.glb",
    "Noppera_Bo.glb","Nue.glb","Nuppeppo2.glb","Nurarihyon.glb","Nure_Onna.glb","Nurikabe.glb","Nurikabe2.glb"
]
def normalize(s):
    """Lowercase *s*, drop non-alphanumerics, and collapse 'ou' to 'o'.

    The 'ou' -> 'o' collapse smooths over romanization differences such
    as "YamatanoOrOchi" vs "Yamata_No_Orichi".
    """
    stripped = re.sub(r"[^a-z0-9]", "", s.lower())
    return stripped.replace("ou", "o")
def generate_candidates(spirit_name):
    """Return normalized name candidates for fuzzy model matching.

    Takes the leading token of *spirit_name* (cut at the first whitespace
    or opening parenthesis) and expands it with separator variants and
    the common "...No_Mikoto" honorific suffix; all candidates are
    normalized and de-duplicated.

    Bug fixed: the original split pattern r"\\s|\\(|" carried a trailing
    '|' (an empty alternative), making re.split() match between every
    character, so element [0] was always "" and every candidate collapsed
    to the empty string — which then substring-matched every model.
    """
    latin = re.split(r"[\s(]", spirit_name)[0]
    candidates = [latin]
    candidates += [
        latin.replace("-", "_"),
        latin.replace("-", ""),
        latin.replace("_", ""),
        latin.title(),
        latin.upper(),
    ]
    if not latin.endswith("NoMikoto"):
        candidates.append(latin + "NoMikoto")
        candidates.append(latin + "_No_Mikoto")
    return list(set(normalize(c) for c in candidates))
def find_best_model(spirit_name):
    """Return a sorted list of model filenames plausibly matching *spirit_name*.

    A model counts as a hit when any candidate is within Levenshtein
    distance 2 of its normalized basename, or when one string contains
    the other. If nothing hits, fall back to difflib's closest matches
    (at most 3, cutoff 0.6); that fallback may return an empty list.
    """
    candidates = generate_candidates(spirit_name)
    model_names = [f[:-4] for f in MODEL_FILES]
    normalized_models = [normalize(n) for n in model_names]
    hits = set()
    for cand in candidates:
        for idx, norm_name in enumerate(normalized_models):
            close_enough = levenshtein(cand, norm_name) <= 2
            if close_enough or cand in norm_name or norm_name in cand:
                hits.add(MODEL_FILES[idx])
    if hits:
        return sorted(hits)
    fallback = get_close_matches(candidates[0], normalized_models, n=3, cutoff=0.6)
    return [MODEL_FILES[normalized_models.index(m)] for m in fallback]
def levenshtein(a, b):
    """Compute the Levenshtein edit distance between strings *a* and *b*.

    Two-row dynamic-programming implementation:
    O(len(a) * len(b)) time, O(len(b)) extra space.
    """
    if a == b:
        return 0
    if not a:
        return len(b)
    if not b:
        return len(a)
    prev = list(range(len(b) + 1))
    curr = [0] * (len(b) + 1)
    for i, ch_a in enumerate(a):
        curr[0] = i + 1
        for j, ch_b in enumerate(b):
            substitute = prev[j] + (ch_a != ch_b)
            curr[j + 1] = min(curr[j] + 1, prev[j + 1] + 1, substitute)
        prev, curr = curr, prev
    return prev[len(b)]
def main():
    """Map each spirit in wesen.json to model files and write the result.

    Exactly one match yields one output entry; several matches yield one
    entry per candidate model; no match yields one entry with an empty
    "Model URL". Console messages are German, as in the original tooling.
    """
    with open("wesen.json", encoding="utf-8") as f:
        spirits = json.load(f)

    output = []
    for spirit in spirits:
        name = spirit.get("Name", "")
        matches = find_best_model(name)
        if not matches:
            print(f"[!] Kein Modell gefunden für '{name}'!")
        elif len(matches) > 1:
            print(f"\n[?] Mehrere mögliche Modelle für '{name}': {matches}")
        for model in matches or [None]:
            entry = spirit.copy()
            entry["Model URL"] = "" if model is None else "/assets/models/spirits/" + model
            output.append(entry)

    with open("spirit_list_out.json", "w", encoding="utf-8") as f:
        json.dump(output, f, ensure_ascii=False, indent=2)
    print("\nFERTIG. Ergebnis: spirit_list_out.json")


if __name__ == "__main__":
    main()

188
scripts/image_from_json.py Normal file
View File

@@ -0,0 +1,188 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import json
import requests
import urllib.parse
def generate_image_prompt(entity_json: dict, chat_model: str, api_key: str) -> str:
    """Build an image prompt for one entity via an OpenAI chat model.

    Sends the entity's JSON, prefixed by a (German) instruction text, to
    the chat completions endpoint and returns the first choice's message
    content, stripped of surrounding whitespace. Raises on HTTP errors.
    """
    endpoint = "https://api.openai.com/v1/chat/completions"
    pretext = (
        "Ich schicke dir nun einen JSON-Abschnitt, der ein japanisches spirituelles Wesen beschreibt."
        " Du wirst das Internet bemühen, um nach Darstellungen und weitere Beschreibung der äußeren Erscheinung dieses Wesens zu finden."
        " Mit all diesen Informationen generierst du dann ein Bild von dem Wesen im Stil von moderner Low-Poly 3D-Grafik,"
        " ohne Hintergrund, nur das Wesen selbst. Das Wesen soll vollständig auf dem Bild dargestellt sein, nicht abgeschnitten."
        " Es soll dafür geeignet sein, ein 3D Objekt daraus zu bauen."
        " Hier der JSON-Abschnitt:"
    )
    user_message = f"{pretext}\n{json.dumps(entity_json, ensure_ascii=False)}"
    resp = requests.post(
        endpoint,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}",
        },
        json={
            "model": chat_model,
            "messages": [{"role": "user", "content": user_message}],
            "temperature": 0.7,
        },
    )
    resp.raise_for_status()
    # Assumption (as in the original): the prompt is in the first choice.
    return resp.json()["choices"][0]["message"]["content"].strip()
def generate_and_download_image(prompt: str,
                                image_model: str,
                                api_key: str,
                                count: int,
                                size: str,
                                fmt: str,
                                base_output: str):
    """Generate images with the OpenAI Image API and download them.

    Args:
        prompt: Text prompt for the image model.
        image_model: Image model name (e.g. "dall-e-3").
        api_key: OpenAI API key.
        count: Number of images to request.
        size: Image size string, e.g. "1024x1024".
        fmt: Response format, "url" or "b64_json".
        base_output: Base output filename; "_<idx>" suffixes are added
            when count > 1. May be falsy, in which case the filename is
            derived from the image URL.

    Bug fixed: the success message was a broken f-string printing the
    literal text "(unknown)" instead of interpolating ``filename``.
    """
    url = "https://api.openai.com/v1/images/generations"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": image_model,
        "prompt": prompt,
        "n": count,
        "size": size,
        "response_format": fmt,
    }
    resp = requests.post(url, headers=headers, json=payload)
    resp.raise_for_status()
    data = resp.json().get("data", [])
    for idx, item in enumerate(data, start=1):
        if fmt == "url":
            img_url = item.get("url")
            try:
                img_resp = requests.get(img_url)
                img_resp.raise_for_status()
            except requests.RequestException as e:
                print(f"Fehler beim Herunterladen des Bildes #{idx}: {e}", file=sys.stderr)
                continue
            # Determine the output filename.
            if base_output:
                base, ext = os.path.splitext(base_output)
                # Fall back to the URL's extension if base_output has none.
                ext = ext or os.path.splitext(urllib.parse.urlparse(img_url).path)[1]
                filename = f"{base}_{idx}{ext}" if count > 1 else base_output
            else:
                path = urllib.parse.urlparse(img_url).path
                filename = os.path.basename(path)
            with open(filename, "wb") as f:
                f.write(img_resp.content)
            print(f"Bild gespeichert: {filename}")
        else:
            # Write the base64 payload straight to a text file.
            b64 = item.get("b64_json")
            out_name = f"{base_output or 'image'}_{idx}.b64.txt"
            with open(out_name, "w") as f:
                f.write(b64)
            print(f"Base64 in Datei geschrieben: {out_name}")
def main():
    """CLI entry point: read entities from a JSON list, build a prompt per
    entity via the chat model, then generate and download image(s)."""
    parser = argparse.ArgumentParser(
        description="Generiere Bild-Prompts aus JSON und erstelle Bilder via OpenAI API"
    )
    parser.add_argument(
        "--api_key", "-k",
        help="OpenAI API-Schlüssel (alternativ ENV OPENAI_API_KEY)",
    )
    # NOTE(review): "o4-mini-high" may not be a valid API model id — confirm.
    parser.add_argument(
        "--chat_model", "-c",
        default="o4-mini-high",
        help="Modell für die Prompt-Generierung (z.B. 'o4-mini-high')",
    )
    parser.add_argument(
        "--image_model", "-m",
        default="dall-e-3",
        help="Modell für die Bild-Generierung (z.B. 'dall-e-3')",
    )
    parser.add_argument(
        "--input", "-i",
        required=True,
        help="Pfad zu JSON-Datei mit einer Liste von Entity-Objekten",
    )
    parser.add_argument(
        "--count", "-n",
        type=int,
        default=1,
        help="Anzahl Bilder pro Entity",
    )
    parser.add_argument(
        "--size", "-s",
        choices=["256x256", "512x512", "1024x1024"],
        default="1024x1024",
        help="Bildgröße",
    )
    parser.add_argument(
        "--format", "-f",
        choices=["url", "b64_json"],
        default="url",
        help="Antwort-Format",
    )
    parser.add_argument(
        "--output", "-o",
        help="Basis-Ausgabe-Dateiname oder Verzeichnis (Suffixe _1,_2 werden ergänzt)",
    )
    args = parser.parse_args()
    # API key: CLI argument takes precedence over the environment variable.
    api_key = args.api_key or os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("Error: API-Schlüssel fehlt. Nutze --api_key oder setze OPENAI_API_KEY.", file=sys.stderr)
        sys.exit(1)
    # Read the JSON input file.
    try:
        with open(args.input, "r", encoding="utf-8") as f:
            entities = json.load(f)
    except Exception as e:
        print(f"Fehler beim Laden der JSON-Datei: {e}", file=sys.stderr)
        sys.exit(1)
    if not isinstance(entities, list):
        print("Error: Die JSON-Datei muss eine Liste von Objekten enthalten.", file=sys.stderr)
        sys.exit(1)
    # Generate a prompt and create image(s) for each entity.
    for idx, entity in enumerate(entities, start=1):
        name_safe = entity.get("Name", f"entity_{idx}").replace(" ", "_")
        print(f"Verarbeite: {entity.get('Name', name_safe)}")
        prompt = generate_image_prompt(entity, args.chat_model, api_key)
        print(f"Generierter Prompt: {prompt}\n")
        base_out = None
        if args.output:
            # If the output target is a directory, place the file inside it.
            if os.path.isdir(args.output):
                base_out = os.path.join(args.output, f"{name_safe}.png")
            else:
                base_out = f"{os.path.splitext(args.output)[0]}_{name_safe}.png"
        generate_and_download_image(
            prompt=prompt,
            image_model=args.image_model,
            api_key=api_key,
            count=args.count,
            size=args.size,
            fmt=args.format,
            base_output=base_out,
        )


if __name__ == "__main__":
    main()

66
scripts/naming.py Normal file
View File

@@ -0,0 +1,66 @@
import os
import json
import re
from difflib import get_close_matches
# ---- Configuration ----
image_dir = "webp"  # folder containing the .webp images to match
json_path = "spirit_list.json"  # input spirit list
output_path = "spirit_list_with_images.json"  # output with added "Image URL" fields
image_url_prefix = "/assets/images/spirits/"  # public URL prefix prepended to matched filenames
# --- Helper: normalize names so they can be compared ---
def norm(s):
    """Lowercase *s* and strip every character that is not a-z or 0-9."""
    return re.sub(r'[^a-z0-9]+', '', s.lower())
# ---- Read image files & normalize their names ----
image_files = [f for f in os.listdir(image_dir) if f.lower().endswith('.webp')]
norm2file = {norm(os.path.splitext(f)[0]): f for f in image_files}

# ---- Read the spirit JSON ----
with open(json_path, "r", encoding="utf-8") as f:
    spirits = json.load(f)

matched = 0
notfound = []
for entry in spirits:
    # Prefer the Model URL basename as the match key; fall back to Name.
    base = None
    if "Model URL" in entry and entry["Model URL"]:
        base = os.path.splitext(os.path.basename(entry["Model URL"]))[0]
    if not base and "Name" in entry:
        base = entry["Name"]
    if not base:
        notfound.append(entry)
        continue
    base_norm = norm(base)
    # Try a direct mapping first.
    if base_norm in norm2file:
        entry["Image URL"] = image_url_prefix + norm2file[base_norm]
        matched += 1
        continue
    # Fuzzy match if no direct hit.
    candidates = get_close_matches(base_norm, norm2file.keys(), n=1, cutoff=0.7)
    if candidates:
        file_name = norm2file[candidates[0]]
        entry["Image URL"] = image_url_prefix + file_name
        # NOTE(review): this message prints base and file name with no
        # separator; a separator (e.g. an arrow) may have been lost — confirm.
        print(f"Fuzzy: {base}{file_name}")
        matched += 1
    else:
        print(f"Kein Bild gefunden für: {base}")
        notfound.append(entry)

# --- Write the new JSON ---
with open(output_path, "w", encoding="utf-8") as f:
    json.dump(spirits, f, indent=2, ensure_ascii=False)

print(f"{matched} von {len(spirits)} Einträgen mit Bild gematcht.")
print(f"Nicht gefunden: {len(notfound)}")
if notfound:
    for entry in notfound:
        print(" -", entry.get("Name", "???"))

112
scripts/openai_image_gen.py Normal file
View File

@@ -0,0 +1,112 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import requests
import urllib.parse
def main():
    """CLI wrapper around the OpenAI Image API: generate images from a
    prompt and download them (or print their base64 payloads).

    Bug fixed: the success message was a broken f-string printing the
    literal text "(unknown)" instead of interpolating ``filename``.
    """
    parser = argparse.ArgumentParser(
        description="Bilder mit der OpenAI Image API generieren und herunterladen"
    )
    parser.add_argument(
        "--api_key", "-k",
        help="OpenAI API-Schlüssel (alternativ über OPENAI_API_KEY)"
    )
    parser.add_argument(
        "--model", "-m",
        default="dall-e-2",
        help="Modellname (z.B. 'gpt-image-1' oder 'dall-e-2')"
    )
    parser.add_argument(
        "--prompt", "-p",
        required=True,
        help="Text-Prompt für die Bildgenerierung"
    )
    parser.add_argument(
        "--count", "-n",
        type=int,
        default=1,
        help="Anzahl der Bilder"
    )
    parser.add_argument(
        "--size", "-s",
        choices=["256x256", "512x512", "1024x1024"],
        default="1024x1024",
        help="Bildgröße"
    )
    parser.add_argument(
        "--format", "-f",
        choices=["url", "b64_json"],
        default="url",
        help="Format der Antwort"
    )
    parser.add_argument(
        "--output", "-o",
        help="Zieldatei für das heruntergeladene Bild (bei mehreren: Suffix _1,_2 etc.)"
    )
    args = parser.parse_args()
    # API key: argument first, then environment variable.
    api_key = args.api_key or os.getenv("OPENAI_API_KEY")
    if not api_key:
        print("Error: Bitte gib einen API-Schlüssel via --api_key an oder setze OPENAI_API_KEY.", file=sys.stderr)
        sys.exit(1)
    # Build the request.
    url = "https://api.openai.com/v1/images/generations"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": args.model,
        "prompt": args.prompt,
        "n": args.count,
        "size": args.size,
        "response_format": args.format,
    }
    # Send the request.
    try:
        response = requests.post(url, headers=headers, json=payload)
        response.raise_for_status()
    except requests.RequestException as e:
        print(f"API-Request fehlgeschlagen: {e}", file=sys.stderr)
        sys.exit(1)
    # Evaluate the response and download if necessary.
    data = response.json().get("data", [])
    for i, item in enumerate(data, start=1):
        if args.format == "url":
            img_url = item.get('url')
            print(f"[{i}] Bild-URL: {img_url}")
            # Download the image.
            try:
                img_resp = requests.get(img_url)
                img_resp.raise_for_status()
            except requests.RequestException as e:
                print(f"Fehler beim Herunterladen des Bildes: {e}", file=sys.stderr)
                continue
            # Determine the output filename.
            if args.output:
                base, ext = os.path.splitext(args.output)
                filename = f"{base}_{i}{ext}" if args.count > 1 else args.output
            else:
                path = urllib.parse.urlparse(img_url).path
                filename = os.path.basename(path)
            # Write the file.
            with open(filename, 'wb') as f:
                f.write(img_resp.content)
            print(f"Bild gespeichert: {filename}")
        else:
            # Print the base64 payload directly.
            b64 = item.get('b64_json')
            print(f"[{i}] Bild (Base64):\n{b64}\n")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,150 @@
import bpy
import sys
import os
import math
# ----------- Argument Handling -----------
# Blender passes its own CLI options first; everything after "--" is ours.
argv = sys.argv
if "--" not in argv:
    print("ERROR: No arguments passed. Usage: blender --background --python remesh_bake_batch.py -- /path/to/input.glb [/path/to/output.glb]")
    sys.exit(1)
argv = argv[argv.index("--") + 1:]
# First positional argument: input GLB. Optional second: output path,
# defaulting to "<input>_remesh.glb" next to the input.
input_path = os.path.abspath(argv[0])
output_path = os.path.abspath(argv[1]) if len(argv) > 1 else os.path.splitext(input_path)[0] + "_remesh.glb"
# ----------- Scene Cleanup -----------
# Delete every object, then purge leftover datablocks so the import starts
# from an empty file (avoids name collisions like "Material.001").
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.delete()
for block in bpy.data.meshes: bpy.data.meshes.remove(block)
for block in bpy.data.materials: bpy.data.materials.remove(block)
for block in bpy.data.images: bpy.data.images.remove(block)
for block in bpy.data.textures: bpy.data.textures.remove(block)
for block in bpy.data.lights: bpy.data.lights.remove(block)
for block in bpy.data.cameras: bpy.data.cameras.remove(block)
# ----------- Import GLB -----------
print(f"Importing {input_path}...")
bpy.ops.import_scene.gltf(filepath=input_path)
objs = [o for o in bpy.context.scene.objects if o.type == 'MESH']
if not objs:
    print("ERROR: No mesh objects found in the imported file.")
    sys.exit(1)
# Only the first mesh object is processed; any additional meshes are ignored.
high = objs[0]
# ----------- Optional: Center object and apply transforms -----------
bpy.context.view_layer.objects.active = high
bpy.ops.object.select_all(action='DESELECT')
high.select_set(True)
# Bake location/rotation/scale into the mesh data so the remesher and
# exporter see identity transforms.
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
# ----------- Remesh via QuadRemesher -----------
# NOTE: You need QuadRemesher installed & activated in your Blender install!
bpy.ops.object.select_all(action='DESELECT')
high.select_set(True)
bpy.context.view_layer.objects.active = high
bpy.ops.qremesher.remesh()
# Wait for new mesh to appear
# QuadRemesher adds its result as a new scene object; poll at 10 Hz for up
# to max_wait seconds until it shows up.
# NOTE(review): 'before' is captured AFTER bpy.ops.qremesher.remesh() was
# invoked. This only detects the result if the operator creates it
# asynchronously — confirm; if it is synchronous, the new object is already
# in 'before' and is never found.
import time
max_wait = 60
before = set(bpy.context.scene.objects)
for t in range(max_wait * 10):
    after = set(bpy.context.scene.objects)
    new_objs = [o for o in after - before if o.type == 'MESH']
    if new_objs:
        # Heuristic: the object with the shortest name is assumed to be
        # the primary remesh result (least name-suffixing).
        low = sorted(new_objs, key=lambda o: len(o.name))[0]
        break
    time.sleep(0.1)
else:
    print("ERROR: Remeshed object not found after 60s.")
    sys.exit(1)
# ----------- UV Mapping & Packing -----------
bpy.ops.object.select_all(action='DESELECT')
low.select_set(True)
bpy.context.view_layer.objects.active = low
# Drop any UV layers the remesher may have produced before re-unwrapping.
while low.data.uv_layers:
    low.data.uv_layers.remove(low.data.uv_layers[0])
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
# Auto-unwrap with a 66° angle limit, then pack the islands tightly.
bpy.ops.uv.smart_project(angle_limit=math.radians(66), island_margin=0.03)
bpy.ops.uv.pack_islands(margin=0.003)
bpy.ops.object.mode_set(mode='OBJECT')
# ----------- Material & Bake Setup -----------
# Fresh Principled BSDF material on the low-poly mesh; baked textures are
# written into image texture nodes of this material.
mat = bpy.data.materials.new(f"{low.name}_BakeMat")
mat.use_nodes = True
low.data.materials.clear()
low.data.materials.append(mat)
nodes = mat.node_tree.nodes
links = mat.node_tree.links
nodes.clear()
out = nodes.new('ShaderNodeOutputMaterial'); out.location = (300, 0)
bsdf = nodes.new('ShaderNodeBsdfPrincipled'); bsdf.location = (0, 0)
links.new(bsdf.outputs['BSDF'], out.inputs['Surface'])
# --- Diffuse Image ---
# 1024x1024 target image for the diffuse bake.
diff = nodes.new('ShaderNodeTexImage')
diff.name = diff.label = "Diffuse"
diff.location = (-400, 200)
img_diff = bpy.data.images.new(f"{low.name}_Diffuse", 1024, 1024)
diff.image = img_diff
scene = bpy.context.scene
scene.render.engine = 'CYCLES'
scene.cycles.use_bake_selected_to_active = False  # Only bake from itself!
scene.cycles.bake_margin = 16
scene.cycles.bake_type = 'DIFFUSE'
# Color-only diffuse bake: exclude direct and indirect light contributions.
scene.cycles.use_bake_direct = False
scene.cycles.use_bake_indirect = False
scene.cycles.use_bake_color = True
bpy.ops.object.select_all(action='DESELECT')
low.select_set(True)
bpy.context.view_layer.objects.active = low
# The bake writes into the material's active (selected) image texture node.
for n in nodes: n.select = False
diff.select = True; nodes.active = diff
bpy.ops.object.bake(type='DIFFUSE')
# Link the baked diffuse into Base Color only after the bake has filled it.
links.new(diff.outputs['Color'], bsdf.inputs['Base Color'])
# --- Normal Image ---
# Second 1024x1024 target image for the tangent-space normal bake.
norm_img = bpy.data.images.new(f"{low.name}_Normal", 1024, 1024)
norm = nodes.new('ShaderNodeTexImage')
norm.name = norm.label = "Normal"
norm.location = (-400, -200)
norm.image = norm_img
scene.cycles.bake_type = 'NORMAL'
scene.cycles.normal_space = 'TANGENT'
# Make the normal image node the active bake target and bake again.
for n in nodes: n.select = False
norm.select = True; nodes.active = norm
bpy.ops.object.bake(type='NORMAL')
# Wire the baked normal map through a Normal Map node into the BSDF.
nm_node = nodes.new('ShaderNodeNormalMap')
nm_node.location = (-150, -200)
nm_node.inputs['Strength'].default_value = 0.5  # soften baked detail
links.new(norm.outputs['Color'], nm_node.inputs['Color'])
links.new(nm_node.outputs['Normal'], bsdf.inputs['Normal'])
# Final material look tweaks applied after both bakes.
bsdf.inputs['Metallic'].default_value = 1.0
bsdf.inputs['Roughness'].default_value = 0.95
# ----------- Export as GLB -----------
print(f"Exporting {output_path}...")
# NOTE(review): 'export_selected' was removed from the glTF exporter in
# newer Blender versions (replaced by 'use_selection') — confirm the target
# Blender version still accepts it.
bpy.ops.export_scene.gltf(filepath=output_path, export_format='GLB', export_selected=False)
print("✅ Done.")
# ----------- Optional: Save Baked Images Externally -----------
# Write the baked images next to the exported GLB as
# "<output>_diffuse.png" and "<output>_normal.png".
img_diff.filepath_raw = os.path.splitext(output_path)[0] + "_diffuse.png"
img_diff.file_format = 'PNG'
img_diff.save()
norm_img.filepath_raw = os.path.splitext(output_path)[0] + "_normal.png"
norm_img.file_format = 'PNG'
norm_img.save()
print("✅ Images saved.")

Binary file not shown.

BIN
server/public/assets/images/.DS_Store vendored Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.