RestEasyDiffusion/main.py

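"""Minimal REST service: POST a text prompt and receive a base64-encoded PNG
generated by an OpenVINO-accelerated Stable Diffusion XL pipeline."""
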
from flask import Flask, request, jsonify
from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionXLPipeline
import base64
import sys
from io import BytesIO
from PIL import Image

app = Flask(__name__)
# Alternative OpenVINO model ID on the Hugging Face Hub (currently unused)
OV_MODEL_DIR = "echarlaix/stable-diffusion-2-1-openvino"

# --- Model Loading ---
pipe = None
try:
    # Load a 1-step Hyper-SD SDXL model already exported to OpenVINO INT8
    pipe = OVStableDiffusionXLPipeline.from_pretrained(
        "rupeshs/hyper-sd-sdxl-1-step-openvino-int8",
        ov_config={"CACHE_DIR": ""},
    )
    print("Compiling OpenVINO pipeline...")
    pipe.compile()  # Compile the pipeline for the target device (CPU by default)
    print("OpenVINO pipeline compiled successfully.")
except Exception as e:
    print(f"An error occurred during OpenVINO model loading: {e}")
    sys.exit(1)
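
# Note: compilation targets the CPU by default. To aim at another OpenVINO
# device, one option (assuming such a device is visible to OpenVINO) is to
# call pipe.to("GPU") before pipe.compile().
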
@app.route('/api/generate', methods=['POST'])
def generate_image():
    data = request.get_json(silent=True) or {}
    prompt = data.get('prompt')
    if not prompt:
        return jsonify({"error": "Prompt is required in the request body."}), 400

    print(f"Generating image for prompt: '{prompt}'...")
    try:
        # 1-step distilled models such as Hyper-SD are meant to be sampled
        # with num_inference_steps=1; guidance_scale is kept at 1.0 because
        # distilled models need little or no classifier-free guidance.
        image = pipe(
            prompt=prompt,
            width=768,
            height=768,
            num_inference_steps=1,
            guidance_scale=1.0,
        ).images[0]

        # Downscale, then return the result as a base64-encoded PNG
        image = image.resize((128, 128), Image.LANCZOS)
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        return jsonify({"image": img_str})
    except Exception as e:
        print(f"Error during image generation: {e}")
        return jsonify({"error": "An error occurred during image generation."}), 500

if __name__ == '__main__':
    app.run(host='127.0.0.1', port=5000, debug=True)
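
# Example client sketch (assumes the `requests` package; run it while the
# server is up; the prompt text is illustrative):
#
#   import base64, requests
#   resp = requests.post(
#       "http://127.0.0.1:5000/api/generate",
#       json={"prompt": "a watercolor fox in the snow"},
#   )
#   with open("out.png", "wb") as f:
#       f.write(base64.b64decode(resp.json()["image"]))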