akhaliq HF Staff commited on
Commit
3f6c6eb
·
verified ·
1 Parent(s): ca12aca

Deploy Gradio app with multiple files

Browse files
Files changed (3) hide show
  1. app.py +59 -0
  2. models.py +62 -0
  3. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from models import generate_image, MODEL_ID
3
+
4
def create_ui():
    """Build and return the Gradio Blocks UI for the demo.

    Returns:
        gr.Blocks: The assembled demo, ready for ``.queue()`` / ``.launch()``.
    """
    # NOTE: plain string here — the original used an f-string with no
    # placeholders, which is a lint-flagged no-op.
    with gr.Blocks(title="Tencent HunyuanImage-3.0 Demo") as demo:
        # Header / attribution banner.
        gr.HTML(
            f"<div style='text-align: center; max-width: 700px; margin: 0 auto;'>"
            f"<h1>Tencent {MODEL_ID.split('/')[-1]}</h1>"
            f"<p>Generate images using Tencent's state-of-the-art model hosted by FAL AI.</p>"
            f"Built with <a href='https://huggingface.co/spaces/akhaliq/anycoder' target='_blank'>anycoder</a>"
            f"</div>"
        )

        with gr.Row():
            with gr.Column(scale=1):
                prompt_input = gr.Textbox(
                    label="Prompt",
                    placeholder="e.g., Astronaut riding a horse, 4K, realistic photo, cinematic lighting",
                    lines=4
                )
                generate_btn = gr.Button("🎨 Generate Image", variant="primary")

            with gr.Column(scale=1):
                output_image = gr.Image(
                    label="Generated Image",
                    height=512,
                    width=512,
                    interactive=False,
                    show_download_button=True
                )

        # Set up the event listener
        generate_btn.click(
            fn=generate_image,
            inputs=[prompt_input],
            outputs=[output_image],
            # Use queue and concurrency for robustness when dealing with external APIs
            queue=True
        )

        # Example usage guidance
        gr.Examples(
            examples=[
                "A dramatic black and white photo of a futuristic motorcycle gang leader in a rainy city street.",
                "High quality photorealistic close-up portrait of an elderly wizard, highly detailed, dramatic lighting.",
                "A detailed watercolor painting of a small red fox sleeping on a pile of autumn leaves."
            ],
            inputs=prompt_input,
            outputs=output_image,
            fn=generate_image,
            cache_examples=False,
        )

    return demo
55
+
56
if __name__ == "__main__":
    # Build the UI, enable request queuing, then start the server.
    demo = create_ui()
    demo.queue()
    demo.launch()
models.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from huggingface_hub import InferenceClient
3
+ from PIL import Image
4
+ import gradio as gr
5
+ from typing import Union
6
+
7
# Load environment variables (useful for local testing)
from dotenv import load_dotenv

load_dotenv()

# --- Model Configuration ---
MODEL_ID = "tencent/HunyuanImage-3.0"
PROVIDER = "fal-ai"
BILL_TO = "huggingface"

# Token read from the environment; None when not configured.
HF_TOKEN = os.environ.get("HF_TOKEN")


def _build_client() -> Union[InferenceClient, None]:
    """Construct the shared InferenceClient, or return None when unavailable.

    Prints a status line in every case so the Space logs show whether the
    client came up, the token was missing, or construction failed.
    """
    if not HF_TOKEN:
        print("⚠️ HF_TOKEN environment variable not set. Client will be unavailable.")
        return None
    try:
        client = InferenceClient(
            provider=PROVIDER,
            api_key=HF_TOKEN,
            bill_to=BILL_TO,
        )
    except Exception as e:
        print(f"❌ Error initializing InferenceClient: {e}")
        return None
    print(f"✅ InferenceClient initialized for {MODEL_ID} via {PROVIDER}")
    return client


# Shared client instance used by generate_image(); None when init failed.
CLIENT: Union[InferenceClient, None] = _build_client()
34
def generate_image(prompt: str) -> Image.Image:
    """Generate an image from a text prompt using the HF Inference Client.

    Args:
        prompt: Text description of the image to generate.

    Returns:
        The generated image as a ``PIL.Image.Image``.

    Raises:
        gr.Error: If the client is unavailable, the prompt is empty or
            whitespace-only, or the upstream API call fails.
    """
    if not CLIENT:
        raise gr.Error("API client not available. Please ensure HF_TOKEN is set correctly.")

    # Reject empty AND whitespace-only prompts up front.
    if not prompt or not prompt.strip():
        raise gr.Error("Please provide a prompt.")

    print(f"Generating image for prompt: '{prompt[:50]}...'")

    try:
        # The output is a PIL.Image object directly
        image = CLIENT.text_to_image(
            prompt,
            model=MODEL_ID,
            # Optional parameters for better quality/control might be added here
            # e.g., negative_prompt="bad quality, low resolution",
        )
        return image
    except Exception as e:
        print(f"Error during image generation: {e}")
        # Map common API failures to friendlier messages. Compare
        # case-insensitively — provider error text varies in casing.
        msg = str(e)
        lowered = msg.lower()
        if "authentication failed" in lowered:
            raise gr.Error("Authentication failed. Check your HF_TOKEN.") from e
        if "limit reached" in lowered or "quota" in lowered:
            raise gr.Error("Rate limit or quota reached for this API endpoint.") from e
        raise gr.Error(f"Generation failed: {msg}") from e
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ huggingface-hub
3
+ Pillow
4
+ python-dotenv