| import spaces |
| import torch |
| import gradio as gr |
| from diffusers import FluxPipeline |
| from huggingface_hub import hf_hub_download, login |
| import random |
| import os |
|
|
| |
# --- HuggingFace authentication --------------------------------------------
# Read the access token from the HF_TOKEN environment variable (a Space
# secret) and log in so gated/private repositories can be downloaded.
hf_token = os.getenv("HF_TOKEN")
print(f"Token trouvé : {bool(hf_token)}")
if hf_token:
    login(token=hf_token)
    print("✅ Authentifié")


# Base text-to-image model; loaded inside generate() on every GPU call
# (ZeroGPU pattern: the pipeline only lives for the duration of one call).
model_id = "black-forest-labs/FLUX.1-schnell"


# Prompt pre-filled in the UI textbox.
DEFAULT_PROMPT = "A stunning portrait of a woman with flowing red hair, piercing green eyes, natural lighting, ultra detailed, 8k quality, professional photography"


# Module-level LoRA state shared between load_lora() and generate():
#   lora_repo — HF repository id of the currently loaded LoRA (None = no LoRA)
#   lora_path — local file path returned by hf_hub_download (None = no LoRA)
lora_repo = None
lora_path = None
|
|
def load_lora(repo_id, filename):
    """Download a LoRA weight file from a HuggingFace repository.

    On success, stores the downloaded path and the repository id in the
    module-level globals ``lora_path`` / ``lora_repo`` so that generate()
    will apply the LoRA on the next run.

    Args:
        repo_id: HuggingFace repository id (e.g. "XLabs-AI/flux-RealismLora").
        filename: Name of the weight file inside that repository.

    Returns:
        A human-readable status string (French, matching the rest of the UI).
    """
    global lora_repo, lora_path
    # Guard clause: both fields are required before attempting a download.
    # (Bug fix: the original returned a stray repository-id literal here
    # instead of an error message.)
    if not repo_id or not filename:
        return "❌ Veuillez renseigner le repository ET le nom du fichier."

    try:
        lora_path = hf_hub_download(repo_id=repo_id, filename=filename)
        lora_repo = repo_id
        # Bug fix: report the actual filename instead of the broken
        # "(unknown)" placeholder.
        return f"✅ LoRA chargé : {repo_id}/{filename}"
    except Exception as e:
        # Broad catch is deliberate: any hub/network error is surfaced to
        # the UI as a status message rather than crashing the app.
        return f"❌ Erreur : {str(e)}\n\nVérifiez que le repository contient bien ce fichier."
|
|
@spaces.GPU(duration=120)
def generate(prompt, negative_prompt="", width=1024, height=1024, steps=4, seed=-1, lora_scale=0.8):
    """Generate one image with FLUX.1-schnell (plus an optional fused LoRA).

    The pipeline is rebuilt on every call because @spaces.GPU only grants
    the GPU for the duration of this function (ZeroGPU pattern).

    Args:
        prompt: Positive text prompt.
        negative_prompt: Optional negative prompt (passed as None when empty;
            largely inert for schnell since guidance_scale is 0).
        width: Output width in pixels.
        height: Output height in pixels.
        steps: Number of inference steps (4 is the schnell recommendation).
        seed: RNG seed; -1 picks a random one.
        lora_scale: Strength used when fusing the loaded LoRA.

    Returns:
        The generated PIL image.

    Raises:
        gr.Error: wraps any underlying failure so Gradio displays it.
    """
    pipe = None
    try:
        pipe = FluxPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16
        )
        pipe.to("cuda")
        # Bug fix: the original also called pipe.enable_model_cpu_offload()
        # AFTER .to("cuda"); per the diffusers docs the two are mutually
        # exclusive — offloading re-places modules on CPU and defeats the
        # explicit CUDA placement. On ZeroGPU we keep the model resident.

        # Apply the LoRA previously downloaded by load_lora(), if any.
        if lora_repo and lora_path:
            pipe.load_lora_weights(lora_path)
            pipe.fuse_lora(lora_scale=lora_scale)

        # Resolve the seed so results are reproducible when seed != -1.
        if seed == -1:
            seed = random.randint(0, 2**32 - 1)
        generator = torch.Generator("cuda").manual_seed(seed)

        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt else None,
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=0.0,  # schnell is distilled for CFG-free sampling
            generator=generator,
            max_sequence_length=256
        ).images[0]

        return image

    except Exception as e:
        print(f"❌ Erreur de génération : {str(e)}")
        raise gr.Error(f"Erreur : {str(e)}")
    finally:
        # Always release GPU memory, even when generation fails
        # (the original skipped cleanup on the error path).
        if pipe is not None:
            del pipe
        torch.cuda.empty_cache()
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI: two-column layout — controls on the left, output on the right.
# All user-facing labels/markdown are in French by design.
# ---------------------------------------------------------------------------
with gr.Blocks(title="Flux Schnell + LoRA") as demo:
    gr.Markdown("# 🎨 Générateur Flux Schnell + LoRA")
    gr.Markdown(f"**Modèle :** `{model_id}`")

    with gr.Row():
        with gr.Column():
            # --- Optional LoRA loader ------------------------------------
            gr.Markdown("### 🎨 Ajouter un LoRA (optionnel)")
            gr.Markdown("""
            ⚠️ **Important** : Le modèle de base FLUX.1-schnell fonctionne **sans LoRA**.

            Si vous voulez ajouter un style avec un LoRA, utilisez des repositories communautaires :
            - `XLabs-AI/flux-RealismLora` (fichier: `lora.safetensors`)
            - `Shakker-Labs/FLUX.1-dev-LoRA-add-details` (fichier: `FLUX-dev-lora-add_details.safetensors`)
            """)

            lora_repo_input = gr.Textbox(
                label="Repository LoRA",
                placeholder="Ex: XLabs-AI/flux-RealismLora",
                value="Abzaloff/Flux_Art_Fusion"
            )
            lora_filename = gr.Textbox(
                label="Nom du fichier LoRA",
                placeholder="Ex: lora.safetensors",
                value="Flux_Art_fusion_v1_fp16_00001_.safetensors"
            )
            load_btn = gr.Button("Charger LoRA")
            # Read-only status box fed by load_lora()'s return value.
            lora_status = gr.Textbox(label="Status", interactive=False)

            # --- Generation parameters -----------------------------------
            gr.Markdown("### Paramètres de génération")
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Décrivez votre image...",
                lines=3,
                value=DEFAULT_PROMPT
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt (optionnel)",
                placeholder="blurry, low quality, distorted, ugly",
                lines=2
            )

            with gr.Row():
                # Resolution sliders move in 64-pixel steps.
                width = gr.Slider(512, 2048, 1024, step=64, label="Largeur")
                height = gr.Slider(512, 2048, 1024, step=64, label="Hauteur")

            with gr.Row():
                steps = gr.Slider(1, 10, 4, step=1, label="Steps")
                seed = gr.Number(label="Seed (-1 = aléatoire)", value=-1)
                lora_scale = gr.Slider(0, 1, 0.8, step=0.1, label="LoRA Scale")

            generate_btn = gr.Button("🚀 Générer", variant="primary")

        with gr.Column():
            # Output panel: generated image plus static usage tips.
            output_image = gr.Image(label="Image générée", type="pil")
            gr.Markdown("### 💡 Conseils")
            gr.Markdown("""
            - **Modèle de base** : FLUX.1-schnell (rapide, sans LoRA nécessaire)
            - **Steps recommandés** : 4 pour Schnell
            - **Résolution** : 1024x1024 par défaut
            - **LoRA Scale** : 0.8 pour un effet équilibré (ajustez entre 0-1)

            **Exemples de repositories LoRA compatibles :**
            - [XLabs-AI/flux-RealismLora](https://huggingface.co/XLabs-AI/flux-RealismLora)
            - [Shakker-Labs collections](https://huggingface.co/Shakker-Labs)
            """)

    # --- Event wiring --------------------------------------------------
    # "Charger LoRA" button -> load_lora(); result shown in lora_status.
    load_btn.click(
        fn=load_lora,
        inputs=[lora_repo_input, lora_filename],
        outputs=lora_status
    )

    # "Générer" button -> generate(); image shown in output_image.
    generate_btn.click(
        fn=generate,
        inputs=[prompt, negative_prompt, width, height, steps, seed, lora_scale],
        outputs=output_image
    )
|
|
| |
| if __name__ == "__main__": |
| demo.queue() |
| demo.launch( |
| server_name="0.0.0.0", |
| server_port=7860, |
| show_error=True, |
| quiet=False |
| ) |