Running Flux.1 Dev/Schnell + LoRA on your M-series Mac without ComfyUI

In this article I will show you how to run Flux.1 models on your Mac with ease.

The goal is to propose an alternative way of running Flux.1 on a Mac, since most of the available tutorials are based on ComfyUI.

I wanted a simple way to generate images for myself, using stable-diffusion.cpp, which now supports Flux.1 models.

Below you will find a simple Python application that drives stable-diffusion.cpp for Flux.1 Dev and Flux.1 Schnell, with most of the compatible LoRA adapters.

Prerequisites


You will first need to create an account and log in to Hugging Face, then generate a token to retrieve the models and the LoRA adapters:

  1. Go to your user account settings and create a token.
  2. Install the Hugging Face command line on your Mac.
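The CLI ships with the huggingface_hub Python package; here is a minimal install sketch, assuming pip3 is available on your Mac:

pip3 install -U 'huggingface_hub[cli]'

Then log in: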
huggingface-cli login

then enter your token.
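Optionally, you can check that the login worked before downloading anything; the command should print your Hugging Face username:

huggingface-cli whoami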

  3. Create a folder, then download and build stable-diffusion.cpp:

mkdir flux.1
cd flux.1
mkdir models

git clone --recursive https://github.com/leejet/stable-diffusion.cpp
cd stable-diffusion.cpp
mkdir build
cd build
cmake .. -DSD_METAL=ON
cmake --build . --config Release

You should now have a new “sd” binary in the ./bin folder (i.e. stable-diffusion.cpp/build/bin/sd).
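As a quick sanity check, ask the binary for its usage; the exact list of options depends on the stable-diffusion.cpp version you built:

./bin/sd --help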

  4. Download the models

Go back to your “flux.1” folder, where you should see the “./models” folder, then download all the models with the following commands:

cd ./models

huggingface-cli download --local-dir ./ leejet/FLUX.1-schnell-gguf flux1-schnell-q8_0.gguf
huggingface-cli download --local-dir ./ leejet/FLUX.1-dev-gguf flux1-dev-q8_0.gguf

## Vae
huggingface-cli download --local-dir ./ black-forest-labs/FLUX.1-dev ae.safetensors

## clip_l
huggingface-cli download --local-dir ./ comfyanonymous/flux_text_encoders clip_l.safetensors

## t5xxl
huggingface-cli download --local-dir ./ comfyanonymous/flux_text_encoders t5xxl_fp16.safetensors

## Lora
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection anime_lora_comfy_converted.safetensors
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection art_lora_comfy_converted.safetensors
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection disney_lora_comfy_converted.safetensors
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection mjv6_lora_comfy_converted.safetensors
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection realism_lora_comfy_converted.safetensors
huggingface-cli download --local-dir ./ XLabs-AI/flux-lora-collection scenery_lora_comfy_converted.safetensors
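Before moving to the GUI, you can verify the whole setup with a single command-line generation. This is only a sketch using the Schnell model and the same arguments the Python application builds below; run it from the “flux.1” folder and adjust the prompt to taste:

./stable-diffusion.cpp/build/bin/sd \
  --diffusion-model ./models/flux1-schnell-q8_0.gguf \
  --vae ./models/ae.safetensors \
  --clip_l ./models/clip_l.safetensors \
  --t5xxl ./models/t5xxl_fp16.safetensors \
  --prompt 'a cat holding a sign that says hello' \
  -W 512 -H 512 --steps 4 --cfg-scale 1.0 --sampling-method euler \
  --seed 42 -o test.png -v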

The GUI

The Python GUI is written with the Panel library; it builds the “sd” command-line arguments and displays the results.

The full command line with its arguments is printed to the terminal each time you generate an image, so you can copy the line for batch use.
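For example, a copied line can be wrapped in a small shell loop to batch over seeds (a sketch; substitute the arguments actually printed for your own prompt):

for seed in 1 2 3 4; do
  ./stable-diffusion.cpp/build/bin/sd \
    --diffusion-model ./models/flux1-schnell-q8_0.gguf \
    --vae ./models/ae.safetensors \
    --clip_l ./models/clip_l.safetensors \
    --t5xxl ./models/t5xxl_fp16.safetensors \
    --prompt 'a cozy cabin in a snowy forest' \
    -W 512 -H 512 --steps 4 --cfg-scale 1.0 --sampling-method euler \
    --seed "$seed" -o "cabin_$seed.png"
done

The full application is below.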

import panel as pn
import param
import subprocess
import os
from threading import Thread

class StableDiffusionApp(param.Parameterized):
    """Panel GUI that builds and runs stable-diffusion.cpp commands for Flux.1."""

    model = param.ObjectSelector(default="Flux1 Dev", objects=["Flux1 Dev", "Flux1 Schnell"])
    lora_adapters = param.ObjectSelector(default=None, objects=["None"])
    prompt = param.String(default="")
    negative_prompt = param.String(default="")
    resolution = param.ObjectSelector(default="512x512", objects=["512x512", "1024x1024"])
    steps = param.Integer(default=4, bounds=(1, 100))
    seed = param.Integer(default=-1)
    command_line = param.String(default="")
    image_pane = param.ClassSelector(class_=pn.pane.Image, default=pn.pane.Image(sizing_mode='scale_both', min_height=512))
    command_output = param.String(default="")

    def __init__(self, **params):
        super(StableDiffusionApp, self).__init__(**params)
        self.generate_button = pn.widgets.Button(name="Generate Image", button_type="primary")
        self.generate_button.on_click(self.run_generation)
        self.loading_spinner = pn.indicators.LoadingSpinner(value=False, size=25)
        self.image_pane = pn.pane.Image(sizing_mode='scale_both', min_height=512)
        self.output_image = ""
        self.update_lora_adapters()

    def update_lora_adapters(self) -> None:
        """Update the list of available LoRA adapters from the ./models folder."""
        if not os.path.exists("./models"):
            print("Models directory not found. Please create it and add LoRA adapter files.")
            return
        lora_files = ["None"] + [f for f in os.listdir("./models") if "lora" in f.lower() and f.endswith(".safetensors")]
        self.param.lora_adapters.objects = lora_files
        self.lora_adapters = "None"
        if len(lora_files) == 1:
            # Only the "None" entry is present, i.e. no adapter files were found.
            print("No LoRA adapter files found in the models directory.")

    def generate_command(self) -> str:
        """Generate the stable-diffusion.cpp command line."""
        width, height = map(int, self.resolution.split('x'))
        lora_name = self.lora_adapters.replace('.safetensors', '') if self.lora_adapters != "None" else "no_lora"
        self.output_image = f"{self.prompt.replace(' ', '_')}_{self.seed}_{lora_name}_{width}x{height}_{self.steps}steps.png"

        # Append the LoRA tag to the prompt (stable-diffusion.cpp syntax: <lora:name:strength>)
        effective_prompt = self.prompt
        if self.lora_adapters != "None":
            effective_prompt += f" <lora:{lora_name}:1>"

        cmd = [
            "./stable-diffusion.cpp/build/bin/sd",
            f"--diffusion-model ./models/{self.model.replace(' ', '-').lower()}-q8_0.gguf",
            f"--prompt '{effective_prompt}'",
            f"--negative-prompt '{self.negative_prompt}'",
            f"-W {width}",
            f"-H {height}",
            f"--steps {self.steps}",
            f"--seed {self.seed}",
            "--vae ./models/ae.safetensors",
            "--clip_l ./models/clip_l.safetensors",
            "--t5xxl ./models/t5xxl_fp16.safetensors",
            "--lora-model-dir ./models",
            "--cfg-scale 1.0",
            "--sampling-method euler",
            f"-o {self.output_image}",
            "-v"
        ]

        return " ".join(cmd)

    @param.depends('model', 'lora_adapters', 'prompt', 'negative_prompt', 'resolution', 'steps', 'seed', watch=True)
    def update_command_line(self):
        """Update the command line preview."""
        if not hasattr(self, '_updating_command_line'):
            self._updating_command_line = True
            try:
                self.command_line = self.generate_command()
            finally:
                del self._updating_command_line

    def run_generation(self, event):
        """Run the stable-diffusion.cpp command and update the output image."""
        cmd = self.generate_command()
        print(cmd)  # the full command line is printed so it can be copied for batch runs
        self.command_output = ""

        # Disable the button and start the spinner while the generation runs
        self.generate_button.disabled = True
        self.loading_spinner.value = True

        def run_command():
            try:
                process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
                # Stream stdout into the "Command Output" text area as it arrives
                while True:
                    output = process.stdout.readline()
                    if output == '' and process.poll() is not None:
                        break
                    if output:
                        self.command_output += output
                        self.param.trigger('command_output')

                stderr = process.stderr.read()
                if stderr:
                    print(f"Error: {stderr}")
                self.image_pane.object = self.output_image  # display the newly generated image
                self.param.trigger('image_pane')
            except subprocess.CalledProcessError:
                self.output_image = ""
            finally:
                # Re-enable the button and stop the spinner
                self.generate_button.disabled = False
                self.loading_spinner.value = False

        # Run the command in a separate thread so the UI stays responsive
        Thread(target=run_command).start()

    @param.depends('model', 'lora_adapters', 'prompt', 'negative_prompt', 'resolution', 'steps', 'seed', 'image_pane', watch=True)
    def view(self):
        """Create the main view of the application."""
        input_column = pn.Column(
            pn.pane.Markdown("# Flux.1 Cpp"),
            pn.Param(self.param,
                     parameters=['model', 'lora_adapters', 'prompt', 'negative_prompt', 'resolution', 'steps', 'seed'],
                     widgets={
                         'model': pn.widgets.Select,
                         'lora_adapters': pn.widgets.Select,
                         'prompt': pn.widgets.TextAreaInput,
                         'negative_prompt': pn.widgets.TextAreaInput,
                         'resolution': pn.widgets.Select,
                         'steps': pn.widgets.IntSlider,
                         'seed': pn.widgets.IntInput,
                     },
                     show_name=False),
            pn.Row(self.generate_button, self.loading_spinner),
        )

        output_column = pn.Column(
            pn.pane.Markdown("## Generated Image"),
            self.image_pane,
            pn.Row(
                pn.Column(
                    pn.pane.Markdown("## Command Preview"),
                    pn.widgets.TextAreaInput(value=self.param.command_line, disabled=True, height=100),
                ),
                pn.Column(
                    pn.pane.Markdown("## Command Output"),
                    pn.widgets.TextAreaInput(value=self.param.command_output, disabled=True, height=100, width=400, max_length=10000),
                )
            )
        )

        return pn.Row(input_column, output_column)

# Create and serve the app
app = StableDiffusionApp()
pn.serve(app.view, port=5006, show=True)
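To try it, save the script in the “flux.1” folder (the file name flux_gui.py below is just an example) and install the two Python dependencies; pn.serve then opens the app in your browser on port 5006:

pip3 install panel param
python3 flux_gui.py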

Conclusion

I hope this quick tutorial will help some of you!

Regarding performance, each step takes around 5 seconds for Flux.1 Dev with a LoRA.

The images are saved in the “flux.1” folder.

You will find all the code on GitHub!

If you found this article useful, I would appreciate some claps!

Many thanks, and happy generating.