MohamedTalaat91 commited on
Commit
cb4f194
1 Parent(s): c9cec50

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -12
app.py CHANGED
@@ -52,29 +52,31 @@ flux_generator = FluxGenerator()
52
  @spaces.GPU
53
  @torch.inference_mode()
54
  def generate_image(
55
- prompt,
56
  id_image,
57
  start_step,
58
  guidance,
59
  seed,
60
- style,
61
  true_cfg,
62
  width=896,
63
  height=1152,
64
  num_steps=20,
65
  id_weight=1.0,
66
- neg_prompt="bad quality, worst quality, text, signature, watermark, extra limbs , nudity ,Blurred face, low-quality details, exaggerated facial expressions, unrealistic skin texture, distorted proportions",
67
  timestep_to_start_cfg=1,
68
  max_sequence_length=128,
69
  ):
70
- flux_generator.t5.max_length = max_sequence_length
71
- prompt , neg_prompt= apply_prompt(prompt, prompt, neg_prompt)
 
 
72
  seed = int(seed)
73
  if seed == -1:
74
  seed = None
75
 
76
  opts = SamplingOptions(
77
- prompt=prompt,
78
  width=width,
79
  height=height,
80
  num_steps=num_steps,
@@ -84,6 +86,8 @@ def generate_image(
84
 
85
  if opts.seed is None:
86
  opts.seed = torch.Generator(device="cpu").seed()
 
 
87
  print(f"Generating '{opts.prompt}' with seed {opts.seed}")
88
  t0 = time.perf_counter()
89
 
@@ -165,12 +169,18 @@ def generate_image(
165
  flux_generator.pulid_model.debug_img_list)
166
 
167
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    """Merge the user prompt into a named style template.

    Looks up *style_name* in the module-level ``styles`` mapping (falling back
    to ``DEFAULT_STYLE_NAME`` when absent), substitutes *positive* into the
    template's ``{prompt}`` placeholder, and appends *negative* to the
    template's negative prompt.

    Returns:
        A ``(positive_prompt, negative_prompt)`` pair of strings.
    """
    # NOTE(review): assumes styles values are (positive, negative) 2-tuples — confirm.
    template_pos, template_neg = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    merged_pos = template_pos.replace("{prompt}", positive)
    merged_neg = f"{template_neg} {negative}"
    return merged_pos, merged_neg
170
-
171
def apply_prompt(prompt: str, positive: str, negative: str = "") -> tuple[str, str]:
    """Merge the user prompt into a named prompt template.

    Looks up *prompt* in the module-level ``prompt_dict`` (falling back to
    ``DEFAULT_PROMPT_NAME`` when absent), substitutes *positive* into the
    template's ``{prompt}`` placeholder, and appends *negative* to the
    template's negative prompt.

    Returns:
        A ``(positive_prompt, negative_prompt)`` pair of strings.
    """
    # NOTE(review): assumes prompt_dict values are (positive, negative) 2-tuples — confirm.
    template_pos, template_neg = prompt_dict.get(prompt, prompt_dict[DEFAULT_PROMPT_NAME])
    merged_pos = template_pos.replace("{prompt}", positive)
    merged_neg = f"{template_neg} {negative}"
    return merged_pos, merged_neg
 
 
 
 
 
 
174
 
175
 
176
 
 
52
  @spaces.GPU
53
  @torch.inference_mode()
54
  def generate_image(
55
+ prompt_name,
56
  id_image,
57
  start_step,
58
  guidance,
59
  seed,
60
+ style_name,
61
  true_cfg,
62
  width=896,
63
  height=1152,
64
  num_steps=20,
65
  id_weight=1.0,
66
+ neg_prompt="bad quality, worst quality, text, signature, watermark, extra limbs",
67
  timestep_to_start_cfg=1,
68
  max_sequence_length=128,
69
  ):
70
+ # Fetch and apply prompt and style
71
+ pos_prompt, neg_prompt = apply_prompt(prompt_name)
72
+ pos_prompt, neg_prompt = apply_style(style_name, pos_prompt, neg_prompt)
73
+
74
  seed = int(seed)
75
  if seed == -1:
76
  seed = None
77
 
78
  opts = SamplingOptions(
79
+ prompt=pos_prompt,
80
  width=width,
81
  height=height,
82
  num_steps=num_steps,
 
86
 
87
  if opts.seed is None:
88
  opts.seed = torch.Generator(device="cpu").seed()
89
+
90
+ # Log generation details
91
  print(f"Generating '{opts.prompt}' with seed {opts.seed}")
92
  t0 = time.perf_counter()
93
 
 
169
  flux_generator.pulid_model.debug_img_list)
170
 
171
def apply_style(style_name: str, positive: str, negative: str = "") -> tuple[str, str]:
    """Merge the user prompt into a named style template.

    Looks up *style_name* in the module-level ``styles`` mapping, falling back
    to ``DEFAULT_STYLE_NAME`` when the name is unknown. The entry is expected
    to be a dict with ``'prompt'`` and ``'negative_prompt'`` keys (as the
    lookups below show); *positive* replaces the ``{prompt}`` placeholder in
    the positive template, and *negative* is appended to the negative one.

    Args:
        style_name: Key into ``styles``.
        positive: User-supplied positive prompt text.
        negative: Extra negative prompt text to append (may be empty).

    Returns:
        A ``(positive_prompt, negative_prompt)`` pair of strings.
    """
    # Fetch the style from the styles dict (unknown names fall back to default).
    style_data = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    pos_style = style_data['prompt'].replace("{prompt}", positive)
    # Join only non-empty parts so an empty `negative` does not leave a
    # dangling trailing space in the result.
    neg_style = ' '.join(part for part in (style_data['negative_prompt'], negative) if part)
    return pos_style, neg_style
177
+
178
def apply_prompt(prompt_name: str, positive: str = "", negative: str = "") -> tuple[str, str]:
    """Merge the user prompt into a named prompt template.

    Looks up *prompt_name* in the module-level ``prompt_dict``, falling back
    to ``DEFAULT_PROMPT_NAME`` when the name is unknown. The entry is expected
    to be a dict with ``'prompt'`` and ``'negative_prompt'`` keys (as the
    lookups below show); *positive* replaces the ``{prompt}`` placeholder in
    the positive template, and *negative* is appended to the negative one.

    Args:
        prompt_name: Key into ``prompt_dict``.
        positive: User-supplied positive prompt text (may be empty).
        negative: Extra negative prompt text to append (may be empty).

    Returns:
        A ``(positive_prompt, negative_prompt)`` pair of strings.
    """
    # Fetch the prompt from the prompt_dict (unknown names fall back to default).
    prompt_data = prompt_dict.get(prompt_name, prompt_dict[DEFAULT_PROMPT_NAME])
    pos_prompt = prompt_data['prompt'].replace("{prompt}", positive)
    # Join only non-empty parts so an empty `negative` does not leave a
    # dangling trailing space in the result.
    neg_prompt = ' '.join(part for part in (prompt_data['negative_prompt'], negative) if part)
    return pos_prompt, neg_prompt
184
 
185
 
186