sohojoe committed
Commit 5b46634
1 Parent(s): 950401a

tabbed examples

Files changed (4)
  1. app.py +80 -26
  2. images/371739.jpeg +0 -0
  3. images/452650.jpeg +0 -0
  4. images/557922.jpeg +0 -0
app.py CHANGED
@@ -228,19 +228,60 @@ tile_size = 100
 # image_folder = os.path.join("file", "images")
 image_folder ="images"
 
-image_examples = {
-    "Snoop": "Snoop Dogg.jpg",
-    "Ray": "Ray-Liotta-Goodfellas.jpg",
-    "Anya": "Anya Taylor-Joy 003.jpg",
-    "Billie": "billie eilish 004.jpeg",
-    "Lizzo": "Lizzo 001.jpeg",
-    "Donkey": "Donkey.jpg",
-    "SohoJoe": "SohoJoeEth.jpeg",
-    "Mirai": "Mirai.jpg",
-    "OnChainMonkey": "OnChainMonkey-2278.jpg",
-    "Wassie": "Wassie 4498.jpeg",
-    "Pup in TeaCup": "pup1.jpg",
+# image_examples = {
+# "452650": "452650.jpeg",
+# "Prompt 1": "a college dorm with a desk and bunk beds",
+# "371739": "371739.jpeg",
+# "Prompt 2": "a large banana is placed before a stuffed monkey.",
+# "557922": "557922.jpeg",
+# "Prompt 3": "a person sitting on a bench using a cell phone",
+
+# }
+
+tabbed_examples = {
+    "CoCo": {
+        "452650": "452650.jpeg",
+        "Prompt 1": "a college dorm with a desk and bunk beds",
+        "371739": "371739.jpeg",
+        "Prompt 2": "a large banana is placed before a stuffed monkey.",
+        "557922": "557922.jpeg",
+        "Prompt 3": "a person sitting on a bench using a cell phone",
+    },
+    "Portraits": {
+        "Snoop": "Snoop Dogg.jpg",
+        "Snoop Prompt": "Snoop Dogg",
+        "Ray": "Ray-Liotta-Goodfellas.jpg",
+        "Ray Prompt": "Ray Liotta, Goodfellas",
+        "Anya": "Anya Taylor-Joy 003.jpg",
+        "Anya Prompt": "Anya Taylor-Joy, The Queen's Gambit",
+        "Billie": "billie eilish 004.jpeg",
+        "Billie Prompt": "Billie Eilish, blonde hair",
+        "Lizzo": "Lizzo 001.jpeg",
+        "Lizzo Prompt": "Lizzo,",
+        "Donkey": "Donkey.jpg",
+        "Donkey Prompt": "Donkey, from Shrek",
+    },
+    "NFT's": {
+        "SohoJoe": "SohoJoeEth.jpeg",
+        "SohoJoe Prompt": "SohoJoe.Eth",
+        "Mirai": "Mirai.jpg",
+        "Mirai Prompt": "Mirai from White Rabbit, @shibuyaxyz",
+        "OnChainMonkey": "OnChainMonkey-2278.jpg",
+        "OCM Prompt": "On Chain Monkey",
+        "Wassie": "Wassie 4498.jpeg",
+        "Wassie Prompt": "Wassie by Wassies",
+    },
+    "Pups": {
+        "Pup1": "pup1.jpg",
+        "Prompt": "Teacup Yorkies",
+        "Pup2": "pup2.jpg",
+        "Pup3": "pup3.jpg",
+        "Pup4": "pup4.jpeg",
+        "Pup5": "pup5.jpg",
+    },
 }
+
+
 image_examples_tile_size = 50
 
 with gr.Blocks() as demo:
@@ -264,9 +305,9 @@ Try uploading a few images and/or add some text prompts and click generate image
     gr.Markdown("## Generates:")
     for example in examples:
         with gr.Row():
-            for image in example:
+            for example in example:
                 with gr.Column(scale=1, min_width=tile_size):
-                    local_path = os.path.join(image_folder, image)
+                    local_path = os.path.join(image_folder, example)
                     gr.Image(
                         value = local_path, shape=(tile_size,tile_size),
                         show_label=False, interactive=False) \
@@ -277,25 +318,37 @@ Try uploading a few images and/or add some text prompts and click generate image
         with gr.Tab(f"Input {i+1}"):
             with gr.Row():
                 with gr.Column(scale=1, min_width=240):
-                    input_images[i] = gr.Image()
+                    input_images[i] = gr.Image(label="Image Prompt", show_label=True)
                 with gr.Column(scale=3, min_width=600):
                     embedding_plots[i] = gr.LinePlot(show_label=False).style(container=False)
                     # input_image.change(on_image_load, inputs= [input_image, plot])
             with gr.Row():
                 with gr.Column(scale=2, min_width=240):
-                    input_prompts[i] = gr.Textbox()
+                    input_prompts[i] = gr.Textbox(label="Text Prompt", show_label=True)
                 with gr.Column(scale=3, min_width=600):
                     with gr.Accordion(f"Embeddings (base64)", open=False):
                         embedding_base64s[i] = gr.Textbox(show_label=False)
-            with gr.Row():
-                for idx, (title, image) in enumerate(image_examples.items()):
-                    local_path = os.path.join(image_folder, image)
-                    with gr.Column(scale=1, min_width=image_examples_tile_size):
-                        gr.Examples(
-                            examples=[local_path],
-                            inputs=input_images[i],
-                            label=title,
-                        )
+            for idx, (tab_title, examples) in enumerate(tabbed_examples.items()):
+                with gr.Tab(tab_title):
+                    with gr.Row():
+                        for idx, (title, example) in enumerate(examples.items()):
+                            if example.endswith(".jpg") or example.endswith(".jpeg"):
+                                # add image example
+                                local_path = os.path.join(image_folder, example)
+                                with gr.Column(scale=1, min_width=image_examples_tile_size):
+                                    gr.Examples(
+                                        examples=[local_path],
+                                        inputs=input_images[i],
+                                        label=title,
+                                    )
+                            else:
+                                # add text example
+                                with gr.Column(scale=1, min_width=image_examples_tile_size*2):
+                                    gr.Examples(
+                                        examples=[example],
+                                        inputs=input_prompts[i],
+                                        label=title,
+                                    )
 
     with gr.Row():
         average_embedding_plot = gr.LinePlot(show_label=True, label="Average Embeddings (base64)").style(container=False)
@@ -319,7 +372,8 @@ Try uploading a few images and/or add some text prompts and click generate image
     embedding_base64s_state = gr.State(value=[None for i in range(max_tabs)])
     for i in range(max_tabs):
         input_images[i].change(on_image_load_update_embeddings, input_images[i], [embedding_base64s[i]])
-        input_prompts[i].submit(on_prompt_change_update_embeddings, input_prompts[i], [embedding_base64s[i]])
+        # input_prompts[i].submit(on_prompt_change_update_embeddings, input_prompts[i], [embedding_base64s[i]])
+        input_prompts[i].change(on_prompt_change_update_embeddings, input_prompts[i], [embedding_base64s[i]])
         embedding_base64s[i].change(on_embeddings_changed_update_plot, embedding_base64s[i], [embedding_plots[i]])
         # embedding_plots[i].change(on_plot_changed, embedding_base64s[i], average_embedding_base64)
         # embedding_plots[i].change(on_embeddings_changed_update_average_embeddings, embedding_base64s[i], average_embedding_base64)
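
For reference, the tabbed-examples pattern this commit adds to app.py can be reduced to the self-contained sketch below. It is a minimal illustration under stated assumptions, not the app's exact code: it assumes Gradio 3.x (the .style(container=False) calls in the diff are a 3.x API), collapses the per-tab input_images[i] / input_prompts[i] lists into a single image and text input, keeps only the "CoCo" tab, and assumes the images/452650.jpeg file added in this commit is present.

# Minimal sketch of the tabbed-examples pattern (assumptions noted above).
import os

import gradio as gr

image_folder = "images"

# Tab title -> {example label: image filename or text prompt}
tabbed_examples = {
    "CoCo": {
        "452650": "452650.jpeg",
        "Prompt 1": "a college dorm with a desk and bunk beds",
    },
}

with gr.Blocks() as demo:
    input_image = gr.Image(label="Image Prompt", show_label=True)
    input_prompt = gr.Textbox(label="Text Prompt", show_label=True)

    for tab_title, examples in tabbed_examples.items():
        with gr.Tab(tab_title):
            with gr.Row():
                for title, example in examples.items():
                    if example.endswith((".jpg", ".jpeg")):
                        # Image example: clicking it loads the file into the image input.
                        with gr.Column(scale=1, min_width=50):
                            gr.Examples(
                                examples=[os.path.join(image_folder, example)],
                                inputs=input_image,
                                label=title,
                            )
                    else:
                        # Text example: clicking it fills the text prompt.
                        with gr.Column(scale=1, min_width=100):
                            gr.Examples(
                                examples=[example],
                                inputs=input_prompt,
                                label=title,
                            )

if __name__ == "__main__":
    demo.launch()

Dispatching on the filename extension is what lets a single dict mix image and text examples in one tab: anything ending in .jpg or .jpeg becomes an image example wired to the image input, and everything else is routed to the text prompt.
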
images/371739.jpeg ADDED
images/452650.jpeg ADDED
images/557922.jpeg ADDED
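
The final hunk of app.py also switches the text prompt's event from submit to change, so the embeddings textbox refreshes on every edit rather than only when Enter is pressed. Below is a hedged sketch of that wiring, again assuming Gradio 3.x; update_embedding is a hypothetical placeholder for the app's on_prompt_change_update_embeddings handler.

import gradio as gr

def update_embedding(prompt):
    # Placeholder: the real handler returns a base64-encoded embedding string.
    return f"embedding for: {prompt!r}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Text Prompt")
    embedding_b64 = gr.Textbox(label="Embeddings (base64)")

    # Before this commit: fired only when the user pressed Enter in the textbox.
    # prompt.submit(update_embedding, prompt, embedding_b64)

    # After this commit: fires on every edit, keeping the embedding box in sync.
    prompt.change(update_embedding, prompt, embedding_b64)

if __name__ == "__main__":
    demo.launch()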