waveydaveygravy committed on
Commit
5d574d7
1 Parent(s): 042afc7

Update apphf.py

Browse files
Files changed (1) hide show
  1. apphf.py +16 -16
apphf.py CHANGED
@@ -395,14 +395,14 @@ with gr.Blocks() as demo:
395
  a2v_botton = gr.Button("Generate", variant="primary")
396
  a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
397
 
398
- gr.Examples(
399
- examples=[
400
- ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/Aragaki.png", None],
401
- ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/solo.png", None],
402
- ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
403
- ],
404
- inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video],
405
- )
406
 
407
 
408
  with gr.Tab("Video2video"):
@@ -423,14 +423,14 @@ with gr.Blocks() as demo:
423
  v2v_botton = gr.Button("Generate", variant="primary")
424
  v2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
425
 
426
- gr.Examples(
427
- examples=[
428
- ["configs/inference/ref_images/Aragaki.png", "configs/inference/video/Aragaki_song.mp4"],
429
- ["configs/inference/ref_images/solo.png", "configs/inference/video/Aragaki_song.mp4"],
430
- ["configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
431
- ],
432
- inputs=[v2v_ref_img, v2v_source_video, a2v_headpose_video],
433
- )
434
 
435
  a2v_botton.click(
436
  fn=audio2video,
 
395
  a2v_botton = gr.Button("Generate", variant="primary")
396
  a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
397
 
398
+ #gr.Examples(
399
+ #examples=[
400
+ # ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/Aragaki.png", None],
401
+ # ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/solo.png", None],
402
+ # ["configs/inference/audio/lyl.wav", "configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
403
+ # ],
404
+ #inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video],
405
+ #)
406
 
407
 
408
  with gr.Tab("Video2video"):
 
423
  v2v_botton = gr.Button("Generate", variant="primary")
424
  v2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
425
 
426
+ #gr.Examples(
427
+ # examples=[
428
+ # ["configs/inference/ref_images/Aragaki.png", "configs/inference/video/Aragaki_song.mp4"],
429
+ # ["configs/inference/ref_images/solo.png", "configs/inference/video/Aragaki_song.mp4"],
430
+ # ["configs/inference/ref_images/lyl.png", "configs/inference/head_pose_temp/pose_ref_video.mp4"],
431
+ # ],
432
+ # inputs=[v2v_ref_img, v2v_source_video, a2v_headpose_video],
433
+ # )
434
 
435
  a2v_botton.click(
436
  fn=audio2video,