# NOTE(review): removed Hugging Face Spaces page scrape residue (status text,
# commit-hash gutter, line-number gutter) that was accidentally pasted above
# the source and would break the Python parser.
import tempfile
import gradio as gr
from neon_tts_plugin_coqui import CoquiTTS
# Language codes offered in the UI: everything Coqui TTS supports, plus
# 'cn'/'jp' appended manually.
# NOTE(review): 'cn'/'jp' are not keys of CoquiTTS.langs -- confirm
# coquiTTS.get_tts accepts these codes before selecting them.
LANGUAGES = list(CoquiTTS.langs.keys())
LANGUAGES = LANGUAGES + ['cn', 'jp']
default_lang = "en"
#import whisper
#whisper_model = whisper.load_model("small")
#whisper = gr.Interface.load(name="spaces/abidlabs/whisper-large-v2")
# Remote Whisper transcription backend, loaded from a hosted Gradio Space.
whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
#cn_a_jp = gr.Blocks.load(name="spaces/Yusin/anime-tts_yusin")
#chatgpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
#chatgpt = gr.Blocks.load(name="spaces/seawolf2357/chatgptclone")
import os
import json
import openai
#session_token = os.environ.get('SessionToken')
# OpenAI API key comes from the environment; None if unset, in which case
# openai calls will fail at request time.
api_key = os.environ.get('api_key')
#if you have OpenAI API key as a string, enable the below
openai.api_key = api_key
title = "Speech to ChatGPT to Speech"
#info = "more info at [Neon Coqui TTS Plugin](https://github.com/NeonGeckoCom/neon-tts-plugin-coqui), [Coqui TTS](https://github.com/coqui-ai/TTS)"
#badge = "https://visitor-badge-reloaded.herokuapp.com/badge?page_id=neongeckocom.neon-tts-plugin-coqui"
# Text-to-speech engine used to voice the chatbot's replies.
coquiTTS = CoquiTTS()
# ChatGPT
def chat_hf(audio, custom_token, language):
    """Transcribe *audio* with Whisper, get a chatbot reply, and voice it.

    Parameters
    ----------
    audio : str
        Filepath of the recorded clip (output of the gradio Audio component).
    custom_token : str
        User-supplied session token (currently unused; kept so the UI wiring
        and callers remain unchanged).
    language : str
        Language code forwarded to the Coqui TTS speaker config.

    Returns
    -------
    tuple[str, str, str]
        (transcript, chatbot response, path of the generated WAV file).
    """
    # Pre-initialize so the return statement is safe even if transcription
    # fails (the original code re-called translate() inside the handler,
    # which could raise again and left whisper_text undefined on failure).
    whisper_text = ""
    try:
        whisper_text = translate(audio)
        if whisper_text == "ERROR: You have to either use the microphone or upload an audio file":
            gpt_response = "MISSING AUDIO: Record your voice by clicking the microphone button, do not forget to stop recording before sending your message ;)"
        else:
            #gpt_response = chatgpt(whisper_text, [], fn_index=0)
            #print(gpt_response)
            #gpt_response = gpt_response[0]
            gpt_response = openai_create(whisper_text)
    except Exception:
        # Best-effort fallback: any transcription/completion failure yields a
        # friendly apology instead of crashing the UI callback.
        gpt_response = """Sorry, I'm quite busy right now, but please try again later :)"""
    # to voice: delete=False so gradio can still read the WAV after the
    # context manager closes the handle.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        coquiTTS.get_tts(gpt_response, fp, speaker = {"language" : language})
    return whisper_text, gpt_response, fp.name
# whisper
#def translate(audio):
# print("""
# β
# Sending audio to Whisper ...
# β
# """)
#
# audio = whisper.load_audio(audio)
# audio = whisper.pad_or_trim(audio)
#
# mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
#
# _, probs = whisper_model.detect_language(mel)
#
# transcript_options = whisper.DecodingOptions(task="transcribe", fp16 = False)
#
# transcription = whisper.decode(whisper_model, mel, transcript_options)
#
# print("language spoken: " + transcription.language)
# print("transcript: " + transcription.text)
# print("βββββββββββββββββββββββββββββββββββββββββββ")
#
# return transcription.text
def translate(audio):
    """Forward *audio* to the hosted Whisper Space and return its transcript."""
    print("""
    β
    Sending audio to Whisper ...
    β
    """)
    # fn_index=0 targets the Space's transcription endpoint; None fills the
    # unused microphone slot of that endpoint's signature.
    transcript = whisper(audio, None, "transcribe", fn_index=0)
    print(transcript)
    return transcript
def openai_create(prompt):
    """Send *prompt* to the OpenAI completion endpoint and return the reply text."""
    completion = openai.Completion.create(
        model="text-chat-davinci-002-20221122",
        prompt=prompt,
        temperature=0.9,
        max_tokens=150,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
        stop=[" Human:", " AI:"]
    )
    reply = completion.choices[0].text
    print(reply)
    return reply
# UI layout: mic input + token textbox on the left, transcript/response/audio
# output on the right; Submit runs the full speech -> chat -> speech pipeline.
with gr.Blocks() as blocks:
    # Centered page title.
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>"
                + title
                + "</h1>")
    #gr.Markdown(description)
    # TTS output language selector (also passed through to chat_hf).
    radio = gr.Radio(label="Language", choices=LANGUAGES, value=default_lang)
    with gr.Row(equal_height=True):# equal_height=False
        with gr.Column():# variant="panel"
            audio_file = gr.Audio(source="microphone", type="filepath")
            custom_token = gr.Textbox(label='If it fails, use your own session token', placeholder="your own session token")
            with gr.Row():# mobile_collapse=False
                submit = gr.Button("Submit", variant="primary")
        with gr.Column():
            text1 = gr.Textbox(label="Speech to Text")
            text2 = gr.Textbox(label="ChatGPT Response")
            audio = gr.Audio(label="Output", interactive=False)
    #gr.Markdown(info)
    #gr.Markdown("<center>"
    #        +f'<img src={badge} alt="visitors badge"/>'
    #        +"</center>")
    # actions
    # Wire the Submit button: inputs (recording, token, language) -> chat_hf
    # -> outputs (transcript, response text, synthesized audio path).
    submit.click(
        chat_hf,
        [audio_file, custom_token, radio],
        [text1, text2, audio],
    )
    #radio.change(lambda lang: CoquiTTS.langs[lang]["sentence"], radio, text2)
# debug=True surfaces tracebacks in the Spaces runtime log.
blocks.launch(debug=True)