File size: 1,759 Bytes
9b4607e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import gradio as gr
import librosa
import numpy as np
import torch
from scipy.io import wavfile

# Assuming you have a model file for voice conversion
from model import load_model, convert_voice

# Load the pre-trained voice conversion model once at import time so every
# request reuses the same weights.
# NOTE(review): "path_to_pretrained_model" is a placeholder — replace with a
# real checkpoint path before running; load_model's contract lives in model.py.
model = load_model("path_to_pretrained_model")  # Adjust this based on the actual RVC model

def voice_conversion(source_audio, target_voice):
    """Convert the voice in *source_audio* to the style of *target_voice*.

    Args:
        source_audio: Path to the input audio file (as supplied by Gradio).
        target_voice: Identifier of the target voice style, forwarded to
            ``convert_voice`` (e.g. "Voice1").

    Returns:
        Path to the written WAV file containing the converted audio.
    """
    # librosa.load returns mono float32 and resamples to 22050 Hz by default.
    y, sr = librosa.load(source_audio)
    # Add a batch dimension -> shape (1, T), as the model presumably expects.
    input_audio = torch.from_numpy(y).unsqueeze(0)

    # Inference only — disable autograd to avoid building a graph.
    with torch.no_grad():
        converted_audio = convert_voice(model, input_audio, target_voice)

    # Move to CPU numpy and drop the batch dimension added above.
    converted_audio_np = converted_audio.detach().cpu().numpy().squeeze()

    # librosa.output.write_wav was removed in librosa 0.8.0; write the WAV
    # with scipy instead (float32 data produces a 32-bit float WAV).
    output_file = "output_converted.wav"
    wavfile.write(output_file, sr, converted_audio_np.astype(np.float32))

    return output_file

# Define the Gradio interface
# Define the Gradio interface
def infer(source_audio, target_voice):
    """Gradio handler: delegate straight to the conversion pipeline."""
    return voice_conversion(source_audio, target_voice)

# Gradio interface with inputs and outputs
iface = gr.Interface(
    fn=infer,
    inputs=[
        # NOTE(review): `source=` was renamed to `sources=[...]` in Gradio 4.x —
        # this form targets Gradio 3.x; confirm the installed version.
        gr.Audio(source="microphone", type="filepath", label="Source Audio"),
        gr.Dropdown(["Voice1", "Voice2", "Voice3"], label="Target Voice"),  # Dropdown for target voice options
    ],
    # `type="file"` was deprecated and removed from gr.Audio; "filepath" is the
    # supported equivalent and matches the path string that `infer` returns.
    outputs=gr.Audio(type="filepath", label="Converted Audio"),
    title="Retrieval-based Voice Conversion",
    description="Convert voice from a source audio to a target voice style.",
)

if __name__ == "__main__":
    iface.launch()