sahandkh1419 committed
Commit efc446d • Parent(s): f42c616
Create functions.py

functions.py ADDED (+95 -0)
@@ -0,0 +1,95 @@
import whisper
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from pydub import AudioSegment
from hezar.models import Model
import librosa
import soundfile as sf
from audio_separator.separator import Separator
from logging import ERROR
import streamlit as st
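
# --- Text similarity ---------------------------------------------------------
# Turns both strings into TF-IDF vectors and scores them by cosine similarity:
# 0 means no shared terms, 1 means identical term weights.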
def cosine_sim(text1, text2):
    vectorizer = TfidfVectorizer().fit_transform([text1, text2])
    vectors = vectorizer.toarray()
    return cosine_similarity(vectors)[0, 1]
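
# --- Challenge flow ----------------------------------------------------------
# Plays the reference clip, records the user singing, transcribes the take
# (OpenAI Whisper "base.en" for English, hezar's whisper-small-fa otherwise),
# and passes or fails the attempt at a 0.85 similarity threshold.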
def take_challenge(music_file, typed_lyrics, key, language, has_background=False, background_audio_path=None):
    st.write("Listen to the music, since you have to record 15 seconds after that")
    st.audio(music_file)
    if has_background:
        st.write("Play this music while singing; it might help you:")
        st.audio(background_audio_path)
    # Newer Streamlit releases rename this widget to st.audio_input.
    audio_value = st.experimental_audio_input("Sing the rest of the music: 🎙️", key=key)
    if audio_value:
        with open("user_sing.mp3", "wb") as f:
            f.write(audio_value.getbuffer())

        if has_background:
            # Index 1 is the secondary (vocals) output of split_vocals().
            file_to_transcribe = split_vocals("user_sing.mp3")[1]
        else:
            file_to_transcribe = "user_sing.mp3"

        if language == "en":
            english_model = whisper.load_model("base.en")
            user_lyrics = english_model.transcribe(file_to_transcribe, language=language)["text"]
        else:
            persian_model = Model.load("hezarai/whisper-small-fa")
            user_lyrics = persian_model.predict(file_to_transcribe)[0]["text"]

        st.write(user_lyrics)
        similarity_score = cosine_sim(typed_lyrics, user_lyrics)
        if similarity_score > 0.85:
            st.success('Awesome! You are doing great', icon="✅")
            st.markdown('<style>div.stAlert { background-color: rgba(3, 67, 24, 0.9); }</style>', unsafe_allow_html=True)
        else:
            st.error('Awful! Try harder next time', icon="🚨")
            st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
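
# --- Adding a number to a pydub AudioSegment applies a gain in dB:
# volume_factor=6 roughly doubles the amplitude, -6 roughly halves it.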
def change_volume(input_file, output_file, volume_factor):
    sound = AudioSegment.from_mp3(input_file)
    volume_changed = sound + volume_factor
    volume_changed.export(output_file, format="mp3")
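
# --- librosa.load() decodes the file (resampling to 22050 Hz by default);
# time_stretch changes tempo without affecting pitch (speed_factor > 1 is
# faster). Note that writing MP3 through soundfile requires libsndfile 1.1+.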
def change_speed(input_file, output_file, speed_factor):
    sound, sr = librosa.load(input_file)
    speed_changed = librosa.effects.time_stretch(sound, rate=speed_factor)
    sf.write(output_file, speed_changed, sr)
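
# --- pitch_shift moves the audio by pitch_factor semitones (positive = up)
# while keeping the duration unchanged.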
def change_pitch(input_file, output_file, pitch_factor):
    sound, sr = librosa.load(input_file)
    pitch_changed = librosa.effects.pitch_shift(sound, sr=sr, n_steps=pitch_factor)
    sf.write(output_file, pitch_changed, sr)
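
# --- Attenuates frequencies above cutoff_freq (Hz). pydub implements this as
# a simple single-pole filter, so the roll-off is gentle.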
def low_pass_filter(input_file, output_file, cutoff_freq):
    sound = AudioSegment.from_mp3(input_file)
    low_filtered_sound = sound.low_pass_filter(cutoff_freq)
    low_filtered_sound.export(output_file, format="mp3")
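
# --- Mirror image of the above: attenuates frequencies below cutoff_freq (Hz).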
def high_pass_filter(input_file, output_file, cutoff_freq):
    sound = AudioSegment.from_mp3(input_file)
    high_filtered_sound = sound.high_pass_filter(cutoff_freq)
    high_filtered_sound.export(output_file, format="mp3")
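
# --- pan_factor ranges from -1.0 (hard left) through 0 (center) to +1.0
# (hard right).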
def pan_left_right(input_file, output_file, pan_factor):
    sound = AudioSegment.from_mp3(input_file)
    pan_sound = sound.pan(pan_factor)
    pan_sound.export(output_file, format="mp3")
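
# --- fade_factor is a duration in milliseconds over which the clip ramps up
# from silence.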
def fade_in_ms(input_file, output_file, fade_factor):
    sound = AudioSegment.from_mp3(input_file)
    faded_sound = sound.fade_in(fade_factor)
    faded_sound.export(output_file, format="mp3")
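
# --- Same idea as fade_in_ms, but ramping down to silence at the end.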
def fade_out_ms(input_file, output_file, fade_factor):
    sound = AudioSegment.from_mp3(input_file)
    faded_sound = sound.fade_out(fade_factor)
    faded_sound.export(output_file, format="mp3")
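
# --- Splits a mix into instrumental and vocal stems with audio-separator;
# take_challenge() above consumes index 1 of the returned list as the vocals.
# Output names drop the ".mp3" suffix and append a stem tag.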
def split_vocals(input_file):
    separator = Separator(output_format="mp3", log_level=ERROR)
    separator.load_model("MGM_MAIN_v4.pth")
    result_list = separator.separate(input_file, primary_output_name=input_file[:-4]+"_instruments", secondary_output_name=input_file[:-4]+"_vocals")
    return result_list
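

# Hypothetical usage sketch (not part of the commit): the file names below are
# placeholders, and running it needs ffmpeg plus the libraries imported above.
if __name__ == "__main__":
    change_volume("song.mp3", "song_louder.mp3", volume_factor=6)     # +6 dB
    change_speed("song.mp3", "song_faster.mp3", speed_factor=1.25)    # 25% faster
    change_pitch("song.mp3", "song_higher.mp3", pitch_factor=2)       # +2 semitones
    low_pass_filter("song.mp3", "song_muffled.mp3", cutoff_freq=1000)
    print(cosine_sim("hello world", "hello there"))                   # float in [0, 1]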