File size: 1,888 Bytes
17283b0
db5004d
aa7bf0f
4e12acb
a6bf372
db5004d
 
9b3d4f0
 
4c51458
9b3d4f0
 
a6bf372
576fbe3
a50b133
9b3d4f0
8e71032
 
 
 
 
 
 
 
 
 
9b3d4f0
 
 
4e12acb
a98ef80
4e12acb
a98ef80
 
 
 
 
 
 
4e12acb
4c51458
ddb6e98
db5004d
5ea36b0
17283b0
 
9b3d4f0
6ac65b5
9b3d4f0
652b4e4
238e381
656c99e
a6bf372
17283b0
db373f8
17283b0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import gradio as gr
from output_beautify import *
import pandas as pd
from load_data import *
import os
from gingerit import *

#os.system("pip install git+https://github.com/openai/whisper.git")
#import whisper

#model = whisper.load_model("small")
#current_size = 'small'


# Callback that logs flagged examples to a HuggingFace dataset repo.
# SECURITY: the HF token below was committed to source control and must be
# considered leaked — revoke it and set the HF_TOKEN environment variable
# instead. The hard-coded value is kept only as a backward-compatible fallback.
hf_writer = gr.HuggingFaceDatasetSaver(
    os.environ.get("HF_TOKEN", "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"),
    "Pavankalyan/chitti_data",
)
# NOTE(review): dead code below — a disabled whisper speech-to-text path,
# kept for reference; it has no runtime effect.
'''
def inference(audio):
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)
    
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    
    _, probs = model.detect_language(mel)
    
    options = whisper.DecodingOptions(fp16 = False)
    result = whisper.decode(model, mel, options)
    return result.text
    '''
def chitti(query):
    """Answer *query* with the top two retrieved answers and their links.

    Retrieves candidates via ``search``, drops answers that are substrings of
    the next-longer answer (near-duplicates), grammar-corrects the two best
    survivors with ``runGinger`` and returns
    ``[answer1, link1, answer2, link2]``.
    """
    re_table = search(query)
    # Columns of interest from the first five result rows:
    # row[0] = answer text, row[3] = source link.
    answers = [re_table[i][0] for i in range(5)]
    links = [re_table[i][3] for i in range(5)]

    # Order indices by answer length so each answer is compared only against
    # the next-longer one when detecting substring duplicates.
    order = sorted(range(len(answers)), key=lambda k: len(answers[k]))
    duplicates = set()
    for i in range(len(order) - 1):
        if answers[order[i]] in answers[order[i + 1]]:
            duplicates.add(order[i])

    # BUG FIX: the original popped items one at a time using indices computed
    # against the unmodified lists; every pop shifted later positions, so
    # subsequent pops removed the wrong entries (and could raise IndexError).
    # Building fresh filtered lists keeps indices stable.
    kept_answers = [a for i, a in enumerate(answers) if i not in duplicates]
    kept_links = [l for i, l in enumerate(links) if i not in duplicates]

    # Robustness: if dedup left fewer than two answers, fall back to the
    # unfiltered results instead of raising IndexError below.
    if len(kept_answers) < 2:
        kept_answers, kept_links = answers, links

    return [
        runGinger(kept_answers[0]),
        kept_links[0],
        runGinger(kept_answers[1]),
        kept_links[1],
    ]

# Gradio UI: one text input -> four text outputs (answer1, link1, answer2,
# link2, matching chitti's return list). Manual flagging with options
# "0"/"1"/"None" is persisted to the HuggingFace dataset via hf_writer.
demo = gr.Interface(
    fn=chitti,
    inputs=["text"],
    #inputs=[gr.inputs.Audio(source="microphone", type="filepath", label="Record your audio")],
    outputs=["text","text","text","text"],
    allow_flagging = "manual",
    flagging_options = ["0","1","None"],
    flagging_callback=hf_writer
)
demo.launch()