# NOTE(review): removed 296 lines of web-scrape residue that preceded the code
# (a "File size" banner, git blob hashes, and rendered line numbers 1-148).
# None of it was Python; it prevented the module from importing.
import embed_anything
from embed_anything import EmbedData
from tqdm.autonotebook import tqdm
from pinecone import Pinecone, ServerlessSpec
import numpy as np
import os
from pinecone import PineconeApiException
import uuid
import re
import gradio as gr


# Clips indexed at startup: Whisper transcribes each file and Jina embeds
# the resulting transcript segments.
audio_files = ["samples_hp0.wav", "samples_gb0.wav"]

embeddings: list[list[EmbedData]] = []

# Kept as a plain for-loop (not a comprehension): the loop variable `file`
# deliberately remains bound at module scope after the loop.
for file in audio_files:
    embeddings.append(embed_anything.embed_file(file, "Whisper-Jina"))

pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))

# Start from a clean slate. Bug fix: the old code called delete_index
# unconditionally *outside* any try block, so the very first run (no index
# yet) crashed with a 404. Treat "not found" as already-deleted.
try:
    pc.delete_index("search-in-audio")
except PineconeApiException as e:
    if e.status != 404:
        raise

try:
    pc.create_index(
        name="search-in-audio",
        dimension=768,  # Jina embeddings v2 base output size
        metric="cosine",
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )
except PineconeApiException as e:
    if e.status == 409:
        # Index already exists (e.g. the delete above raced) — reuse it.
        print("Index already exists")
    else:
        print(e)

# A handle is valid in both the success and already-exists paths, so bind it
# once here instead of in each branch (the old code also discarded
# create_index's return value by rebinding `index` immediately).
index = pc.Index("search-in-audio")


def convert_to_pinecone_format(embeddings: list[list[EmbedData]]):
    """Flatten per-file EmbedData lists into Pinecone upsert records.

    Each record gets a fresh random UUID as its id, the raw embedding
    vector as ``values``, and the transcript text plus start/end
    timestamps and the bare file name (path stripped) as ``metadata``.
    """
    return [
        {
            "id": str(uuid.uuid4()),
            "values": segment.embedding,
            "metadata": {
                "text": segment.text,
                "start": segment.metadata["start"],
                "end": segment.metadata["end"],
                # Split on either separator so Windows and POSIX paths
                # both reduce to a bare filename.
                "file": re.split(r"/|\\", segment.metadata["file_name"])[-1],
            },
        }
        for per_file in embeddings
        for segment in per_file
    ]


# Push all startup embeddings into Pinecone in a single upsert call.
index.upsert(convert_to_pinecone_format(embeddings))


# Bare filenames already present in the index; `search` consults this list to
# decide whether an uploaded clip still needs transcribing and indexing.
files = ["samples_hp0.wav", "samples_gb0.wav"]


def search(query, audio):
    """Semantic search over indexed audio transcripts.

    Embeds *query* with Jina, lazily transcribes and indexes *audio* if it
    has not been seen before, then returns the top three matches.

    Args:
        query: Free-text search string.
        audio: Filesystem path of the audio clip chosen in the UI.

    Returns:
        A 6-tuple (text_1, file_1, text_2, file_2, text_3, file_3) feeding
        the three Markdown/Audio output pairs. Slots beyond the number of
        matches are padded ("" / None) instead of raising IndexError.
    """
    query_emb = embed_anything.embed_query([query], "Jina")[0]

    # Bare filename so Windows and POSIX paths compare equally.
    audio_name = re.split(r"/|\\", audio)[-1]

    if audio_name not in files:
        # First time seeing this clip: transcribe, embed, and index it.
        # Bug fix: the old code printed a stale module-level `file` variable
        # here, and appended to `files` *outside* this branch, so every query
        # appended a duplicate entry.
        print("indexing new audio file:", audio_name)
        new_embeddings = embed_anything.embed_file(audio, "Whisper-Jina")
        index.upsert(convert_to_pinecone_format([new_embeddings]))
        files.append(audio_name)

    result = index.query(
        vector=query_emb.embedding,
        top_k=5,
        include_metadata=True,
    )
    results = [match.metadata for match in result.matches]

    formatted_results = []
    for res in results:
        display_text = f"""
        
        `File: {res['file']}`
        
        `Start: {res['start']}`
        
        `End: {res['end']}`
        
        Text: {res['text']}"""
        formatted_results.append(display_text)

    # Pad so fewer than three matches no longer crashes the UI callback.
    while len(formatted_results) < 3:
        formatted_results.append("")
        results.append({"file": None})

    return (
        formatted_results[0],
        results[0]["file"],
        formatted_results[1],
        results[1]["file"],
        formatted_results[2],
        results[2]["file"],
    )


# Gradio UI: a text query plus an audio upload in, three (Markdown, Audio)
# result pairs out — wired to the `search` callback above.
demo = gr.Interface(
    title="Search πŸ”Ž in Audio πŸŽ™οΈ",
    description="""
    
    # Search within audio files using text queries.

    ## Models used: 
    
    - **Audio Decoder**: [openai/whisper-tiny.en](https://huggingface.co/openai/whisper-tiny.en)
    - **Embedding Model**: [Jina Embeddings v2 base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en)

    ## Vector Database used: **Pinecone**

    ## Powered by [EmbedAnything by Starlight](https://github.com/StarlightSearch/EmbedAnything) πŸš€

    ![EmbedAnything](https://res.cloudinary.com/dltwftrgc/image/upload/v1712504276/Projects/EmbedAnything_500_x_200_px_a4l8xu.png)
    
    """,
    article  = "Created by [Akshay Ballal](https://www.akshaymakes.com)",

    fn=search,
    # `type="filepath"` hands the callback a path string, matching what
    # `search` passes to embed_anything.embed_file.
    inputs=["text", gr.Audio(label="Audio", type="filepath")],
    # Three Markdown/Audio pairs — must match the 6-tuple `search` returns.
    outputs=[
        gr.Markdown(label="Text"),
        gr.Audio(label="Audio", type="filepath"),
        gr.Markdown(label="Text"),
        gr.Audio(label="Audio", type="filepath"),
        gr.Markdown(label="Text"),
        gr.Audio(label="Audio", type="filepath"),
    ],
    examples=[
        ["screwdriver", "samples_hp0.wav"],
        ["united states", "samples_gb0.wav"],
        ["united states", "samples_hp0.wav"],
    ],
)
demo.launch()