# Search-in-Audio demo: transcribe + embed audio files (Whisper + Jina via
# embed_anything), index the chunks in Pinecone, and expose a Gradio text-search UI.
import embed_anything
from embed_anything import EmbedData
from tqdm.autonotebook import tqdm
from pinecone import Pinecone, ServerlessSpec
import numpy as np
import os
from pinecone import PineconeApiException
import uuid
import re
import gradio as gr
# Sample clips bundled with the demo; each is transcribed and embedded
# chunk-by-chunk (Whisper for speech-to-text, Jina for the vectors).
audio_files = ["examples/samples_hp0.wav", "examples/samples_gb0.wav"]
embeddings: list[list[EmbedData]] = [
    embed_anything.embed_file(path, "Whisper-Jina") for path in audio_files
]
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))

# Recreate the index from scratch so vectors from a previous run do not
# pollute search results. On a first run the index does not exist yet, so
# the delete is best-effort rather than a hard failure.
try:
    pc.delete_index("search-in-audio")
except PineconeApiException as e:
    print(f"Skipping delete: {e}")

try:
    pc.create_index(
        name="search-in-audio",
        dimension=768,  # must match the Jina embedding dimension
        metric="cosine",  # must match the model's similarity metric
        spec=ServerlessSpec(cloud="aws", region="us-east-1"),
    )
except PineconeApiException as e:
    if e.status == 409:
        # 409 Conflict: the index already exists — safe to reuse it.
        print("Index already exists")
    else:
        print(e)

# Handle used for all upserts/queries below, whether freshly created or reused.
index = pc.Index("search-in-audio")
def convert_to_pinecone_format(embeddings: list[list[EmbedData]]) -> list[dict]:
    """Flatten per-file ``EmbedData`` lists into Pinecone upsert records.

    Each record carries a fresh UUID4 id, the raw embedding vector, and
    metadata with the transcript text, start/end timestamps, and the bare
    file name (both ``/`` and ``\\`` path separators are stripped).

    Args:
        embeddings: one list of ``EmbedData`` chunks per audio file.

    Returns:
        A flat list of ``{"id", "values", "metadata"}`` dicts ready for
        ``index.upsert``.
    """
    data = []
    for file_chunks in embeddings:
        for emb in file_chunks:
            data.append(
                {
                    "id": str(uuid.uuid4()),
                    "values": emb.embedding,
                    "metadata": {
                        "text": emb.text,
                        "start": emb.metadata["start"],
                        "end": emb.metadata["end"],
                        # keep only the basename, whichever separator the OS used
                        "file": re.split(r"/|\\", emb.metadata["file_name"])[-1],
                    },
                }
            )
    return data
# Push every embedded chunk of the sample clips into the index in one call.
data = convert_to_pinecone_format(embeddings)
index.upsert(data)
# Bare names of files already indexed; search() consults this to avoid
# re-embedding audio it has seen before.
files = ["samples_hp0.wav", "samples_gb0.wav"]
def search(query, audio):
    """Embed a text query, lazily index unseen audio, and return top matches.

    Args:
        query: free-text search string.
        audio: filesystem path to an audio file (from the gr.Audio input).

    Returns:
        A 6-tuple interleaving (markdown snippet, source file name) for the
        three best matches. Slots are empty/None when fewer than three
        matches exist (the original crashed with IndexError in that case).
    """
    query_emb = embed_anything.embed_query([query], "Jina")[0]

    # Embed and upsert the uploaded file only the first time we see it.
    audio_name = re.split(r"/|\\", audio)[-1]
    if audio_name not in files:
        # BUG FIX: the original printed the stale module-level loop
        # variable `file` instead of the current audio path.
        print(audio, audio_name)
        new_embeddings = embed_anything.embed_file(audio, "Whisper-Jina")
        index.upsert(convert_to_pinecone_format([new_embeddings]))
        files.append(audio_name)

    result = index.query(
        vector=query_emb.embedding,
        top_k=5,
        include_metadata=True,
    )
    results = [res.metadata for res in result.matches]

    formatted_results = []
    for res in results:
        formatted_results.append(
            f"""
`File: {res['file']}`
`Start: {res['start']}`
`End: {res['end']}`
Text: {res['text']}"""
        )

    # Pad so the three (Markdown, Audio) output pairs always get a value
    # even when the index returns fewer than three matches.
    while len(formatted_results) < 3:
        formatted_results.append("")
        results.append({"file": None})

    return (
        formatted_results[0],
        results[0]["file"],
        formatted_results[1],
        results[1]["file"],
        formatted_results[2],
        results[2]["file"],
    )
# Three (transcript snippet, playable clip) output slots for the top matches.
output_components = []
for _ in range(3):
    output_components.append(gr.Markdown(label="Text"))
    output_components.append(gr.Audio(label="Audio", type="filepath"))

demo = gr.Interface(
    fn=search,
    title="Search π in Audio ποΈ",
    description="""Search within audio files using text queries.
Models used: Whisper, Jina
""",
    inputs=["text", gr.Audio(label="Audio", type="filepath")],
    outputs=output_components,
    examples=[
        ["screwdriver", "samples_hp0.wav"],
        ["united states", "samples_gb0.wav"],
        ["united states", "samples_hp0.wav"],
    ],
)
demo.launch()