from flask import Flask, request, jsonify, send_from_directory
from transformers import AutoProcessor, SeamlessM4Tv2Model
import numpy as np
import wave
import os
from huggingface_hub import InferenceClient, login
import logging
import torchaudio
# Configure debug logging
logging.basicConfig(level=logging.INFO)

# Model and processor are loaded on demand by load_model()
model = None
processor = None

UPLOAD_FOLDER = "audio_files"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

app = Flask(__name__, static_folder="front/dist", static_url_path="/")
# NOTE: the route decorators below are missing from the original listing; the
# paths used here are assumptions and should match what the front end calls.

@app.route("/")
def serve_react_app():
    return send_from_directory("front/dist", "index.html")


@app.route("/<path:path>")
def serve_static_files(path):
    # Serve the built front-end assets from the same folder as index.html
    # (the original used "dist", which does not match the static_folder above)
    return send_from_directory("front/dist", path)
@app.route("/load_model", methods=["POST"])
def load_model():
    global model, processor
    load = request.json.get("load", False)
    if load:
        # Replace with your own checkpoint if needed
        processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
        model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
        return jsonify({"message": "Model loaded successfully."}), 200
    else:
        return jsonify({"message": "Model is not loaded."}), 200
@app.route("/record_audio", methods=["POST"])
def record_audio():
    if model is None or processor is None:
        return jsonify({"error": "Model is not loaded."}), 400

    file = request.files["audio"]
    filename = os.path.join(UPLOAD_FOLDER, file.filename)
    file.save(filename)

    # Load the audio and resample to the 16 kHz expected by SeamlessM4T
    audio_data, orig_freq = torchaudio.load(filename)
    audio_data = torchaudio.functional.resample(audio_data, orig_freq=orig_freq, new_freq=16_000)

    # Speech-to-text translation into French
    audio_inputs = processor(audios=audio_data, return_tensors="pt")
    output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
    translated_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)

    return jsonify({"translated_text": translated_text})
@app.route("/text_to_speech", methods=["POST"])
def text_to_speech():
    if model is None or processor is None:
        return jsonify({"error": "Model is not loaded."}), 400

    data = request.get_json()
    text = data.get("text")
    src_lang = data.get("src_lang")
    tgt_lang = data.get("tgt_lang")

    # Text-to-speech translation; the model returns a 16 kHz waveform
    text_inputs = processor(text=text, src_lang=src_lang, return_tensors="pt")
    audio_array = model.generate(**text_inputs, tgt_lang=tgt_lang)[0].cpu().numpy().squeeze()

    # Write the waveform as 16-bit mono PCM
    output_filename = os.path.join(UPLOAD_FOLDER, "output.wav")
    with wave.open(output_filename, "wb") as wf:
        wf.setnchannels(1)
        wf.setsampwidth(2)
        wf.setframerate(16000)
        wf.writeframes((audio_array * 32767).astype(np.int16).tobytes())

    return jsonify({"audio_url": output_filename})


if __name__ == "__main__":
    # Port 7860 is the Hugging Face Spaces default; adjust as needed
    app.run(host="0.0.0.0", port=7860)