from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
from sklearn.metrics import accuracy_score
import numpy as np
import os
import torch
import gc
import psutil
from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor, pipeline
from utils.emissions import tracker, clean_emissions_data, get_space_info
from dotenv import load_dotenv
import logging
import csv
import torch.nn.utils.prune as prune
from typing import Optional
from pydantic import BaseModel, Field
from smolagents import Tool

logging.basicConfig(level=logging.INFO)
logging.info("Starting the Python file")

load_dotenv()

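# FastAPI router and metadata for the audio evaluation endpoint.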
router = APIRouter()

DESCRIPTION = "Random Baseline"
ROUTE = "/audio"

# Run inference on GPU 0 when CUDA is available, otherwise on CPU (device=-1 for transformers pipelines).
device = 0 if torch.cuda.is_available() else -1

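# Turn a batch of raw audio arrays into model inputs with the wav2vec2 feature extractor,
# padding to the longest example and truncating to max_length=16000 samples.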
def preprocess_function(example, feature_extractor):
    return feature_extractor(
        [x["array"] for x in example["audio"]],
        sampling_rate=feature_extractor.sampling_rate,
        padding="longest",
        max_length=16000,
        truncation=True,
        return_tensors="pt",
    )

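# Prune a fraction (30% by default) of the weights of every Linear layer with one-shot
# L1 unstructured pruning, then call prune.remove() so the pruning becomes permanent.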
def apply_pruning(model, amount=0.3):
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            prune.l1_unstructured(module, name="weight", amount=amount)
            prune.remove(module, "weight")
    return model

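# Request models describing how the evaluation is configured (test split size, seed, dataset name).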
class BaseEvaluationRequest(BaseModel):
    test_size: float = Field(0.2, ge=0.0, le=1.0, description="Size of the test split (between 0 and 1)")
    test_seed: int = Field(42, ge=0, description="Random seed for reproducibility")


class AudioEvaluationRequest(BaseEvaluationRequest):
    dataset_name: str = Field("rfcx/frugalai",
                              description="The name of the dataset on HuggingFace Hub")

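# smolagents Tool that demonstrates the workflow: it runs wav2vec2 audio classification on a few
# streamed samples of the rfcx/frugalai test split and returns the CodeCarbon emissions data.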
class evaluate_consumption_example(Tool):
    name = "evaluate_consumption_example"
    description = "This is only an example. If a manager wants to know what you are capable of, use it: it will use CodeCarbon to evaluate the CO2 emissions of an example Python workload."
    inputs = {
        "code": {
            "type": "string",
            "description": "The code to evaluate. Here, it is an example, so just set it to 'None'."
        }
    }
    output_type = "string"

    def forward(self, code: str):
        request = AudioEvaluationRequest()
        logging.info("Loading the dataset")
        dataset = load_dataset(request.dataset_name, streaming=True, token=os.getenv("HF_TOKEN"))
        logging.info("Dataset loaded")

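        # Keep only the streamed test split; the rest of the dataset dict is released.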
test_dataset = dataset["test"] |
|
del dataset |
|
|
|
|
|
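        # Measure the emissions of the inference phase with CodeCarbon.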
        tracker.start()
        tracker.start_task("inference")

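        # Extract wav2vec2 input features batch by batch and drop the raw audio column.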
        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
        test_dataset = test_dataset.map(
            preprocess_function,
            fn_kwargs={"feature_extractor": feature_extractor},
            remove_columns="audio",
            batched=True,
            batch_size=32,
        )
        gc.collect()

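        # Load the fine-tuned wav2vec2 sequence-classification checkpoint and switch it to eval mode.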
        model_name = "CindyDelage/Challenge_HuggingFace_DFG_FrugalAI"
        model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name)
        model.eval()

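        # Run the first six test samples through the audio-classification pipeline and map the
        # predicted label to 1 for 'environment' and 0 for anything else.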
        classifier = pipeline("audio-classification", model=model, feature_extractor=feature_extractor, device=device)
        predictions = []
        logging.info("Starting batch predictions")
        i = 0
        for data in iter(test_dataset):
            print(i)
            if i <= 5:
                with torch.no_grad():
                    result = classifier(np.asarray(data["input_values"]), batch_size=64)
                predicted_label = result[0]["label"]
                label = 1 if predicted_label == "environment" else 0
                predictions.append(label)
                del result
                del label
                torch.cuda.empty_cache()
                gc.collect()
            i = i + 1
            if i > 5:
                break
        logging.info("Predictions finished")

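        # Release the pipeline objects before stopping the tracker.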
        del classifier
        del feature_extractor
        gc.collect()

        emissions_data = tracker.stop_task()
        return emissions_data

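# smolagents Tool that measures, with CodeCarbon, the CO2 emissions of Python code provided by the manager agent.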
class evaluate_consumption(Tool):
    name = "evaluate_consumption"
    description = "If the manager gave you Python code, this tool uses CodeCarbon to evaluate the CO2 emissions of that code."
    inputs = {
        "code": {
            "type": "string",
            "description": "The code to evaluate."
        }
    }
    output_type = "string"

    def forward(self, code: str):
        tracker.start()
        tracker.start_task("inference")
        # Run the provided code in-process while the CodeCarbon task is recording.
        exec(code)

        emissions_data = tracker.stop_task()
        return emissions_data
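
# Minimal usage sketch (an assumption, not part of the original module): these tools are meant to be
# handed to a smolagents agent. The agent and model names below are illustrative only.
#
#     from smolagents import CodeAgent, HfApiModel
#
#     agent = CodeAgent(
#         tools=[evaluate_consumption(), evaluate_consumption_example()],
#         model=HfApiModel(),
#     )
#     agent.run("Estimate the CO2 emissions of the code I gave you.")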