File size: 1,701 Bytes
ea19ac8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75c113a
ea19ac8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr
import json
import torch
from transformers import AutoTokenizer, RobertaForSequenceClassification, RobertaConfig

# Build the model skeleton from the pretrained config, resized to the
# 3 emotion classes this fine-tuned checkpoint was trained on.
config = RobertaConfig.from_pretrained('cardiffnlp/twitter-roberta-base-emotion', num_labels=3)
model = RobertaForSequenceClassification(config)

# Load the fine-tuned weights from the local checkpoint on CPU.
# weights_only=True restricts unpickling to tensors/primitives, preventing
# arbitrary-code execution if the .pt file were ever tampered with.
state_dict = torch.load(
    'transferLearningResults/model_state_dict.pt',
    map_location=torch.device('cpu'),
    weights_only=True,
)

# strict=False tolerates missing/unexpected keys between checkpoint and model.
# NOTE(review): this also silently ignores genuine mismatches — if predictions
# look random, inspect the IncompatibleKeys returned by load_state_dict.
model.load_state_dict(state_dict, strict=False)

# Evaluation mode: disables dropout so inference is deterministic.
model.eval()

tokenizer = AutoTokenizer.from_pretrained('transferLearningResults')

# Map integer class indices back to human-readable emotion labels.
# JSON object keys are strings, so convert them to ints for indexing.
with open('label_to_int_mapping.json', 'r') as file:
    label_mapping = json.load(file)
    int_to_label = {int(k): v for k, v in label_mapping.items()}

def predict_emotion(text):
    """Classify *text* and return {emotion_label: softmax_probability}.

    The returned dict maps each label from the module-level `int_to_label`
    mapping to its probability under the fine-tuned RoBERTa classifier.
    """
    # Encode to a batch of one; long inputs are truncated to the model's
    # 512-token limit.
    encoded = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        logits = model(**encoded).logits
    # Softmax over the class dimension, then drop the batch dimension.
    scores = torch.nn.functional.softmax(logits, dim=-1).squeeze().tolist()
    return {int_to_label[idx]: score for idx, score in enumerate(scores)}

# Minimal Gradio UI: a free-text box feeds predict_emotion, and the "label"
# output component renders the returned {label: probability} dict as a bar list.
iface = gr.Interface(fn=predict_emotion, inputs="text", outputs="label")
# Starts a local web server and blocks until it is shut down.
iface.launch()