ChrisLalk committed
Commit 03f3b55 · verified · 1 Parent(s): 3c7e83a

Update README.md

Files changed (1): README.md (+104 -0)
README.md CHANGED
@@ -23,3 +23,107 @@ This is basically the German translation of arpanghoshal/EmoRoBERTa. We used the
  - accuracy: 0.27
  - f1: 0.382
  - roc_auc: 0.658
+
+ **Example Code**
+
+ # Inference
+ # pip install transformers[torch]
+ # pip install pandas transformers numpy tqdm openpyxl
+
+ import os
+ import time
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ from tqdm import tqdm
+ from transformers import (
+     AutoModelForSequenceClassification,
+     AutoTokenizer,
+     DataCollatorWithPadding,
+     Trainer,
+ )
+
+ # Base path of the project
+ base_path = "/share/users/staff/c/clalk/Emotionen"
+
+ # Input and output paths for the model folder and the data folder
+ model_path = os.path.join(base_path, "Modell")
+ file_path = os.path.join(base_path, "Datensatz")
+
+ # Tokenizer of the base model; the fine-tuned classification head is loaded from model_path
+ MODEL = "intfloat/multilingual-e5-large"
+ tokenizer = AutoTokenizer.from_pretrained(MODEL, do_lower_case=False)
+ model = AutoModelForSequenceClassification.from_pretrained(
+     model_path,
+     from_tf=False,
+     from_flax=False,
+     trust_remote_code=False,
+     num_labels=28,
+     ignore_mismatched_sizes=True,
+ )
+
+ data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
+
+ # Switch to the data folder and load the speech turns
+ os.chdir(file_path)
+ df_full = pd.read_excel("speech_turns_pat.xlsx", index_col=None)
+
+ if "Unnamed: 0" in df_full.columns:
+     df_full = df_full.drop(columns=["Unnamed: 0"])
+ df_full.reset_index(drop=True, inplace=True)
+
+
+ # Function for tokenization and inference
+ def infer_texts(texts):
+     tokenized_texts = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
+
+     class SimpleDataset:
+         def __init__(self, tokenized_texts):
+             self.tokenized_texts = tokenized_texts
+
+         def __len__(self):
+             return len(self.tokenized_texts["input_ids"])
+
+         def __getitem__(self, idx):
+             return {k: v[idx] for k, v in self.tokenized_texts.items()}
+
+     test_dataset = SimpleDataset(tokenized_texts)
+     trainer = Trainer(model=model, data_collator=data_collator)
+     predictions = trainer.predict(test_dataset)
+     # Multi-label setup: apply a sigmoid to each of the 28 logits
+     sigmoid = torch.nn.Sigmoid()
+     probs = sigmoid(torch.Tensor(predictions.predictions))
+     return np.round(np.array(probs), 3).tolist()
+
+
+ start_time = time.time()
+
+ df = df_full
+
+ # Collect the results as a list of dicts
+ results = []
+ for index, row in tqdm(df.iterrows(), total=df.shape[0]):
+     patient_texts = row["Patient"]
+     prob_list = infer_texts(patient_texts)
+     results.append({
+         "File": f"{row['Class']}_{row['session']}",
+         "Class": row["Class"],
+         "session": row["session"],
+         "short_id": row["short_id"],
+         "long_id": row["long_id"],
+         "Sentence": patient_texts,
+         "Prediction": prob_list[0],
+         "hscl-11": row["Gesamtscore_hscl"],
+         "srs": row["srs_ges"],
+     })
+
+ # Convert the results to a DataFrame and save them
+ df_results = pd.DataFrame(results)
+ df_results.to_json("emo_speech_turn_inference.json")
+
+ end_time = time.time()
+ elapsed_time = end_time - start_time
+ print(f"Elapsed time: {elapsed_time:.2f} seconds")
+ print(df_results)
+
+ # One column per emotion, in the GoEmotions label order
+ emo_df = pd.DataFrame(df_results["Prediction"].tolist(), index=df_results["Class"].index)
+ col_names = ['admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring', 'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval', 'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief', 'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief', 'remorse', 'sadness', 'surprise', 'neutral']
+ emo_df.columns = col_names
+
+ print(emo_df)
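
For a quick check on a single text, the model can also be called directly, without the `Trainer`/Excel pipeline. This is only a minimal sketch: it assumes `tokenizer`, `model`, and `col_names` are already set up as in the example above, and the input sentence is a made-up placeholder.

```python
# Minimal single-text sketch; assumes tokenizer, model, and col_names from the example above.
text = "Ich freue mich sehr über die Fortschritte in der Therapie."  # placeholder ("I am very happy about the progress in therapy.")

inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits      # shape: (1, 28)

probs = torch.sigmoid(logits)[0]         # sigmoid: the 28 labels are scored independently (multi-label)
top5 = sorted(zip(col_names, probs.tolist()), key=lambda x: x[1], reverse=True)[:5]
print(top5)                              # five highest-scoring emotions with their probabilities
```

The sigmoid (rather than a softmax) mirrors the multi-label GoEmotions setup used above, so several emotions can receive high scores for the same speech turn.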