Gregoryjr
committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -30,12 +30,29 @@ if con:
|
|
| 30 |
|
| 31 |
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
|
| 32 |
model = AutoModelForSequenceClassification.from_pretrained("Greys/milestonemodel")
|
| 33 |
-
|
| 34 |
def classify_sentence(text):
    """Classify *text* with the toxicity model.

    Returns a 1-D numpy array of per-label softmax probabilities.
    """
    encoded = tokenizer(text, return_tensors="pt")
    logits = model(**encoded).logits
    return logits.softmax(dim=1).detach().numpy()[0]
|
| 39 |
probs = classify_sentence(text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
print(probs)
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
# Pretrained DistilBERT tokenizer paired with a fine-tuned toxicity classifier.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("Greys/milestonemodel")

# Human-readable label names for the model's output positions.
# NOTE(review): assumes this ordering matches the model's label config
# (toxic, severe_toxic, obscene, threat, insult, identity_hate) — TODO confirm.
my_list = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
|
| 34 |
def classify_sentence(text):
    """Tokenize *text*, run the sequence-classification model, and
    return the softmax probabilities as a 1-D numpy array."""
    model_inputs = tokenizer(text, return_tensors="pt")
    prediction = model(**model_inputs)
    probabilities = prediction.logits.softmax(dim=1)
    return probabilities.detach().numpy()[0]
|
| 39 |
# `text` is expected to be defined earlier in the file (e.g. a Streamlit
# text input) — not visible in this chunk, confirm upstream.
probs = classify_sentence(text)
|
| 40 |
+
def find_largest_number(numbers):
    """Return the index of the largest value in *numbers*.

    Accepts any sequence, including 1-D numpy arrays: the empty check uses
    ``len()`` because ``if not numbers`` raises ``ValueError`` ("truth value
    of an array ... is ambiguous") for numpy arrays with more than one
    element — and the caller passes the numpy array ``probs``.

    Returns ``None`` (after printing a notice) when *numbers* is empty.
    The original returned ``(None, None)`` here while the normal path
    returns a single int; a single ``None`` keeps the return shape
    consistent for callers doing ``my_list[index]``-style lookups.
    """
    if len(numbers) == 0:
        print("List is empty.")
        return None

    max_index = 0
    max_num = numbers[0]
    # Scan once, keeping the first occurrence of the maximum (strict `>`).
    for i in range(1, len(numbers)):
        if numbers[i] > max_num:
            max_num = numbers[i]
            max_index = i
    return max_index
|
| 53 |
+
|
| 54 |
# Debug output: raw per-label probability vector.
print(probs)

# Position of the highest-probability label.
# NOTE(review): if `probs` were empty, find_largest_number's empty-case
# return would make `my_list[index]` below fail — verify upstream input.
index = find_largest_number(probs)
# Presumably `st` is streamlit, imported earlier in the file — confirm.
st.write(my_list[index])
#id,toxic,severe_toxic,obscene,threat,insult,identity_hate
|