Create app.py
This work is based on the research presented in the papers “Unsupervised law article mining based on deep pre-trained language representation models with application to the Italian civil code”, “LamBERTa: Law Article Mining Based on Bert Architecture for the Italian Civil Code”, and “Exploring domain and task adaptation of LamBERTa models for article retrieval on the Italian Civil Code”.
app.py
ADDED
@@ -0,0 +1,69 @@
+import pandas as pd
+import re
+import gradio as gr
+import torch
+from transformers import BertTokenizerFast, BertForSequenceClassification
+
+# Pick the device once at start-up; the model and inputs are moved to it below.
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+    print('There are %d GPU(s) available.' % torch.cuda.device_count())
+    print('We will use the GPU:', torch.cuda.get_device_name(0))
+else:
+    print('No GPU available, using the CPU instead.')
+    device = torch.device("cpu")
+
+# Local paths: the Civil Code (Book II) CSV and the fine-tuned LamBERTa checkpoint.
+dataset_path = './codice_civile_ITA_LIBRI_2_withArtRef_v2.csv'
+input_model_path = './MODELLO_LOCALE_LIBRI_2_v5_2_subset60UniRRemphT4'
+
+
+def load_CC_from_CSV(path):
+    # Build article<->index mappings and an article -> "title -> text" lookup
+    # from the pipe-separated CSV, keeping only the first 60 articles
+    # (the subset this model was trained on).
+    NUM_ART = 0
+    cc = pd.read_csv(path, header=None, sep='|', usecols=[1, 2, 3], names=['art', 'title', 'text'], engine='python')
+    article_id = {}
+    id_article = {}
+    article_text = {}
+    for i in range(len(cc)):
+        NUM_ART += 1
+        # Normalize the article label (drop whitespace, dots, and hyphens).
+        art = re.sub(r'[\s.\-]+', '', str(cc['art'][i]).lower())
+        article_id[art] = i
+        id_article[i] = art
+        article_text[art] = str(cc['title'][i]).lower() + " -> " + str(cc['text'][i]).lower()
+        if i == 59:
+            break
+    return article_id, id_article, article_text, NUM_ART
+
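+# The CSV rows are pipe-separated; an illustrative layout (the first column is skipped):
+#   <id>|Art. 456|Apertura della successione|La successione si apre al momento della morte ...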
+article_id, id_article, article_text, NUM_ART = load_CC_from_CSV(dataset_path)
+
+model = BertForSequenceClassification.from_pretrained(input_model_path)
+tokenizer = BertTokenizerFast.from_pretrained(input_model_path)
+model.to(device)
+model.eval()  # inference only
+
+def LamBERTa_v5_placeholder(query):
+    # UI placeholder: random top-5 scores over the loaded articles,
+    # useful for testing the interface without running the model.
+    n = NUM_ART
+    predictions = torch.softmax(torch.randn(n), dim=0)
+    values, indices = torch.topk(predictions, 5)
+    confidences = {id_article[i.item()]: v.item() for i, v in zip(indices, values)}
+    # confidences = {id_article[i]: float(predictions[i]) for i in range(n)}
+    return confidences
+
+def LamBERTa(query):
+    # Score the query against every article class and return the top-5
+    # articles with their softmax confidences and full texts.
+    texts = []
+    input_ids = torch.tensor(tokenizer.encode(query, add_special_tokens=True)).unsqueeze(0).to(device)  # Batch size 1
+    with torch.no_grad():
+        outputs = model(input_ids)
+    probs = torch.softmax(outputs.logits, dim=1)
+    values, indices = torch.topk(probs, 5, dim=1)
+    confidences = {id_article[i.item()]: v.item() for i, v in zip(indices[0], values[0])}
+    for art, prob in confidences.items():
+        texts.append(
+            {
+                "art": art,
+                "text": article_text[art],
+            }
+        )
+    return confidences, texts
+
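+# Illustrative output shape (scores are hypothetical):
+#   LamBERTa("Quando si apre la successione")
+#   -> ({'art456': 0.91, ...}, [{'art': 'art456', 'text': 'apertura della successione -> ...'}, ...])
+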
+demo = gr.Interface(
+    fn=LamBERTa,
+    inputs="text",
+    outputs=["label", "json"],
+    examples=["Quando si apre la successione", "Dove si apre la successione", "In quali casi, alla morte, non spetta l'eredità"],
+    live=True,
+)
+
+demo.launch()  # pass share=True to serve a public link
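As a side note, the same checkpoint can be queried in batch outside Gradio; a minimal sketch, assuming the local model directory used in app.py is available:

import torch
from transformers import BertTokenizerFast, BertForSequenceClassification

model_dir = './MODELLO_LOCALE_LIBRI_2_v5_2_subset60UniRRemphT4'
tokenizer = BertTokenizerFast.from_pretrained(model_dir)
model = BertForSequenceClassification.from_pretrained(model_dir).eval()

queries = ["Quando si apre la successione", "Dove si apre la successione"]
batch = tokenizer(queries, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    probs = torch.softmax(model(**batch).logits, dim=1)
values, indices = torch.topk(probs, 5, dim=1)  # per-query top-5 class ids and scores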