File size: 15,036 Bytes
0495d77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
from transformers import AutoTokenizer, SwitchTransformersForConditionalGeneration, AutoModelForSequenceClassification
import torch
import gradio as gr
import argparse
from scipy.special import softmax
import csv
import urllib.request
import numpy as np
import requests




args_dict = dict( 
    EX_LIST = [["This is wonderful!"],
                ["Nice car"],
                ["La France est la meilleure équipe du monde"],
                ["Visca Barca"],
                ["Hala Madrid"],
                ["Buongiorno"],
                # ["Auf einigen deutschen Straßen gibt es kein Radar"],
                ["Tempo soleggiato in Italia"],
                ["Bonjour"],
                ["صباح الخير"],
                ["اكل زوجتي جميل"],
               ],

    #MMiniLM
    # Load the pretrained model and tokenizer
    tokenizer_MMiniLM = AutoTokenizer.from_pretrained("Karim-Gamal/MMiniLM-L12-finetuned-emojis-IID-Fed"),
    model_MMiniLM = AutoModelForSequenceClassification.from_pretrained("Karim-Gamal/MMiniLM-L12-finetuned-emojis-IID-Fed"),

    #XLM
    # Load the pretrained model and tokenizer
    tokenizer_XLM = AutoTokenizer.from_pretrained("Karim-Gamal/XLM-Roberta-finetuned-emojis-IID-Fed"),
    model_XLM = AutoModelForSequenceClassification.from_pretrained("Karim-Gamal/XLM-Roberta-finetuned-emojis-IID-Fed"),

    #Bert
    # Load the pretrained model and tokenizer
    tokenizer_Bert = AutoTokenizer.from_pretrained("Karim-Gamal/BERT-base-finetuned-emojis-IID-Fed"),
    model_Bert = AutoModelForSequenceClassification.from_pretrained("Karim-Gamal/BERT-base-finetuned-emojis-IID-Fed"),
    


    description = 'Real-time Emoji Prediction',
    article = '<head><style>@import url(https://fonts.googleapis.com/css?family=Open+Sans:400italic,600italic,700italic,800italic,400,600,700,800)<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous"> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/font/bootstrap-icons.css"> <link rel="stylesheet" href="https://unpkg.com/[email protected]/dist/bootstrap-table.min.css">\
    .table-responsive{-sm|-md|-lg|-xl} body{ background-color: #f5f5f5; padding: 120px 0; font-family: \'Open Sans\', sans-serif; } img{ max-width:100%; } .div_table_{ position:relative; width: max-content; margin:0 auto; } .profile-card{ position:relative; width:280px; margin:0 auto; padding:40px 30px 30px; background:#fff; border: 5px solid rgba(255,255,255,.7); text-align:center; border-radius:40px; transition: all 200ms ease; } .profile-card_2{ position:relative; width:60%; // margin:0 auto; padding:40px 30px 30px; background:#fff; border: 5px solid rgba(255,255,255,.7); text-align:center; border-radius:40px; transition: all 200ms ease; } .mask-shadow{ z-index:-1 !important; width:95%; height:12px; background:#000; bottom:0; left:0; right:0; margin:0 auto; position:absolute; border-radius:4px; opacity:0; transition: all 400ms ease-in; } .mask-shadow_2{ z-index:-1 !important; width:95%; height:12px; background:#000; bottom:0; left:0; right:0; margin:0 auto; position:absolute; border-radius:4px; opacity:0; transition: all 400ms ease-in; } .profile-card:hover{ box-shadow: 0px 30px 60px -5px rgba(55,55,71,0.3); transform: translate3d(0,-5px,0); .mask-shadow{ opacity:1; box-shadow: 0px 30px 60px -5px rgba(55,55,71,0.3); position:absolute; } } .profile-card_2:hover{ box-shadow: 0px 30px 60px -5px rgba(55,55,71,0.3); transform: translate3d(0,-5px,0); .mask-shadow{ opacity:1; box-shadow: 0px 30px 60px -5px rgba(55,55,71,0.3); position:absolute; } } .profile-card header{ display:block; margin-bottom:10px; } .profile-card_2 header{ display:block; margin-bottom:10px; } .profile-card header a{ width:150px; height:150px; display:block; border-radius:100%; margin:-120px auto 0; box-shadow: 0 0 0 5px #82b541; } .profile-card_2 header a{ width:85%; height:85%; display:block; border-radius:10%; margin:-120px auto 0; box-shadow: 0 0 0 5px #82b541; } .profile-card header a img{ border-radius: 50%; width:150px; height:150px; } .profile-card_2 header a img{ border-radius: 10%; width:100%; height:100%; } .profile-card:hover header a, .profile-card header a:hover{ animation: bounceOut .4s linear; -webkit-animation: bounceOut .4s linear; } .profile-card_2:hover header a, .profile-card header a:hover{ animation: bounceOut .4s linear; -webkit-animation: bounceOut .4s linear; } .profile-card header h1{ font-size:20px; padding:20px; color:#444; text-transform:uppercase; margin-bottom:5px; } .profile-card_2 header h1{ font-size:20px; padding:20px; color:#444; text-transform:uppercase; margin-bottom:5px; } .profile-card header h2{ font-size:14px; color:#acacac; text-transform:uppercase; margin:0; } .profile-card_2 header h2{ font-size:14px; color:#acacac; text-transform:uppercase; margin:0; } /*content*/ .profile-bio{ font-size:14px; color:#a5a5a5; line-height:1.7; font-style: italic; margin-bottom:30px; } /*link social*/ .profile-social-links{ margin:0; padding:0; list-style:none; } .profile-social-links li{ display: inline-block; margin: 0 10px; } .profile-social-links li a{ width: 55px; height:55px; display:block; background:#f1f1f1; border-radius:50%; -webkit-transition: all 2.75s cubic-bezier(0,.83,.17,1); -moz-transition: all 2.75s cubic-bezier(0,.83,.17,1); -o-transition: all 2.75s cubic-bezier(0,.83,.17,1); transition: all 2.75s cubic-bezier(0,.83,.17,1); transform-style: preserve-3d; } .profile-social-links li a img{ width:35px; height:35px; margin:10px auto 0; } .profile-social-links li a:hover{ background:#ddd; transform: scale(1.2); -webkit-transform: scale(1.2); } /*animation hover effect*/ @-webkit-keyframes bounceOut { 0% { box-shadow: 0 0 0 4px #82b541; opacity: 1; } 25% { box-shadow: 0 0 0 1px #82b541; opacity: 1; } 50% { box-shadow: 0 0 0 7px #82b541; opacity: 1; } 75% { box-shadow: 0 0 0 4px #82b541; opacity: 1; } 100% { box-shadow: 0 0 0 5px #82b541; opacity: 1; } } @keyframes bounceOut { 0% { box-shadow: 0 0 0 6px #82b541; opacity: 1; } 25% { box-shadow: 0 0 0 2px #82b541; opacity: 1; } 50% { box-shadow: 0 0 0 9px #82b541; opacity: 1; } 75% { box-shadow: 0 0 0 3px #82b541; opacity: 1; } 100% { box-shadow: 0 0 0 5px #82b541; opacity: 1; } }</style></head>',
    

    )

config = argparse.Namespace(**args_dict)



# Preprocess text (username and link placeholders)
def preprocess(text):
    text = text.lower()
    new_text = []
    for t in text.split(" "):
        t = '@user' if t.startswith('@') and len(t) > 1 else t
        t = '' if t.startswith('http') else t
        new_text.append(t)
        # print(" ".join(new_text))
    return " ".join(new_text)



def test_with_sentance(text ,net ,tokenizer):

  # text = "good morning"
  text = preprocess(text)

  # tc = TweetCleaner(remove_stop_words=True, remove_retweets=False)
  # print('before : ' ,text)
  # text = tc.get_cleaned_text(text)
  # print('after : ' ,text)

  net.eval()
  encoded_input = tokenizer.encode(text, padding=True, truncation=True, return_tensors='pt')
  net.to('cpu')
  # print(type())
  # encoded_input = {k: v.to(DEVICE) for k, v in encoded_input.items()}
  output = net(encoded_input)
  scores = output[0][0].detach().numpy()
  scores = softmax(scores)

  # download label mapping
  labels=[]
  mapping_link = f"https://raw.githubusercontent.com/cardiffnlp/tweeteval/main/datasets/emoji/mapping.txt"
  with urllib.request.urlopen(mapping_link) as f:
      html = f.read().decode('utf-8').split("\n")
      csvreader = csv.reader(html, delimiter='\t')
  labels = [row[1] for row in csvreader if len(row) > 1]

  ranking = np.argsort(scores)
  ranking = ranking[::-1]
  output_d = {}
  for i in range(scores.shape[0]):
      l = labels[ranking[i]]
      s = scores[ranking[i]]
      # print(f"{ranking[i]}) {l} {np.round(float(s), 4)}")
      output_d[l] = np.round(float(s), 4)

      if i == 2 :
        # break
        return output_d

  # net.to('cuda:0')



list_interface = []
list_title = []






# BERT

def _method(text):

  # tokenizer = AutoTokenizer.from_pretrained(config.CHECKPOINT_BERT)
  # model_loaded = torch.load('/content/NEW_MODELS_Imbalance/Bert/g_ex3_bert_multi_fed_data_epoch_2.pt', map_location=torch.device('cpu'))

  return test_with_sentance(text , config.model_Bert , config.tokenizer_Bert)

# greet("sun")

interface = gr.Interface(
    
    fn = _method, 
    
    inputs=gr.Textbox(placeholder="Enter sentence here..."), 
    outputs="label",
    examples=config.EX_LIST,
    live = True,
    
    
    title = 'BERT Multilingual',
    
    description=config.description,
    article = '',
    
)
list_interface.append(interface)
list_title.append('BERT Multilingual')


# XLM

def _method(text):

  # tokenizer = AutoTokenizer.from_pretrained(config.CHECKPOINT_BERT)
  # model_loaded = torch.load('/content/NEW_MODELS_Imbalance/Bert/g_ex3_bert_multi_fed_data_epoch_2.pt', map_location=torch.device('cpu'))

  return test_with_sentance(text , config.model_XLM , config.tokenizer_XLM)

# greet("sun")

interface = gr.Interface(
    
    fn = _method, 
    
    inputs=gr.Textbox(placeholder="Enter sentence here..."), 
    outputs="label",
    examples=config.EX_LIST,
    live = True,
    
    
    title = 'XLM Roberta Multilingual',
    
    description=config.description,
    article = '',
    
)
list_interface.append(interface)
list_title.append('XLM Roberta Multilingual')


# MMiniLM

def _method(text):

  # tokenizer = AutoTokenizer.from_pretrained(config.CHECKPOINT_BERT)
  # model_loaded = torch.load('/content/NEW_MODELS_Imbalance/Bert/g_ex3_bert_multi_fed_data_epoch_2.pt', map_location=torch.device('cpu'))

  return test_with_sentance(text , config.model_MMiniLM , config.tokenizer_MMiniLM)

# greet("sun")

interface = gr.Interface(
    
    fn = _method, 
    
    inputs=gr.Textbox(placeholder="Enter sentence here..."), 
    outputs="label",
    examples=config.EX_LIST,
    live = True,
    
    
    title = 'MiniLM Multilingual',
    
    description=config.description,
    article = '',
    
)
list_interface.append(interface)
list_title.append('MiniLM Multilingual')



# Switch

API_URL_Switch = "https://api-inference.huggingface.co/models/Karim-Gamal/switch-base-8-finetuned-SemEval-2018-emojis-IID-Fed"
headers_Switch = {"Authorization": "Bearer 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"}


def query_Switch(payload):
    response = requests.post(API_URL_Switch, headers=headers_Switch, json=payload)
    return response.json()

query_Switch({	"inputs": 'test',})



def _method(text):
  text = preprocess(text)
  output_temp = query_Switch({
  "inputs": text,
  })

  text_to_emoji = {'red' : '❤', 'face': '😍', 'joy':'😂', 'love':'💕', 'fire':'🔥', 'smile':'😊', 'sunglasses':'😎', 'sparkle':'✨', 'blue':'💙', 'kiss':'😘', 'camera':'📷', 'USA':'🇺🇸', 'sun':'☀' , 'purple':'💜', 'blink':'😉', 'hundred':'💯', 'beam':'😁', 'tree':'🎄', 'flash':'📸', 'tongue':'😜'}

  # Extract the dictionary from the list
  try:
    # code that may raise an exception
    d = output_temp[0]
  except:
    pass
      
  # Extract the text from the 'generated_text' key
  text = d['generated_text']

  # my_dict = {}
  # my_dict[str(text_to_emoji[text.split(' ')[0]])] = 0.99
  return text_to_emoji[text.split(' ')[0]]
  

# greet("sun")

interface = gr.Interface(
    
    fn = _method, 
    
    inputs=gr.Textbox(placeholder="Enter sentence here..."), 
    outputs="text",
    examples=config.EX_LIST,
    live = True,
    
    
    title = 'Switch-Base-8',
    
    description=config.description,
    article = '',
    
)
list_interface.append(interface)
list_title.append('Switch-Base-8')



# About us

def _method(input_rating):

  # tokenizer = AutoTokenizer.from_pretrained(config.CHECKPOINT_BERT)
  # model_loaded = torch.load('/content/NEW_MODELS_Imbalance/Bert/g_ex3_bert_multi_fed_data_epoch_2.pt', map_location=torch.device('cpu'))

  if input_rating <=2:
    return {'🔥': 0.6, '✨': 0.3, '💯': 0.1}

  elif input_rating <= 4 and input_rating >2:
    return {'✨': 0.6, '😉': 0.3, '💯': 0.1}

  elif input_rating >4:
    return {'😍': 0.6, '💯': 0.3, '💕': 0.1}

  # return test_with_sentance(text , config.model_loaded_bert_multi_NONIID , config.tokenizer_bert)

# greet("sun")

interface = gr.Interface(
    
    fn = _method, 
    
    inputs=gr.Slider(1, 5, value=4),
    outputs="label",
    # examples=config.EX_LIST,
    live = True,
    
    
    title = 'About us',
    
    description='We don\'t have sad emoji so our rating will always be great. 😂',
    
    # CSS Source : https://codepen.io/bibiangel199/pen/warevP

    article = config.article + '<!-- this is the markup. you can change the details (your own name, your own avatar etc.) but don’t change the basic structure! --> <div class="div_table_"> <table class="table"> <tr> <td><aside class="profile-card"> <div class="mask-shadow"></div> <header> <!-- here’s the avatar --> <a href="https://www.linkedin.com/in/hossam-amer-23b9329b/"> <img src="https://drive.google.com/uc?export=view&id=1-C_UIimeqbofJC_lldC7IQzIOX_OYRSn"> </a> <!-- the username --> <h1 style = " font-size:20px; padding:20px; color:#444;  margin-bottom:5px; " >Dr. Hossam Amer</h1> <!-- and role or location --> <h2 style = "  font-size:14px; color:#acacac; text- margin:0; " >Research Scientist at Microsoft</h2> </header> </aside></td> </tr> </table> </div> <div class="div_table_"> <table class="table"> <tr> <td><aside class="profile-card"> <div class="mask-shadow"></div> <header> <!-- here’s the avatar --> <a href="https://www.linkedin.com/in/ahmed-mohamed-gaber-143b25175/"> <img src="https://drive.google.com/uc?export=view&id=1OiGZwhL23PYhIJzQexYvPDFRrgUIprMj"> </a> <!-- the username --> <h1 style = " font-size:20px; padding:20px; color:#444;  margin-bottom:5px; ">Ahmed Gaber</h1> <!-- and role or location --> <h2 style = "  font-size:14px; color:#acacac; text- margin:0; " >Master\'s student at Queen\'s University</h2> </header> </aside></td> <td><aside class="profile-card"> <div class="mask-shadow"></div> <header> <!-- here’s the avatar --> <a href="https://www.linkedin.com/in/karim-gamal-mahmoud/"> <img src="https://drive.google.com/uc?export=view&id=1Lg2RzimITL9y__X2hycBTX10rJ4o87Ax"> </a> <!-- the username --> <h1 style=" font-size:20px; padding:20px; color:#444;  margin-bottom:5px; ">Karim Gamal</h1> <!-- and role or location --> <h2 style = "  font-size:14px; color:#acacac; text- margin:0; " >Master\'s student at Queen\'s University</h2> </header> </aside></td> </tr> </table> </div>',
    )
list_interface.append(interface)
list_title.append('About us')



demo = gr.TabbedInterface(
    list_interface, 
    list_title,
    title='Multilingual Emoji Prediction Using Federated Learning',
    css='.gradio-container {color : orange}',)
    # css='.gradio-container {background-color: white; color : orange}',)
demo.launch()