alex-abb committed on
Commit
95f7ff3
1 Parent(s): 73f9174

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -103
app.py CHANGED
@@ -1,8 +1,8 @@
1
- import requests
2
- import json
3
  import os
 
 
 
4
 
5
- # Votre analyseur de post
6
  api_token = os.environ.get("TOKEN")
7
  API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
8
  headers = {"Authorization": f"Bearer {api_token}"}
@@ -11,10 +11,10 @@ def query(payload):
11
  response = requests.post(API_URL, headers=headers, json=payload)
12
  return response.json()
13
 
14
- def analyze_sentiment(text):
15
  output = query({
16
- "inputs": f'''
17
- system
18
  You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
19
  For posts that talk about chat models/LLM, return "Chatmodel/LLM"
20
  For posts that talk about image generation models, return "image_generation"
@@ -26,104 +26,46 @@ For posts about tools and libraries, return "tools_libraries"
26
  For posts containing tutorials and guides, return "tutorials_guides"
27
  For posts about debugging and problem-solving, return "debugging"
28
  Respond only with the category name, without any additional explanation or text.
29
-
30
- user
31
- {text}
32
-
33
- assistant
34
-
35
  '''
36
  })
37
-
38
  if isinstance(output, list) and len(output) > 0:
39
  response = output[0].get('generated_text', '').strip().lower()
40
- return response
41
-
42
- return "Erreur: Réponse vide ou non valide"
43
-
44
- def fetch_posts(limit=10, max_posts=50):
45
- url = "https://huggingface.co/api/posts"
46
- params = {'limit': limit}
47
- offset = 0
48
- all_posts = []
49
-
50
- while len(all_posts) < max_posts:
51
- params['offset'] = offset
52
- response = requests.get(url, params=params)
53
- data = response.json()
54
-
55
- if not data['socialPosts']:
56
- break
57
-
58
- all_posts.extend(data['socialPosts'])
59
- offset += len(data['socialPosts'])
60
-
61
- return all_posts[:max_posts]
62
-
63
- def display_post(post):
64
- print(f"\n{'=' * 50}")
65
- print(f"Contenu:\n{post['rawContent']}")
66
- print(f"Likes: {sum(reaction['count'] for reaction in post['reactions'])}")
67
- print(f"URL: {post['url']}")
68
- print(f"Analyse: {post.get('analysis', 'Analyse non disponible')}")
69
- print(f"Catégorie: {post.get('category', 'Non analysé')}")
70
- print(f"{'=' * 50}\n")
71
-
72
- # Récupérer les posts
73
- posts = fetch_posts(limit=10, max_posts=30) # Récupère jusqu'à 30 posts
74
-
75
- # Stocker les posts dans des variables différentes
76
- post_variables = {f'post_{i}': post for i, post in enumerate(posts)}
77
-
78
- # Dictionnaire pour compter les catégories
79
- category_count = {
80
- 'questions': 0,
81
- 'Chatmodel/LLM': 0,
82
- 'image_generation': 0,
83
- 'fine_tuning': 0,
84
- 'ethics_bias': 0,
85
- 'datasets': 0,
86
- 'tools_libraries': 0,
87
- 'tutorials_guides': 0,
88
- 'debugging': 0,
89
- 'Erreur: Réponse ambiguë': 0,
90
- 'Erreur: Réponse vide ou non valide': 0
91
- }
92
-
93
- # Analyser chaque post et ajouter les résultats de l'analyse au dictionnaire
94
- for key in post_variables:
95
- category = analyze_sentiment(post_variables[key]['rawContent'])
96
-
97
- if category == 'questions':
98
- category_count['questions'] += 2
99
- elif category == 'Chatmodel/LLM':
100
- category_count['Chatmodel/LLM'] += 2
101
- elif category == 'image_generation':
102
- category_count['image_generation'] += 2
103
- elif category == 'fine_tuning':
104
- category_count['fine_tuning'] += 2
105
- elif category == 'ethics_bias':
106
- category_count['ethics_bias'] += 2
107
- elif category == 'datasets':
108
- category_count['datasets'] += 2
109
- elif category == 'tools_libraries':
110
- category_count['tools_libraries'] += 2
111
- elif category == 'tutorials_guides':
112
- category_count['tutorials_guides'] += 2
113
- elif category == 'debugging':
114
- category_count['debugging'] += 2
115
- elif category == "Erreur: Réponse ambiguë":
116
- category_count["Erreur: Réponse ambiguë"] += 2
117
- elif category == "Erreur: Réponse vide ou non valide":
118
- category_count["Erreur: Réponse vide ou non valide"] += 2
119
-
120
- post_variables[key]['category'] = category
121
-
122
- # Imprimer un post spécifique
123
- post_to_print = 'post_2' # Par exemple, le troisième post (index 2)
124
- display_post(post_variables[post_to_print])
125
-
126
- # Afficher le nombre de posts par catégorie
127
- print("\nNombre de posts par catégorie:")
128
- for category, count in category_count.items():
129
- print(f"{category}: {count}")
 
 
 
1
  import os
2
+ import requests
3
+ import gradio as gr
4
+ from bs4 import BeautifulSoup
5
 
 
6
  api_token = os.environ.get("TOKEN")
7
  API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
8
  headers = {"Authorization": f"Bearer {api_token}"}
 
11
  response = requests.post(API_URL, headers=headers, json=payload)
12
  return response.json()
13
 
14
+ def analyze_sentiment(pl7_texts):
15
  output = query({
16
+ "inputs": f'''<|begin_of_text|>
17
+ <|start_header_id|>system<|end_header_id|>
18
  You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
19
  For posts that talk about chat models/LLM, return "Chatmodel/LLM"
20
  For posts that talk about image generation models, return "image_generation"
 
26
  For posts containing tutorials and guides, return "tutorials_guides"
27
  For posts about debugging and problem-solving, return "debugging"
28
  Respond only with the category name, without any additional explanation or text.
29
+ <|eot_id|>
30
+ <|start_header_id|>user<|end_header_id|>
31
+ {pl7_texts}
32
+ <|eot_id|>
33
+ <|start_header_id|>assistant<|end_header_id|>
 
34
  '''
35
  })
 
36
  if isinstance(output, list) and len(output) > 0:
37
  response = output[0].get('generated_text', '').strip().lower()
38
+ categories = {
39
+ 'questions': 'Questions',
40
+ 'chatmodel/llm': 'Chat Model/LLM',
41
+ 'image_generation': 'Image Generation',
42
+ 'fine_tuning': 'Fine-tuning',
43
+ 'ethics_bias': 'Ethics and Bias',
44
+ 'datasets': 'Datasets',
45
+ 'tools_libraries': 'Tools and Libraries',
46
+ 'tutorials_guides': 'Tutorials and Guides',
47
+ 'debugging': 'Debugging'
48
+ }
49
+ return categories.get(response, f"Error: Ambiguous response - '{response}'")
50
+ return "Error: No valid response received"
51
+
52
# Scrape the Hugging Face community posts page and classify each post found.
url = 'https://huggingface.co/posts'
# timeout so a hung request cannot stall the whole script indefinitely
response = requests.get(url, timeout=30)

if response.status_code == 200:
    soup = BeautifulSoup(response.content, 'html.parser')
    # Post bodies are rendered inside elements carrying the 'pl-7' utility class.
    # NOTE(review): this is coupled to the site's current markup — re-verify if
    # scraping suddenly returns no elements.
    pl7_elements = soup.find_all(class_='pl-7')
    pl7_texts = [element.text.strip() for element in pl7_elements]

    # Classify every scraped post and print its category.
    for idx, text in enumerate(pl7_texts, start=1):
        print(f"Text pl-7 {idx}: {text}")
        sentiment = analyze_sentiment(text)
        print(f"Sentiment: {sentiment}\n")

    # Bug fix: only the first two posts are printed below, so two elements
    # suffice (the original required >= 4 and wrongly reported "not enough"
    # when 2 or 3 posts were scraped).
    if len(pl7_texts) >= 2:
        print(f"Content of pl7_text_1: {pl7_texts[0]}")
        print(f"Content of pl7_text_2: {pl7_texts[1]}")
    else:
        print("Not enough pl-7 elements found")
else:
    print(f"Error {response.status_code} when retrieving {url}")