Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,40 +1,167 @@
|
|
1 |
import requests
|
2 |
-
import
|
3 |
|
4 |
-
# Assurez-vous d'avoir défini votre token API dans les variables d'environnement
|
5 |
api_token = os.environ.get("TOKEN")
|
|
|
|
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import requests
import json

# Hugging Face API token; must be provided via the TOKEN environment
# variable (it is None otherwise, and authenticated calls will fail).
api_token = os.environ.get("TOKEN")

# Serverless Inference API endpoint for the Llama-3-8B-Instruct chat model.
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}
def query(payload):
    """POST *payload* to the inference endpoint and return the decoded JSON reply."""
    reply = requests.post(API_URL, headers=headers, json=payload)
    return reply.json()
def analyze_sentiment(text):
    """Classify *text* into one of the predefined community-post categories.

    Sends a single-shot classification prompt to the Llama-3 inference
    endpoint and maps the model's one-word answer to a display label.
    Returns the label string, or an "Erreur: ..." string when the API reply
    is not usable or the answer matches no known category.
    """
    output = query({
        "inputs": f'''
system
You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
For posts that talk about chat models/LLM, return "Chatmodel/LLM"
For posts that talk about image generation models, return "image_generation"
For texts that ask for information from the community, return "questions"
For posts about fine-tuning or model adjustment, return "fine_tuning"
For posts related to ethics and bias in AI, return "ethics_bias"
For posts about datasets and data preparation, return "datasets"
For posts about tools and libraries, return "tools_libraries"
For posts containing tutorials and guides, return "tutorials_guides"
For posts about debugging and problem-solving, return "debugging"
Respond only with the category name, without any additional explanation or text.

user
{text}

assistant
'''
    })

    # On success the API returns a non-empty list of dicts; on error it
    # returns a dict such as {"error": ...}. The original code silently
    # returned None in that case — surface it instead.
    if not (isinstance(output, list) and len(output) > 0):
        return f"Erreur: Réponse invalide - '{output}'"

    generated = output[0].get('generated_text', '')
    # NOTE(review): the text-generation endpoint echoes the prompt in
    # generated_text by default (return_full_text) — presumably why the old
    # code counted token occurrences. Counting was broken two ways:
    # 'Chatmodel/LLM' was counted against a lower-cased string (always 0),
    # and "datasets"/"debugging" occur twice in their own prompt line, so
    # their count was 3, never the expected 2. Instead, keep only what
    # follows the final "assistant" marker — the model's actual answer.
    answer = generated.rsplit('assistant', 1)[-1].strip().lower()

    # Category token (lower-case, as it appears in the answer) -> label
    # returned to callers. Labels must stay in sync with the tallying code
    # at module level.
    labels = {
        'questions': 'questions',
        'chatmodel/llm': 'Chat Model/LLM',
        'other': 'Other',
        'image_generation': 'Image Generation',
        'fine_tuning': 'Fine-tuning',
        'ethics_bias': 'Ethics and Bias',
        'datasets': 'Datasets',
        'tools_libraries': 'Tools and Libraries',
        'tutorials_guides': 'Tutorials and Guides',
        'debugging': 'Debugging',
    }
    for token, label in labels.items():
        if token in answer:
            return label

    return f"Erreur: Réponse ambiguë - '{answer}'"
# Base endpoint of the Hugging Face community-posts API.
base_url = "https://huggingface.co/api/posts"

# Pagination state: current offset and page size per request.
skip = 0
limit = 1000

# Accumulates {"slug": ..., "text": ...} entries for every fetched post.
all_posts_with_text = []

while True:
    # Request the next page, most recent posts first.
    url = f"{base_url}?skip={skip}&limit={limit}&sort=recent"
    response = requests.get(url)

    # Stop on any HTTP failure, reporting the status code.
    if response.status_code != 200:
        print(f"Erreur lors de la récupération des données: {response.status_code}")
        break

    data = response.json()

    # An empty page means we have exhausted the feed.
    if not data["socialPosts"]:
        break

    for post in data["socialPosts"]:
        # Concatenate every text fragment of the post's content.
        fragments = [item["value"] for item in post["content"] if item["type"] == "text"]
        all_posts_with_text.append({"slug": post["slug"], "text": " ".join(fragments).strip()})

    # Advance the offset for the next page.
    skip += limit

# all_posts_with_text now holds every retrieved post with its text.
# (report title, category label returned by analyze_sentiment) — the order
# of this table fixes the order of the printed report.
report_rows = [
    ("Questions", "questions"),
    ("Chat Model/LLM", "Chat Model/LLM"),
    ("Other", "Other"),
    ("Image Generation", "Image Generation"),
    ("Fine-tuning", "Fine-tuning"),
    ("Ethics and Bias", "Ethics and Bias"),
    ("Datasets", "Datasets"),
    ("Tools and Libraries", "Tools and Libraries"),
    ("Tutorials and Guides", "Tutorials and Guides"),
    ("Debugging", "Debugging"),
]

# Tally of classification outcomes; unrecognized outcomes (error strings)
# are simply not counted, matching the original elif chain.
category_counts = {category: 0 for _, category in report_rows}

# Classify every fetched post and accumulate per-category totals.
for post in all_posts_with_text:
    outcome = analyze_sentiment(post["text"])
    if outcome in category_counts:
        category_counts[outcome] += 1

# Print the final report, one line per category.
for title, category in report_rows:
    print(f"{title}: {category_counts[category]}")