# app.py — fetches recent Hugging Face community posts and categorizes each
# one with the Meta-Llama-3-8B-Instruct hosted inference API.
import requests
import json
import os
# Post analyzer: classifies Hugging Face community posts via the inference API.
api_token = os.environ.get("TOKEN")  # HF API token from the environment; None if unset
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}  # shared auth header for every request
def query(payload, timeout=30):
    """POST *payload* to the hosted inference API and return the decoded JSON.

    Parameters
    ----------
    payload : dict
        JSON body for the inference request (expects an "inputs" key).
    timeout : float, optional
        Seconds to wait for the request. `requests` has no default timeout,
        so the original call could hang indefinitely on a stalled connection.

    Returns
    -------
    The JSON-decoded response: a list of generations on success, or a dict
    with an "error" key when the API reports a failure.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    return response.json()
def analyze_sentiment(text):
    """Ask the Llama-3 model to classify *text* into one fixed category.

    Returns the category string emitted by the model (e.g. "Chatmodel/LLM",
    "questions"), or "Erreur: Réponse vide ou non valide" when the API
    response is empty or not the expected list shape.

    Fix: the previous version lowercased the model output, which made the
    mixed-case category 'Chatmodel/LLM' impossible to match against the
    `category_count` keys used by the caller — the output is now only stripped.

    NOTE(review): the inference API may echo the full prompt inside
    'generated_text'; if categories never match, post-process the echo here.
    """
    output = query({
        "inputs": f'''
system
You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
For posts that talk about chat models/LLM, return "Chatmodel/LLM"
For posts that talk about image generation models, return "image_generation"
For texts that ask for information from the community, return "questions"
For posts about fine-tuning or model adjustment, return "fine_tuning"
For posts related to ethics and bias in AI, return "ethics_bias"
For posts about datasets and data preparation, return "datasets"
For posts about tools and libraries, return "tools_libraries"
For posts containing tutorials and guides, return "tutorials_guides"
For posts about debugging and problem-solving, return "debugging"
Respond only with the category name, without any additional explanation or text.
user
{text}
assistant
'''
    })
    # A successful generation arrives as a non-empty list of dicts.
    if isinstance(output, list) and len(output) > 0:
        return output[0].get('generated_text', '').strip()
    return "Erreur: Réponse vide ou non valide"
def fetch_posts(limit=10, max_posts=50):
    """Fetch up to *max_posts* community posts from the Hugging Face API.

    Parameters
    ----------
    limit : int, optional
        Page size requested per API call.
    max_posts : int, optional
        Hard cap on the number of posts returned.

    Returns
    -------
    list of post dicts (possibly fewer than *max_posts* if the feed ends).

    Fixes: added a request timeout (no default in `requests`), and replaced
    `data['socialPosts']` with `.get(...)` so an error payload without that
    key ends the pagination loop instead of raising KeyError.
    """
    url = "https://huggingface.co/api/posts"
    params = {'limit': limit}
    offset = 0
    all_posts = []
    while len(all_posts) < max_posts:
        params['offset'] = offset
        response = requests.get(url, params=params, timeout=30)
        data = response.json()
        batch = data.get('socialPosts') or []
        if not batch:
            break  # feed exhausted (or unexpected payload) — stop paging
        all_posts.extend(batch)
        offset += len(batch)
    return all_posts[:max_posts]
def display_post(post):
    """Print a framed, human-readable summary of one post to stdout.

    Shows the raw content, the total reaction count, the post URL, and —
    when present — the 'analysis' and 'category' fields added by the
    classification step (with French fallbacks otherwise).
    """
    bar = "=" * 50
    likes = sum(reaction["count"] for reaction in post["reactions"])
    print("\n" + bar)
    print("Contenu:\n" + post["rawContent"])
    print("Likes: " + str(likes))
    print("URL: " + post["url"])
    print("Analyse: " + post.get("analysis", "Analyse non disponible"))
    print("Catégorie: " + post.get("category", "Non analysé"))
    print(bar + "\n")
# Retrieve the posts (performs network requests at module import time)
posts = fetch_posts(limit=10, max_posts=30)  # fetch up to 30 posts
# Store each post under a distinct key: "post_0", "post_1", ...
post_variables = {f'post_{i}': post for i, post in enumerate(posts)}
# Tally of posts per category, keyed by the exact strings the prompt asks for.
# NOTE(review): keys are mixed-case but analyze_sentiment lowercases the
# model output, so 'Chatmodel/LLM' can never be matched — confirm intent.
category_count = {
    'questions': 0,
    'Chatmodel/LLM': 0,
    'image_generation': 0,
    'fine_tuning': 0,
    'ethics_bias': 0,
    'datasets': 0,
    'tools_libraries': 0,
    'tutorials_guides': 0,
    'debugging': 0,
    'Erreur: Réponse ambiguë': 0,
    'Erreur: Réponse vide ou non valide': 0
}
# Classify each post and tally its category.
# Fixes: the 11-branch elif chain is replaced by a single dict-membership
# check, and each post now adds 1 to its category — the original added 2,
# double-counting every post. Unknown categories are still silently skipped.
for key in post_variables:
    category = analyze_sentiment(post_variables[key]['rawContent'])
    if category in category_count:
        category_count[category] += 1
    # Attach the result to the post so display_post can show it later.
    post_variables[key]['category'] = category
# Print one specific post — e.g. the third one (index 2).
# Fix: guard the lookup; fewer than 3 fetched posts used to raise KeyError.
post_to_print = 'post_2'
if post_to_print in post_variables:
    display_post(post_variables[post_to_print])
else:
    print(f"\n{post_to_print} indisponible ({len(post_variables)} posts récupérés)")
# Show the per-category totals.
print("\nNombre de posts par catégorie:")
for category, count in category_count.items():
    print(f"{category}: {count}")