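# Classify recent Hugging Face community posts with Meta-Llama-3-8B-Instruct via
# the serverless Inference API, then print one post and a per-category tally.
# Requires an access token exposed as the TOKEN environment variable (e.g. a Space secret).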
import requests
import json
import os
# Your post analyzer
api_token = os.environ.get("TOKEN")
API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct"
headers = {"Authorization": f"Bearer {api_token}"}
def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
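# Note: the prompt below marks turns with bare "system" / "user" / "assistant"
# lines. Meta-Llama-3-8B-Instruct normally expects its chat template instead;
# a sketch of that layout, should you want to switch to it, looks like:
#   <|begin_of_text|><|start_header_id|>system<|end_header_id|>
#
#   {system prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>
#
#   {post text}<|eot_id|><|start_header_id|>assistant<|end_header_id|>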
def analyze_sentiment(text):
    output = query({
        "inputs": f'''
system
You're going to deeply analyze the texts I'm going to give you and you're only going to tell me which category they belong to by answering only the words that correspond to the following categories:
For posts that talk about chat models/LLM, return "Chatmodel/LLM"
For posts that talk about image generation models, return "image_generation"
For texts that ask for information from the community, return "questions"
For posts about fine-tuning or model adjustment, return "fine_tuning"
For posts related to ethics and bias in AI, return "ethics_bias"
For posts about datasets and data preparation, return "datasets"
For posts about tools and libraries, return "tools_libraries"
For posts containing tutorials and guides, return "tutorials_guides"
For posts about debugging and problem-solving, return "debugging"
Respond only with the category name, without any additional explanation or text.
user
{text}
assistant
''',
        # Return only the completion (not the echoed prompt) so the category comparison below can match.
        "parameters": {"return_full_text": False},
    })
    if isinstance(output, list) and len(output) > 0:
        # Keep the model's casing so it matches the category_count keys.
        response = output[0].get('generated_text', '').strip()
        return response
    return "Error: Empty or invalid response"
def fetch_posts(limit=10, max_posts=50):
url = "https://huggingface.co/api/posts"
params = {'limit': limit}
offset = 0
all_posts = []
while len(all_posts) < max_posts:
params['offset'] = offset
response = requests.get(url, params=params)
data = response.json()
if not data['socialPosts']:
break
all_posts.extend(data['socialPosts'])
offset += len(data['socialPosts'])
return all_posts[:max_posts]
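# Pretty-print one post: its content, total reaction count, URL, and the
# analysis/category fields (category is filled in by the loop further down).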
def display_post(post):
print(f"\n{'=' * 50}")
print(f"Contenu:\n{post['rawContent']}")
print(f"Likes: {sum(reaction['count'] for reaction in post['reactions'])}")
print(f"URL: {post['url']}")
print(f"Analyse: {post.get('analysis', 'Analyse non disponible')}")
print(f"Catégorie: {post.get('category', 'Non analysé')}")
print(f"{'=' * 50}\n")
# Fetch the posts
posts = fetch_posts(limit=10, max_posts=30)  # Fetches up to 30 posts
# Store the posts in separate variables
post_variables = {f'post_{i}': post for i, post in enumerate(posts)}
# Dictionary to count posts per category
category_count = {
    'questions': 0,
    'Chatmodel/LLM': 0,
    'image_generation': 0,
    'fine_tuning': 0,
    'ethics_bias': 0,
    'datasets': 0,
    'tools_libraries': 0,
    'tutorials_guides': 0,
    'debugging': 0,
    'Error: Ambiguous response': 0,
    'Error: Empty or invalid response': 0
}
# Analyze each post and record its category in the counter
for key in post_variables:
    category = analyze_sentiment(post_variables[key]['rawContent'])
    # Count the post once if the model returned a known category.
    if category in category_count:
        category_count[category] += 1
    post_variables[key]['category'] = category
# Print a specific post
post_to_print = 'post_2'  # For example, the third post (index 2)
display_post(post_variables[post_to_print])
# Display the number of posts per category
print("\nNumber of posts per category:")
for category, count in category_count.items():
    print(f"{category}: {count}")