Anne314159 committed on
Commit
205f7ce
·
verified ·
1 Parent(s): dd1f93b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -38
app.py CHANGED
@@ -1,47 +1,48 @@
1
  import streamlit as st
2
  from transformers import pipeline
3
  import requests
4
- from bs4 import BeautifulSoup
5
 
6
  # Initialize a text generation pipeline
7
  generator = pipeline('text-generation', model='dbmdz/german-gpt2')
8
 
9
- # Define a function to fetch trending news related to a specific niche
10
- import streamlit as st
11
- from transformers import pipeline
12
- import requests
13
- from bs4 import BeautifulSoup
14
-
15
- # Initialize a text generation pipeline
16
- generator = pipeline('text-generation', model='dbmdz/german-gpt2')
17
-
18
def fetch_trending_news(niche):
    """Scrape up to five Google News headlines for *niche* from the past day.

    Args:
        niche: Free-text topic appended to the Google search query.

    Returns:
        A list of up to five headline strings; an empty list on any failure.
    """
    url = f"https://www.google.com/search?q={niche}+news&tbs=qdr:d"
    # Google rejects requests without a browser-like User-Agent.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"}
    try:
        # timeout keeps the Streamlit app from hanging on a stalled request.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, "html.parser")
            # NOTE(review): these class names are Google-internal and brittle;
            # they silently break whenever Google changes its result markup.
            news_items = soup.find_all("div", class_="ZINbbc xpd O9g5cc uUPGi")
            if not news_items:
                print("No news items found, check your selectors.")
                return []
            trending_news = []
            for item in news_items[:5]:
                title_div = item.find("div", class_="BNeawe vvjwJb AP7Wnd")
                # Guard against a missing title node; the original raised an
                # AttributeError here when find() returned None.
                if title_div is not None:
                    trending_news.append(title_div.text)
            return trending_news
        else:
            print(f"Failed to fetch news, status code: {response.status_code}")
            return []
    except requests.RequestException as e:
        # Narrowed from bare Exception: only network/HTTP errors are expected.
        print(f"Error fetching news: {e}")
        return []
39
 
40
-
41
-
42
# Define the pages
def page_trending_niche():
    """Streamlit page: show trending headlines for a user-supplied niche."""
    # Two-column layout: page title on the left, mascot image on the right.
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("What is trending in my niche?")
    with col2:
        st.image('Robot.png', use_column_width=True)

    niche = st.text_input('Enter your niche', 'German clinics')
    if st.button('Fetch Trending News'):
        st.write(f"Trending news in {niche}:")
        # Bug fix: removed a leftover debug print() of the fetched list.
        trending_news = fetch_trending_news(niche)
        for idx, news_item in enumerate(trending_news, start=1):
            st.write(f"{idx}. {news_item}")
 
 
 
 
 
 
 
58
 
59
  # Define the pages
60
  def page_social_media_generator():
 
1
  import streamlit as st
2
  from transformers import pipeline
3
  import requests
4
+ import os
5
 
6
  # Initialize a text generation pipeline
7
  generator = pipeline('text-generation', model='dbmdz/german-gpt2')
8
 
9
# Read the NewsAPI key from the environment. Bug fix: os.environ['NEWS_API_KEY']
# raised an uncaught KeyError at import time when the variable was unset;
# default to "" so the app still starts (fetches then fail with an auth error).
api_key = os.environ.get('NEWS_API_KEY', '')
10
+
11
def fetch_trending_news(topic, api_key, max_items=5):
    """Fetch recent news articles about *topic* from NewsAPI.

    Args:
        topic: Free-text search query passed as NewsAPI's ``q`` parameter.
        api_key: NewsAPI authentication key (https://newsapi.org).
        max_items: Maximum number of articles to request (default 5).

    Returns:
        A list of dicts with keys ``title``, ``source``, ``publishedAt`` and
        ``url``; an empty list on any request/HTTP failure.
    """
    base_url = "https://newsapi.org/v2/everything"
    params = {
        "q": topic,
        "apiKey": api_key,
        "language": "en",
        "sortBy": "relevancy",
        "pageSize": max_items
    }

    try:
        # timeout keeps the Streamlit UI responsive if NewsAPI stalls.
        response = requests.get(base_url, params=params, timeout=10)
        response.raise_for_status()
        news_data = response.json()
        articles = news_data.get("articles", [])

        formatted_articles = []
        for article in articles:
            # .get() guards against articles missing optional fields; the
            # original's article["..."] raised an uncaught KeyError (it is
            # not a RequestException, so the except below never caught it).
            source = article.get("source") or {}
            formatted_articles.append({
                "title": article.get("title"),
                "source": source.get("name"),
                "publishedAt": article.get("publishedAt"),
                "url": article.get("url")
            })

        return formatted_articles
    except requests.RequestException as e:
        # Surface network/HTTP errors in the UI and degrade to "no results".
        st.error(f"Error fetching news data: {e}")
        return []
41
 
 
 
42
# Define the pages
def page_trending_niche():
    """Streamlit page: list recent NewsAPI articles for a user-supplied niche."""
    # Bug fix: the placeholder string "YOUR_API_KEY_HERE" was being sent to
    # NewsAPI, so every request failed authentication. Read the real key from
    # the environment instead, matching the module-level `api_key`.
    api_key = os.environ.get("NEWS_API_KEY", "")

    # Two-column layout: page title on the left, mascot image on the right.
    col1, col2 = st.columns([3, 1])
    with col1:
        st.title("What is trending in my niche?")
    with col2:
        st.image('Robot.png', use_column_width=True)

    niche = st.text_input('Enter your niche', 'German clinics')
    if niche:
        news_items = fetch_trending_news(niche, api_key)
        if news_items:
            for item in news_items:
                st.write(f"**Title:** {item['title']}")
                st.write(f"**Source:** {item['source']}")
                st.write(f"**Published At:** {item['publishedAt']}")
                st.write(f"**URL:** [Read more]({item['url']})")
                st.write("---")
66
 
67
  # Define the pages
68
  def page_social_media_generator():