Prudvireddy committed
Commit 31fd2e3 · verified · 1 Parent(s): b71614f

Upload 16 files

Files changed (17)
  1. .env +1 -0
  2. .gitattributes +1 -0
  3. .gitignore +0 -0
  4. Dockerfile +26 -0
  5. Montserrat-Bold.ttf +0 -0
  6. agents.py +188 -0
  7. blog_post.docx +0 -0
  8. blog_post.md +54 -0
  9. packages.txt +2 -0
  10. requirements.txt +22 -0
  11. service_account.json +13 -0
  12. streamlit_app.py +61 -0
  13. token.json +1 -0
  14. tools.py +627 -0
  15. trained_agents_data.pkl +3 -0
  16. utils.py +217 -0
  17. video.mp4 +3 -0
.env ADDED
@@ -0,0 +1 @@
1
+ OPENAI_API_KEY = 'sk-proj-i6hkOW8E1LXn4N49xH3eT3BlbkFJUQbMtVJTTSHhcLAlAx14'
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ video.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
Binary file (114 Bytes). View file
 
Dockerfile ADDED
@@ -0,0 +1,26 @@
1
+ # Use an official Python runtime as a parent image
2
+ FROM python:3.10
3
+
4
+ # Install system dependencies
5
+ RUN apt-get update && apt-get install -y \
6
+ pandoc \
7
+ libgl1-mesa-glx \
8
+ && rm -rf /var/lib/apt/lists/*
9
+
10
+ # Create a non-root user and switch to it
11
+ RUN useradd -m -u 1000 user
12
+ USER user
13
+ ENV PATH="/home/user/.local/bin:$PATH"
14
+
15
+ # Set the working directory in the container
16
+ WORKDIR /app
17
+
18
+ # Copy the requirements file and install dependencies
19
+ COPY --chown=user ./requirements.txt requirements.txt
20
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
21
+
22
+ # Copy the rest of the application files
23
+ COPY --chown=user . /app
24
+
25
+ # Define the command to run the Streamlit application
26
+ CMD ["streamlit", "run", "streamlit_app.py", "--server.port", "8501", "--server.enableCORS", "false"]
Montserrat-Bold.ttf ADDED
Binary file (29.6 kB). View file
 
agents.py ADDED
@@ -0,0 +1,188 @@
1
+ from crewai import Agent, Task
2
+ from tools import scrape_website, generate_images_and_add_to_blog, generate_video
3
+ # from langchain_groq import ChatGroq
4
+ from langchain_openai import ChatOpenAI
5
+ from utils import post_image_and_text
6
+
7
+ def get_agents_and_tasks(is_token):
8
+
9
+ # llm = ChatGroq(model="llama3-70b-8192", api_key=groq_api_key)
10
+ llm = ChatOpenAI(model='gpt-4o-mini')
11
+
12
+ information_retriever_agent = Agent(
13
+ role="Web Information Retriever",
14
+ goal="To retrieve all the information from the website and summarize it.",
14
+ backstory="You are a web information retriever agent. You are an expert in scraping websites and summarizing their content.",
16
+ verbose=True,
17
+ llm=llm,
18
+ allow_delegation=False
19
+ )
20
+
21
+ task_scrape = Task(
22
+ description="Scrape all the information from the website: {website}.",
23
+ expected_output="Scraped information from the website: {website}",
24
+ agent=information_retriever_agent,
25
+ tools=[scrape_website]
26
+ )
27
+
28
+ task_summarize = Task(
29
+ description="Provide a neat summary of the company. Do not make things up. ",
30
+ expected_output="Detailed summary of a company web page. "
31
+ # "Start with Company name, Mission and Vision, Leadership. "
32
+ # "Explain it's Products and Services, Market presence, Financial highlights, Recent Developments and Future plans. "
33
+ # "If there are not mentioned, explain they have mentioned."
34
+ "Do not make things up.",
35
+ agent=information_retriever_agent,
36
+ context=[task_scrape]
37
+ )
38
+
39
+ blog_agent = Agent(
40
+ role="Blog Writer",
41
+ goal="Create captivating blogs that inspire and educate readers.",
42
+ backstory="You are a skilled blog writer for a company, creating insightful and engaging blog posts on various topics. You have a passion for sharing knowledge through writing. With years of experience in the industry, you know how to craft compelling narratives and provide valuable insights to your audience.",
43
+ verbose=True,
44
+ llm=llm,
45
+ allow_delegation=False
46
+ )
47
+
48
+ task_create_blog = Task(
49
+ description=
50
+ "Write a compelling blog on the topic '{topic}' for the company."
51
+ "Begin by explaining the topic, followed by an introduction to the company. Do not make things up. "
52
+ # "In the second half of blog, include a brief mention of the company to highlight its relevance to the topic. "
53
+ "The blog should cover various aspects relevant to {topic}, ensuring it provides comprehensive insights and value to the readers. "
54
+ "The blog should contain 2 images, so insert '<-IMAGE->' where an image has to be inserted. One image should be after the first paragraph. "
55
+ "The blog should not contain author details.",
56
+ expected_output="A full engaging and informative blog post about the topic: '{topic}'",
57
+ # output_file="topic_blog_post.md",
58
+ agent=blog_agent,
59
+ context=[task_summarize]
60
+ )
61
+
62
+ task_visual_prompts = Task(
63
+ description = "Replace <-IMAGE-> with prompts. There should be 2 images in the blog. "
64
+ "Every prompt should be enclosed in '<image> prompt </image>' tag. "
65
+ "The image should not contain any form of text, names of persons, company or company logo, etc. "
66
+ "Prompt is 'What you wish to see in the output image'. "
67
+ "A descriptive prompt that clearly defines elements, colors, and subjects will lead to better results. "
68
+ # "To control the weight of a given word use the format (word:weight), where word is the word you'd like to control the weight of and weight is a value between 0 and 1. "
69
+ "For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue. The weight applies to all words in the prompt. ",
70
+ # "Image should not contain names of persons, company or company logo, etc. ",
71
+ expected_output= "A full blog with prompts enclosed in '<image> prompt </image>' tag.",
72
+ agent = blog_agent,
73
+ context = [task_create_blog]
74
+ )
75
+
76
+ task_add_images = Task(
77
+ description = "Generate images and add to blog using the provided tool. If image generation fails, stop the execution.",
78
+ expected_output= "A full blog with images.",
79
+ agent = blog_agent,
80
+ # output_file="blog_post.md",
81
+ tools = [generate_images_and_add_to_blog],
82
+ context = [task_visual_prompts]
83
+ )
84
+
85
+ content_creation_agent = Agent(
86
+ role="Content Creator",
87
+ goal="To generate accurate and engaging narration and image prompt pairs for video scripts and subsequently generate videos using these pairs.",
88
+ backstory="The agent is designed to assist in creating engaging video content by generating narrations and image prompts, and then compiling them into videos.",
89
+ verbose=True,
90
+ llm=llm,
91
+ allow_delegation=False
92
+ )
93
+
94
+ # task_summarize_blog = Task(
95
+ # description = "Summarize the blog into two to three paragraphs.",
96
+ # # focusing on the topic: '{topic}'. "
97
+ # # "Ensure the summary is concise yet comprehensive, capturing the essence of the '{topic}' and its significance. ",
98
+ # expected_output = "Two to three paragraphs of summary of the blog, mainly focussed on '{topic}'. ",
99
+ # agent = content_creation_agent,
100
+ # context = [task_visual_prompts]
101
+ # )
102
+
103
+ # task_video_script = Task(
104
+ # description = "Write a script for video about the topic: {topic}. Remember to use context as just reference, do not use context as script.",
105
+ # expected_output = "Two to three paragraphs of script for the video, mainly focussed on '{topic}'. ",
106
+ # agent = content_creation_agent,
107
+ # context = [task_visual_prompts]
108
+ # )
109
+
110
+ task_generate_narration_image_pairs = Task(
111
+ description = "Generate narration and image prompt pairs for a video script about the topic: '{topic}'. The number of pairs is limited to two. The total narration should be less than 100 words."
112
+ "Image should not contain any form of text, names of persons, company or company logo, etc. "
113
+ "Prompt is 'What you wish to see in the output image'. "
114
+ "A descriptive prompt that clearly defines elements, colors, and subjects will lead to better results. "
115
+ "To control the weight of a given word use the format (word:weight), where word is the word you'd like to control the weight of and weight is a value between 0 and 1. "
116
+ "For example: The sky was a crisp (blue:0.3) and (green:0.8) would convey a sky that was blue and green, but more green than blue. The weight applies to all words in the prompt. ",
117
+ # "Image should not contain names of persons, company or company logo, etc. ",
118
+ expected_output="Pairs of sentences. Narrations are enclosed in <narration> narration here </narration> tag. Image prompts are enclosed in <image> image prompt here </image> tag.",
119
+ agent=content_creation_agent,
120
+ # context = [task_video_script]
121
+ )
122
+
123
+ task_generate_video = Task(
124
+ description="Generate video using narration and image prompt pairs. If image generation fails, stop the execution.",
125
+ expected_output="Path of the video",
126
+ agent=content_creation_agent,
127
+ context = [task_generate_narration_image_pairs],
128
+ tools=[generate_video]
129
+ )
130
+
131
+ LinkedInPosterAgent = Agent(
132
+ role="LinkedIn Poster",
133
+ goal="To post articles on LinkedIn",
134
+ backstory="This agent is responsible for automating the posting of articles on LinkedIn to keep the profile active and engaging.",
135
+ verbose=True,
136
+ llm=llm,
137
+ allow_delegation=False
138
+ )
139
+
140
+ BlogtoArticle = Task(
141
+ description="Convert the blog into an engaging LinkedIn post of 150 words. "
142
+ "Make the post attractive using emojis and symbols. "
143
+ "Use one image from the blog for the LinkedIn post",
144
+ expected_output="A dictionary containing image_path, post_content. ",
145
+ agent=LinkedInPosterAgent,
146
+ context = [task_add_images]
147
+ )
148
+
149
+ PostArticleToLinkedIn = Task(
150
+ description="""Post the article on LinkedIn.
151
+
152
+ token:
153
+
154
+ {token}
155
+
156
+ """,
157
+ expected_output="A confirmation that the article was successfully posted on LinkedIn.",
158
+ agent=LinkedInPosterAgent,
159
+ context = [BlogtoArticle],
160
+ tools=[post_image_and_text]
161
+ )
162
+
163
+ agents = [
164
+ information_retriever_agent,
165
+ blog_agent,
166
+ content_creation_agent,
167
+ ]
168
+
169
+ tasks = [
170
+ task_scrape,
171
+ task_summarize,
172
+ task_create_blog,
173
+ task_visual_prompts,
174
+ task_add_images,
175
+ # task_summarize_blog,
176
+ # task_video_script,
177
+ task_generate_narration_image_pairs,
178
+ task_generate_video,
179
+
180
+ ]
181
+
182
+ if is_token:
183
+ agents.append(LinkedInPosterAgent)
184
+ tasks.append(BlogtoArticle)
185
+ tasks.append(PostArticleToLinkedIn)
186
+
187
+ return agents, tasks
188
+
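Note: a minimal usage sketch (not part of the commit) of how the agents and tasks returned by get_agents_and_tasks are assembled into a Crew; it mirrors what streamlit_app.py does below, and the topic/website values are illustrative placeholders.

    from crewai import Crew
    from agents import get_agents_and_tasks

    # Build the pipeline without the LinkedIn posting stage.
    agents, tasks = get_agents_and_tasks(is_token=False)
    crew = Crew(agents=agents, tasks=tasks, verbose=2)

    # The inputs keys match the {topic}, {website} and {token} placeholders
    # used in the task descriptions above.
    result = crew.kickoff(inputs={
        'topic': 'Marketing Case Study',   # illustrative topic
        'website': 'https://example.com',  # illustrative URL
        'token': None,                     # no LinkedIn access token
    })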
blog_post.docx ADDED
Binary file (762 kB). View file
 
blog_post.md ADDED
@@ -0,0 +1,54 @@
1
+ **Marketing Case Study: Unveiling the Success Behind DIGIOTAI's Digital Transformation**
2
+
3
+ In today's rapidly evolving digital landscape, effective marketing strategies are paramount for businesses seeking to establish a competitive edge. One way to understand the intricacies and impact of marketing is through detailed case studies that provide actionable insights. This blog delves into a marketing case study, focusing on DIGIOTAI, a digital transformation (DX) enablement partner that empowers companies globally with advanced digital solutions.
4
+
5
+ ![](C:\Users\prudh\AppData\Local\Temp\tmpwwn2flpx.png)
6
+
7
+ **Introduction to DIGIOTAI**
8
+
9
+ DIGIOTAI stands as a beacon of innovation, facilitating digital transformation for reputed entities worldwide. With a mission to enable companies to thrive in the digital economy, DIGIOTAI offers a suite of services including IoT & Mobility, Data Science, Blockchain, AI/GenAI, AR/VR/MR, and Cloud Enablement. The company's prowess in delivering customized, integrated solutions has made it a trusted partner for businesses across various industries.
10
+
11
+ **Understanding the Marketing Case Study**
12
+
13
+ A marketing case study is a comprehensive analysis of how a company or brand implemented a marketing strategy to achieve its business objectives. It covers the challenges faced, the strategies employed, the execution process, and the results obtained. For DIGIOTAI, the marketing case study revolves around their successful campaigns and initiatives that have significantly impacted their market presence and client engagement.
14
+
15
+ **Key Aspects of DIGIOTAI's Marketing Strategy**
16
+
17
+ 1. **Target Audience Identification:**
18
+ DIGIOTAI's marketing team meticulously identified their target audience, focusing on industries such as Manufacturing, Automotive, Logistics & Supply Chain, Energy & Utilities. This targeted approach ensured that their marketing efforts were directed towards businesses that would benefit the most from their services.
19
+
20
+ 2. **Content Marketing:**
21
+ Leveraging rich content such as blogs, case studies, eBooks, and white papers, DIGIOTAI positioned itself as a thought leader in the digital transformation space. This not only educated their audience but also built trust and credibility.
22
+
23
+ 3. **Use of Advanced Technologies:**
24
+ The integration of AI and Data Science in their marketing strategies enabled DIGIOTAI to analyze market trends, customer behavior, and campaign performance with precision. This data-driven approach allowed for continuous optimization of their marketing efforts.
25
+
26
+ 4. **Strategic Partnerships:**
27
+ Forming alliances with other tech leaders and open-source communities, DIGIOTAI expanded its reach and enhanced its service offerings. These partnerships played a crucial role in amplifying their marketing message and establishing a robust network.
28
+
29
+ 5. **Customer-Centric Approach:**
30
+ By focusing on delivering exceptional value and personalized experiences, DIGIOTAI ensured high customer satisfaction and loyalty. Their marketing campaigns highlighted real-world benefits and success stories, making their solutions relatable and attractive to potential clients.
31
+
32
+ **Results and Achievements**
33
+
34
+ The implementation of these marketing strategies yielded remarkable results for DIGIOTAI. They witnessed a significant increase in their client base, enhanced brand recognition, and improved market penetration. Their tailored solutions and innovative approach resonated well with their target audience, leading to successful digital transformation projects across various sectors.
35
+
36
+ ![](C:\Users\prudh\AppData\Local\Temp\tmpidgx13ew.png)
37
+
38
+ **Conclusion**
39
+
40
+ The marketing case study of DIGIOTAI showcases the power of a well-crafted and executed marketing strategy in driving business success. By understanding their audience, leveraging advanced technologies, forming strategic partnerships, and maintaining a customer-centric approach, DIGIOTAI not only achieved its business goals but also set a benchmark in the digital transformation industry.
41
+
42
+ This case study serves as an inspiring example for other businesses aiming to navigate the complexities of the digital economy and underscores the importance of strategic marketing in achieving sustainable growth.
43
+
44
+ **Explore More with DIGIOTAI**
45
+
46
+ To learn more about DIGIOTAI's innovative solutions and how they can help your business thrive in the digital era, visit their website and explore their extensive range of services and resources. Stay updated with the latest trends and insights by subscribing to their newsletter.
47
+
48
+ **Join the Digital Transformation Journey with DIGIOTAI**
49
+
50
+ Embrace the future of business with DIGIOTAI's cutting-edge digital solutions. Connect with their team today and start your journey towards unparalleled digital excellence.
51
+
52
+ ---
53
+
54
+ This comprehensive blog post aims to provide valuable insights into the marketing strategies employed by DIGIOTAI, offering readers a clear understanding of the methodologies that led to their success.
packages.txt ADDED
@@ -0,0 +1,2 @@
1
+ pandoc
2
+ libgl1-mesa-glx
requirements.txt ADDED
@@ -0,0 +1,22 @@
1
+ pysqlite3-binary
2
+ embedchain
3
+ crewai
4
+ crewai-tools
5
+ langchain-openai
6
+ streamlit
7
+ gtts
8
+ pydub
9
+ groq
10
+ Pillow
11
+ sendgrid
12
+ moviepy
13
+ requests
14
+ pypandoc
15
+ opencv-python-headless
16
+ opencv-python
17
+ numpy
18
+ openai
19
+ google-api-python-client
20
+ google-auth
21
+ google-auth-oauthlib
22
+ google-auth-httplib2
service_account.json ADDED
@@ -0,0 +1,13 @@
1
+ {
2
+ "type": "service_account",
3
+ "project_id": "lithe-climber-430405-c1",
4
+ "private_key_id": "62057d717771d86af720216847c6e84c4b59660a",
5
+ "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDNU0o022lo79iL\noyHzX7Q1GSQ+ZBje99sAvn4v1XE91zQVbtS+tzgnd4ixnyMZ1setkwo4FENOCS9+\nvhDt+MxbrVVFp2wNVyvyFi0/a2MZl9NRfBqDH6W+/zDgcTdHKCUDMRV77PBWESrm\n6rQKpWwNkQPFeJEDALESMvkDbhQ0mLfiEyDa8TNdhNDCvtl8QN5HLeQ8q9ZJXNml\nN8zFpSOSugr83tU6SO4neCixSS2g/02Hz4bjf5L5jzVVBF+tvxlly7rWMGhrL7Q5\nm/F/c8l2ArNZgLHlOG1OoND5HPY9aq9V2HCB55Hl17ChE974rGT0QTBvyQFCgLxS\nuFHpE/VpAgMBAAECggEAGl5ectBUxrm6rSArrYY2bfVHHs4bFZNuKDfOm9UFAISh\nNzlvD/kWuJbU0r7Vsq1PIn72OoJ7rEtospW1NeKUWBe4EkRwj3wI9EOd1v2zEefa\nJyZRf1Jypkc+nyxbBYPl0PWeRTaGHP+Ca3pr2cwpxso/Wj5kHScbFnO+YA7kgGZ7\nnIQQlCrLHcvIxA6OswnYrkzLGU1BxAml58iRtswggElLacWRys/8yubrjrN/djNj\nMjB81ymBlsgsiN21W8D/RU4u8NNIRGPyaeNCYeZne11AZmTmjMKeDMxIj/MD6bIT\nQkEOVLagRuImbi2G83/GAFdJdQnEh7EvV52+if/OwwKBgQDqIkj797j0RHVTcPwF\npceaUs338VmhZTC/nBbdSSiL8Av6HtHqvdFclB/IwiUsu5xNI++xAfI+SKGIVNYg\nAsQSwkzyICulorcbAQ+0o1XMBxI3RzzTXDleJ6dCGhZ6X4TGRJOdJ2OMGlHcVbBl\n/CA7IgKrTRHaSbW46rnbiKp56wKBgQDggD6LgW9Kp0aOfooX0My0z3MuAYzohZhP\nV7HyKr9RIf6C5YiOjlvPefTK488PhTMF0S964r4z0UDLVwMgZqOjhHjMuciQXNIe\nvSiCW0eSpfSkx4HvjFota095PO8JdtEu2RSBqXplaKgF5MuRp0iMLz+6R0jMnvtf\n2Qyd1aRE+wKBgD/Ma9rM28cjyFyeUu4vCD7TBXDkgSnraucrX0eZIcVP+dOEBmtS\nofP6INcRoBOaY7LZcfznrNyv6eQ+ScKbPlZmP5VSktIljoN7PI5aW4ym+J27eaWW\nYcN4RqlKdomN8Z4dHaQbEZMhsOC7ML/5fcbfM5799zlmEHB89XwOt3VVAoGAb30A\nkoMq46YCkg0hQwZ8QmarSnOWqHp/0xc31Y2JCR+apyKaGEF2Mqjb+k37rDd3yTHD\nJIGp9025ocGKWfLe3PuSigjRI3AVIRLLJUFzX85umc5CJtZKije6dfjetJJ++4bu\neh3SHL5GgvbGaYTrsEJeoYF5E7T4HPdLHq7ULzUCgYBAtNHW2+xWvVCg08O6fiJP\nPaFlMf+aW+F7x5eWdz+h1r/L3EqK/Wd+BFHq3oj/18Qe9zQQlU1S/FHGMmmTezod\ncxV6bb7uVOG6dY2XSzngB2mF43Zk7VVdro6V2NnnqZ9zBCciSzRI5UX3/R9Kuo/S\nvahSzKojgxdWAgtcAkW6LA==\n-----END PRIVATE KEY-----\n",
6
+ "client_email": "[email protected]",
7
+ "client_id": "106218391187943604877",
8
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
9
+ "token_uri": "https://oauth2.googleapis.com/token",
10
+ "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
11
+ "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/digiotai-ai%40lithe-climber-430405-c1.iam.gserviceaccount.com",
12
+ "universe_domain": "googleapis.com"
13
+ }
streamlit_app.py ADDED
@@ -0,0 +1,61 @@
1
+ from agents import get_agents_and_tasks
2
+ from crewai import Crew
3
+ from utils import send_email_with_company_details, post_image_and_text
4
+ import streamlit as st
5
+ from dotenv import load_dotenv
6
+
7
+ load_dotenv()
8
+
9
+ # groq_api_key = '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'
10
+
11
+ def generate_blog_and_video(title, url, token):
12
+
13
+ if token is not None:
14
+ agents, tasks = get_agents_and_tasks(is_token=True)
15
+ else:
16
+ agents, tasks = get_agents_and_tasks(is_token=False)
17
+
18
+ print(agents, '\n', tasks)
19
+
20
+ crew = Crew(
21
+ agents=agents,
22
+ tasks= tasks,
23
+ verbose = 2,
24
+ )
25
+
26
+ pairs = crew.kickoff(inputs={'topic':title, 'website':url, 'token':token})
27
+ print(1)
28
+
29
+ return 'video.mp4'
30
+
31
+ st.title("Blog and Video Generator")
32
+
33
+ title = st.text_input("Title")
34
+ url = st.text_input("Website URL")
35
+ email = st.text_input("Email ID")
36
+
37
+ # token = 'AQXZFWxVIyE0IJ3vopgzsuG0t4uSg9lUnVwLOTLOkEOKIU9hhswEYXOpEzEveBFCZdcRP4B3vN8gd_HI920LTH5LFbO9TVkHbtn8P2qE_GcwBq_1LzGw-HwatIY3zU7auWhxCMVYAXsklAJx6FAa_Sx_MUtaVcnA42K1vhYxSS7s0ecQq0Thsdod1KrK2_nA0YjMc1lSnQQy1WDiK0HGN2-2jbDt13NpJTkmZqEWm6G9BRplTkUSeSSqGNuLEGpuY0hd50GcRovkcqpz9ZfvqkeiKhAYPPDTAGDX7HO5VjtHTui3ZCFEXvEbAHzng116xDfNnBE8-fsig7c9HP6c06UmmN6evA'
38
+
39
+ token = None
40
+ if st.checkbox(label='Post on LinkedIn'):
41
+ token = st.text_input('Enter your LinkedIn access token')
42
+
43
+
44
+
45
+ if st.button("Generate"):
46
+ if title and url and email:
47
+
48
+ st.success("Blog and Video will be sent to your email.")
49
+ video_path = generate_blog_and_video(title, url, token)
50
+ # st.write(video_path)
51
+
52
+ # with open('blog_post.md', 'r', encoding='latin-1') as f:
53
+ # blog = f.read()
54
+ # st.markdown(blog)
55
+ # st.video(video_path)
56
+
57
+ send_email_with_company_details(email, 'DIGIOTAI SOLUTIONS', title)
58
+
59
+
60
+ else:
61
+ st.error("Please provide all inputs.")
token.json ADDED
@@ -0,0 +1 @@
1
+ {"token": "ya29.a0AXooCgtRt0YL19FiMHwf1UzqHNlOF0CEgQxJ-lbjy5UXqg-CsRhIx2HZmalHUZLtkGnfI8KB_eKxttDVz9y05HYhIaqQYawwHVfF2AJ9SeEP7f7C-QmSxEarLEQ4WQ1T-eNJijTFjlZvoRgoRt21FE3K6fsKM89odWntgAaCgYKAaoSARASFQHGX2Mit0KJz3qesS_Bb4IpffHDJg0173", "refresh_token": "1//0gh5ZW3LeAPZHCgYIARAAGBASNwF-L9IrfG7kpoExwP30CVArRQebwQ3aXDxBdNWpuTrjd4BWTs0pLLrDj69_rRDZHDuo6bT297A", "token_uri": "https://oauth2.googleapis.com/token", "client_id": "576447402194-q30hgo3vttv2o98ajrm0b7qqgm9mpe7k.apps.googleusercontent.com", "client_secret": "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", "scopes": ["https://www.googleapis.com/auth/gmail.send"], "universe_domain": "googleapis.com", "account": "", "expiry": "2024-07-25T12:32:35.732138Z"}
tools.py ADDED
@@ -0,0 +1,627 @@
1
+ from langchain.tools import tool
2
+ from crewai_tools import ScrapeWebsiteTool
3
+ from gtts import gTTS
4
+ from pydub import AudioSegment
5
+ from groq import Groq
6
+ from PIL import Image, ImageDraw, ImageFont
7
+ from moviepy.editor import VideoFileClip, AudioFileClip, concatenate_videoclips, ImageClip
8
+ import requests
9
+ import os
10
+ import tempfile
11
+ import re
12
+ import base64
13
+ import pypandoc
14
+ import cv2
15
+ import numpy as np
16
+ import warnings
17
+ warnings.filterwarnings('ignore')
18
+
19
+ from pathlib import Path
20
+ from openai import OpenAI
21
+
22
+ # !sudo apt-get install pandoc
23
+
24
+
25
+ @tool
26
+ def scrape_website(website_url):
27
+ """Scrapes all the information from the given website.
28
+ Args:
29
+ website_url: A url of a company website.
30
+ Returns:
31
+ Scraped information from the given website.
32
+ """
33
+ scrapper = ScrapeWebsiteTool()
34
+ data = scrapper.run(website_url=website_url)
35
+
36
+ return data
37
+
38
+ def convert_md_to_docx(md_file_path, docx_file_path):
39
+ output = pypandoc.convert_file(md_file_path, 'docx', outputfile=docx_file_path)
40
+ assert output == "", "Conversion failed"
41
+ print(f"Converted {md_file_path} to {docx_file_path}")
42
+
43
+ # def generate_image(text, num):
44
+ # engine_id = "stable-diffusion-v1-6"
45
+ # api_host = os.getenv('API_HOST', 'https://api.stability.ai')
46
+ # api_key = 'sk-5VTo97D19Ruf2zLinj3pQbVXmLmh2Ps354PGkufTHtqmB2BN'
47
+ # if api_key is None:
48
+ # raise Exception("Missing Stability API key.")
49
+
50
+ # response = requests.post(
51
+ # f"{api_host}/v1/generation/{engine_id}/text-to-image",
52
+ # headers={
53
+ # "Content-Type": "application/json",
54
+ # "Accept": "application/json",
55
+ # "Authorization": f"Bearer {api_key}"
56
+ # },
57
+ # json={
58
+ # "text_prompts": [
59
+ # {
60
+ # "text": text
61
+ # }
62
+ # ],
63
+ # "cfg_scale": 7,
64
+ # "height": 512,
65
+ # "width": 512,
66
+ # "samples": 1,
67
+ # "steps": 10,
68
+ # },
69
+ # )
70
+
71
+ # print(response.status_code)
72
+ # if response.status_code != 200:
73
+ # raise Exception("Non-200 response: " + str(response.text))
74
+
75
+ # data = response.json()
76
+ # # base64_image = None
77
+ # for image in data["artifacts"]:
78
+ # with open(f"image_{num}.png", "wb") as f:
79
+ # f.write(base64.b64decode(image["base64"]))
80
+
81
+ # # if base64_image is None:
82
+ # # raise Exception("No image was generated.")
83
+
84
+ # return f'image_{num}.png'
85
+
86
+ # def generate_image_core(text, num):
87
+ # response = requests.post(
88
+ # f"https://api.stability.ai/v2beta/stable-image/generate/core",
89
+ # headers={
90
+ # "authorization": f"sk-6iUj0Jg2eeKDOpRJuDmCDSvPJdUJ6oP6qrQY3sujqR8h4ycF",
91
+ # "accept": "image/*"
92
+ # },
93
+ # files={"none": ''},
94
+ # data={
95
+ # "prompt": text,
96
+ # "output_format": "png",
97
+ # 'aspect_ratio': "3:2"
98
+ # },
99
+ # )
100
+
101
+ # print(response.status_code)
102
+ # if response.status_code == 200:
103
+ # with open(f"image_{num}.png", 'wb') as file:
104
+ # file.write(response.content)
105
+ # else:
106
+ # raise Exception(str(response.json()))
107
+ # return f'image_{num}.png'
108
+
109
+ # def generate_image_openai(text, num):
110
+
111
+ # client = OpenAI(api_key='sk-proj-TVCjX5VGWF5s18k0Z1G1T3BlbkFJZYp0HIC4NnxzqC0ne4YG')
112
+
113
+ # try:
114
+ # print(2)
115
+ # response = client.images.generate(
116
+ # model="dall-e-2",
117
+ # prompt=text,
118
+ # size="512x512",
119
+ # quality="standard",
120
+ # n=1
121
+ # )
122
+ # print(3)
123
+ # image_url = response.data[0].url
124
+ # print(4)
125
+ # print(f'image {num} generated')
126
+
127
+ # image_response = requests.get(image_url)
128
+ # print(5)
129
+ # if image_response.status_code == 200:
130
+ # with open(os.path.join(f'image_{num}.png'), 'wb') as file:
131
+ # print(6)
132
+ # file.write(image_response.content)
133
+ # print(7)
134
+ # else:
135
+ # raise Exception(f"Failed to download image with status code {image_response.status_code} and message: {image_response.text}")
136
+
137
+ # except Exception as e:
138
+ # raise Exception(f"Image generation failed: {e}")
139
+
140
+ # return f'image_{num}.png'
141
+
142
+ # @tool
143
+ # def generate_images_and_add_to_blog(blog_content):
144
+ # """This tool is used to generate images and add them to blog
145
+ # Args:
146
+ # blog_content: A complete blog with prompts enclosed in <image> prompt </image> tag.
147
+ # Returns:
148
+ # A complete blog"""
149
+ # print('hi')
150
+ # image_descriptions = re.findall(r'<image>(.*?)</image>', blog_content)
151
+
152
+ # for i, text in enumerate(image_descriptions):
153
+
154
+ # try:
155
+ # print(1)
156
+ # img_path = generate_image_openai(text, i)
157
+ # print(8)
158
+ # # image_tag = f'data:image/png;base64,{base64_img}'
159
+ # blog_content = blog_content.replace(f'<image>{text}</image>', f'![]({img_path})')
160
+ # print(9)
161
+ # except Exception as e:
162
+ # print(e)
163
+ # raise Exception(f"Image generation failed: {e}")
164
+
165
+ # with open('blog_post.md', 'w') as f:
166
+ # f.write(blog_content)
167
+
168
+ # convert_md_to_docx('blog_post.md', 'blog_post.docx')
169
+
170
+ # return blog_content
171
+
172
+ def generate_image_openai(text, num):
173
+
174
+ temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix='.png')
175
+ output_image = temp_output_file.name
176
+
177
+ client = OpenAI()
178
+
179
+ try:
180
+ response = client.images.generate(
181
+ model="dall-e-2",
182
+ prompt=text,
183
+ size="512x512",
184
+ quality="standard",
185
+ n=1
186
+ )
187
+ image_url = response.data[0].url
188
+
189
+ print(f'image {num} generated')
190
+
191
+ image_response = requests.get(image_url)
192
+ print('response')
193
+ if image_response.status_code == 200:
194
+ with open(output_image, 'wb') as file:
195
+ file.write(image_response.content)
196
+ print('write')
197
+ else:
198
+ raise Exception(f"Failed to download image with status code {image_response.status_code} and message: {image_response.text}")
199
+
200
+ except Exception as e:
201
+ raise Exception(f"Image generation failed: {e}")
202
+
203
+ return output_image
204
+
205
+ @tool
206
+ def generate_images_and_add_to_blog(blog_content):
207
+ """This tool is used to generate images and add them to blog
208
+ Args:
209
+ blog_content: A complete blog with prompts enclosed in <image> prompt </image> tag.
210
+ Returns:
211
+ A complete blog"""
212
+ print(blog_content)
213
+ print('*****************************************************')
214
+ print(type(blog_content))
215
+
216
+ blog_content = str(blog_content)
217
+
218
+ image_descriptions = re.findall(r'<image>(.*?)</image>', blog_content)
219
+
220
+ for i, text in enumerate(image_descriptions):
221
+
222
+ try:
223
+ temp_folder = tempfile.mkdtemp()
224
+ img_path = generate_image_openai(text, i)
225
+ # image_tag = f'data:image/png;base64,{base64_img}'
226
+ print(img_path)
227
+ blog_content = blog_content.replace(f'<image>{text}</image>', f'![]({img_path})')
228
+ print('blog content')
229
+ except Exception as e:
230
+ print(e)
231
+ raise Exception(f"Image generation failed: {e}")
232
+
233
+ try:
234
+
235
+ print('blog')
236
+ with open('blog_post.md', 'w') as f:
237
+ f.write(blog_content)
238
+
239
+ print('convert')
240
+
241
+ convert_md_to_docx('blog_post.md', 'blog_post.docx')
242
+
243
+ print('converted')
244
+
245
+ except Exception as e:
246
+ print(e)
247
+
248
+ return blog_content
249
+
250
+ def process_script(script):
251
+ """Used to process the script into dictionary format"""
252
+ dict = {}
253
+ text_for_image_generation = re.findall(r'<image>(.*?)</?image>', script, re.DOTALL)
254
+ text_for_speech_generation = re.findall(r'<narration>(.*?)</?narration>', script, re.DOTALL)
255
+ dict['text_for_image_generation'] = text_for_image_generation
256
+ dict['text_for_speech_generation'] = text_for_speech_generation
257
+ return dict
258
+
259
+ def generate_speech(text, lang='en', speed=1.0, num=0):
260
+ """
261
+ Generates speech for the given script using gTTS and adjusts the speed.
262
+ """
263
+ temp_speech_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
264
+ temp_speech_path = temp_speech_file.name
265
+
266
+
267
+ client = OpenAI()
268
+
269
+ speech_file_path = temp_speech_path
270
+ response = client.audio.speech.create(
271
+ model="tts-1",
272
+ voice="echo",
273
+ input= text
274
+ )
275
+
276
+ response.stream_to_file(speech_file_path)
277
+
278
+ # tts = gTTS(text=text, lang=lang)
279
+ # tts.save(temp_speech_path)
280
+
281
+ sound = AudioSegment.from_file(temp_speech_path)
282
+ if speed != 1.0:
283
+ sound_with_altered_speed = sound._spawn(sound.raw_data, overrides={
284
+ "frame_rate": int(sound.frame_rate * speed)
285
+ }).set_frame_rate(sound.frame_rate)
286
+ sound_with_altered_speed.export(temp_speech_path, format="mp3")
287
+ else:
288
+ sound.export(temp_speech_path, format="mp3")
289
+
290
+ temp_speech_file.close()
291
+ return temp_speech_path
292
+
293
+ # def image_generator(script):
294
+ # """Generates images for the given script.
295
+ # Saves it to a temporary directory and returns the path.
296
+ # Args:
297
+ # script: a complete script containing narrations and image descriptions."""
298
+
299
+ # # remove_temp_files('/tmp')
300
+
301
+ # images_dir = tempfile.mkdtemp()
302
+
303
+ # dict = process_script(script)
304
+ # for i, text in enumerate(dict['text_for_image_generation']):
305
+ # try:
306
+ # # core
307
+ # # response = requests.post(
308
+ # # f"https://api.stability.ai/v2beta/stable-image/generate/core",
309
+ # # headers={
310
+ # # "authorization": f"sk-5VTo97D19Ruf2zLinj3pQbVXmLmh2Ps354PGkufTHtqmB2BN",
311
+ # # "accept": "image/*"
312
+ # # },
313
+ # # files={"none": ''},
314
+ # # data={
315
+ # # "prompt": text,
316
+ # # "output_format": "png",
317
+ # # 'aspect_ratio': "9:16"
318
+ # # },
319
+ # # )
320
+
321
+ # # print(response.status_code)
322
+ # # if response.status_code == 200:
323
+ # # with open(os.path.join(images_dir, f'image_{i}.png'), 'wb') as file:
324
+ # # file.write(response.content)
325
+ # # else:
326
+ # # raise Exception(str(response.json()))
327
+
328
+ # # v1
329
+ # # engine_id = "stable-diffusion-v1-6"
330
+ # # api_host = os.getenv('API_HOST', 'https://api.stability.ai')
331
+ # # api_key = 'sk-Z3EF1ebJ9oJUht6Q9fsh861wOsNhRFkxYXMYHNl7gt7xpBMD'
332
+ # # if api_key is None:
333
+ # # raise Exception("Missing Stability API key.")
334
+
335
+ # # response = requests.post(
336
+ # # f"{api_host}/v1/generation/{engine_id}/text-to-image",
337
+ # # headers={
338
+ # # "Content-Type": "application/json",
339
+ # # "Accept": "application/json",
340
+ # # "Authorization": f"Bearer {api_key}"
341
+ # # },
342
+ # # json={
343
+ # # "text_prompts": [
344
+ # # {
345
+ # # "text": text
346
+ # # }
347
+ # # ],
348
+ # # "cfg_scale": 7,
349
+ # # "height": 512,
350
+ # # "width": 512,
351
+ # # "samples": 1,
352
+ # # "steps": 10,
353
+ # # },
354
+ # # )
355
+
356
+ # # print(response.status_code)
357
+ # # if response.status_code != 200:
358
+ # # raise Exception("Non-200 response: " + str(response.text))
359
+
360
+ # # data = response.json()
361
+ # # # base64_image = None
362
+ # # for image in data["artifacts"]:
363
+ # # with open(os.path.join(images_dir, f'image_{i}.png'), "wb") as f:
364
+ # # f.write(base64.b64decode(image["base64"]))
365
+
366
+ # pass
367
+
368
+ # except Exception as e:
369
+ # print(e)
370
+ # raise Exception(f"Image generation failed: {e}")
371
+
372
+ # return images_dir
373
+
374
+ def image_generator(script):
375
+ """Generates images for the given script.
376
+ Saves it to a temporary directory and returns the path.
377
+ Args:
378
+ script: a complete script containing narrations and image descriptions."""
379
+
380
+ # remove_temp_files('/tmp')
381
+
382
+ images_dir = tempfile.mkdtemp()
383
+
384
+ client = OpenAI()
385
+ dict = process_script(script)
386
+ for i, text in enumerate(dict['text_for_image_generation']):
387
+ try:
388
+ response = client.images.generate(
389
+ model="dall-e-2",
390
+ prompt=text,
391
+ size="512x512",
392
+ quality="standard",
393
+ n=1
394
+ )
395
+ image_url = response.data[0].url
396
+
397
+ print(f'image {i} generated')
398
+ # Download the image
399
+ image_response = requests.get(image_url)
400
+ if image_response.status_code == 200:
401
+ with open(os.path.join(images_dir, f'image_{i}.png'), 'wb') as file:
402
+ file.write(image_response.content)
403
+ else:
404
+ raise Exception(f"Failed to download image with status code {image_response.status_code} and message: {image_response.text}")
405
+
406
+ except Exception as e:
407
+ raise Exception(f"Image generation failed: {e}")
408
+
409
+ return images_dir
410
+
411
+ def speech_generator(script):
412
+ """
413
+ Generates speech files for the given script using gTTS.
414
+ Saves them to a temporary directory and returns the path.
415
+ Args:
416
+ script: a complete script containing narrations and image descriptions.
417
+ """
418
+ speeches_dir = tempfile.mkdtemp()
419
+
420
+ dict = process_script(script)
421
+ for i, text in enumerate(dict['text_for_speech_generation']):
422
+ speech_path = generate_speech(text, num=i)
423
+ print(f'speech {i} generated')
424
+ os.rename(speech_path, os.path.join(speeches_dir, f'speech_{i}.mp3'))
425
+
426
+ return speeches_dir, dict['text_for_speech_generation']
427
+
428
+ def split_text_into_chunks(text, chunk_size):
429
+ words = text.split()
430
+ return [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
431
+
432
+ def add_text_to_video(input_video, text, duration=1, fontsize=40, fontcolor=(255, 255, 255),
433
+ outline_thickness=2, outline_color=(0, 0, 0), delay_between_chunks=0.3,
434
+ font_path='Montserrat-Bold.ttf'):
435
+ temp_output_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
436
+ output_video = temp_output_file.name
437
+
438
+ chunks = split_text_into_chunks(text, 3) # Adjust chunk size as needed
439
+
440
+ cap = cv2.VideoCapture(input_video)
441
+ if not cap.isOpened():
442
+ raise ValueError("Error opening video file.")
443
+
444
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
445
+ fps = int(cap.get(cv2.CAP_PROP_FPS))
446
+ width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
447
+ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
448
+ out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))
449
+
450
+ frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
451
+ chunk_duration_frames = duration * fps
452
+ delay_frames = int(delay_between_chunks * fps)
453
+
454
+ if not os.path.exists(font_path):
455
+ raise FileNotFoundError(f"Font file not found: {font_path}")
456
+
457
+ try:
458
+ font = ImageFont.truetype(font_path, fontsize)
459
+ except Exception as e:
460
+ raise RuntimeError(f"Error loading font: {e}")
461
+
462
+ current_frame = 0
463
+
464
+ while cap.isOpened():
465
+ ret, frame = cap.read()
466
+ if not ret:
467
+ break
468
+
469
+ frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
470
+ draw = ImageDraw.Draw(frame_pil)
471
+
472
+ chunk_index = current_frame // (chunk_duration_frames + delay_frames)
473
+
474
+ if current_frame % (chunk_duration_frames + delay_frames) < chunk_duration_frames and chunk_index < len(chunks):
475
+ chunk = chunks[chunk_index]
476
+ text_bbox = draw.textbbox((0, 0), chunk, font=font)
477
+ text_width, text_height = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]
478
+ text_x = (width - text_width) // 2
479
+ text_y = height - 100 # Position text at the bottom
480
+
481
+ if text_width > width:
482
+ words = chunk.split()
483
+ half = len(words) // 2
484
+ line1 = ' '.join(words[:half])
485
+ line2 = ' '.join(words[half:])
486
+
487
+ text_size_line1 = draw.textsize(line1, font=font)
488
+ text_size_line2 = draw.textsize(line2, font=font)
489
+ text_x_line1 = (width - text_size_line1[0]) // 2
490
+ text_x_line2 = (width - text_size_line2[0]) // 2
491
+ text_y = height - 250 - text_size_line1[1] # Adjust vertical position for two lines
492
+
493
+ for dx in range(-outline_thickness, outline_thickness + 1):
494
+ for dy in range(-outline_thickness, outline_thickness + 1):
495
+ if dx != 0 or dy != 0:
496
+ draw.text((text_x_line1 + dx, text_y + dy), line1, font=font, fill=outline_color)
497
+ draw.text((text_x_line2 + dx, text_y + text_size_line1[1] + dy), line2, font=font, fill=outline_color)
498
+
499
+ draw.text((text_x_line1, text_y), line1, font=font, fill=fontcolor)
500
+ draw.text((text_x_line2, text_y + text_size_line1[1]), line2, font=font, fill=fontcolor)
501
+
502
+ else:
503
+ for dx in range(-outline_thickness, outline_thickness + 1):
504
+ for dy in range(-outline_thickness, outline_thickness + 1):
505
+ if dx != 0 or dy != 0:
506
+ draw.text((text_x + dx, text_y + dy), chunk, font=font, fill=outline_color)
507
+
508
+ draw.text((text_x, text_y), chunk, font=font, fill=fontcolor)
509
+
510
+ frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
511
+
512
+ out.write(frame)
513
+ current_frame += 1
514
+
515
+ # Ensure loop breaks after processing all frames
516
+ if current_frame >= frame_count:
517
+ break
518
+
519
+ cap.release()
520
+ out.release()
521
+ cv2.destroyAllWindows()
522
+
523
+ return output_video
524
+
525
+ def apply_zoom_in_effect(clip, zoom_factor=1.2):
526
+ width, height = clip.size
527
+ duration = clip.duration
528
+
529
+ def zoom_in_effect(get_frame, t):
530
+ frame = get_frame(t)
531
+ zoom = 1 + (zoom_factor - 1) * (t / duration)
532
+ new_width, new_height = int(width * zoom), int(height * zoom)
533
+ resized_frame = cv2.resize(frame, (new_width, new_height))
534
+
535
+ x_start = (new_width - width) // 2
536
+ y_start = (new_height - height) // 2
537
+ cropped_frame = resized_frame[y_start:y_start + height, x_start:x_start + width]
538
+
539
+ return cropped_frame
540
+
541
+ return clip.fl(zoom_in_effect, apply_to=['mask'])
542
+
543
+ def create_video_from_images_and_audio(images_dir, speeches_dir, final_video_filename, all_captions):
544
+ """Creates video using images and audios.
545
+ Args:
546
+ images_dir: path to images folder
547
+ speeches_dir: path to speeches folder
548
+ final_video_filename: the topic name which will be used as final video file name"""
549
+ print('hi')
550
+ client = Groq(api_key='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')
551
+ # images_paths = sorted(os.listdir(images_dir))
552
+ # audio_paths = sorted(os.listdir(speeches_dir))
553
+ images_paths = sorted([os.path.join(images_dir, img) for img in os.listdir(images_dir) if img.endswith('.png') or img.endswith('.jpg')])
554
+ audio_paths = sorted([os.path.join(speeches_dir, speech) for speech in os.listdir(speeches_dir) if speech.endswith('.mp3')])
555
+ clips = []
556
+ temp_files = []
557
+ video_dir = tempfile.mkdtemp()
558
+
559
+ for i in range(min(len(images_paths), len(audio_paths))):
560
+ img_clip = ImageClip(os.path.join(images_dir, images_paths[i]))
561
+ audioclip = AudioFileClip(os.path.join(speeches_dir, audio_paths[i]))
562
+ videoclip = img_clip.set_duration(audioclip.duration)
563
+ zoomed_clip = apply_zoom_in_effect(videoclip, 1.3)
564
+
565
+ # with open(os.path.join(speeches_dir, audio_paths[i]), "rb") as file:
566
+ # transcription = client.audio.transcriptions.create(
567
+ # file=(audio_paths[i], file.read()),
568
+ # model="whisper-large-v3",
569
+ # response_format="verbose_json",
570
+ # )
571
+ # caption = transcription.text
572
+ temp_video_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
573
+ zoomed_clip.write_videofile(temp_video_path, codec='libx264', fps=24)
574
+ temp_files.append(temp_video_path)
575
+
576
+ caption = all_captions[i]
577
+ final_video_path = add_text_to_video(temp_video_path, caption, duration=1, fontsize=20)
578
+ temp_files.append(final_video_path)
579
+
580
+ final_clip = VideoFileClip(final_video_path)
581
+ final_clip = final_clip.set_audio(audioclip)
582
+
583
+ print(f'create small video {i}')
584
+ clips.append(final_clip)
585
+
586
+ final_clip = concatenate_videoclips(clips)
587
+ if not final_video_filename.endswith('.mp4'):
588
+ final_video_filename = final_video_filename + '.mp4'
589
+ final_clip.write_videofile(os.path.join(video_dir, final_video_filename), codec='libx264', fps=24)
590
+
591
+ # Close all video files properly
592
+ for clip in clips:
593
+ clip.close()
594
+
595
+ # Remove all temporary files
596
+ for temp_file in temp_files:
597
+ try:
598
+ os.remove(temp_file)
599
+ except Exception as e:
600
+ print(f"Error removing file {temp_file}: {e}")
601
+
602
+ return os.path.join(video_dir, final_video_filename)
603
+
604
+ @tool
605
+ def generate_video(pairs, final_video_filename):
606
+ """ Generates video using narration and image prompt pairs.
607
+
608
+ Args:
609
+ pairs: A string of narration and image prompt pairs enclosed in <narration> and <image> tags.
610
+ final_video_filename: the topic name which will be used as final video file name
611
+
612
+ Returns:
613
+ Generated video path"""
614
+
615
+ images_dir = image_generator(pairs)
616
+ print(images_dir)
617
+ speeches_dir, all_captions = speech_generator(pairs)
618
+ print(speeches_dir)
619
+ video_path = create_video_from_images_and_audio(images_dir, speeches_dir, final_video_filename, all_captions)
620
+ print('video', video_path)
621
+
622
+ with open(video_path, 'rb') as f:
623
+ video = f.read()
624
+ with open('video.mp4', 'wb') as f:
625
+ f.write(video)
626
+
627
+ return video_path
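Note: a minimal sketch (not part of the commit) of the pair format that generate_video expects from task_generate_narration_image_pairs and of what process_script extracts from it; the script string below is illustrative, and importing tools assumes its dependencies are installed.

    from tools import process_script

    script = (
        "<narration>Digital transformation is reshaping every industry.</narration>"
        "<image>A futuristic city skyline with glowing data streams, (blue:0.6)</image>"
        "<narration>Trusted partners help companies adopt AI at scale.</narration>"
        "<image>An abstract network of glowing nodes above a factory floor</image>"
    )

    parsed = process_script(script)
    print(parsed['text_for_speech_generation'])  # two narration strings
    print(parsed['text_for_image_generation'])   # two image prompts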
trained_agents_data.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:926248e52d1fa532c317e37da24ed652ae64110f8219cb5e061668bd3091f048
3
+ size 5
utils.py ADDED
@@ -0,0 +1,217 @@
1
+ from email.mime.text import MIMEText
2
+ from google.oauth2 import service_account
3
+ from googleapiclient.http import MediaFileUpload
4
+ import base64
5
+ import os
6
+ import base64
7
+ import streamlit as st
8
+ from google.auth.transport.requests import Request
9
+ from google.oauth2.credentials import Credentials
10
+ from google_auth_oauthlib.flow import InstalledAppFlow
11
+ from googleapiclient.discovery import build
12
+ from email.mime.multipart import MIMEMultipart
13
+ import requests
14
+ import json
15
+ from langchain.tools import tool
16
+
17
+ # Constants
18
+ SCOPES_DRIVE = ['https://www.googleapis.com/auth/drive']
19
+ SERVICE_ACCOUNT_FILE = 'service_account.json'
20
+ PARENT_FOLDER_ID = "1REXfwxk9dcPdpZXJOFZSur3880soVN9y"
21
+
22
+
23
+ # Authenticate and return credentials for Google Drive
24
+ def authenticate_drive():
25
+ credentials = service_account.Credentials.from_service_account_file(
26
+ SERVICE_ACCOUNT_FILE, scopes=SCOPES_DRIVE)
27
+ return credentials
28
+
29
+ # Upload file to Google Drive
30
+ def upload_file(filepath, filename, parent_folder_id):
31
+ creds = authenticate_drive()
32
+ service = build('drive', 'v3', credentials=creds)
33
+
34
+ file_metadata = {
35
+ 'name': filename,
36
+ 'parents': [parent_folder_id]
37
+ }
38
+
39
+ media = MediaFileUpload(filepath, resumable=True)
40
+ file = service.files().create(body=file_metadata, media_body=media, fields='id').execute()
41
+ print(f'File ID: {file.get("id")}')
42
+ return file.get('id')
43
+
44
+ SCOPES = ["https://www.googleapis.com/auth/gmail.send"]
45
+
46
+ def authenticate_gmail():
47
+ """Authenticate and return the Gmail service."""
48
+ creds = None
49
+ if os.path.exists("token.json"):
50
+ creds = Credentials.from_authorized_user_file("token.json", SCOPES)
51
+ if not creds or not creds.valid:
52
+ if creds and creds.expired and creds.refresh_token:
53
+ creds.refresh(Request())
54
+ else:
55
+ flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
56
+ creds = flow.run_local_server(port=0)
57
+ with open("token.json", "w") as token:
58
+ token.write(creds.to_json())
59
+ service = build("gmail", "v1", credentials=creds)
60
+ return service
61
+
62
+ def send_email_with_company_details(user_email, company_name, topic):
63
+
64
+
65
+ blog_id = upload_file('blog_post.docx', 'blog post', PARENT_FOLDER_ID)
66
+ video_id = upload_file('video.mp4', 'video', PARENT_FOLDER_ID)
67
+
68
+ # Prepare email content
69
+ blog_link = f"https://drive.google.com/file/d/{blog_id}/view?usp=sharing"
70
+ video_link = f"https://drive.google.com/file/d/{video_id}/view?usp=sharing"
71
+ email_subject = f"Blog and Video for the topic {topic}"
72
+ email_body = f"""
73
+ <html>
74
+ <body>
75
+ <p>Hello,</p>
76
+ <p>The requested blog and video have been shared with you by <b>{company_name}</b>.</p>
77
+ <p>You can view the files using the following links:</p>
78
+ <b>Blog:</b>
79
+ <a href="{blog_link}"> {topic}</a>
80
+ <br>
81
+ <br>
82
+ <b>Video:</b>
83
+ <a href="{video_link}"> {topic}</a>
84
+ <br>
85
+ <p>Best regards,<br>{company_name}</p>
86
+ </body>
87
+ </html>
88
+ """
89
+
90
+ try:
91
+ # Create message container - the correct MIME type is multipart/alternative.
92
+ msg = MIMEMultipart('alternative')
93
+ msg['to'] = user_email
94
+ msg['subject'] = email_subject
95
+
96
+ # Attach parts into message container
97
+ part1 = MIMEText(email_body, 'plain')
98
+ part2 = MIMEText(email_body, 'html')
99
+ msg.attach(part1)
100
+ msg.attach(part2)
101
+
102
+ raw_string = base64.urlsafe_b64encode(msg.as_bytes()).decode()
103
+
104
+ service = authenticate_gmail()
105
+ sent_message = service.users().messages().send(userId='me', body={'raw': raw_string}).execute()
106
+
107
+ # # Connect to the SMTP server
108
+ # server = smtplib.SMTP('smtp.gmail.com', 587)
109
+ # server.starttls() # Secure the connection
110
+ # server.login(SENDER_EMAIL, PASSWORD)
111
+ # server.sendmail(SENDER_EMAIL, user_email, msg.as_string())
112
+ print('Email sent successfully!')
113
+ except Exception as e:
114
+ print(f'Error sending email: {str(e)}')
115
+
116
+ # Upload blog post and video, then share them and send an email
117
+ # def main():
118
+ # blog_filepath = 'blog_post.docx'
119
+ # video_filepath = 'video.mp4'
120
+ # user_email = receiver_email
121
+ # company_name = 'digiotai'
122
+
123
+ # # Upload blog post
124
+ # blog_file_id = upload_file(blog_filepath, 'blog post', PARENT_FOLDER_ID)
125
+ # # Upload video
126
+ # video_file_id = upload_file(video_filepath, 'video', PARENT_FOLDER_ID)
127
+
128
+ # send_email_with_company_details(blog_file_id, video_file_id, user_email, company_name)
129
+
130
+ # main()
131
+
132
+
133
+ def get_urn(token):
134
+ # access_token = '<your_access_token>'
135
+ url = 'https://api.linkedin.com/v2/userinfo'
136
+
137
+ headers = {
138
+ 'Authorization': f'Bearer {token}'
139
+ }
140
+
141
+ response = requests.get(url, headers=headers)
142
+
143
+ if response.status_code == 200:
144
+ user_info = response.json()
145
+ print(user_info['sub'])
146
+ return user_info['sub']
147
+ else:
148
+ print(f'Failed to fetch user info: {response.status_code}')
149
+ print(response.text)
150
+
151
+ @tool
152
+ def post_image_and_text(
153
+ token: str, title: str, image_path: str, text_content: str
154
+ ):
155
+ """
156
+ Posts an article on LinkedIn with an image.
157
+
158
+ :param token: str. LinkedIn OAuth token.
159
+ :param title: str. Article title.
160
+ :param text_content: str. Article content.
161
+ :param image_path: str. Local file path of the image to be used as a thumbnail.
162
+ """
163
+
164
+ owner = get_urn(token)
165
+
166
+ # Initialize the upload to get the upload URL and image URN
167
+ init_url = "https://api.linkedin.com/rest/images?action=initializeUpload"
168
+ headers = {
169
+ "LinkedIn-Version": "202401",
170
+ "X-RestLi-Protocol-Version": "2.0.0",
171
+ "Content-Type": "application/json",
172
+ "Authorization": f"Bearer {token}",
173
+ }
174
+ init_data = json.dumps({"initializeUploadRequest": {"owner": f'urn:li:person:{owner}'}})
175
+ init_response = requests.post(init_url, headers=headers, data=init_data)
176
+ print(init_response.content)
177
+ if init_response.status_code != 200:
178
+ raise Exception(f"Failed to initialize upload: {init_response.text}")
179
+
180
+ init_response_data = init_response.json()["value"]
181
+ upload_url = init_response_data["uploadUrl"]
182
+ image_urn = init_response_data["image"]
183
+
184
+ # Upload the file
185
+ with open(image_path, "rb") as f:
186
+ upload_response = requests.post(upload_url, files={"file": f})
187
+ if upload_response.status_code not in [200, 201]:
188
+ raise Exception(f"Failed to upload file: {upload_response.text}")
189
+
190
+ # Create the post with the uploaded image URN as thumbnail
191
+ post_url = "https://api.linkedin.com/rest/posts"
192
+ post_data = json.dumps(
193
+ {
194
+ "author": f'urn:li:person:{owner}',
195
+ "commentary": text_content,
196
+ "visibility": "PUBLIC",
197
+ "distribution": {
198
+ "feedDistribution": "MAIN_FEED",
199
+ "targetEntities": [],
200
+ "thirdPartyDistributionChannels": [],
201
+ },
202
+ "content": {
203
+ "media": {
204
+ "title": title,
205
+ "id": image_urn,
206
+ }
207
+ },
208
+ "lifecycleState": "PUBLISHED",
209
+ "isReshareDisabledByAuthor": False,
210
+ }
211
+ )
212
+ post_response = requests.post(post_url, headers=headers, data=post_data)
213
+ print(post_response.content)
214
+ if post_response.status_code in [200, 201]:
215
+ return "Linkedin article has been posted successfully!"
216
+ else:
217
+ raise Exception(f"Failed to post article: {post_response.text}")
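Note: a minimal usage sketch (not part of the commit) for send_email_with_company_details; it assumes blog_post.docx and video.mp4 exist in the working directory and that service_account.json and token.json hold valid credentials. The recipient address is a placeholder.

    from utils import send_email_with_company_details

    # Uploads blog_post.docx and video.mp4 to Drive, then emails the share links.
    send_email_with_company_details(
        user_email='reader@example.com',      # hypothetical recipient
        company_name='DIGIOTAI SOLUTIONS',
        topic='Marketing Case Study',
    )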
video.mp4 ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e72d993994dccaa7ae8c0236059aa2d457a8ce79f31a42b98adaa19de8d05ada
3
+ size 2672279