import os
import sys
import subprocess

# Ensure the required library is installed; install it with the current interpreter's pip if missing
try:
    import huggingface_hub
except ImportError:
    subprocess.check_call([sys.executable, "-m", "pip", "install", "huggingface_hub"])
    import huggingface_hub

# Access Hugging Face token from environment variables
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token not found. Please set it as an environment variable.")

# Log in programmatically
from huggingface_hub import login
login(token=hf_token)
print("Successfully logged in to Hugging Face!")


import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from ast import literal_eval
import gradio as gr
import time

# Device configuration
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model once and move the model to the device
tokenizer_schedule = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
model_schedule = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B-Instruct").to(device)

# Caption generation uses the same checkpoint, so reuse the same objects
# instead of loading a second copy of the weights into memory
tokenizer_caption = tokenizer_schedule
model_caption = model_schedule

# Extract the first {...} block from the raw model output and parse it into a dict
def clean_and_extract_schedule(raw_output):
    try:
        start_idx = raw_output.find("{")
        end_idx = raw_output.rfind("}")
        # rfind returns -1 when no closing brace is found, so check before slicing
        if start_idx != -1 and end_idx != -1:
            dictionary_text = raw_output[start_idx:end_idx + 1]
            return literal_eval(dictionary_text)
        else:
            raise ValueError("No valid dictionary structure found.")
    except Exception as e:
        print(f"Error extracting dictionary: {e}")
        return None
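
# Example of the shape this helper is expected to return (illustrative only;
# the actual task descriptions depend on the model's output for a given prompt):
# {
#     "Monday": "Tease the hoodie release with a behind-the-scenes photo",
#     "Tuesday": "Post a countdown story and poll",
#     ...
# }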

# Generate the marketing schedule based on user prompt
def generate_marketing_schedule(prompt):
    complete_prompt = f"""
    You are an expert marketing strategist. Your task is to create a detailed marketing schedule based on the user's request.

    User request: {prompt}

    Provide a weekly marketing schedule in the following format:
    - Each day of the week (Monday to Sunday) should have a specific marketing activity.
    - Activities should be tailored to the user's campaign goal and brand.
    - The output should be in a Python dictionary format, like:
      {{
          "Monday": "Task description",
          "Tuesday": "Task description",
          ...
      }}
    Output only the schedule. Do not include explanations or additional text.

    Marketing schedule:
    """
    inputs = tokenizer_schedule(complete_prompt, return_tensors="pt").to(device)

    outputs = model_schedule.generate(
        **inputs,
        max_new_tokens=200,
        num_return_sequences=1,
        temperature=0.8,
        top_p=0.9,
        top_k=50,
        do_sample=True,
        pad_token_id=tokenizer_schedule.eos_token_id
    )

    # Decode only the newly generated tokens; slicing the decoded string by prompt
    # length is brittle because decoding does not always reproduce the prompt verbatim
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    raw_output = tokenizer_schedule.decode(generated_tokens, skip_special_tokens=True).strip()

    schedule = clean_and_extract_schedule(raw_output)
    return schedule or "Failed to generate a valid marketing schedule."
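
# Quick local sanity check (left commented out; the exact schedule varies between runs
# because sampling is enabled, and running it requires the model to be loaded):
# print(generate_marketing_schedule("Campaign for a new hoodie release."))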

# Generate custom social media caption for each day's task
def generate_custom_caption(day, task):
    prompt = f"""
    You are a social media expert helping a brand schedule posts for their upcoming campaigns. Today is '{day}', and the task is: '{task}'.

    Create an engaging and captivating social media caption based on the following:
    1. Tailor the caption to the specific product or event described in the task.
    2. Ensure the caption is relevant to the day and highlights why followers should pay attention today.
    3. Include a call to action and optional hashtags or emojis that match the tone of the post.
    4. Output only the caption. Do not include reasoning or additional text.

    Caption:
    """
    inputs = tokenizer_caption(prompt, return_tensors="pt").to(device)

    outputs = model_caption.generate(
        **inputs,
        max_new_tokens=100,
        num_return_sequences=1,
        temperature=0.9,
        no_repeat_ngram_size=3,
        top_p=0.9,
        top_k=60,
        do_sample=True,
        pad_token_id=tokenizer_caption.eos_token_id
    )

    # As above, decode only the newly generated tokens rather than slicing off the prompt
    generated_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    caption = tokenizer_caption.decode(generated_tokens, skip_special_tokens=True).strip()
    return caption

def gradio_interface(user_prompt):
    conversation = []

    # Generate the marketing schedule
    conversation.append('<span style="color: #3498db; font-weight: bold;">Hassan - Marketing Dept.:</span> I\'m processing your request. Please wait a moment...')
    yield "<br>".join(conversation)  # Update the interface with each message on a new line

    schedule = generate_marketing_schedule(user_prompt)

    if isinstance(schedule, dict):
        conversation.append('<span style="color: #3498db; font-weight: bold;">Hassan - Marketing Dept.:</span> I have created the marketing schedule based on your request.')
        yield "<br>".join(conversation)  # Update the interface with schedule processing

        time.sleep(2)

        conversation.append('<span style="color: #3498db; font-weight: bold;">Hassan - Marketing Dept.:</span> Let\'s now consult the Caption Generator to create captions for each day.')
        yield "<br>".join(conversation)  # Update the interface with new dialogue

        captions = {}
        for day, task in schedule.items():
            conversation.append(f'<span style="color: #3498db; font-weight: bold;">Hassan - Marketing Dept.:</span> For <strong>{day}</strong>, the task is <em>\'{task}\'</em>. Caption Generator, can you create a caption?')
            yield "<br>".join(conversation)  # Update the interface with the conversation

            time.sleep(2)

            caption = generate_custom_caption(day, task)
            captions[day] = caption

            conversation.append(f'<span style="color: #e74c3c; font-weight: bold;">Ahmed - Media Dept.:</span> Here\'s the caption for <strong>{day}</strong>: <em>\'{caption}\'</em>')
            yield "<br>".join(conversation)  # Update the interface with the caption for the day

        conversation.append('<span style="color: #3498db; font-weight: bold;">Hassan - Marketing Dept.:</span> The final marketing schedule with captions is ready.')
        yield "<br>".join(conversation)  # Final update

    else:
        yield schedule  # If there was an error, just return the error message


# Build and launch the Gradio interface with custom styling
interface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Textbox(label="Enter Your Marketing Prompt", placeholder="e.g., Campaign for a new hoodie release.", lines=2),
    outputs=gr.Markdown(label="Generated Marketing Schedule & Captions"),  # Markdown output for the streamed, HTML-formatted conversation
    title="✨ laibor Marketing Demo ✨",
    description="🧑‍💻 Enter a campaign description to generate a weekly marketing schedule with captions tailored to your brand.",
    theme="compact",  # Sleek compact theme
    css="""
        .gradio-container {
            background-color: #000000;
            color: white;
            font-family: 'Arial', sans-serif;
            border-radius: 10px;
            border: 2px solid #dee2e6;
        }
        .gr-button {
            background-color: #4CAF50;
            color: white;
            font-size: 16px;
            font-weight: bold;
        }
        .gr-textbox, .gr-markdown {
            background-color: #333333;
            color: white;
            border: 1px solid #555555;
            border-radius: 8px;
        }
        .gr-textbox input, .gr-markdown textarea {
            background-color: #333333;
            color: white;
        }
    """
)

interface.launch()