from __future__ import annotations
from google.oauth2 import service_account
from vertexai.language_models import TextGenerationModel, TextEmbeddingModel
import vertexai
import streamlit as st
# st.title("Product Description Enhancer")
# with st.form(key="Product"):
#
import os
import openai
from langchain.prompts import PromptTemplate
# from langchain.chat_models import ChatOpenAI
from typing import Any
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
import gradio as gr
# from google.cloud import auth
# auth.authenticate_user()
# OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
# OPENAI_API_KEY = "sk-..."  # hard-coded API key removed; load it from the environment instead
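# Initialize the Vertex AI SDK for the target project/region and load the
# PaLM text-bison model used below for product-description generation.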
PROJECT_ID = "agileai-poc"
vertexai.init(project=PROJECT_ID, location="us-central1")
generation_model = TextGenerationModel.from_pretrained("text-bison@001")
# embedding_model = TextEmbeddingModel.from_pretrained("textembedding-gecko@001")
# prompt_file = "prompt_template.txt"
# class ProductDescGen(LLMChain):
#     """LLM chain for generating a multi-paragraph, rich-text product description with emojis."""
#
#     @classmethod
#     def from_llm(
#         cls, llm: BaseLanguageModel, prompt: PromptTemplate, **kwargs: Any
#     ) -> ProductDescGen:
#         """Load a ProductDescGen chain from an LLM."""
#         return cls(llm=llm, prompt=prompt, **kwargs)
# def product_desc_generator(product_name, keywords, style):
#     with open(prompt_file, "r") as file:
#         prompt_template = file.read()
#     PROMPT = PromptTemplate(
#         input_variables=["product_name", "keywords"], template=prompt_template
#     )
#     # llm = ChatOpenAI(
#     #     model_name="gpt-3.5-turbo",
#     #     temperature=0.7,
#     #     openai_api_key=OPENAI_API_KEY,
#     # )
#     # LangChain's Vertex AI wrapper (needs: from langchain.llms import VertexAI)
#     llm = VertexAI(
#         model_name="text-bison@001",
#         max_output_tokens=500,
#         temperature=0.1,
#         top_p=0.8,
#         top_k=40,
#     )
#     ProductDescGen_chain = ProductDescGen.from_llm(llm=llm, prompt=PROMPT)
#     ProductDescGen_query = ProductDescGen_chain.apply_and_parse(
#         [{"product_name": product_name, "keywords": keywords}]
#     )
#     Writing_Style = st.selectbox("Select a Writing Style", [style])
#     response = generation_model.predict(
#         "Generate a product description that is creative and SEO compliant. "
#         "Emojis should be added to make the product description look appealing. Begin!",
#         max_output_tokens=500, temperature=0.1, top_p=0.8, top_k=40,
#     )
#     return ProductDescGen_query[0]["text"], Writing_Style, response.text
# prod_nm = st.text_input("Product Name")
# keywords = st.text_input("Filters")
# style = st.selectbox("Select the response style", [
#     "Funny", "Sarcastic", "Casual"])
# generate = st.button("Generate Product Description")
# if generate:
#     message = st.empty()
#     message.text("Describing...")
#     content = product_desc_generator(prod_nm, keywords, style)
#     message.text("")
#     st.write(content)
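
# --- Minimal runnable sketch -------------------------------------------------
# Reconstructed from the commented-out draft above. The prompt wording, widget
# labels, and sampling parameters mirror that draft; treat them as illustrative
# assumptions rather than the author's final choices. Assumes Google Cloud
# application-default credentials with access to PROJECT_ID are available.
def product_desc_generator(product_name: str, keywords: str, style: str) -> str:
    """Generate an emoji-rich, SEO-friendly product description with text-bison."""
    prompt = (
        "Generate a product description that is creative and SEO compliant. "
        "Emojis should be added to make the product description look appealing.\n"
        f"Product name: {product_name}\n"
        f"Keywords: {keywords}\n"
        f"Writing style: {style}\n"
        "Begin!"
    )
    response = generation_model.predict(
        prompt,
        max_output_tokens=500,
        temperature=0.1,
        top_p=0.8,
        top_k=40,
    )
    return response.text


st.title("Product Description Enhancer")
prod_nm = st.text_input("Product Name")
keywords = st.text_input("Filters")
style = st.selectbox("Select the response style", ["Funny", "Sarcastic", "Casual"])
generate = st.button("Generate Product Description")
if generate:
    message = st.empty()
    message.text("Describing...")
    content = product_desc_generator(prod_nm, keywords, style)
    message.text("")
    st.write(content)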