daniellefranca96 committed on
Commit f6c4092 · 1 Parent(s): 3f12a86

Delete generate_text.py

Files changed (1)
  1. generate_text.py +0 -78
generate_text.py DELETED
@@ -1,78 +0,0 @@
- from langchain.base_language import BaseLanguageModel
- from langchain.chains import LLMChain, SequentialChain
- from langchain.chat_models import ChatAnthropic
- from langchain.chat_models import ChatOpenAI
- from langchain.llms import HuggingFaceHub
- from langchain.prompts import (
-     PromptTemplate,
-     ChatPromptTemplate,
-     SystemMessagePromptTemplate,
-     HumanMessagePromptTemplate,
- )
-
-
- class GenerateStyleText:
-     example: str
-     prompt: str
-     llm: BaseLanguageModel
-
-     def __init__(self, example=None, prompt=None, llm=None):
-         self.example = example
-         self.prompt = prompt
-         self.llm = llm
-
-     def set_imp_llm(self, model):
-         if model == 'GPT3':
-             self.llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
-         elif model == "GPT4":
-             self.llm = ChatOpenAI(model_name="gpt-4")
-         elif model == "Claude":
-             self.llm = ChatAnthropic()
-         else:
-             self.llm = HuggingFaceHub(repo_id=model)
-
-     def run(self):
-         return self.process()
-
-     def process(self):
-         seq_chain = SequentialChain(
-             chains=[self.get_extract_tone_chain(), self.get_generate_text_chain(self.prompt),
-                     self.get_apply_style_chain()],
-             input_variables=["text"], verbose=True)
-         result = seq_chain({'text': self.example, "style": ""})
-         return str(result.get('result'))
-
-     def create_chain(self, chat_prompt, output_key):
-         return LLMChain(llm=self.llm,
-                         prompt=chat_prompt, output_key=output_key)
-
-     def get_extract_tone_chain(self):
-         template = """Based on the tone and writing style in the seed text, create a style guide for a blog or
-         publication that captures the essence of the seed’s tone. Emphasize engaging techniques that help readers
-         feel connected to the content.
-         """
-         system_message_prompt = SystemMessagePromptTemplate.from_template(template)
-         human_template = "{text}"
-         human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
-         chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
-
-         return self.create_chain(chat_prompt, "style")
-
-     def get_generate_text_chain(self, prompt):
-         template = """Generate a text following the user_request(use same language of the request):
-         {user_request}
-         """.replace("{user_request}", prompt)
-         return self.create_chain(PromptTemplate.from_template(template),
-                                  "generated_text")
-
-     def get_apply_style_chain(self):
-         template = """STYLE:
-         {style}
-         REWRITE THE TEXT BELLOW APPLYING THE STYLE ABOVE(use same language of the request),
-         ONLY GENERATE NEW TEXT BASED ON THE STYLE CONTEXT, DO NOT COPY STYLE EXACT PARTS:
-         {generated_text}
-         """
-
-         prompt = PromptTemplate.from_template(template=template)
-         prompt.partial(style="")
-         return self.create_chain(prompt, "result")
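
For anyone reviewing what this deletion removes: a minimal usage sketch of the deleted class, assuming the LangChain version it was written against and an OPENAI_API_KEY (or ANTHROPIC/HuggingFace token) in the environment. The call site, example text, and prompt below are hypothetical; the commit itself does not show how the class was invoked.

# Hypothetical call site for the deleted GenerateStyleText class (not part of this commit).
from generate_text import GenerateStyleText

generator = GenerateStyleText(
    example="A seed text whose tone and writing style should be imitated.",
    prompt="Write a short blog post about keeping ML experiments reproducible.",
)
generator.set_imp_llm("GPT3")  # or "GPT4", "Claude", or a Hugging Face Hub repo id
styled_text = generator.run()  # runs the extract-tone -> generate-text -> apply-style SequentialChain
print(styled_text)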