storresbusquets committed
Commit 53f76b1
1 Parent(s): 4efa9de

Update app.py

Files changed (1)
  1. app.py +34 -32
app.py CHANGED
@@ -1,44 +1,46 @@
 import gradio as gr
+import torch
+import transformers
+from transformers import AutoTokenizer
+from langchain import LLMChain, HuggingFacePipeline, PromptTemplate
 
 def greet(text):
-
-    from langchain.chat_models import ChatOllama
-    from langchain.document_loaders import WebBaseLoader
-    from langchain.chains.summarize import load_summarize_chain
-
-    loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
-    docs = loader.load()
-
-    llm = ChatOllama(temperature=0, model_name="falcon:7b")
-    chain = load_summarize_chain(llm, chain_type="stuff")
-
-    chain.run(docs)
 
-    from langchain.chains.llm import LLMChain
-    from langchain.prompts import PromptTemplate
-    from langchain.chains.combine_documents.stuff import StuffDocumentsChain
-
-    # Define prompt
-    prompt_template = """Write a concise summary of the following:
-    "{text}"
-    CONCISE SUMMARY:"""
-    prompt = PromptTemplate.from_template(prompt_template)
-
-    # Define LLM chain
-    llm = ChatOllama(temperature=0, model_name="falcon:7b")
-    llm_chain = LLMChain(llm=llm, prompt=prompt)
-
-    # Define StuffDocumentsChain
-    stuff_chain = StuffDocumentsChain(
-        llm_chain=llm_chain, document_variable_name="text"
+    model = "meta-llama/Llama-2-7b-chat-hf"
+    tokenizer = AutoTokenizer.from_pretrained(model)
+
+    pipeline = transformers.pipeline(
+        "text-generation",
+        model=model,
+        tokenizer=tokenizer,
+        torch_dtype=torch.bfloat16,
+        trust_remote_code=True,
+        device_map="auto",
+        max_length=1000,
+        do_sample=True,
+        top_k=10,
+        num_return_sequences=1,
+        eos_token_id=tokenizer.eos_token_id
     )
-
-    docs = loader.load()
-    summary = stuff_chain.run(docs)
+
+    llm = HuggingFacePipeline(pipeline = pipeline, model_kwargs = {'temperature':0})
+
+    template = """
+    Write a summary of the following text delimited by triple backticks.
+    Return your response which covers the key points of the text.
+    ```{text}```
+    SUMMARY:
+    """
+
+    prompt = PromptTemplate(template=template, input_variables=["text"])
+    llm_chain = LLMChain(prompt=prompt, llm=llm)
+
+    summary = llm_chain.run(text)
 
     return summary
 
 with gr.Blocks() as demo:
+
    text = gr.Textbox(label="Text")
    summary = gr.Textbox(label="Summary")
    greet_btn = gr.Button("Submit")
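
The hunk stops at its trailing context lines, so the rest of app.py (how greet_btn triggers greet and how the demo is launched) is not shown here and is unchanged by this commit. A minimal sketch of the Gradio wiring that would typically follow the components above, assuming the standard Button.click and demo.launch() pattern rather than the file's actual contents:

    # Assumed continuation (hypothetical, not part of the diff): a click on the button
    # runs greet() with the Text box as input and the Summary box as output.
    greet_btn.click(fn=greet, inputs=text, outputs=summary)

# Also assumed: start the app, as is typical for a Hugging Face Space.
demo.launch()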