add chain

- Dockerfile +0 -1
- app.py +10 -16
- pipline.py +13 -25
Dockerfile
CHANGED
@@ -4,7 +4,6 @@ WORKDIR /code
 
 COPY ./requirements.txt /code/requirements.txt
 
-RUN pip3 install torch torchvision torchaudio
 RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
 
 COPY . .
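With the standalone torch install gone, the image only ships what requirements.txt declares, and app.py still imports torch (visible in the hunk header of the next diff), so torch would now have to be listed there. A hypothetical requirements.txt consistent with the imports touched by this commit; unpinned and illustrative only, not taken from the repo:

    torch
    streamlit
    streamlit-ace
    langchain
    openai
    wikipedia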
app.py
CHANGED
@@ -3,29 +3,23 @@ import torch
 import streamlit as st
 from streamlit import session_state as state
 import streamlit_ace
-import model
 import pipline
 
 if "app" not in state:
     state.app = "model"
-    # state.input_text = "This is the input text."
-    # state.word = "input"
     state.out = ""
-
 st.title("Streamlit using Huggingface Transformers and langchain")
+in_area = st.container()
+out_area = st.container()
 
-def __run_pipline():
-    st.text(f"input_text: {state.input_text}\nword: {state.word}")
-    st.markdown(":green[Running pipline]")
-    st.text(pipline.pipeline(state.input_text, state.word))
 
-
-
-
-
+
+def __run_pipline():
+    out_area.text(f"input_text: {state.input_text}\nword: {state.word}")
+    out_area.markdown(":green[Running pipline]")
+    out_area.text(pipline.chain(state.input_text))
 
 
-
-
-
-st.button("model", on_click=__run_model)
+in_area.text_area("input_text", key="input_text")
+in_area.text_input("word", key="word")
+in_area.button("pip line", on_click=__run_pipline)
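The rewritten app.py leans on Streamlit's container-plus-callback layout: containers reserve slots in render order, so output written to out_area lands above the inputs even though the button callback fires later. The diff writes to the container directly from the callback; a more conventional variant of the same layout stashes the result in session_state and lets the rerun draw it. A minimal standalone sketch of that variant (names are illustrative, not from the source):

import streamlit as st

if "out" not in st.session_state:
    st.session_state.out = ""

out_area = st.container()  # created first, so output renders above the inputs
in_area = st.container()   # created second, so inputs render below

def _on_run():
    # Callbacks run at the start of the rerun; widgets registered with
    # key=... expose their values through session_state.
    st.session_state.out = f"input_text: {st.session_state.input_text}"

in_area.text_area("input_text", key="input_text")
in_area.button("run", on_click=_on_run)

out_area.text(st.session_state.out)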
pipline.py
CHANGED
@@ -1,36 +1,24 @@
 import langchain as lc
-from langchain import PromptTemplate
+from langchain import PromptTemplate, OpenAI, LLMChain
 from langchain.prompts import load_prompt
 import wikipedia
-import model
-
+import os
+llm = OpenAI()
 # save templates to a file
+template = """Question:
+The user wrote me the following text, what is he trying to imply to me?
+{user_input}
 
-
+Answer: Let's think step by step."""
 # An example prompt with multiple input variables
-multiple_input_prompt = PromptTemplate(
-    input_variables=["adjective", "content"],
-    template="Tell me a {adjective} joke about {content}."
+input_prompt = PromptTemplate(
+    input_variables=["user_input"],
+    template=template,
 )
-
-# multiple_input_prompt.format(adjective="funny", content="chickens")
-# -> "Tell me a funny joke about chickens."
+input_prompt.save("awesome_prompt.json")  # Save to JSON file
 
 
 prompt = load_prompt("awesome_prompt.json")
 
-
-def pipeline(text, word):
-    model_output = ""
-    input_text = prompt.format(adjective="funny", content=text)
-    while word not in model_output:
-        model_output = model.run(input_text)
-        wikipedia_entry = wikipedia.search(word)[1]
-        wiki = wikipedia.summary(wikipedia_entry, auto_suggest=False, redirect=True)
-        input_text += model_output + wiki
-    return model_output
-
-
-if __name__ == "__main__":
-    print("pipline test")
-    pipeline("This is the input text.", "input")
+prompt = PromptTemplate(template=template, input_variables=["user_input"])
+chain = LLMChain(prompt=prompt, llm=llm)
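After this commit, app.py feeds the chain via pipline.chain(state.input_text). Two things are worth noting. First, the prompt reloaded with load_prompt is immediately shadowed by a fresh PromptTemplate, so the JSON round-trip has no effect on what the chain uses. Second, in langchain 0.0.x calling an LLMChain directly goes through __call__ and returns a dict of inputs and outputs, while chain.run(...) returns just the completion string. A hypothetical driver, assuming OPENAI_API_KEY is already exported (pipline.py constructs OpenAI() at import time, and the constructor validates the key):

import pipline

text = "I never said you had to go."

# run() returns just the completion string for a single-input chain.
print(pipline.chain.run(text))

# Calling the chain returns a dict such as {"user_input": ..., "text": ...};
# this dict is what app.py renders via out_area.text(pipline.chain(...)).
print(pipline.chain(text))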