Build error
Update app.py
Completes the truncated (unterminated) system_prompt string and removes the stray markdown headings that made app.py fail to parse, the apparent cause of the build error.
app.py CHANGED
@@ -11,7 +11,7 @@ from llama_index.llms import HuggingFaceLLM
 from langchain.document_loaders import PyPDFLoader
 
 
-
+
 
 import pandas as pd
 from datasets import load_dataset, concatenate_datasets
@@ -34,7 +34,7 @@ combined_dataset = concatenate_datasets(datasets)
 #from google.colab import drive
 #drive.mount('/content/drive')
 
-
+
 
 #documents = SimpleDirectoryReader("/content/drive/MyDrive/Data").load_data()
 
@@ -42,24 +42,23 @@ from langchain.text_splitter import CharacterTextSplitter
 from langchain import OpenAI
 from langchain.document_loaders import PyPDFLoader
 
-
+
 
 
 from llama_index.prompts.prompts import SimpleInputPrompt
 
 
-system_prompt = "You are a medical
+system_prompt = "You are a medical chatbot designed to provide general medical information and guidance to patients. You are not a substitute for a licensed healthcare professional. Please always consult with a doctor or other qualified healthcare provider for any medical concerns. Here are some specific examples of how you can be helpful: Answer general medical questions about symptoms, conditions, and treatments, Provide information on healthy lifestyle habits and preventive care, Help patients find reliable medical resources and support groups, Offer emotional support and reassurance to patients facing medical challenges. Please keep in mind the following: Always direct patients to seek professional medical attention if they have serious concerns, Use clear and concise language that is easy for patients to understand. Be respectful and sensitive to patients individual needs and concerns.I am confident that you can be a valuable tool for patients seeking medical information and guidance. Thank you for your dedication to helping others."
 
 
 
 # This will wrap the default prompts that are internal to llama-index
 query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")
 
-**Log in to Hugging Face**
 
 #!huggingface-cli login
 
-
+
 
 import torch
 
@@ -75,7 +74,7 @@ llm = HuggingFaceLLM(
 # uncomment this if using CUDA to reduce memory usage
 model_kwargs={"torch_dtype": torch.float16 , "load_in_8bit":True})
 
-
+
 
 from langchain.embeddings.huggingface import HuggingFaceEmbeddings
 from llama_index import LangchainEmbedding, ServiceContext
@@ -85,7 +84,6 @@ embed_model = LangchainEmbedding(
 )
 
 
-**Configure the service context**
 
 service_context = ServiceContext.from_defaults(
 chunk_size=1024,
@@ -93,18 +91,18 @@ service_context = ServiceContext.from_defaults(
 embed_model=embed_model
 )
 
-
+
 
 index = VectorStoreIndex.from_documents(combined_dataset, service_context=service_context)
 
-
+
 
 query_engine = index.as_query_engine()
 response = query_engine.query("What is gross profit?")
 
 print(response)
 
-
+
 
 import gradio as gr
 
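A note on the unchanged model_kwargs line above: load_in_8bit=True relies on the bitsandbytes package and a CUDA device, so on CPU-only Spaces hardware the model load will still fail even after this syntax fix. A minimal sketch of guarding the kwargs (the float32 fallback is an assumption, not part of the commit):

import torch

# Sketch only: request 8-bit weights only when a CUDA device is present.
# load_in_8bit=True needs bitsandbytes and a GPU; without them, loading raises.
if torch.cuda.is_available():
    model_kwargs = {"torch_dtype": torch.float16, "load_in_8bit": True}
else:
    # Assumed CPU fallback: plain float32 weights, no quantization.
    model_kwargs = {"torch_dtype": torch.float32}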
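A second caveat the commit leaves in place: in this legacy llama_index API, VectorStoreIndex.from_documents expects a sequence of Document objects, while combined_dataset is a raw datasets object. A minimal sketch of the missing conversion step (the "text" column name is an assumption about the dataset schema):

from llama_index import Document, VectorStoreIndex

# Sketch only: wrap each dataset row in a llama_index Document before indexing.
# The "text" column is assumed; substitute whichever field holds the passage text.
documents = [Document(text=row["text"]) for row in combined_dataset]
index = VectorStoreIndex.from_documents(documents, service_context=service_context)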
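The diff ends at import gradio as gr, so the UI wiring itself is not shown. As a rough sketch of how a Space typically exposes the query engine (the answer function and the interface layout are illustrative, not from this commit):

import gradio as gr

def answer(question: str) -> str:
    # Route the user's question through the query engine built above
    # and return the response text.
    return str(query_engine.query(question))

demo = gr.Interface(
    fn=answer,
    inputs=gr.Textbox(label="Question"),
    outputs=gr.Textbox(label="Answer"),
    title="Medical chatbot (RAG demo)",
)

demo.launch()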