Update app.py
app.py CHANGED
@@ -6,6 +6,9 @@ import faiss
 import fitz  # PyMuPDF
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 
+# 1. Set page config FIRST
+st.set_page_config(page_title="π Smart Book Analyst", layout="wide")
+
 # Configuration
 MODEL_NAME = "ibm-granite/granite-3.1-1b-a400m-instruct"
 EMBED_MODEL = "sentence-transformers/all-mpnet-base-v2"
@@ -115,7 +118,6 @@ def generate_answer(query, context):
     return tokenizer.decode(outputs[0], skip_special_tokens=True).split("<|assistant|>")[-1].strip()
 
 # Streamlit UI
-st.set_page_config(page_title="π Smart Book Analyst", layout="wide")
 st.title("π AI-Powered Book Analysis System")
 
 uploaded_file = st.file_uploader("Upload book (PDF or TXT)", type=["pdf", "txt"])
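For context, in most Streamlit versions st.set_page_config() must be the first Streamlit command executed in the script; calling it after other st.* calls raises a StreamlitAPIException, which is why this commit moves the call above the rest of the UI code. A minimal sketch of the resulting ordering (constants copied from the diff above; the model loading and retrieval steps are elided):

import streamlit as st

# Must run before any other Streamlit command in the script.
st.set_page_config(page_title="Smart Book Analyst", layout="wide")

# Configuration
MODEL_NAME = "ibm-granite/granite-3.1-1b-a400m-instruct"
EMBED_MODEL = "sentence-transformers/all-mpnet-base-v2"

# ... model loading, chunking, FAISS indexing, generate_answer() ...

# Streamlit UI
st.title("AI-Powered Book Analysis System")
uploaded_file = st.file_uploader("Upload book (PDF or TXT)", type=["pdf", "txt"])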