Update app.py
app.py
CHANGED
@@ -29,10 +29,17 @@ print("Loading the model...")
 llm = Llama(model_path=model_path)
 print("Model loaded successfully!")
 
-def generate_response(
+def generate_response(message, history, temperature=0.7, top_p=1.0, max_tokens=256):
+    # history_langchain_format = []
+    # for human, ai in history:
+    #     history_langchain_format.append(HumanMessage(content=human))
+    #     history_langchain_format.append(AIMessage(content=ai))
+
+    # history_langchain_format.append(HumanMessage(content=message))
+
     try:
-
-        return
+        gpt_response = llm(message, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
+        return gpt_response["choices"][0]["text"].strip()
     except Exception as e:
         return f"Error generating response: {e}"
 
@@ -43,4 +50,4 @@ if __name__ == "__main__":
         description=description,
     )
 
-rag.launch()
+rag.launch(share=True)
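For readers skimming the diff, here is a minimal self-contained sketch of what app.py looks like after this commit. It assumes rag is a gr.ChatInterface (consistent with the description= keyword and the .launch() call) and uses placeholder values for model_path and description; the Space's actual values are not shown in the diff.

# Minimal sketch of app.py after this commit. Assumptions: llama-cpp-python
# and Gradio are installed; model_path and description are placeholders,
# not the Space's real values.
import gradio as gr
from llama_cpp import Llama

model_path = "model.gguf"  # placeholder; the Space defines its own path
description = "Chat with a local GGUF model."  # placeholder text

print("Loading the model...")
llm = Llama(model_path=model_path)
print("Model loaded successfully!")

def generate_response(message, history, temperature=0.7, top_p=1.0, max_tokens=256):
    # history is currently unused: the LangChain-style history handling
    # is still commented out in the committed code.
    try:
        gpt_response = llm(message, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
        return gpt_response["choices"][0]["text"].strip()
    except Exception as e:
        return f"Error generating response: {e}"

if __name__ == "__main__":
    rag = gr.ChatInterface(
        generate_response,
        description=description,
    )
    rag.launch(share=True)

Note that share=True mainly matters when running locally, where it creates a temporary public gradio.live link; on Hugging Face Spaces the app is already served at a public URL, and Gradio warns that the flag is not supported there.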