Alijeff1214 committed on
Commit f6e2b5d · verified · Parent: 34ea1d1

Update app.py

Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -3,7 +3,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 @st.cache_resource(show_spinner=False)
 def load_model():
-    model_name = "Alijeff1214/DeutscheLexAI_BGB"
+    model_name = "Alijeff1214/DeutscheLexAI_BGB_2.0"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return tokenizer, model
@@ -18,6 +18,6 @@ user_input = st.text_input("Enter your question or prompt:")
 if st.button("Generate Response") and user_input:
     # Tokenize and generate response (adjust parameters as needed)
     inputs = tokenizer(user_input, return_tensors="pt")
-    outputs = model.generate(**inputs, max_length=200, do_sample=True, temperature=0.7)
+    outputs = model.generate(**inputs, max_length=500, do_sample=True, temperature=0.7)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     st.text_area("Model Response:", value=response, height=300)