fede97 committed on
Commit 2fb04fa
1 Parent(s): 83d3a90

Update app.py

Files changed (1): app.py +6 -8
app.py CHANGED
@@ -11,8 +11,8 @@ CHECKPOINT_PATH= 'scratch_2-nodes_tokenizer_latbert-original_packing_fcocchi/mod
 CHECKPOINT_PATH= 'itserr/latin_llm_alpha'
 
 print(f"Loading model from: {CHECKPOINT_PATH}")
-#tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH, token=st.secrets["HF_TOKEN"])
-#model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH, token=st.secrets["HF_TOKEN"])
+tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])
+model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])
 
 description="""
 This is a Latin Language Model (LLM) based on GPT-2 and it was trained on a large corpus of Latin texts and can generate text in Latin.
@@ -28,13 +28,11 @@ def generate_text(prompt):
     else:
         device = torch.device("cpu")
         print("No GPU available")
-    out_text= st.secrets["HF_TOKEN"]
-    return out_text
 
-    #print("***** Generate *****")
-    #text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
-    #generated_text = text_generator(prompt, max_length=50, do_sample=True, temperature=1.0, repetition_penalty=2.0, truncation=True)
-    #return generated_text[0]['generated_text']
+    print("***** Generate *****")
+    text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+    generated_text = text_generator(prompt, max_length=50, do_sample=True, temperature=1.0, repetition_penalty=2.0, truncation=True)
+    return generated_text[0]['generated_text']
 
 custom_css = """
 #logo {
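
For reference, a minimal self-contained sketch of the loading-and-generation flow as it stands after this commit. It assumes an HF_TOKEN environment variable with read access to the itserr/latin_llm_alpha checkpoint; the GPU branch of the device check and the demo prompt are assumptions, since the diff context only shows the else: branch.

# Sketch of app.py's model flow after this commit (assumptions noted above).
import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

CHECKPOINT_PATH = 'itserr/latin_llm_alpha'

print(f"Loading model from: {CHECKPOINT_PATH}")
# After this commit the token comes from the environment rather than st.secrets.
tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])
model = AutoModelForCausalLM.from_pretrained(CHECKPOINT_PATH, token=os.environ['HF_TOKEN'])

def generate_text(prompt):
    # Assumed GPU branch; the diff only shows the CPU fallback.
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
        print("No GPU available")

    print("***** Generate *****")
    text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
    generated_text = text_generator(prompt, max_length=50, do_sample=True,
                                    temperature=1.0, repetition_penalty=2.0, truncation=True)
    return generated_text[0]['generated_text']

if __name__ == "__main__":
    # Hypothetical demo prompt, not part of the app itself.
    print(generate_text("Gallia est omnis divisa in partes tres"))

Note that besides enabling real generation, the commit removes the two debug lines that returned st.secrets["HF_TOKEN"] as the function's output, so the access token is no longer echoed to the UI.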