loubnabnl HF staff committed on
Commit
1248f93
1 Parent(s): 1d9a2e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -16,15 +16,15 @@ example = [
16
  ]
17
 
18
  # change model to the finetuned one
19
# Tokenizer and model for the code-to-text demo (pre-finetune checkpoint).
checkpoint = "codeparrot/codeparrot-small-code-to-text"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
 
21
 
22
def make_doctring(gen_prompt):
    """Append an opening triple-quote docstring and an "Explanation:" cue.

    The returned prompt steers the causal LM toward generating a natural-
    language explanation of ``gen_prompt``.

    NOTE(review): the name contains a typo ("doctring" vs "docstring") but is
    kept unchanged because callers in this file use it.
    """
    # Plain literal: the original used an f-string with no placeholders.
    return gen_prompt + '\n\n"""\nExplanation:'
24
 
25
def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    """Sample an explanation for ``gen_prompt`` with a freshly built pipeline.

    Seeds the RNG for reproducibility, builds a text-generation pipeline from
    the module-level ``model``/``tokenizer``, and returns the generated text
    (prompt included, as the pipeline returns it).
    """
    set_seed(seed)
    # NOTE(review): the pipeline is rebuilt on every call here.
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
    docstring_prompt = make_doctring(gen_prompt)
    outputs = generator(
        docstring_prompt,
        do_sample=True,
        top_p=0.95,
        temperature=temperature,
        max_new_tokens=max_tokens,
    )
    return outputs[0]["generated_text"]
 
16
  ]
17
 
18
  # change model to the finetuned one
19
# Fine-tuned SantaCoder checkpoint for code-to-text generation.
model_id = "loubnabnl/santacoder-code-to-text"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# SECURITY NOTE: trust_remote_code executes Python shipped with the model
# repo; acceptable here only because the checkpoint owner is trusted.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
22
 
23
def make_doctring(gen_prompt):
    """Append an opening triple-quote docstring and an "Explanation:" cue.

    The suffix nudges the causal LM toward generating a natural-language
    explanation of ``gen_prompt``.

    NOTE(review): the name contains a typo ("doctring" vs "docstring") but is
    kept unchanged because callers in this file use it.
    """
    # Plain literal: the original used an f-string with no placeholders.
    return gen_prompt + '\n\n"""\nExplanation:'
25
 
26
def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    """Sample an explanation for ``gen_prompt`` via the module-level ``pipe``.

    Seeds the RNG for reproducibility and returns the generated text
    (prompt included, as the pipeline returns it).
    """
    set_seed(seed)
    docstring_prompt = make_doctring(gen_prompt)
    outputs = pipe(
        docstring_prompt,
        do_sample=True,
        top_p=0.95,
        temperature=temperature,
        max_new_tokens=max_tokens,
    )
    return outputs[0]["generated_text"]