Abhijit-192-168-1-1 committed on
Commit
77be9aa
·
1 Parent(s): ffce486

modified app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -1,9 +1,9 @@
 
1
  import gradio as gr
2
  from llm2vec import LLM2Vec
3
  from transformers import AutoTokenizer, AutoModel, AutoConfig
4
  from peft import PeftModel
5
  import torch
6
- import os
7
 
8
  torch.backends.cuda.enable_mem_efficient_sdp(False)
9
  torch.backends.cuda.enable_flash_sdp(False)
@@ -18,7 +18,6 @@ if not GROQ_API_KEY or not HF_TOKEN:
18
  os.environ['GROQ_API_KEY'] = GROQ_API_KEY
19
  os.environ['HF_TOKEN'] = HF_TOKEN
20
 
21
-
22
  # Load tokenizer and model
23
  tokenizer = AutoTokenizer.from_pretrained("McGill-NLP/LLM2Vec-Sheared-LLaMA-mntp")
24
  config = AutoConfig.from_pretrained("McGill-NLP/LLM2Vec-Sheared-LLaMA-mntp", trust_remote_code=True)
@@ -40,8 +39,8 @@ def encode_text(input_text):
40
  # Define Gradio interface
41
  iface = gr.Interface(
42
  fn=encode_text,
43
- inputs=gr.inputs.Textbox(lines=2, placeholder="Enter text here..."),
44
- outputs=gr.outputs.JSON()
45
  )
46
 
47
  # Launch Gradio app
 
1
+ import os
2
  import gradio as gr
3
  from llm2vec import LLM2Vec
4
  from transformers import AutoTokenizer, AutoModel, AutoConfig
5
  from peft import PeftModel
6
  import torch
 
7
 
8
  torch.backends.cuda.enable_mem_efficient_sdp(False)
9
  torch.backends.cuda.enable_flash_sdp(False)
 
18
  os.environ['GROQ_API_KEY'] = GROQ_API_KEY
19
  os.environ['HF_TOKEN'] = HF_TOKEN
20
 
 
21
  # Load tokenizer and model
22
  tokenizer = AutoTokenizer.from_pretrained("McGill-NLP/LLM2Vec-Sheared-LLaMA-mntp")
23
  config = AutoConfig.from_pretrained("McGill-NLP/LLM2Vec-Sheared-LLaMA-mntp", trust_remote_code=True)
 
39
  # Define Gradio interface
40
  iface = gr.Interface(
41
  fn=encode_text,
42
+ inputs=gr.Textbox(lines=2, placeholder="Enter text here..."),
43
+ outputs=gr.JSON()
44
  )
45
 
46
  # Launch Gradio app