Jyotiyadav committed on
Commit
a2df6d3
·
verified ·
1 Parent(s): cf493a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -51,13 +51,13 @@ def process_output(output):
51
 
52
 
53
  # Define the function for generating output based on input
54
- def generate_output(input_text,model):
55
  # Prompt for the instruction
56
 
57
  output = ""
58
  # Initialize the FastLanguageModel
59
  model, tokenizer = FastLanguageModel.from_pretrained(
60
- model_name = model,
61
  max_seq_length = 2500,
62
  dtype = None,
63
  load_in_4bit = True,
@@ -112,7 +112,7 @@ model_options = ["DataIntelligenceTeam/NER-Phi-3-mini-4k-instruct"]
112
  #,"DataIntelligenceTeam/NER-gemma-7b-bnb-4bit","DataIntelligenceTeam/llama-3-8b-Instruct-bnb-4bit","DataIntelligenceTeam/mistral-7b-instruct-v0.2-bnb-4bit"]
113
  inputs = [
114
  gr.inputs.Textbox(label="Input Text"),
115
- gr.inputs.Dropdown(label="Select the Fine-tuned Model", choices=["DataIntelligenceTeam/NER-Phi-3-mini-4k-instruct","DataIntelligenceTeam/llama-3-8b-Instruct-bnb-4bit"]),
116
  ]
117
 
118
  outputs = [
 
51
 
52
 
53
  # Define the function for generating output based on input
54
+ def generate_output(input_text):
55
  # Prompt for the instruction
56
 
57
  output = ""
58
  # Initialize the FastLanguageModel
59
  model, tokenizer = FastLanguageModel.from_pretrained(
60
+ model_name = "DataIntelligenceTeam/llama-3-8b-Instruct-bnb-4bit",
61
  max_seq_length = 2500,
62
  dtype = None,
63
  load_in_4bit = True,
 
112
  #,"DataIntelligenceTeam/NER-gemma-7b-bnb-4bit","DataIntelligenceTeam/llama-3-8b-Instruct-bnb-4bit","DataIntelligenceTeam/mistral-7b-instruct-v0.2-bnb-4bit"]
113
  inputs = [
114
  gr.inputs.Textbox(label="Input Text"),
115
+ #gr.inputs.Dropdown(label="Select the Fine-tuned Model", choices=["DataIntelligenceTeam/NER-Phi-3-mini-4k-instruct","DataIntelligenceTeam/llama-3-8b-Instruct-bnb-4bit"]),
116
  ]
117
 
118
  outputs = [