DR-Rakshitha committed
Commit 402975e
1 Parent(s): fdb4240

Update app.py

Files changed (1)
app.py +7 -25
app.py CHANGED
@@ -1,23 +1,4 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-
- # # Specify the directory containing the tokenizer's configuration file (config.json)
- # model_name = "pytorch_model-00001-of-00002.bin"
-
- # # Initialize the tokenizer
- # # tokenizer = AutoTokenizer.from_pretrained(model_name, local_files_only=True)
- # tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
- # tokenizer.pad_token = tokenizer.eos_token
- # tokenizer.padding_side = "right"
-
-
- # # Initialize the GPT4All model
- # model = AutoModelForCausalLM.from_pretrained(model_name)
-
- # def generate_text(input_text):
- #     pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
- #     result = pipe(f"<s>[INST] {input_text} [/INST]")
- #     return result[0]['generated_text']
  from transformers import AutoModelForCausalLM, AutoTokenizer

  # Specify the path to your fine-tuned model and tokenizer
@@ -28,16 +9,14 @@ model_name = "pytorch_model-00001-of-00002.bin" # Replace with your model name
  model = AutoModelForCausalLM.from_pretrained(model_path)
  tokenizer = AutoTokenizer.from_pretrained(model_path)

- # Example usage
- # input_text = "Once upon a time"
- def generated_text(input_text):
+ # Define the function for text generation
+ def generate_text(input_text):
      input_ids = tokenizer(input_text, return_tensors="pt").input_ids
      output = model.generate(input_ids, max_length=50, num_return_sequences=1)
      generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
      return generated_text
- # print(generated_text)
-

+ # Create the Gradio interface
  text_generation_interface = gr.Interface(
      fn=generate_text,
      inputs=[
@@ -45,4 +24,7 @@ text_generation_interface = gr.Interface(
      ],
      outputs=gr.outputs.Textbox(label="Generated Text"),
      title="GPT-4 Text Generation",
- ).launch()
+ )
+
+ # Launch the Gradio interface
+ text_generation_interface.launch()
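For reference, below is a minimal sketch of what app.py could look like after this commit, with two caveats the diff itself leaves open. First, the visible hunks never define `model_path`; the hunk header shows only `model_name = "pytorch_model-00001-of-00002.bin"`, and that filename is a single weights shard, not something `from_pretrained` can load (it expects a Hub repo ID or a local directory containing `config.json`, the tokenizer files, and all shards). Second, `gr.outputs.Textbox` is the legacy Gradio 3.x alias, replaced by `gr.Textbox` in current releases. The directory name and the input textbox below are assumptions: the input component sits on new line 23, outside both hunks.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder path: a local directory containing config.json, the tokenizer
# files, and every weights shard (pytorch_model-00001-of-00002.bin, ...).
# from_pretrained() cannot load a single .bin shard by filename.
model_path = "./my-finetuned-model"

model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Define the function for text generation
def generate_text(input_text):
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    # Greedy decoding; max_length counts the prompt tokens as well
    output = model.generate(input_ids, max_length=50, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Create the Gradio interface (gr.Textbox replaces the removed
# gr.inputs/gr.outputs aliases from Gradio 3.x)
text_generation_interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),  # label assumed; not visible in the hunks
    outputs=gr.Textbox(label="Generated Text"),
    title="GPT-4 Text Generation",
)

# Launch the Gradio interface
text_generation_interface.launch()

As written, `generate_text` produces at most 50 tokens including the prompt, one sequence per call; `launch()` then serves the demo locally, on port 7860 by default.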