DR-Rakshitha committed
Commit 4789c94
Parent: 16425f3

Update app.py

Files changed (1):
  app.py +8 -44
app.py CHANGED
@@ -1,49 +1,16 @@
  import gradio as gr
- from transformers import AutoModelForCausalLM, AutoTokenizer
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
- # Specify the path to your fine-tuned model and tokenizer
- # model_path = "./" # Assuming the model is in the same directory as your notebook
- # model_name = "https://huggingface.co/spaces/DR-Rakshitha/wizardlm_api/blob/main/pytorch_model-00001-of-00002.bin" # Replace with your model name
- 
- from llama_cpp import Llama
- import timeit
- 
- # Load Llama 2 model
- llm = Llama(model_path="./pytorch_model-00001-of-00002.bin",
-             n_ctx=512,
-             n_batch=128)
- 
- # Start timer
- start = timeit.default_timer()
- 
- # Generate LLM response
- # prompt = "What is Python?"
- 
- # output = llm(prompt,
- #              max_tokens=-1,
- #              echo=False,
- #              temperature=0.1,
- #              top_p=0.9)
- 
- 
- # Load the model and tokenizer
- model = AutoModelForCausalLM.from_pretrained(model_path)
- tokenizer = AutoTokenizer.from_pretrained(model_path)
- 
- # Define the function for text generation
+ # # Specify the directory containing the tokenizer's configuration file (config.json)
+ model_name = "pytorch_model-00001-of-00002.bin"
+ 
+ 
+ # # Initialize the GPT4All model
+ model = AutoModelForCausalLM.from_pretrained(model_name)
  def generate_text(input_text):
-     # input_ids = tokenizer(input_text, return_tensors="pt").input_ids
-     # output = model.generate(input_ids, max_length=50, num_return_sequences=1)
-     # generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
-     # return generated_text
- 
-     output = llm(input_text,
-                  max_tokens=-1,
-                  echo=False,
-                  temperature=0.1,
-                  top_p=0.9)
- 
- # Create the Gradio interface
+     text= model.generate(input_text)
+     return text
+ 
  text_generation_interface = gr.Interface(
      fn=generate_text,
      inputs=[
@@ -51,7 +18,4 @@ text_generation_interface = gr.Interface(
      ],
      outputs=gr.outputs.Textbox(label="Generated Text"),
      title="GPT-4 Text Generation",
- )
- 
- # Launch the Gradio interface
- text_generation_interface.launch()
+ ).launch()
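
For reference, the committed version is unlikely to run as-is: AutoModelForCausalLM.from_pretrained expects a model directory or Hub repo id rather than a single shard filename (sharded checkpoints such as pytorch_model-00001-of-00002.bin are resolved through pytorch_model.bin.index.json), and model.generate consumes token ids, not a raw string. A minimal working sketch, assuming the Space root also holds config.json, the tokenizer files, and the weight index, and that the input widget (collapsed out of the diff) is a plain textbox:

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the Space root contains config.json, the tokenizer files, and
# the sharded weights plus their pytorch_model.bin.index.json index.
model_dir = "./"

tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir)

def generate_text(input_text):
    # generate() works on token ids: encode the prompt first, decode the result after
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    output = model.generate(input_ids, max_new_tokens=128)
    return tokenizer.decode(output[0], skip_special_tokens=True)

gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),  # assumed label; the unchanged line is not shown in the diff
    outputs=gr.Textbox(label="Generated Text"),
    title="GPT-4 Text Generation",
).launch()

On current Gradio releases gr.Textbox replaces the deprecated gr.outputs.Textbox, and the removed llama-cpp path would have needed a GGML/GGUF checkpoint rather than these PyTorch shards, which is likely why Llama(model_path="./pytorch_model-00001-of-00002.bin", ...) was abandoned.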