yusufathi commited on
Commit
718a143
·
verified ·
1 Parent(s): 26da7ec

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +64 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from unsloth import FastLanguageModel
from transformers import TextStreamer  # NOTE(review): imported but never used below — confirm before removing
import gradio as gr
import torch

# Model Configuration
max_seq_length = 2048  # max tokens per request; also used to truncate tokenizer input in generate_mcq
dtype = None  # None lets unsloth pick an appropriate dtype for the hardware
load_in_4bit = True  # 4-bit quantized weights to cut GPU memory usage

# Load the model and tokenizer once at import time (shared by all requests).
model_name = "unsloth/Mistral-Nemo-Base-2407"
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name=model_name,
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
FastLanguageModel.for_inference(model)  # Optimize for inference
# Define the Alpaca-style prompt.
# The three "{}" slots are filled positionally with (instruction, input, response);
# the response slot is left empty at inference so the model completes it.
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{}

### Input:
{}

### Response:
{}
"""
# Define the MCQ generation function
def generate_mcq(passage):
    """Generate a multiple-choice question (with options and answer) from a passage.

    Parameters
    ----------
    passage : str
        Free-form text to base the MCQ on.

    Returns
    -------
    str
        The full decoded model output (includes the prompt followed by the
        model-written response section).
    """
    instruction = "Generate a multiple-choice question (MCQ) based on the passage, provide options, and indicate the correct option."
    input_text = f"Passage: {passage}"
    # Leave the Response slot empty so the model completes it.
    prompt = alpaca_prompt.format(instruction, input_text, "")

    # Tokenize input and move it to wherever unsloth placed the model.
    # Fix: the original hard-coded .to("cuda"), which crashes on CPU-only
    # hosts and could mismatch the model's actual device.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=max_seq_length).to(model.device)
    outputs = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=128,
        pad_token_id=tokenizer.eos_token_id,  # Mistral has no pad token; reuse EOS
        num_return_sequences=1,
        do_sample=True,  # sampled decoding: output varies between calls
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Define the Gradio interface: single textbox in, plain text out.
interface = gr.Interface(
    fn=generate_mcq,
    inputs=[gr.Textbox(label="Enter Passage", placeholder="Enter a passage to generate MCQs")],
    outputs="text",
    title="MCQ Generator with Mistral",
    description="Generate multiple-choice questions using Mistral LLM. Enter a passage and get an MCQ with answer options."
)

# Launch the app only when run as a script (not on import).
if __name__ == "__main__":
    interface.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
torch
datasets
unsloth
transformers
trl
gradio
bert-score
rouge-score
tqdm