Update README.md
README.md
CHANGED
pipeline_tag: conversational
---

# How to Use

## Load the 🦙 Alpaca-LoRA model

```python
import torch
# The imports below are inferred from later usage; the diff elides lines 13-22.
from peft import PeftModel
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf",
                                         load_in_8bit=True,          # loading kwargs are elided
                                         torch_dtype=torch.float16,  # in the diff; these are
                                         device_map="auto")          # typical values

# Load the LoRA model
peft_model_id = "..."  # this adapter's Hub repo id (not shown in the diff)
model = PeftModel.from_pretrained(model, peft_model_id)
```
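
After attaching the adapter, you will usually want the model in inference mode before generating; this one-liner is a small assumed addition, not shown in the original card:

```python
# Disable dropout and other training-only behavior for inference (assumed step).
model.eval()
```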

## Create Prompt Template

```python
def generate_prompt(instruction, input=None):
    # The templates are in Indonesian. With an input, the preamble reads:
    # "Below is an instruction that describes a task, paired with an input that
    # provides further context. Write a response that appropriately completes
    # the request." ("Petunjuk" = "Instruction", "Masukan" = "Input")
    if input:
        return f"""Berikut ini adalah petunjuk yang menjelaskan tugas, serta masukan yang menyediakan konteks tambahan. Tulis balasan yang melengkapi permintaan dengan tepat.

Petunjuk:
{instruction}

Masukan:
{input}

Output:"""
    # Without an input: "Below is a guide that describes a task. Please write a
    # response that appropriately completes the request." ("Panduan" = "Guide")
    else:
        return f"""Berikut ini terdapat panduan yang menjelaskan tugas. Mohon tuliskan balasan yang melengkapi permintaan dengan tepat.

Panduan:
{instruction}

Output:"""
```
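
As a quick sanity check, you can render the template directly; the sample instruction below is only illustrative:

```python
# Render the no-input variant of the prompt.
# Sample instruction is hypothetical: "Name the capital of Indonesia."
print(generate_prompt("Sebutkan ibu kota Indonesia."))
```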

## Evaluation

You are free to change the parameters inside `GenerationConfig` to get better results.

```python
generation_config = GenerationConfig(
    temperature=0.2,
    top_p=0.75,
    num_beams=8,
)

def evaluate(instruction, input=None):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].cuda()
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=256,
    )
    for s in generation_output.sequences:
        output = tokenizer.decode(s)
        # Print only the text generated after the "Output:" marker in the prompt.
        print("Output:", output.split("Output:")[1].strip())

# Input your question/instruction ("Petunjuk" = "Instruction").
evaluate(input("Petunjuk: "))
```
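
If 8-beam search is too slow on your hardware, a sampling-based configuration is a common alternative; the values below are illustrative assumptions, not tuned settings from this card:

```python
# Hypothetical lighter setup: nucleus sampling instead of 8-beam search.
generation_config = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    num_beams=1,
)
```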