Update README.md
README.md
@@ -43,10 +43,14 @@ model_name = "UBC-NLP/GreenLLaMA-7b"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-
-
+prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
+
+input = "Those shithead should stop talking and get the f*ck out of this place"
+input_text = prompt+input+"\n"
+
+input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
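All six hunks in this commit apply the same two fixes, shown first here in full. For reference, a minimal runnable consolidation of the updated snippet might look like the sketch below. The device handling is my assumption: the README tokenizes to "cuda" even though this first variant loads the model without any device placement, so the sketch keeps model and inputs on the same device. The prompt is shown as a placeholder; it should be copied verbatim from the diff, spelling included, since it presumably matches the prompt the model was trained with.

```python
# A consolidated sketch of the updated first snippet; the device logic is
# an assumption, the commit itself leaves the model on its default device.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "UBC-NLP/GreenLLaMA-7b"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

prompt = "..."       # the detoxification prompt from the diff, copied verbatim
toxic_input = "..."  # the text to rewrite
inputs = tokenizer(prompt + toxic_input + "\n", return_tensors="pt").to(device)

# do_sample=False makes generate() greedy, so the rewrite is deterministic.
outputs = model.generate(**inputs, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```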
@@ -66,11 +70,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
 prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
 
 input = "Those shithead should stop talking and get the f*ck out of this place"
-input_text = prompt+
+input_text = prompt+input+"\n"
 
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
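This hunk and the four after it differ from the first only in how the model is loaded. With device_map="auto" the weights are placed by accelerate and may be sharded, so a slightly more defensive variant than the hard-coded .to("cuda") reads the device back from the model. A sketch, assuming accelerate is installed:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "UBC-NLP/GreenLLaMA-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

input_text = "..."  # prompt + input + "\n", built exactly as in the diff

# model.device reports where the first weights landed, which is also
# where generate() expects its inputs.
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```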
@@ -91,11 +95,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torc
 prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
 
 input = "Those shithead should stop talking and get the f*ck out of this place"
-input_text = prompt+
+input_text = prompt+input+"\n"
 
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
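This hunk and the next patch the half-precision variants; their headers are truncated at "torc" in this view, which presumably continues as torch_dtype. A sketch of what that load likely looks like, with the specific dtype being my assumption:

```python
# Assumption: the truncated header refers to torch_dtype. Loading in
# float16 roughly halves memory relative to the default float32 weights.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "UBC-NLP/GreenLLaMA-7b",
    device_map="auto",
    torch_dtype=torch.float16,  # or torch.bfloat16 on hardware that supports it
)
```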
@@ -112,11 +116,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torc
 prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
 
 input = "Those shithead should stop talking and get the f*ck out of this place"
-input_text = prompt+
+input_text = prompt+input+"\n"
 
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
@@ -137,11 +141,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=qua
 prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
 
 input = "Those shithead should stop talking and get the f*ck out of this place"
-input_text = prompt+
+input_text = prompt+input+"\n"
 
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
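The last two hunks cover the quantized variants; their headers are cut off at quantization_config=qua, so the exact config object is not visible in this view. A plausible reading, using the transformers BitsAndBytesConfig API and labeled as an assumption:

```python
# Assumption: quantization_config=qua... refers to a BitsAndBytesConfig.
# 8-bit shown; a 4-bit variant would set load_in_4bit=True instead.
# Requires the bitsandbytes package alongside transformers.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "UBC-NLP/GreenLLaMA-7b",
    quantization_config=quantization_config,
)
```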
@@ -160,11 +164,11 @@ model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=qua
 prompt = "Rewrite the following toxic input into non-toxic version. Let's break the input down step by step to rewrite the non-toxic version. You should first think about the expanation of why the input text is toxic. Then generate the detoxic output. You must preserve the original meaning as much as possible.\nInput: "
 
 input = "Those shithead should stop talking and get the f*ck out of this place"
-input_text = prompt+
+input_text = prompt+input+"\n"
 
 input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
 
-outputs = model.generate(**input_ids)
+outputs = model.generate(**input_ids, do_sample=False)
 print(tokenizer.decode(outputs[0]))
 ```
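Taken together, every hunk makes the same two fixes: the dangling input_text = prompt+ concatenation is completed to prompt+input+"\n", and do_sample=False pins generate() to greedy decoding so the detoxified rewrite is reproducible across runs. One parameter the commit does not set is an explicit generation budget; a hedged addition, continuing from any of the sketches above:

```python
# max_new_tokens is my addition, not part of the commit; without it,
# generate() stops at the model's default length limit, which may cut
# off the explanation-plus-rewrite output the prompt asks for.
outputs = model.generate(**inputs, do_sample=False, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```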