Update README.md
README.md
CHANGED
```diff
@@ -6,37 +6,45 @@ language:
 - en
 ---
 from transformers import AutoTokenizer
+
 import transformers
+
 import torch
 
 model = "newsmediabias/UnBIAS-LLama2-Debiaser-Chat-QLoRA"
+
 tokenizer = AutoTokenizer.from_pretrained(model)
 
 pipeline = transformers.pipeline(
+
     "text-generation",
+
     model=model,
+
     torch_dtype=torch.float16,
+
     device_map="auto",
 )
-sys_message = "Task: Please generate a bias-free version of the text provided, ensuring it's free from biases related to age, gender, politics, social nuances, or economic background, while keeping it roughly the same length as the original:"
 
-
-
+sys_message = "Task:""
+
+prompt=""
+
+intput_text=""
 
 sequences = pipeline(
+
     intput_text,
+
     do_sample=True,
+
     top_k=10,
+
     num_return_sequences=1,
+
     eos_token_id=tokenizer.eos_token_id,
+
     max_length=len(prompt)+100,
-    #max_length=200,
 )
 
 res=sequences[0]['generated_text']
-result_part = res.split('[/INST]')[-1]
-clean_result = ''.join(c for c in result_part if c.isprintable())
-print(clean_result)
-
-
-Output is: Qatar financing of terrorism and extremist ideology should be sufficient for the United States to consider imposing sanctions on Qatari LNG/oil exports. However, there are other factors at play, such as the significant investments Qatar makes in US defense contractors and the close relationship between ExxonMobil and the Emir, Tamim bin Hamad Al Thani. It appears that the United States is not fully committed to addressing the issue of terrorism. In light of this, Alaska LNG should be explored as an alternative to the LNG currently sold by Qatar.
```
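The updated snippet leaves `sys_message`, `prompt`, and `intput_text` as empty placeholders, so below is a minimal end-to-end sketch of how the pieces could fit together. It is not what the card prescribes: the `[INST]`/`<<SYS>>` wrapping is the standard Llama-2 chat format (an assumption here), the example `input_text` sentence is invented, the instruction text is restored from the previous revision of this README, the wrapped `prompt` (rather than the bare input) is what gets passed to the pipeline, and the snippet's `intput_text` is spelled `input_text`.

```python
# Minimal end-to-end sketch; lines marked "assumption" are not stated by this README.
from transformers import AutoTokenizer
import transformers
import torch

model = "newsmediabias/UnBIAS-LLama2-Debiaser-Chat-QLoRA"
tokenizer = AutoTokenizer.from_pretrained(model)

pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Instruction text taken from the previous revision of this README.
sys_message = (
    "Task: Please generate a bias-free version of the text provided, "
    "ensuring it's free from biases related to age, gender, politics, "
    "social nuances, or economic background, while keeping it roughly "
    "the same length as the original:"
)

# Assumption: an invented example sentence to debias.
input_text = "Anyone who still uses a landline is clearly too old to keep up with technology."

# Assumption: standard Llama-2 chat wrapping with a system block.
prompt = f"<s>[INST] <<SYS>>\n{sys_message}\n<</SYS>>\n\n{input_text} [/INST]"

sequences = pipeline(
    prompt,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=len(prompt) + 100,  # as in the README; see the note below
)

res = sequences[0]["generated_text"]
# Post-processing from the previous revision: keep only the reply after
# [/INST] and drop any non-printable characters.
result_part = res.split("[/INST]")[-1]
clean_result = "".join(c for c in result_part if c.isprintable())
print(clean_result)
```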
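One detail worth flagging in the snippet: `max_length` is measured in tokens, while `len(prompt)` counts characters, so `len(prompt)+100` only loosely bounds the generation. If the intent is "about 100 new tokens", `max_new_tokens` expresses that directly; a minimal variant of the call:

```python
sequences = pipeline(
    prompt,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=100,  # counts generated tokens, independent of prompt length
)
```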
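Since the repository name advertises a QLoRA adapter, it may also be convenient to load the weights in 4-bit rather than fp16. This is an optional variant, not something the README prescribes, and it assumes `bitsandbytes` and `accelerate` are installed; the generation call itself is unchanged, only the loading step differs.

```python
# Optional 4-bit loading (assumption: bitsandbytes + accelerate are installed).
import torch
import transformers
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

pipeline = transformers.pipeline(
    "text-generation",
    model="newsmediabias/UnBIAS-LLama2-Debiaser-Chat-QLoRA",
    device_map="auto",
    model_kwargs={"quantization_config": bnb_config},
)
```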