Update README.md
Browse files
README.md
CHANGED
@@ -23,9 +23,8 @@ More information needed
|
|
23 |
## Directly uses
|
24 |
|
25 |
```Python
|
|
|
26 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
27 |
-
from transformers import pipeline
|
28 |
-
from transformers import GenerationConfig
|
29 |
additional_special_tokens = {'additional_special_tokens':['<|begin_of_java_code|>','<|end_of_java_code|>'\
|
30 |
,'<|begin_of_c-sharp_code|>','<|end_of_c-sharp_code|>',\
|
31 |
'<|translate|>']}
|
@@ -38,13 +37,14 @@ model = AutoModelForCausalLM.from_pretrained(basemodel,config=config)
|
|
38 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device('cpu')
|
39 |
model.to(device)
|
40 |
|
41 |
-
ger = pipeline(task='text-generation',model= model,tokenizer=tokenizer,config=GenerationConfig(pad_token_id = tokenizer.eos_token_id))
|
42 |
code = "public void serialize(LittleEndianOutput out) {out.writeShort(field_1_vcenter);}\n"
|
43 |
prefix = additional_special_tokens['additional_special_tokens'][0]
|
44 |
input_str = prefix + code +additional_special_tokens['additional_special_tokens'][1] + additional_special_tokens['additional_special_tokens'][2]
|
45 |
-
|
|
|
|
|
|
|
46 |
|
47 |
-
print(ger(input_str,max_new_tokens = 256))
|
48 |
```
|
49 |
|
50 |
More information needed
|
|
|
23 |
## Directly uses
|
24 |
|
25 |
```Python
|
26 |
+
|
27 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
|
|
|
28 |
additional_special_tokens = {'additional_special_tokens':['<|begin_of_java_code|>','<|end_of_java_code|>'\
|
29 |
,'<|begin_of_c-sharp_code|>','<|end_of_c-sharp_code|>',\
|
30 |
'<|translate|>']}
|
|
|
37 |
device = torch.device("cuda") if torch.cuda.is_available() else torch.device('cpu')
|
38 |
model.to(device)
|
39 |
|
|
|
40 |
code = "public void serialize(LittleEndianOutput out) {out.writeShort(field_1_vcenter);}\n"
|
41 |
prefix = additional_special_tokens['additional_special_tokens'][0]
|
42 |
input_str = prefix + code +additional_special_tokens['additional_special_tokens'][1] + additional_special_tokens['additional_special_tokens'][2]
|
43 |
+
input = tokenizer(input_str,return_tensors = "pt")
|
44 |
+
output = model.generate(**input, max_length = 256)
|
45 |
+
outputs_str = tokenizer.decode(output[0])
|
46 |
+
print(outputs_str)
|
47 |
|
|
|
48 |
```
|
49 |
|
50 |
More information needed
|