Update README.md
Browse files
README.md
CHANGED
@@ -34,7 +34,8 @@ This is the model card of a 🤗 transformers model that has been pushed on the
|
|
34 |
|
35 |
## Direct Use
|
36 |
|
37 |
-
```
|
|
|
38 |
from transformers import AutoTokenizer, AutoModelForCausalLM,pipeline
|
39 |
from peft import PeftModelForCausalLM
|
40 |
from transformers import BitsAndBytesConfig
|
@@ -51,11 +52,13 @@ prompt = "<|translate|> public void removePresentationFormat() {remove1stPropert
|
|
51 |
input = tokenzier(prompt,return_tensors="pt")
|
52 |
output_ids = model.generate(**input)
|
53 |
print(tokenzier.batch_decode(output_ids))
|
|
|
54 |
```
|
55 |
|
56 |
### Use with vLLM
|
57 |
|
58 |
-
```
|
|
|
59 |
from vllm import LLM, SamplingParams,EngineArgs, LLMEngine, RequestOutput
|
60 |
from vllm.lora.request import LoRARequest
|
61 |
engine_args = EngineArgs(model="codellama/CodeLlama-7b-hf",
|
@@ -83,6 +86,7 @@ while engine.has_unfinished_requests():
|
|
83 |
for request_output in request_outputs:
|
84 |
finished = finished | request_output.finished
|
85 |
print(request_outputs[0].outputs[0].text)
|
|
|
86 |
```
|
87 |
|
88 |
[More Information Needed]
|
|
|
34 |
|
35 |
## Direct Use
|
36 |
|
37 |
+
```python
|
38 |
+
|
39 |
from transformers import AutoTokenizer, AutoModelForCausalLM,pipeline
|
40 |
from peft import PeftModelForCausalLM
|
41 |
from transformers import BitsAndBytesConfig
|
|
|
52 |
input = tokenzier(prompt,return_tensors="pt")
|
53 |
output_ids = model.generate(**input)
|
54 |
print(tokenzier.batch_decode(output_ids))
|
55 |
+
|
56 |
```
|
57 |
|
58 |
### Use with vLLM
|
59 |
|
60 |
+
```python
|
61 |
+
|
62 |
from vllm import LLM, SamplingParams,EngineArgs, LLMEngine, RequestOutput
|
63 |
from vllm.lora.request import LoRARequest
|
64 |
engine_args = EngineArgs(model="codellama/CodeLlama-7b-hf",
|
|
|
86 |
for request_output in request_outputs:
|
87 |
finished = finished | request_output.finished
|
88 |
print(request_outputs[0].outputs[0].text)
|
89 |
+
|
90 |
```
|
91 |
|
92 |
[More Information Needed]
|