fionazhang
committed
Update README.md
README.md
CHANGED
@@ -34,23 +34,35 @@ This repository includes the weights learned during the training process. It sho
 
 <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
 ```python
-from transformers import
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 # Load the tokenizer, adjust configuration if needed
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)
 
-#
-
-
+# Text generation
+def generate_text_sequences(pipe, prompt):
+    sequences = pipe(
+        f"prompt",
+        do_sample=True,
+        max_new_tokens=100,
+        temperature=0.8,
+        top_k=50,
+        top_p=0.95,
+        num_return_sequences=1,
+    )
+    return sequences[0]['generated_text']
+
+# Now you can use the model for inference
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+    pad_token_id=2
 )
-
-# Now you can use `fine_tuned_model` for inference or further training
-input_text = "The impact of climate change on"
-output_text = fine_tuned_model.generate(tokenizer.encode(input_text, return_tensors="pt"))
-
-print(tokenizer.decode(output_text[0], skip_special_tokens=True))
-
+print(generate_text_sequences(pipe, "your prompt"))
 ```
 
 
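As committed, the new snippet will not run verbatim: `model_name` is never defined in the shown hunk, `torch` is used (`torch.bfloat16`) but not imported, and `pipe(f"prompt", ...)` passes the literal string `"prompt"` rather than the function's `prompt` argument. Below is a minimal runnable sketch of the same snippet with those three points addressed; the model id is a placeholder, not this repository's actual id.

```python
# Minimal runnable sketch of the snippet added in this commit.
# Assumptions: the model id below is a placeholder (substitute the real repo id);
# `import torch` is added for `torch.bfloat16`; the prompt argument is actually
# interpolated (the committed `f"prompt"` generated from the literal "prompt").
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "your-namespace/your-model"  # placeholder, not the real repo id

# Load the tokenizer and model, adjust configuration if needed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_text_sequences(pipe, prompt):
    # Sampling settings copied verbatim from the committed snippet
    sequences = pipe(
        prompt,  # fixed: was f"prompt", which ignored the argument
        do_sample=True,
        max_new_tokens=100,
        temperature=0.8,
        top_k=50,
        top_p=0.95,
        num_return_sequences=1,
    )
    return sequences[0]["generated_text"]

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    pad_token_id=2,
)
print(generate_text_sequences(pipe, "The impact of climate change on"))
```

The sampling knobs (`temperature=0.8`, `top_k=50`, `top_p=0.95`) are taken directly from the commit; `pad_token_id=2` presumably matches the base model's EOS token, but it is worth checking against `tokenizer.eos_token_id` before relying on it.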