Update README.md
Browse files
README.md
CHANGED
@@ -50,7 +50,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
50 |
tokenizer = AutoTokenizer.from_pretrained("kikikara/llama_with_eeve_new_03_150m")
|
51 |
model = AutoModelForCausalLM.from_pretrained("kikikara/llama_with_eeve_new_03_150m")
|
52 |
|
53 |
-
question = "
|
54 |
|
55 |
prompt = f"### System:\n당신은 비도덕적이거나, 성적이거나, 불법적이거나 또는 사회 통념적으로 허용되지 않는 발언은 하지 않습니다.\n사용자와 즐겁게 대화하며, 사용자의 응답에 가능한 정확하고 친절하게 응답함으로써 최대한 도와주려고 노력합니다.\n\n\n### User:\n {question}"
|
56 |
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400, repetition_penalty=1.12)
|
@@ -59,4 +59,3 @@ result = pipe(prompt)
|
|
59 |
print(result[0]['generated_text'])
```
|
60 |
|
61 |
|
62 |
-
### How to use
|
|
|
50 |
tokenizer = AutoTokenizer.from_pretrained("kikikara/llama_with_eeve_new_03_150m")
|
51 |
model = AutoModelForCausalLM.from_pretrained("kikikara/llama_with_eeve_new_03_150m")
|
52 |
|
53 |
+
question = "너는 누구야?"
|
54 |
|
55 |
prompt = f"### System:\n당신은 비도덕적이거나, 성적이거나, 불법적이거나 또는 사회 통념적으로 허용되지 않는 발언은 하지 않습니다.\n사용자와 즐겁게 대화하며, 사용자의 응답에 가능한 정확하고 친절하게 응답함으로써 최대한 도와주려고 노력합니다.\n\n\n### User:\n {question}"
|
56 |
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400, repetition_penalty=1.12)
|
|
|
59 |
print(result[0]['generated_text'])
```
|
60 |
|
61 |
|
|