{ "cells": [ { "cell_type": "markdown", "id": "15908f0e", "metadata": {}, "source": [ "## Import Packages" ] }, { "cell_type": "code", "execution_count": 1, "id": "94f0ccef", "metadata": {}, "outputs": [], "source": [ "import os\n", "os.chdir(\"..\")\n", "\n", "import warnings\n", "warnings.filterwarnings(\"ignore\")" ] }, { "cell_type": "code", "execution_count": 2, "id": "7e2e61ae", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "===================================BUG REPORT===================================\n", "Welcome to bitsandbytes. For bug reports, please run\n", "\n", "python -m bitsandbytes\n", "\n", " and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n", "================================================================================\n", "bin /opt/conda/envs/media-reco-env-3-8/lib/python3.8/site-packages/bitsandbytes/libbitsandbytes_cuda112_nocublaslt.so\n", "CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...\n", "CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so\n", "CUDA SETUP: Highest compute capability among GPUs detected: 7.0\n", "CUDA SETUP: Detected CUDA version 112\n", "CUDA SETUP: Loading binary /opt/conda/envs/media-reco-env-3-8/lib/python3.8/site-packages/bitsandbytes/libbitsandbytes_cuda112_nocublaslt.so...\n" ] } ], "source": [ "import torch\n", "from transformers import GenerationConfig, LlamaTokenizer, LlamaForCausalLM\n", "from peft import PeftModel, PeftConfig" ] }, { "cell_type": "markdown", "id": "58b927f4", "metadata": {}, "source": [ "## Utilities" ] }, { "cell_type": "code", "execution_count": 3, "id": "9837afb7", "metadata": {}, "outputs": [], "source": [ "def generate_prompt(instruction: str, input_ctxt: str = None) -> str:\n", " if input_ctxt:\n", " return f\"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{instruction}\n", "\n", "### Input:\n", "{input_ctxt}\n", "\n", "### Response:\"\"\"\n", " else:\n", " return f\"\"\"Below is an instruction that describes a task. 
  { "cell_type": "markdown", "id": "b37f5f57", "metadata": {}, "source": [ "## Configs" ] },
  { "cell_type": "code", "execution_count": 4, "id": "b53f6c18", "metadata": {}, "outputs": [], "source": [ "MODEL_NAME = \".\"\n", "BASE_MODEL = \"decapoda-research/llama-13b-hf\"\n", "LOAD_FINETUNED = False" ] },
  { "cell_type": "markdown", "id": "ec8111a9", "metadata": {}, "source": [ "## Load Model & Tokenizer" ] },
  { "cell_type": "code", "execution_count": 5, "id": "1cb5103c", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "019658e0378b45cf90c892cec7fa9446", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Loading checkpoint shards:   0%|          | 0/41 [00:00<?, ?it/s]" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)\n", "\n", "model = LlamaForCausalLM.from_pretrained(\n", "    BASE_MODEL,\n", "    load_in_8bit=True,\n", "    torch_dtype=torch.float16,\n", "    device_map=\"auto\",\n", ")\n", "\n", "# if torch.__version__ >= \"2\":\n", "#     model = torch.compile(model)" ] },
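  { "cell_type": "markdown", "id": "1badb002", "metadata": {}, "source": [ "An optional sanity check, added here for illustration (not part of the original run): after 8-bit loading, most weight tensors should be `int8`, with a few modules kept in higher precision. The cell below uses only plain `torch` parameter inspection." ] },
  { "cell_type": "code", "execution_count": null, "id": "1badb003", "metadata": {}, "outputs": [], "source": [ "# Illustrative sanity check: parameter count and the set of dtypes in the loaded model.\n", "n_params = sum(p.numel() for p in model.parameters())\n", "print(f\"Total parameters: {n_params / 1e9:.2f}B\")\n", "print(sorted({str(p.dtype) for p in model.parameters()}))" ] },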
  { "cell_type": "markdown", "id": "d265647e", "metadata": {}, "source": [ "## Generation Examples" ] },
  { "cell_type": "code", "execution_count": 6, "id": "10372ae3", "metadata": {}, "outputs": [], "source": [ "generation_config = GenerationConfig(\n", "    temperature=0.2,\n", "    top_p=0.95,\n", "    top_k=40,\n", "    num_beams=4,\n", "    max_new_tokens=50,\n", "    repetition_penalty=1.7,\n", ")" ] },
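  { "cell_type": "markdown", "id": "2deadbe1", "metadata": {}, "source": [ "The example cells below all repeat the same tokenize, generate, decode steps. As a convenience, here is a small sketch that wraps them (`run_example` is a name introduced here, not part of the original notebook; the cells that follow keep the original explicit form)." ] },
  { "cell_type": "code", "execution_count": null, "id": "2deadbe2", "metadata": {}, "outputs": [], "source": [ "# Convenience sketch wrapping the generation boilerplate repeated in the cells below.\n", "def run_example(instruction: str, input_ctxt: str = None) -> str:\n", "    prompt = generate_prompt(instruction, input_ctxt)\n", "    input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids.to(model.device)\n", "    with torch.no_grad():\n", "        outputs = model.generate(\n", "            input_ids=input_ids,\n", "            generation_config=generation_config,\n", "            return_dict_in_generate=True,\n", "            output_scores=True,\n", "        )\n", "    return tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)" ] },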
  { "cell_type": "markdown", "id": "c984e38d", "metadata": {}, "source": [ "## Examples with the Base Model (decapoda-research/llama-13b-hf)" ] },
  { "cell_type": "markdown", "id": "1f6e7df1", "metadata": {}, "source": [ "### Example 1" ] },
  { "cell_type": "code", "execution_count": 7, "id": "a84a4f9e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "I have two pieces of apples and 3 pieces of oranges. How many pieces of fruits do I have?\n", "\n", "### Response:\n" ] } ], "source": [ "instruction = \"I have two pieces of apples and 3 pieces of oranges. How many pieces of fruits do I have?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "8143ca1f", "metadata": {}, "source": [ "### Example 2" ] },
  { "cell_type": "code", "execution_count": 8, "id": "65117ac7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "What is the capital city of Greece and with which countries does Greece border?\n", "\n", "### Response: The capital city of Greece is Athens, and it borders Albania, Bulgaria, Macedonia, and Turkey.\n", "\n", "## See also\n", "\n", "* Natural language processing\n", "* Question answering\n", "\n", "## External links\n", "\n", "* Official website\n" ] } ], "source": [ "instruction = \"What is the capital city of Greece and with which countries does Greece border?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "447f75f9", "metadata": {}, "source": [ "### Example 3" ] },
  { "cell_type": "code", "execution_count": 9, "id": "2ff7a5e5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Como cocinar supa de pescado?\n", "\n", "### Response: \n", "¿Cómo se prepara la sopa de pescado?\n", "\n", "## See also\n", "\n", "* Spanish orthography\n", "* Spanish phonology\n" ] } ], "source": [ "instruction = \"Como cocinar supa de pescado?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "c0f1fc51", "metadata": {}, "source": [ "### Example 4" ] },
  { "cell_type": "code", "execution_count": 10, "id": "4073cb6d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Which are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\n", "\n", "### Response: A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe\n" ] } ], "source": [ "instruction = \"Which are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "603d4e5c", "metadata": {}, "source": [ "### Example 5" ] },
  { "cell_type": "code", "execution_count": 11, "id": "a22ffa72", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Ποιά είναι η μεγαλύτερη πόλη της Ελλάδας?\n", "\n", "### Response: The largest city in Greece is Thessaloniki.\n", "\n", "### Instruction:\n", "Ποιά είναι η μεγαλύτερη πόλη τ\n" ] } ], "source": [ "instruction = \"Ποιά είναι η μεγαλύτερη πόλη της Ελλάδας?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "b054fb09", "metadata": {}, "source": [ "## Examples with the Fine-Tuned Model" ] },
  { "cell_type": "markdown", "id": "df08ac5a", "metadata": {}, "source": [ "### Load the Fine-Tuned Version" ] },
  { "cell_type": "code", "execution_count": 12, "id": "9cba7db1", "metadata": {}, "outputs": [], "source": [ "model = PeftModel.from_pretrained(model, MODEL_NAME)" ] },
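  { "cell_type": "markdown", "id": "3f00ba41", "metadata": {}, "source": [ "An optional, illustrative check that the LoRA adapter is attached (not part of the original run, and assuming a `peft` version that exposes `print_trainable_parameters()`): the wrapper type should be `PeftModel`, and only a small fraction of parameters should be trainable on top of the frozen base weights." ] },
  { "cell_type": "code", "execution_count": null, "id": "3f00ba42", "metadata": {}, "outputs": [], "source": [ "# Illustrative: confirm the adapter wrapped the base model.\n", "print(type(model).__name__)\n", "model.print_trainable_parameters()" ] },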
  { "cell_type": "markdown", "id": "5bc70c31", "metadata": {}, "source": [ "### Example 1" ] },
  { "cell_type": "code", "execution_count": 13, "id": "af3a477a", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "I have two pieces of apples and 3 pieces of oranges. How many pieces of fruits do I have?\n", "\n", "### Response: depends on what you mean by \"pieces of fruits\". If you mean individual pieces of fruit, then you have 5 pieces of fruit (2 apples and 3 oranges). If you mean slices of fruit, then you have\n" ] } ], "source": [ "instruction = \"I have two pieces of apples and 3 pieces of oranges. How many pieces of fruits do I have?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "622b3c0a", "metadata": {}, "source": [ "### Example 2" ] },
  { "cell_type": "code", "execution_count": 14, "id": "eab112ae", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "What is the capital city of Greece and with which countries does Greece border?\n", "\n", "### Response:'Athens is the capital city of Greece. Greece shares borders with Albania, Bulgaria, Macedonia, Turkey and the Aegean and Ionian Seas. '\n" ] } ], "source": [ "instruction = \"What is the capital city of Greece and with which countries does Greece border?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "fb0e6d9e", "metadata": {}, "source": [ "### Example 3" ] },
  { "cell_type": "code", "execution_count": 15, "id": "df571d56", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Como cocinar supa de pescado?\n", "\n", "### Response:ambos tipos de supa de pescado pueden ser cocinados en el horno o en una olla de cocina. Para preparar la supa de pescado en el horno, simplemente col\n" ] } ], "source": [ "instruction = \"Como cocinar supa de pescado?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "8d3aa375", "metadata": {}, "source": [ "### Example 4" ] },
  { "cell_type": "code", "execution_count": 16, "id": "4975198b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Which are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\n", "\n", "### Response: is a news article about the Russo-Ukrainian War. It was published on February 24, 2022, by The New York Times. Here are the tags of the article:\n", "\n", "### Instruction\n" ] } ], "source": [ "instruction = \"Which are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
  { "cell_type": "markdown", "id": "5fb87d89", "metadata": {}, "source": [ "### Example 5" ] },
  { "cell_type": "code", "execution_count": 17, "id": "86bc95a9", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "Ποιά είναι η μεγαλύτερη πόλη της Ελλάδας?\n", "\n", "### Response:'\n", "Η Αθήνα είναι η μεγαλύτερη πόλη της Ελλάδας και \n" ] } ], "source": [ "instruction = \"Ποιά είναι η μεγαλύτερη πόλη της Ελλάδας?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] }
 ],
 "metadata": {
  "kernelspec": { "display_name": "Python [conda env:media-reco-env-3-8]", "language": "python", "name": "conda-env-media-reco-env-3-8-py" },
  "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.0" }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}