{ "cells": [ { "cell_type": "markdown", "id": "15908f0e", "metadata": {}, "source": [ "## Import Packages" ] }, { "cell_type": "code", "execution_count": null, "id": "94f0ccef", "metadata": {}, "outputs": [], "source": [ "import os\n", "os.chdir(\"..\")\n", "\n", "import torch\n", "from transformers import GenerationConfig, LlamaTokenizer, LlamaForCausalLM\n", "from peft import PeftModel, PeftConfig" ] }, { "cell_type": "markdown", "id": "58b927f4", "metadata": {}, "source": [ "## Utilities" ] }, { "cell_type": "code", "execution_count": null, "id": "9837afb7", "metadata": {}, "outputs": [], "source": [ "def generate_prompt(instruction: str, input_ctxt: str = None) -> str:\n", " if input_ctxt:\n", " return f\"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{instruction}\n", "\n", "### Input:\n", "{input_ctxt}\n", "\n", "### Response:\"\"\"\n", " else:\n", " return f\"\"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n", "\n", "### Instruction:\n", "{instruction}\n", "\n", "### Response:\"\"\"" ] }, { "cell_type": "markdown", "id": "b37f5f57", "metadata": {}, "source": [ "## Configs" ] }, { "cell_type": "code", "execution_count": null, "id": "b53f6c18", "metadata": {}, "outputs": [], "source": [ "MODEL_NAME = \".\"\n", "BASE_MODEL = \"decapoda-research/llama-7b-hf\"\n", "LOAD_FINETUNED = False" ] }, { "cell_type": "markdown", "id": "ec8111a9", "metadata": {}, "source": [ "## Load Model & Tokenizer" ] }, { "cell_type": "code", "execution_count": null, "id": "1cb5103c", "metadata": {}, "outputs": [], "source": [ "config = PeftConfig.from_pretrained(MODEL_NAME)\n", "\n", "tokenizer = LlamaTokenizer.from_pretrained(MODEL_NAME)\n", "\n", "model = LlamaForCausalLM.from_pretrained(\n", " BASE_MODEL,\n", " load_in_8bit=True,\n", " torch_dtype=torch.float16,\n", " device_map=\"auto\",\n", ")\n", " \n", "# model.eval()\n", "# if torch.__version__ >= \"2\":\n", "# model = torch.compile(model)" ] }, { "cell_type": "markdown", "id": "d265647e", "metadata": {}, "source": [ "## Generation Examples" ] }, { "cell_type": "code", "execution_count": null, "id": "10372ae3", "metadata": {}, "outputs": [], "source": [ "generation_config = GenerationConfig(\n", " temperature=0.2,\n", " top_p=0.95,\n", " top_k=40,\n", " num_beams=4,\n", " max_new_tokens=40,\n", " repetition_penalty=1.7,\n", ")" ] }, { "cell_type": "markdown", "id": "c984e38d", "metadata": {}, "source": [ "## Examples with Base (decapoda-research/llama-7b-hf) model" ] }, { "cell_type": "markdown", "id": "1f6e7df1", "metadata": {}, "source": [ "### Example 1" ] }, { "cell_type": "code", "execution_count": null, "id": "a84a4f9e", "metadata": {}, "outputs": [], "source": [ "instruction = \"I have two pieces of apples and 3 pieces of oranges. 
{ "cell_type": "markdown", "id": "c984e38d", "metadata": {}, "source": [ "## Examples with Base (decapoda-research/llama-7b-hf) Model" ] },
{ "cell_type": "markdown", "id": "1f6e7df1", "metadata": {}, "source": [ "### Example 1" ] },
{ "cell_type": "code", "execution_count": null, "id": "a84a4f9e", "metadata": {}, "outputs": [], "source": [ "instruction = \"I have two apples and 3 oranges. How many pieces of fruit do I have?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "8143ca1f", "metadata": {}, "source": [ "### Example 2" ] },
{ "cell_type": "code", "execution_count": null, "id": "65117ac7", "metadata": {}, "outputs": [], "source": [ "instruction = \"What is the capital city of Greece, and which countries does Greece border?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "447f75f9", "metadata": {}, "source": [ "### Example 3" ] },
{ "cell_type": "code", "execution_count": null, "id": "2ff7a5e5", "metadata": {}, "outputs": [], "source": [ "instruction = \"¿Cómo cocinar sopa de pescado?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
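{ "cell_type": "markdown", "id": "2c3d4e5f", "metadata": {}, "source": [ "### Aside: Generating with an Input Context\n", "\n", "Every example in this notebook passes `input_ctxt = None`. Below is a minimal sketch of the other path through `generate_prompt`, where the instruction is paired with a context; the instruction/context pair is purely illustrative." ] },
{ "cell_type": "code", "execution_count": null, "id": "3d4e5f60", "metadata": {}, "outputs": [], "source": [ "# Illustrative only: any instruction/context pair works here.\n", "instruction = \"Summarize the following text in one sentence.\"\n", "input_ctxt = \"LLaMA is a family of large language models released by Meta AI in February 2023.\"\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "    )\n", "\n", "print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))" ] },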
{ "cell_type": "markdown", "id": "c0f1fc51", "metadata": {}, "source": [ "### Example 4" ] },
{ "cell_type": "code", "execution_count": null, "id": "4073cb6d", "metadata": {}, "outputs": [], "source": [ "instruction = \"What are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "603d4e5c", "metadata": {}, "source": [ "### Example 5" ] },
{ "cell_type": "code", "execution_count": null, "id": "a22ffa72", "metadata": {}, "outputs": [], "source": [ "instruction = \"Ποια είναι η μεγαλύτερη πόλη της Ελλάδας?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "b054fb09", "metadata": {}, "source": [ "## Examples with Fine-Tuned Model" ] },
{ "cell_type": "markdown", "id": "df08ac5a", "metadata": {}, "source": [ "### Load the Fine-Tuned Adapter" ] },
{ "cell_type": "code", "execution_count": null, "id": "9cba7db1", "metadata": {}, "outputs": [], "source": [ "# Wrap the 8-bit base model with the PEFT adapter weights stored in MODEL_NAME.\n", "model = PeftModel.from_pretrained(model, MODEL_NAME)" ] },
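{ "cell_type": "markdown", "id": "4e5f6071", "metadata": {}, "source": [ "Optionally switch to inference mode, as the commented-out lines in the loading cell above suggest; a short sketch (`torch.compile` requires PyTorch 2.x):" ] },
{ "cell_type": "code", "execution_count": null, "id": "5f607182", "metadata": {}, "outputs": [], "source": [ "# Disable dropout and other train-time behaviour for inference.\n", "model.eval()\n", "\n", "# Optional: compile the model on PyTorch 2.x for faster generation.\n", "if torch.__version__ >= \"2\":\n", "    model = torch.compile(model)" ] },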
{ "cell_type": "markdown", "id": "5bc70c31", "metadata": {}, "source": [ "### Example 1" ] },
{ "cell_type": "code", "execution_count": null, "id": "af3a477a", "metadata": {}, "outputs": [], "source": [ "instruction = \"I have two apples and 3 oranges. How many pieces of fruit do I have?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "622b3c0a", "metadata": {}, "source": [ "### Example 2" ] },
{ "cell_type": "code", "execution_count": null, "id": "eab112ae", "metadata": {}, "outputs": [], "source": [ "instruction = \"What is the capital city of Greece, and which countries does Greece border?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "fb0e6d9e", "metadata": {}, "source": [ "### Example 3" ] },
{ "cell_type": "code", "execution_count": null, "id": "df571d56", "metadata": {}, "outputs": [], "source": [ "instruction = \"¿Cómo cocinar sopa de pescado?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
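{ "cell_type": "markdown", "id": "60718293", "metadata": {}, "source": [ "The decoded output above contains the full prompt followed by the completion. A small sketch that relies on the `### Response:` marker from `generate_prompt` to keep only the generated answer:" ] },
{ "cell_type": "code", "execution_count": null, "id": "718293a4", "metadata": {}, "outputs": [], "source": [ "# The model echoes the prompt; keep only the text after the response marker.\n", "answer = response.split(\"### Response:\")[-1].strip()\n", "print(answer)" ] },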
{ "cell_type": "markdown", "id": "8d3aa375", "metadata": {}, "source": [ "### Example 4" ] },
{ "cell_type": "code", "execution_count": null, "id": "4975198b", "metadata": {}, "outputs": [], "source": [ "instruction = \"What are the tags of the following article: 'A year ago, Russia invaded Ukraine in a major escalation of the Russo-Ukrainian War, which had begun in 2014. The invasion has resulted in thousands of deaths, and instigated Europe's largest refugee crisis since World War II.'?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] },
{ "cell_type": "markdown", "id": "5fb87d89", "metadata": {}, "source": [ "### Example 5" ] },
{ "cell_type": "code", "execution_count": null, "id": "86bc95a9", "metadata": {}, "outputs": [], "source": [ "instruction = \"Ποια είναι η μεγαλύτερη πόλη της Ελλάδας?\"\n", "input_ctxt = None  # For some tasks, you can provide an input context to help the model generate a better response.\n", "\n", "prompt = generate_prompt(instruction, input_ctxt)\n", "input_ids = tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "input_ids = input_ids.to(model.device)\n", "\n", "with torch.no_grad():\n", "    outputs = model.generate(\n", "        input_ids=input_ids,\n", "        generation_config=generation_config,\n", "        return_dict_in_generate=True,\n", "        output_scores=True,\n", "    )\n", "\n", "response = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)\n", "print(response)" ] }
], "metadata": { "kernelspec": { "display_name": "Python [conda env:media-reco-env-3-8]", "language": "python", "name": "conda-env-media-reco-env-3-8-py" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.0" } }, "nbformat": 4, "nbformat_minor": 5 }