{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os \n", "import numpy as np" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [], "source": [ "import getpass\n", "from langchain_groq import ChatGroq\n", "os.environ[\"GROQ_API_KEY\"] = getpass.getpass()\n", "llm_groq = ChatGroq(model=\"llama3-8b-8192\")" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[Document(metadata={'source': 'https://www.youtube.com/watch?v=Ilg3gGewQ5U&ab_channel=3Blue1Brown'}, page_content=\" Here, we tackle backpropagation, the core algorithm behind how neural networks learn. After a quick recap for where we are, the first thing I'll do is an intuitive walkthrough for what the algorithm is actually doing, without any reference to the formulas. Then, for those of you who do want to dive into the math, the next video goes into the calculus underlying all this. If you watched the last two videos, or if you're just jumping in with the appropriate background, you know what a neural network is, and how it feeds forward information. Here, we're doing the classic example of recognizing handwritten digits whose pixel values get fed into the first layer of the network with 784 neurons, and I've been showing a network with two hidden layers having just 16 neurons each, and an output layer of 10 neurons, indicating which digit the network is choosing as its answer. I'm also expecting you to understand gradient descent, as described in the last video, and how what we mean by l\")]\n" ] } ], "source": [ "from youtube_transcript_api import YouTubeTranscriptApi\n", "from langchain.docstore.document import Document\n", "\n", "def get_text_from_youtube_link(video_link,max_video_length=1000):\n", " video_text = \"\"\n", " meta_data = {\"source\": f\"{video_link}\"} \n", " video_id = video_link.split(\"watch?v=\")[1].split(\"&\")[0]\n", " srt = YouTubeTranscriptApi.get_transcript(video_id)\n", " for text_data in srt:\n", " video_text = video_text + \" \" + text_data.get(\"text\")\n", " if len(video_text) > max_video_length:\n", " video_text = video_text[0:max_video_length]\n", " document = [Document(page_content= video_text, metadata= meta_data)]\n", " return document\n", "\n", "\n", "video_document = get_text_from_youtube_link(\"https://www.youtube.com/watch?v=Ilg3gGewQ5U&ab_channel=3Blue1Brown\")\n", "\n", "print(video_document)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [], "source": [ "from langchain.chains.summarize.chain import load_summarize_chain\n", "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "def prompt_template_to_analyze_resume():\n", " template = \"\"\"\n", " You are provided with the Context of the you tube link . your task is to summarize of the content \n", " in few lines and higlight key points. 
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\u001b[1m> Entering new StuffDocumentsChain chain...\u001b[0m\n",
      "\n",
      "\n",
      "\u001b[1m> Entering new LLMChain chain...\u001b[0m\n",
      "Prompt after formatting:\n",
      "\u001b[32;1m\u001b[1;3mWrite a concise summary of the following:\n",
      "\n",
      "\n",
      "\" Here, we tackle backpropagation, the core algorithm behind how neural networks learn. After a quick recap for where we are, the first thing I'll do is an intuitive walkthrough for what the algorithm is actually doing, without any reference to the formulas. Then, for those of you who do want to dive into the math, the next video goes into the calculus underlying all this. If you watched the last two videos, or if you're just jumping in with the appropriate background, you know what a neural network is, and how it feeds forward information. Here, we're doing the classic example of recognizing handwritten digits whose pixel values get fed into the first layer of the network with 784 neurons, and I've been showing a network with two hidden layers having just 16 neurons each, and an output layer of 10 neurons, indicating which digit the network is choosing as its answer. I'm also expecting you to understand gradient descent, as described in the last video, and how what we mean by l\"\n",
      "\n",
      "\n",
      "CONCISE SUMMARY:\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "\n",
      "\u001b[1m> Finished chain.\u001b[0m\n",
      "The article discusses the algorithm of backpropagation, which is the key to how neural networks learn. It will provide an intuitive explanation of the algorithm without referencing formulas, followed by a mathematical breakdown for those who want to dive deeper. The article assumes a basic understanding of neural networks, including recognizing handwritten digits and using gradient descent.\n"
     ]
    }
   ],
   "source": [
    "# Summarize the transcript with the built-in 'stuff' chain and its default prompt.\n",
    "summarize_chain = load_summarize_chain(llm=llm_groq, chain_type='stuff', verbose=True)\n",
    "results = summarize_chain.invoke({'input_documents': video_document})\n",
    "print(results['output_text'])\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pylangchain",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}