Dataset Viewer (First 5GB)
| text (string, lengths 1.49k–90.8M) | id (string, lengths 23–24) | file_path (string, 52 classes) |
|---|---|---|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
},
"source": [
"# MNIST - TensorFlow Basics using CNN\n",
"\n",
"The objective of this notebook is to build a basic model for MNIST dataset using TensorFlow Recurrent Neural Network(CNN). This code is from [pythonprogramming.net](https://pythonprogramming.net/recurrent-neural-network-deep-learning-python-tensorflow-keras/)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
"_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
},
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"import tensorflow as tf\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense, Dropout, LSTM, CuDNNLSTM"
]
},
{
"cell_type": "markdown",
"metadata": {
"_uuid": "e6e262dc0719696f35503d30b2f09a814443309f"
},
"source": [
"## Loading the MNIST Data"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"_uuid": "49ae806cd8c982809c6b6feb459da56666bd5631"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(60000, 28, 28)\n",
"(28, 28)\n"
]
}
],
"source": [
"def load_data():\n",
" with np.load(\"../input/mnist.npz\") as f:\n",
" x_train, y_train = f['x_train'], f['y_train']\n",
" x_test, y_test = f['x_test'], f['y_test']\n",
" return (x_train, y_train), (x_test, y_test)\n",
"\n",
"(x_train, y_train), (x_test, y_test) = load_data()\n",
"print(x_train.shape)\n",
"print(x_train[0].shape)"
]
},
{
"cell_type": "markdown",
"metadata": {
"_uuid": "56807b6d7e933035acfcab5d94a38a5f0594b729"
},
"source": [
"## Normalize the data"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"_uuid": "2cd40096e731a5a8f8c53f3b530a2c5d81de6435"
},
"outputs": [],
"source": [
"x_train = x_train/255.0\n",
"x_test = x_test/255.0"
]
},
{
"cell_type": "markdown",
"metadata": {
"_uuid": "4bda7edf208247141af0c2a190d2b2b8ededb080"
},
"source": [
"## Build the Model"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"_uuid": "6cc3a770b1f757aff3e75d5218d5a007cb646326"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Train on 60000 samples, validate on 10000 samples\n",
"Epoch 1/3\n",
"60000/60000 [==============================] - 20s 335us/step - loss: 0.4009 - acc: 0.8745 - val_loss: 0.1041 - val_acc: 0.9698\n",
"Epoch 2/3\n",
"60000/60000 [==============================] - 16s 273us/step - loss: 0.1229 - acc: 0.9676 - val_loss: 0.0687 - val_acc: 0.9806\n",
"Epoch 3/3\n",
"60000/60000 [==============================] - 18s 298us/step - loss: 0.0879 - acc: 0.9765 - val_loss: 0.0748 - val_acc: 0.9792\n"
]
},
{
"data": {
"text/plain": [
"<tensorflow.python.keras.callbacks.History at 0x7f9f5f5b5ac8>"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = Sequential()\n",
"\n",
"# Layers\n",
"model.add(CuDNNLSTM(128, input_shape=(x_train.shape[1:]), return_sequences=True))\n",
"model.add(Dropout(0.2))\n",
"\n",
"model.add(CuDNNLSTM(128))\n",
"model.add(Dropout(0.2))\n",
"\n",
"model.add(Dense(32, activation='relu'))\n",
"model.add(Dropout(0.2))\n",
"\n",
"model.add(Dense(10, activation='softmax'))\n",
"\n",
"# Optimizer\n",
"opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)\n",
"\n",
"#Compile\n",
"model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n",
"\n",
"#Fit\n",
"model.fit(x_train, y_train, epochs=3, validation_data=(x_test, y_test))\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"_uuid": "72d3edb8129d3c5d92fcbe784b5fbcea70836838"
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
| 0011/402/11402087.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
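The record above is a TF 1.x-era notebook: `CuDNNLSTM` was a separate GPU-only layer at the time, and the `lr`/`decay` optimizer arguments have since been renamed. Below is a minimal sketch of the same recurrent MNIST classifier written against current TF 2.x; it is an adaptation rather than the recorded code, and it assumes MNIST is fetched through `tf.keras.datasets` instead of the notebook's local `../input/mnist.npz` file.

```python
# Sketch of the notebook's model on TF 2.x. Plain LSTM dispatches to the cuDNN
# kernel automatically when a compatible GPU is available, so CuDNNLSTM is not needed.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixel values to [0, 1]

model = Sequential([
    LSTM(128, input_shape=x_train.shape[1:], return_sequences=True),  # 28 timesteps x 28 features
    Dropout(0.2),
    LSTM(128),
    Dropout(0.2),
    Dense(32, activation='relu'),
    Dropout(0.2),
    Dense(10, activation='softmax'),
])

model.compile(loss='sparse_categorical_crossentropy',  # labels stay as integer digits 0-9
              optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3, validation_data=(x_test, y_test))
```

`sparse_categorical_crossentropy` is kept because the labels remain integer class indices; the second notebook below one-hot encodes its labels instead and therefore uses the non-sparse loss.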
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
"_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['train', 'sampleSubmission.csv', 'test1']\n"
]
}
],
"source": [
"# This Python 3 environment comes with many helpful analytics libraries installed\n",
"# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n",
"# For example, here's several helpful packages to load in \n",
"\n",
"import numpy as np # linear algebra\n",
"import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
"\n",
"# Input data files are available in the \"../input/\" directory.\n",
"# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n",
"\n",
"import os\n",
"print(os.listdir(\"../input\"))\n",
"\n",
"# Any results you write to the current directory are saved as output."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"_uuid": "14477cdaa5eca7a39b5548a7b37a64f4963acd3d"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting tensornets\r\n",
"\u001b[?25l Downloading https://files.pythonhosted.org/packages/fc/e3/74e43fe9ab8203cd4ffe97f3943d631a587e2bea651be4f09713993d423f/tensornets-0.4.0.tar.gz (587kB)\r\n",
"\u001b[K 100% |ββββββββββββββββββββββββββββββββ| 593kB 25.4MB/s \r\n",
"\u001b[?25hBuilding wheels for collected packages: tensornets\r\n",
" Building wheel for tensornets (setup.py) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \bdone\r\n",
"\u001b[?25h Stored in directory: /tmp/.cache/pip/wheels/0c/ae/7a/6d31e83c89c2b02c13e8f08ee8e20abe71670061e057a6058f\r\n",
"Successfully built tensornets\r\n",
"Installing collected packages: tensornets\r\n",
"Successfully installed tensornets-0.4.0\r\n"
]
}
],
"source": [
"!pip install tensornets"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"_uuid": "8da53e7b53ca3298c1355eb2a1ffae83fd3554b9"
},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import cv2\n",
"import matplotlib.pyplot as plt\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"_uuid": "44572da60d945ff1c459a03bf2a3931e9180e889"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(1000, 224, 224, 3)\n",
"(1000, 2)\n"
]
}
],
"source": [
"data=[]\n",
"label=[]\n",
"images_cat_class=500\n",
"images_dog_class=500\n",
"count_cat=0\n",
"count_dog=0\n",
"for file in os.listdir(\"../input/train/train\"):\n",
" if count_cat<images_cat_class or count_dog<images_dog_class:\n",
" image=cv2.imread(os.path.join(\"../input/train/train\",file))\n",
" image=cv2.resize(image,(224,224))\n",
" if file.startswith(\"cat\") and count_cat<images_cat_class:\n",
" label.append([1,0])\n",
" data.append(image)\n",
" count_cat+=1\n",
" elif file.startswith(\"dog\") and count_dog<images_dog_class:\n",
" label.append([0,1])\n",
" data.append(image)\n",
" count_dog+=1\n",
" else:\n",
" break\n",
"data=np.array(data)\n",
"label=np.array(label)\n",
"print(data.shape)\n",
"print(label.shape)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"_uuid": "c8e738409c149bcd4bf9aa4c85caf8a9818c3e67"
},
"outputs": [],
"source": [
"import tensornets as nets"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"_uuid": "15f80b4ec8f89f1e953cd631eca55fcfeb6cb55f"
},
"outputs": [],
"source": [
"inputs = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])\n",
"outputs = tf.placeholder(tf.float32, shape=[None, 2])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"_uuid": "4468dc6bb004015cd3b4f3bf333c09a4821c65e3"
},
"outputs": [],
"source": [
"logits = nets.VGG19(inputs, is_training=True, classes=2)\n",
"model = tf.identity(logits, name='logits')"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"_uuid": "30f1ab7dd0980f3f0acd708ba33a1fa3f7168b76"
},
"outputs": [],
"source": [
"loss = tf.losses.softmax_cross_entropy(outputs, logits)\n",
"train = tf.train.AdamOptimizer(learning_rate=1e-5).minimize(loss)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"_uuid": "2b042ce6b4f6dd5dbf20c2c6c8abdc64a71ae40e"
},
"outputs": [],
"source": [
"correct_pred = tf.equal(tf.argmax(model, 1), tf.argmax(outputs, 1))\n",
"accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"_uuid": "3238f47650e8968be806e1096b06f7f8b7c8408f"
},
"outputs": [],
"source": [
"epoch=10\n",
"batch_size=10"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"_uuid": "8b36998e73575d806f37ecb938eae3c9a39c9074",
"scrolled": false
},
"outputs": [],
"source": [
"# from tqdm import tqdm\n",
"# with tf.Session() as sess:\n",
"# sess.run(tf.global_variables_initializer())\n",
"# for iterate in range(epoch):\n",
"# batch_number_count=data.shape[0]//batch_size\n",
"# train_loss=0.0\n",
"# train_accuracy=0.0\n",
"# for batch in tqdm(range(batch_number_count)):\n",
"# images_train=data[(batch*batch_size):(batch*batch_size)+batch_size,:,:,:]\n",
"# label_train=label[(batch*batch_size):(batch*batch_size)+batch_size,:]\n",
"# print(\"image_shape\",images_train.shape)\n",
"# print(\"label_shape\",label_train.shape)\n",
"# print(label_train)\n",
"# _,train_loss,train_accuracy=sess.run([train,loss,accuracy],feed_dict={inputs:images_train,outputs:label_train})\n",
"# print(\"epoch\",iterate,\"loss\",train_loss,\"accuracy\",train_accuracy)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"_uuid": "554296186de52a22df8ca4d48751ab9132e00e7b"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:15<00:00, 8.36it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.41it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 0 loss 0.7009159255027771 accuracy 0.5080000058561563\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.38it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.45it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 1 loss 0.6956994760036469 accuracy 0.5030000057071448\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.33it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.43it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 2 loss 0.6944569319486618 accuracy 0.510000007674098\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.35it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.42it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 3 loss 0.6930805385112763 accuracy 0.5120000061392784\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.35it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.44it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 4 loss 0.6930338919162751 accuracy 0.5250000065565109\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.33it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.41it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 5 loss 0.6924789798259735 accuracy 0.5180000067502261\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.37it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.46it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 6 loss 0.6917124676704407 accuracy 0.5120000047981739\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.37it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.43it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 7 loss 0.6884200692176818 accuracy 0.5370000059902668\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.35it/s]\n",
" 1%| | 1/100 [00:00<00:11, 8.38it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 8 loss 0.683492180109024 accuracy 0.564000006839633\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|ββββββββββ| 100/100 [00:11<00:00, 8.36it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"epoch 9 loss 0.6684183555841446 accuracy 0.5970000067353248\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
}
],
"source": [
"from tqdm import tqdm\n",
"with tf.Session() as sess:\n",
" sess.run(tf.global_variables_initializer())\n",
" for iterate in range(epoch):\n",
" batch_count_total=data.shape[0]//batch_size\n",
" loss_list=[]\n",
" accuracy_list=[]\n",
" for batch in tqdm(range(batch_count_total)):\n",
" _,train_loss,train_accuracy=sess.run([train,loss,accuracy],feed_dict={inputs:data[(batch*batch_size):(batch*batch_size)+batch_size,:,:,:],outputs:label[(batch*batch_size):(batch*batch_size)+batch_size,:]})\n",
" loss_list.append(train_loss)\n",
" accuracy_list.append(train_accuracy)\n",
" print(\"epoch\",iterate,\"loss\",sum(loss_list)/batch_count_total,\"accuracy\",sum(accuracy_list)/batch_count_total)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"_uuid": "43f0ddca17eb2a36f11907460a00cd997f086caa"
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.6"
}
},
"nbformat": 4,
"nbformat_minor": 1
}
| 0011/402/11402238.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
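This second record builds a VGG19 graph with tensornets on TF 1.x placeholders but, as recorded, never runs the library's pretrained-weight assignment (tensornets exposes a `pretrained()` op on the model for that), so the network trains from random initialization on only 1,000 images; the near-chance accuracies in the log are consistent with that. The sketch below is a hedged alternative, not the notebook's method: transfer learning with an ImageNet-pretrained VGG19 from `tf.keras.applications`, assuming the `data` and `label` arrays built in the loading cell (1,000 BGR images from `cv2`, one-hot `[cat, dog]` labels) are available.

```python
# Alternative sketch (tf.keras transfer learning), not the recorded tensornets/TF1 code.
import tensorflow as tf

def build_transfer_model():
    # Frozen ImageNet backbone plus a small trainable classification head.
    base = tf.keras.applications.VGG19(weights='imagenet', include_top=False,
                                       input_shape=(224, 224, 3), pooling='avg')
    base.trainable = False
    model = tf.keras.Sequential([
        base,
        tf.keras.layers.Dense(256, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(2, activation='softmax'),  # [cat, dog]
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
                  loss='categorical_crossentropy',  # labels are one-hot here
                  metrics=['accuracy'])
    return model

# Usage sketch with the notebook's (hypothetically in-memory) arrays:
# x = data[..., ::-1].astype('float32')                # cv2 loads BGR; VGG19 preprocessing expects RGB
# x = tf.keras.applications.vgg19.preprocess_input(x)  # mean subtraction / channel ordering
# model = build_transfer_model()
# model.fit(x, label, batch_size=10, epochs=10, validation_split=0.1)
```

Freezing the backbone is what makes 1,000 images workable; training all of VGG19 from scratch, as the recorded run effectively does, would need far more data to move past chance.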
"{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"_uuid\": \"a4e95c(...TRUNCATED) | 0011/402/11402367.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/402/11402545.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/403/11403386.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/403/11403462.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/403/11403638.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/403/11403971.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"_uuid\": \"51f618(...TRUNCATED) | 0011/404/11404787.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |
"{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": 1,\n \"metadata\": {\(...TRUNCATED) | 0011/405/11405087.ipynb | s3://data-agents/kaggle-outputs/sharded/010_00011.jsonl.gz |