Commit 6ba0e28
Orif Milod committed Mar 14, 2024
1 parent f32863e
Showing 2 changed files with 240 additions and 0 deletions.
@@ -6,3 +6,4 @@ output*
 env/
 build/
 .eggs/
+**/data/**
@@ -0,0 +1,239 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torchvision\n",
    "from torchvision.transforms import v2\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/omilod/Desktop/projects/GigaTorch/env/lib/python3.11/site-packages/torchvision/transforms/v2/_deprecated.py:41: UserWarning: The transform `ToTensor()` is deprecated and will be removed in a future release. Instead, please use `v2.Compose([v2.ToImage(), v2.ToDtype(torch.float32, scale=True)])`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
"source": [ | ||
"# Device configuration\n", | ||
"device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", | ||
"\n", | ||
"num_epochs = 10\n", | ||
"batch_size = 4\n", | ||
"learning_rate = 0.001\n", | ||
"\n", | ||
"transform = v2.Compose(\n", | ||
" [v2.ToTensor(),\n", | ||
" v2.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", | ||
"\n", | ||
"train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n", | ||
"test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n", | ||
"\n", | ||
"tarin_loder = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n", | ||
"test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n" | ||
] | ||
}, | ||
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
"Epoche [1/10], Step [2000/12500], Loss: 2.2467\n", | ||
"Epoche [1/10], Step [4000/12500], Loss: 2.2539\n", | ||
"Epoche [1/10], Step [6000/12500], Loss: 2.2477\n", | ||
"Epoche [1/10], Step [8000/12500], Loss: 2.4960\n", | ||
"Epoche [1/10], Step [10000/12500], Loss: 2.4319\n", | ||
"Epoche [1/10], Step [12000/12500], Loss: 2.3993\n", | ||
"Epoche [2/10], Step [2000/12500], Loss: 1.7446\n", | ||
"Epoche [2/10], Step [4000/12500], Loss: 1.7958\n", | ||
"Epoche [2/10], Step [6000/12500], Loss: 1.9598\n", | ||
"Epoche [2/10], Step [8000/12500], Loss: 2.0397\n", | ||
"Epoche [2/10], Step [10000/12500], Loss: 1.8597\n", | ||
"Epoche [2/10], Step [12000/12500], Loss: 1.1347\n", | ||
"Epoche [3/10], Step [2000/12500], Loss: 1.2750\n", | ||
"Epoche [3/10], Step [4000/12500], Loss: 1.4700\n", | ||
"Epoche [3/10], Step [6000/12500], Loss: 0.8299\n", | ||
"Epoche [3/10], Step [8000/12500], Loss: 2.4020\n", | ||
"Epoche [3/10], Step [10000/12500], Loss: 0.8445\n", | ||
"Epoche [3/10], Step [12000/12500], Loss: 1.2414\n", | ||
"Epoche [4/10], Step [2000/12500], Loss: 1.1043\n", | ||
"Epoche [4/10], Step [4000/12500], Loss: 1.6044\n", | ||
"Epoche [4/10], Step [6000/12500], Loss: 1.8810\n", | ||
"Epoche [4/10], Step [8000/12500], Loss: 1.4032\n", | ||
"Epoche [4/10], Step [10000/12500], Loss: 1.9440\n", | ||
"Epoche [4/10], Step [12000/12500], Loss: 1.4680\n", | ||
"Epoche [5/10], Step [2000/12500], Loss: 0.9280\n", | ||
"Epoche [5/10], Step [4000/12500], Loss: 1.0829\n", | ||
"Epoche [5/10], Step [6000/12500], Loss: 0.8902\n", | ||
"Epoche [5/10], Step [8000/12500], Loss: 1.4200\n", | ||
"Epoche [5/10], Step [10000/12500], Loss: 2.7336\n", | ||
"Epoche [5/10], Step [12000/12500], Loss: 1.7483\n", | ||
"Epoche [6/10], Step [2000/12500], Loss: 1.4049\n", | ||
"Epoche [6/10], Step [4000/12500], Loss: 0.8819\n", | ||
"Epoche [6/10], Step [6000/12500], Loss: 2.0509\n", | ||
"Epoche [6/10], Step [8000/12500], Loss: 1.5775\n", | ||
"Epoche [6/10], Step [10000/12500], Loss: 1.4660\n", | ||
"Epoche [6/10], Step [12000/12500], Loss: 1.2865\n", | ||
"Epoche [7/10], Step [2000/12500], Loss: 1.3137\n", | ||
"Epoche [7/10], Step [4000/12500], Loss: 1.3415\n", | ||
"Epoche [7/10], Step [6000/12500], Loss: 0.9533\n", | ||
"Epoche [7/10], Step [8000/12500], Loss: 1.1399\n", | ||
"Epoche [7/10], Step [10000/12500], Loss: 1.3637\n", | ||
"Epoche [7/10], Step [12000/12500], Loss: 0.6954\n", | ||
"Epoche [8/10], Step [2000/12500], Loss: 0.9204\n", | ||
"Epoche [8/10], Step [4000/12500], Loss: 1.1020\n", | ||
"Epoche [8/10], Step [6000/12500], Loss: 1.0856\n", | ||
"Epoche [8/10], Step [8000/12500], Loss: 1.2919\n", | ||
"Epoche [8/10], Step [10000/12500], Loss: 0.5048\n", | ||
"Epoche [8/10], Step [12000/12500], Loss: 1.6188\n", | ||
"Epoche [9/10], Step [2000/12500], Loss: 0.7271\n", | ||
"Epoche [9/10], Step [4000/12500], Loss: 0.8624\n", | ||
"Epoche [9/10], Step [6000/12500], Loss: 1.6987\n", | ||
"Epoche [9/10], Step [8000/12500], Loss: 0.8891\n", | ||
"Epoche [9/10], Step [10000/12500], Loss: 1.0421\n", | ||
"Epoche [9/10], Step [12000/12500], Loss: 0.9009\n", | ||
"Epoche [10/10], Step [2000/12500], Loss: 1.8559\n", | ||
"Epoche [10/10], Step [4000/12500], Loss: 1.9273\n", | ||
"Epoche [10/10], Step [6000/12500], Loss: 1.1227\n", | ||
"Epoche [10/10], Step [8000/12500], Loss: 1.3718\n", | ||
"Epoche [10/10], Step [10000/12500], Loss: 1.6195\n", | ||
"Epoche [10/10], Step [12000/12500], Loss: 0.9517\n", | ||
"Finished training\n", | ||
"Accuracy of the network: 58.32 %\n", | ||
"Accuracy: 73.4 %\n", | ||
"Accuracy: 60.8 %\n", | ||
"Accuracy: 39.0 %\n", | ||
"Accuracy: 38.7 %\n", | ||
"Accuracy: 50.4 %\n", | ||
"Accuracy: 38.7 %\n", | ||
"Accuracy: 73.6 %\n", | ||
"Accuracy: 66.8 %\n", | ||
"Accuracy: 75.0 %\n", | ||
"Accuracy: 66.8 %\n" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"class ConvNet(nn.Module):\n", | ||
" def __init__(self) -> None:\n", | ||
" super().__init__()\n", | ||
" self.conv1 = nn.Conv2d(3, 6, 5)\n", | ||
" self.pool = nn.MaxPool2d(2, 2)\n", | ||
" self.conv2 = nn.Conv2d(6, 16, 5)\n", | ||
" self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", | ||
" self.fc2 = nn.Linear(120, 84)\n", | ||
" self.fc3 = nn.Linear(84, 10)\n", | ||
"\n", | ||
" def forward(self, x):\n", | ||
" # -> n, 3, 32, 32\n", | ||
" x = self.pool(F.relu(self.conv1(x))) # -> n, 6, 14, 14\n", | ||
" x = self.pool(F.relu(self.conv2(x))) # -> n, 16, 5, 5\n", | ||
" x = x.view(-1, 16 * 5 * 5) # -> n, 400\n", | ||
" x = F.relu(self.fc1(x)) # -> n, 120\n", | ||
" x = F.relu(self.fc2(x)) # -> n, 84\n", | ||
" x = self.fc3(x) # -> n, 10\n", | ||
" return x\n", | ||
"\n", | ||
"model = ConvNet()\n", | ||
"model.to(device)\n", | ||
"critertion = nn.CrossEntropyLoss()\n", | ||
"optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\n", | ||
"\n", | ||
"n_total_steps = len(tarin_loder)\n", | ||
"for epoch in range(num_epochs):\n", | ||
" for i, (images, labels) in enumerate(tarin_loder):\n", | ||
" images = images.to(device)\n", | ||
" labels = labels.to(device)\n", | ||
"\n", | ||
" # Forward pass\n", | ||
" outputs = model(images)\n", | ||
" loss = critertion(outputs, labels)\n", | ||
"\n", | ||
" # Backward and optimize\n", | ||
" optimizer.zero_grad()\n", | ||
" loss.backward()\n", | ||
" optimizer.step()\n", | ||
"\n", | ||
" if (i+1) % 2000 == 0:\n", | ||
" print(f'Epoche [{epoch+1}/{num_epochs}], Step [{i+1}/{n_total_steps}], Loss: {loss.item():.4f}')\n", | ||
"print(\"Finished training\")\n", | ||
"\n", | ||
"\n", | ||
"with torch.no_grad():\n", | ||
" n_correct = 0\n", | ||
" n_samples = 0\n", | ||
" n_class_correct = [0 for i in range(10)]\n", | ||
" n_class_samples = [0 for i in range(10)]\n", | ||
" for images, labels in test_loader:\n", | ||
" images = images.to(device)\n", | ||
" labels = labels.to(device)\n", | ||
" outputs = model(images)\n", | ||
" # max returns (value ,index)\n", | ||
" _, predicted = torch.max(outputs, 1)\n", | ||
" n_samples += labels.size(0)\n", | ||
" n_correct += (predicted == labels).sum().item()\n", | ||
" \n", | ||
" for i in range(batch_size):\n", | ||
" label = labels[i]\n", | ||
" pred = predicted[i]\n", | ||
" if (label == pred):\n", | ||
" n_class_correct[label] += 1\n", | ||
" n_class_samples[label] += 1\n", | ||
"\n", | ||
" acc = 100.0 * n_correct / n_samples\n", | ||
" print(f'Accuracy of the network: {acc} %')\n", | ||
"\n", | ||
" for i in range(10):\n", | ||
" acc = 100.0 * n_class_correct[i] / n_class_samples[i]\n", | ||
" print(f'Accuracy: {acc} %')\n" | ||
] | ||
} | ||
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
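A side note on the UserWarning captured in the second cell's output: the warning text itself names the replacement for the deprecated v2.ToTensor(). A minimal sketch of the non-deprecated pipeline, with the notebook's Normalize step carried over (illustrative only, not part of this commit):

import torch
from torchvision.transforms import v2

# Equivalent of the deprecated v2.ToTensor(), per the UserWarning above
transform = v2.Compose([
    v2.ToImage(),                            # PIL image / ndarray -> image tensor
    v2.ToDtype(torch.float32, scale=True),   # float32, scaled to [0, 1]
    v2.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # shift each channel to roughly [-1, 1]
])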
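Likewise, the 16 * 5 * 5 input width of fc1 follows from the shape annotations in forward(): each 5x5 convolution without padding trims 4 from each spatial side, and each 2x2 max pool halves it (32 -> 28 -> 14 -> 10 -> 5). A quick, hypothetical sanity check of that arithmetic:

import torch
import torch.nn as nn

x = torch.zeros(1, 3, 32, 32)                    # one CIFAR-10-sized input
x = nn.MaxPool2d(2, 2)(nn.Conv2d(3, 6, 5)(x))    # 32 -> 28 (conv) -> 14 (pool)
x = nn.MaxPool2d(2, 2)(nn.Conv2d(6, 16, 5)(x))   # 14 -> 10 (conv) -> 5 (pool)
print(x.shape)                                   # torch.Size([1, 16, 5, 5]): 400 features when flattened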