[4/N] Refine beginner tutorial by accelerator api
guangyey committed Dec 20, 2024
1 parent 540bd0c commit b7d5e44
Showing 2 changed files with 9 additions and 9 deletions.
12 changes: 4 additions & 8 deletions beginner_source/fgsm_tutorial.py
@@ -125,14 +125,9 @@
# `pytorch/examples/mnist <https://github.com/pytorch/examples/tree/master/mnist>`__.
# For simplicity, download the pretrained model `here <https://drive.google.com/file/d/1HJV2nUHJqclXQ8flKvcWmjZ-OU5DGatl/view?usp=drive_link>`__.
#
-# - ``use_cuda`` - boolean flag to use CUDA if desired and available.
-#   Note, a GPU with CUDA is not critical for this tutorial as a CPU will
-#   not take much time.
-#

epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "data/lenet_mnist_model.pth"
-use_cuda=True
# Set random seed for reproducibility
torch.manual_seed(42)

@@ -184,9 +179,10 @@ def forward(self, x):
])),
batch_size=1, shuffle=True)

-# Define what device we are using
-print("CUDA Available: ",torch.cuda.is_available())
-device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
+# We want to be able to train our model on an `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
+print(f"Using {device} device")

# Initialize the network
model = Net().to(device)
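For reference, a minimal standalone sketch of the device-selection pattern this hunk adopts (assuming PyTorch 2.6+, where the torch.accelerator namespace is available); the sample tensor below is illustrative and not part of the tutorial:

import torch

# Pick the current accelerator (CUDA, MPS, MTIA, XPU, ...) if one is present,
# otherwise fall back to the CPU.
device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
print(f"Using {device} device")

# Any tensor or module can then be created on, or moved to, that device.
x = torch.randn(1, 1, 28, 28, device=device)  # illustrative MNIST-shaped input
print(x.device)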
6 changes: 5 additions & 1 deletion beginner_source/transfer_learning_tutorial.py
@@ -98,7 +98,11 @@
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes

-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+# We want to be able to train our model on an `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
+# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.
+
+device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
+print(f"Using {device} device")

######################################################################
# Visualize a few images
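The transfer learning tutorial consumes the same device string later in its training loop; a hedged sketch of that pattern (the model, batch, and optimizer below are placeholders, not this commit's code):

import torch
import torch.nn as nn

device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"

model = nn.Linear(10, 2).to(device)      # placeholder model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Each batch must live on the same device as the model before the forward pass.
inputs = torch.randn(4, 10).to(device)   # placeholder batch
labels = torch.randint(0, 2, (4,)).to(device)

optimizer.zero_grad()
loss = criterion(model(inputs), labels)
loss.backward()
optimizer.step()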
