From d37a7fbdd551440937c4846ef2f44f715f046ff9 Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Thu, 12 Jun 2025 19:16:18 +0000
Subject: [PATCH 1/3] update.

---
 beginner_source/basics/autogradqs_tutorial.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 8eff127ddee..e702b29810f 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -116,7 +116,7 @@
 
 with torch.no_grad():
     z = torch.matmul(x, w)+b
-print(z.requires_grad)
+prin(z.requires_grad)
 
 
 ######################################################################

From ad5eb25b139fc3bfd9a01a020b8d220093b13f3d Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Fri, 13 Jun 2025 09:38:48 +0000
Subject: [PATCH 2/3] update the autograd tutorial.

---
 beginner_source/basics/autogradqs_tutorial.py | 38 ++++++++++++++++++-
 1 file changed, 36 insertions(+), 2 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index e702b29810f..418de49cc6f 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -116,7 +116,7 @@
 
 with torch.no_grad():
     z = torch.matmul(x, w)+b
-prin(z.requires_grad)
+print(z.requires_grad)
 
 
 ######################################################################
@@ -133,7 +133,8 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 #   not track gradients would be more efficient.
-
+# For additional reference, you can view the autograd mechanics 
+# documentation:https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation
 
 ######################################################################
 
@@ -160,6 +161,39 @@
 # - accumulates them in the respective tensor’s ``.grad`` attribute
 # - using the chain rule, propagates all the way to the leaf tensors.
 #
+# We can also visualize the computational graph by the following 2 methods:
+#
+# 1. TORCH_LOGS="+autograd"
+# By setting the TORCH_LOGS="+autograd" environment variable, we can enable runtime autograd logs for debugging.
+#
+# We can perform the logging in the following manner:
+# TORCH_LOGS="+autograd" python test.py
+#
+# 2. Torchviz
+# Torchviz is a package to render the computational graph visually.
+# 
+# We can generate an image for the computational graph in the example given below:
+#
+# import torch
+# from torch import nn
+# from torchviz import make_dot
+#
+# model = nn.Sequential(
+#     nn.Linear(8, 16),
+#     nn.ReLU(),
+#     nn.Linear(16, 1)
+# )
+#
+# x = torch.randn(1, 8, requires_grad=True)
+# y = model(x).mean()
+#
+# log the internal operations using torchviz
+# import os
+# os.environ['TORCH_LOGS'] = "+autograd"
+#
+# dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
+# dot.render('simple_graph', format='png')
+#
 # .. note::
 #   **DAGs are dynamic in PyTorch**
 #   An important thing to note is that the graph is recreated from scratch; after each

From 1532c0de875b79f541371438d8d1d08be18f87de Mon Sep 17 00:00:00 2001
From: Parag Ekbote
Date: Fri, 13 Jun 2025 13:40:05 +0000
Subject: [PATCH 3/3] update the tutorial.
---
 beginner_source/basics/autogradqs_tutorial.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/beginner_source/basics/autogradqs_tutorial.py b/beginner_source/basics/autogradqs_tutorial.py
index 418de49cc6f..671ed67c817 100644
--- a/beginner_source/basics/autogradqs_tutorial.py
+++ b/beginner_source/basics/autogradqs_tutorial.py
@@ -32,7 +32,7 @@
 y = torch.zeros(3)  # expected output
 w = torch.randn(5, 3, requires_grad=True)
 b = torch.randn(3, requires_grad=True)
-z = torch.matmul(x, w)+b
+z = torch.matmul(x, w) + b
 loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
 
 
@@ -133,7 +133,7 @@
 # - To mark some parameters in your neural network as **frozen parameters**.
 # - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
 #   not track gradients would be more efficient.
-# For additional reference, you can view the autograd mechanics 
+# For additional reference, you can view the autograd mechanics
 # documentation:https://docs.pytorch.org/docs/stable/notes/autograd.html#locally-disabling-gradient-computation
 
 ######################################################################
@@ -171,7 +171,7 @@
 #
 # 2. Torchviz
 # Torchviz is a package to render the computational graph visually.
-# 
+#
 # We can generate an image for the computational graph in the example given below:
 #
 # import torch
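
For reference, the torchviz example added in PATCH 2/3 as commented-out
tutorial lines can also be run as a standalone script. Below is a minimal
sketch, assuming the third-party torchviz package and a system Graphviz
install are available; the model shape and the 'simple_graph' output name
come from the patch itself.

import torch
from torch import nn
from torchviz import make_dot  # third-party package: pip install torchviz

# The small model from the patch's example.
model = nn.Sequential(
    nn.Linear(8, 16),
    nn.ReLU(),
    nn.Linear(16, 1),
)

x = torch.randn(1, 8, requires_grad=True)
y = model(x).mean()

# Render the autograd graph that produced `y`; show_attrs and show_saved
# additionally display node attributes and tensors saved for backward.
dot = make_dot(y, params=dict(model.named_parameters()), show_attrs=True, show_saved=True)
dot.render('simple_graph', format='png')  # writes simple_graph.png

To capture autograd debug logs as well, the patch's first method applies:
set the variable when launching the process, e.g.
TORCH_LOGS="+autograd" python script.py. Assigning os.environ['TORCH_LOGS']
after `import torch`, as the commented example does, generally runs too late
for PyTorch's logging setup to pick it up.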