GitHub Repository: pytorch/tutorials
Path: blob/main/beginner_source/examples_autograd/polynomial_autograd.py
r"""
PyTorch: Tensors and autograd
-------------------------------

A third order polynomial, trained to predict :math:`y=\exp(x)` from :math:`-1`
to :math:`1` by minimizing squared Euclidean distance.

This implementation computes the forward pass using operations on PyTorch
Tensors, and uses PyTorch autograd to compute gradients.

A PyTorch Tensor represents a node in a computational graph. If ``x`` is a
Tensor that has ``x.requires_grad=True``, then ``x.grad`` is another Tensor
holding the gradient of some scalar value with respect to ``x``.
"""
import torch
import math
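# A minimal illustration of the statement in the docstring above: after calling
# ``backward()`` on a scalar built from ``_x``, the Tensor ``_x.grad`` holds the
# derivative of that scalar with respect to ``_x``. The names ``_x`` and ``_z``
# are used only for this illustration.
_x = torch.tensor(2.0, requires_grad=True)
_z = _x ** 3      # scalar value computed from _x
_z.backward()     # populates _x.grad with d(_z)/d(_x) = 3 * _x**2
assert _x.grad.item() == 12.0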

# We want to be able to train our model on an `accelerator <https://pytorch.org/docs/stable/torch.html#accelerators>`__
# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.

dtype = torch.float
device = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else "cpu"
print(f"Using {device} device")
torch.set_default_device(device)

# Create Tensors to hold input and outputs.
# By default, requires_grad=False, which indicates that we do not need to
# compute gradients with respect to these Tensors during the backward pass.
x = torch.linspace(-1, 1, 2000, dtype=dtype)
y = torch.exp(x)  # A Taylor expansion would be 1 + x + (1/2) x**2 + (1/3!) x**3 + ...

# Create random Tensors for weights. For a third order polynomial, we need
# 4 weights: y = a + b x + c x^2 + d x^3
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
a = torch.randn((), dtype=dtype, requires_grad=True)
b = torch.randn((), dtype=dtype, requires_grad=True)
c = torch.randn((), dtype=dtype, requires_grad=True)
d = torch.randn((), dtype=dtype, requires_grad=True)

initial_loss = 1.
learning_rate = 1e-5
for t in range(5000):
    # Forward pass: compute predicted y using operations on Tensors.
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Compute and print loss using operations on Tensors.
    # Now loss is a zero-dimensional (scalar) Tensor;
    # loss.item() gets the Python number held in the loss.
    loss = (y_pred - y).pow(2).sum()

    # Calculate the initial loss, so we can report loss relative to it.
    if t == 0:
        initial_loss = loss.item()

    if t % 100 == 99:
        print(f'Iteration t = {t:4d} loss(t)/loss(0) = {round(loss.item()/initial_loss, 6):10.6f} a = {a.item():10.6f} b = {b.item():10.6f} c = {c.item():10.6f} d = {d.item():10.6f}')

    # Use autograd to compute the backward pass. This call will compute the
    # gradient of loss with respect to all Tensors with requires_grad=True.
    # After this call a.grad, b.grad, c.grad and d.grad will be Tensors holding
    # the gradient of the loss with respect to a, b, c, d respectively.
    loss.backward()

    # Manually update weights using gradient descent. Wrap in torch.no_grad()
    # because weights have requires_grad=True, but we don't need to track this
    # in autograd.
    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad

        # Manually zero the gradients after updating weights.
        a.grad = None
        b.grad = None
        c.grad = None
        d.grad = None

print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
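# For reference, the degree-3 Taylor coefficients of exp(x) noted above are
# 1, 1, 1/2! and 1/3!. The learned coefficients need not match them exactly:
# gradient descent here minimizes squared error over [-1, 1], which is not the
# same thing as truncating the Taylor series at x = 0.
print(f'Taylor:  y = 1.0 + 1.0 x + {1 / math.factorial(2)} x^2 + {1 / math.factorial(3)} x^3')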