"""
Tensors
========

Tensors are a specialized data structure, very similar to arrays
and matrices. In PyTorch, we use tensors to encode the inputs and
outputs of a model, as well as the model’s parameters.

Tensors are similar to NumPy’s ndarrays, except that tensors can run on
GPUs or other specialized hardware to accelerate computing. If you’re
familiar with ndarrays, you’ll be right at home with the Tensor API.
If not, follow along in this quick API walkthrough.

"""

import torch
import numpy as np

######################################################################
# Tensor Initialization
# ~~~~~~~~~~~~~~~~~~~~~
#
# Tensors can be initialized in various ways. Take a look at the following examples:
#
# **Directly from data**
#
# Tensors can be created directly from data. The data type is automatically inferred.

data = [[1, 2], [3, 4]]
x_data = torch.tensor(data)

######################################################################
# **From a NumPy array**
#
# Tensors can be created from NumPy arrays (and vice versa - see :ref:`bridge-to-np-label`).

np_array = np.array(data)
x_np = torch.from_numpy(np_array)

###############################################################
# **From another tensor:**
#
# The new tensor retains the properties (shape, datatype) of the argument tensor, unless explicitly overridden.

x_ones = torch.ones_like(x_data) # retains the properties of x_data
print(f"Ones Tensor: \n {x_ones} \n")

x_rand = torch.rand_like(x_data, dtype=torch.float) # overrides the datatype of x_data
print(f"Random Tensor: \n {x_rand} \n")

######################################################################
# **With random or constant values:**
#
# ``shape`` is a tuple of tensor dimensions. In the functions below, it determines the dimensionality of the output tensor.

shape = (2, 3,)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)

print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor}")

######################################################################
# --------------
#

######################################################################
# Tensor Attributes
# ~~~~~~~~~~~~~~~~~
#
# Tensor attributes describe a tensor's shape, datatype, and the device on which it is stored.

tensor = torch.rand(3, 4)

print(f"Shape of tensor: {tensor.shape}")
print(f"Datatype of tensor: {tensor.dtype}")
print(f"Device tensor is stored on: {tensor.device}")

######################################################################
# --------------
#

######################################################################
# Tensor Operations
# ~~~~~~~~~~~~~~~~~
#
# Over 100 tensor operations, including transposing, indexing, slicing,
# mathematical operations, linear algebra, random sampling, and more are
# comprehensively described
# `here <https://pytorch.org/docs/stable/torch.html>`__.
#
# Each of them can be run on the GPU (at typically higher speeds than on a
# CPU). If you’re using Colab, allocate a GPU by going to Edit > Notebook
# Settings.
#

# We move our tensor to the GPU if available
if torch.cuda.is_available():
    tensor = tensor.to('cuda')
    print(f"Device tensor is stored on: {tensor.device}")

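######################################################################
# A device-agnostic variant (our addition, not part of the original
# tutorial): pick the device string once and reuse it, so the same code
# runs with or without a GPU.

device = 'cuda' if torch.cuda.is_available() else 'cpu'
tensor = tensor.to(device)
print(f"Device tensor is stored on: {tensor.device}")
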
######################################################################
# Try out some of the operations from the list.
# If you're familiar with the NumPy API, you'll find the Tensor API a breeze to use.
#

###############################################################
# **Standard numpy-like indexing and slicing:**

tensor = torch.ones(4, 4)
tensor[:,1] = 0
print(tensor)

######################################################################
# **Joining tensors** You can use ``torch.cat`` to concatenate a sequence of tensors along a given dimension.
# See also `torch.stack <https://pytorch.org/docs/stable/generated/torch.stack.html>`__,
# another tensor joining op that is subtly different from ``torch.cat``
# (see the short comparison below).
t1 = torch.cat([tensor, tensor, tensor], dim=1)
print(t1)

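######################################################################
# A quick sketch of that difference (our addition, not from the original
# tutorial): ``torch.cat`` joins along an *existing* dimension, while
# ``torch.stack`` inserts a *new* one.

a = torch.ones(2, 3)
print(torch.cat([a, a], dim=0).shape)    # torch.Size([4, 3]) - existing dim grows
print(torch.stack([a, a], dim=0).shape)  # torch.Size([2, 2, 3]) - new dim inserted
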
######################################################################
# **Multiplying tensors**

# This computes the element-wise product
print(f"tensor.mul(tensor) \n {tensor.mul(tensor)} \n")
# Alternative syntax:
print(f"tensor * tensor \n {tensor * tensor}")

######################################################################
#
# This computes the matrix multiplication between two tensors
print(f"tensor.matmul(tensor.T) \n {tensor.matmul(tensor.T)} \n")
# Alternative syntax:
print(f"tensor @ tensor.T \n {tensor @ tensor.T}")

######################################################################
# **In-place operations**
# Operations that have a ``_`` suffix are in-place. For example, ``x.copy_(y)`` and ``x.t_()`` will change ``x``.

print(tensor, "\n")
tensor.add_(5)
print(tensor)

######################################################################
# .. note::
#      In-place operations save some memory, but can be problematic when computing derivatives because of an immediate loss
#      of history. Hence, their use is discouraged.

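######################################################################
# A minimal sketch of why (our addition; assumes current autograd
# behavior): ``exp`` saves its output for the backward pass, so mutating
# that output in-place invalidates the saved history.

x = torch.ones(3, requires_grad=True)
y = x.exp()   # backward of exp needs y's original value
y.add_(1)     # in-place edit overwrites the saved value
try:
    y.sum().backward()
except RuntimeError as e:
    print(f"RuntimeError: {e}")  # a variable needed for gradient computation was modified
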
######################################################################
# --------------
#

######################################################################
# .. _bridge-to-np-label:
#
# Bridge with NumPy
# ~~~~~~~~~~~~~~~~~
# Tensors on the CPU and NumPy arrays can share their underlying memory
# locations, and changing one will change the other.

######################################################################
# Tensor to NumPy array
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
t = torch.ones(5)
print(f"t: {t}")
n = t.numpy()
print(f"n: {n}")

######################################################################
# A change in the tensor reflects in the NumPy array.

t.add_(1)
print(f"t: {t}")
print(f"n: {n}")

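######################################################################
# One way to confirm the sharing (our addition, using ``Tensor.data_ptr``
# and NumPy's ``ctypes.data``, neither of which appears in the original):
# both report the same buffer address.

print(t.data_ptr() == n.ctypes.data)  # True: t and n view the same memory
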
######################################################################
# NumPy array to Tensor
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
n = np.ones(5)
t = torch.from_numpy(n)

######################################################################
# Changes in the NumPy array reflect in the tensor.
np.add(n, 1, out=n)
print(f"t: {t}")
print(f"n: {n}")