Commit

Add tensor.

Mansterteddy committed Nov 6, 2020
1 parent cc7281f commit ee9cb7d
Showing 7 changed files with 139 additions and 1 deletion.
13 changes: 13 additions & 0 deletions pytorch/Basic/Autograd/autograd.py
@@ -0,0 +1,13 @@
import torch
from matplotlib import pyplot as plt

# 100 equally spaced points from 0 to 5; track gradients through x.
x = torch.linspace(0, 5, 100, requires_grad=True)

y = (x**2).cos()

# Differentiate y.sum() with respect to x; grad returns a 1-tuple of gradients.
dydx = torch.autograd.grad(y.sum(), [x])[0]

# Detach before plotting, since x and y still carry autograd history.
plt.plot(x.detach(), y.detach(), label='y')
plt.plot(x.detach(), dydx, label='dy/dx')
plt.legend()
plt.show()
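An equivalent way to get the same gradient, sketched with backward() instead of torch.autograd.grad (the allclose check against the hand-derived chain rule is my addition, not part of the commit):

import torch

x = torch.linspace(0, 5, 100, requires_grad=True)
y = (x**2).cos()
y.sum().backward()  # populates x.grad rather than returning the gradient
# Chain rule: d/dx cos(x**2) = -sin(x**2) * 2x
print(torch.allclose(x.grad, -(x**2).sin() * 2 * x))  # True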
1 change: 0 additions & 1 deletion pytorch/Basic/Tensor/README.md
@@ -1,4 +1,3 @@
There are two things that PyTorch tensors have that NumPy arrays lack:
1. PyTorch tensors can live on either the GPU or the CPU (NumPy is CPU-only);
2. PyTorch can automatically track tensor computations to enable automatic differentiation.
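A minimal sketch of both features (my illustration, not part of the commit; the first half assumes a CUDA device is available):

import torch

# Feature 1: the same tensor can live on CPU or GPU.
t = torch.ones(3)
if torch.cuda.is_available():
    t = t.to('cuda')  # same values, now resident on the GPU

# Feature 2: autograd tracks computations for automatic differentiation.
w = torch.tensor(2.0, requires_grad=True)
loss = 3 * w
loss.backward()
print(w.grad)  # tensor(3.)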

4 changes: 4 additions & 0 deletions pytorch/Basic/Tensor/broadcast.py
@@ -0,0 +1,4 @@
import torch

# Broadcasting: the scalar 1 is stretched to match a's shape before the add.
a = torch.tensor([1, 2, 3], dtype=torch.float)
print(a + 1)  # tensor([2., 3., 4.])
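Broadcasting also works between tensors of different shapes; a minimal sketch (these shapes are illustrative, not from the original file):

import torch

a = torch.tensor([1., 2., 3.])    # shape (3,)
b = torch.tensor([[10.], [20.]])  # shape (2, 1)
# Trailing dimensions are aligned and size-1 dimensions are stretched,
# so (2, 1) + (3,) broadcasts to shape (2, 3).
print(b + a)
# tensor([[11., 12., 13.],
#         [21., 22., 23.]])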
43 changes: 43 additions & 0 deletions pytorch/Basic/Tensor/device.py
@@ -0,0 +1,43 @@
import torch, time
from matplotlib import pyplot as plt

# Here is a demonstration of moving data between GPU and CPU.
# We multiply a batch of vectors through a big linear operation 10 times.
r = torch.randn(1024, 1024, dtype=torch.float)
x = torch.randn(32768, 1024, dtype=r.dtype)
iterations = 10

def time_iterated_mm(x, matrix):
    start = time.time()
    result = 0
    for i in range(iterations):
        # Move x to wherever the matrix lives before each multiply.
        result += torch.mm(matrix, x.to(matrix.device).t())
    if torch.cuda.is_available():
        torch.cuda.synchronize()  # wait for queued GPU work before stopping the clock
    elapsed = time.time() - start
    return elapsed, result.cpu()

cpu_time, cpu_result = time_iterated_mm(x.cpu(), r.cpu())
print(f'time using the CPU alone: {cpu_time:.3g} seconds')

mixed_time, mixed_result = time_iterated_mm(x.cpu(), r.cuda())
print(f'time using GPU, moving data from CPU: {mixed_time:.3g} seconds')

pinned_time, pinned_result = time_iterated_mm(x.cpu().pin_memory(), r.cuda())
print(f'time using GPU on pinned CPU memory: {pinned_time:.3g} seconds')

gpu_time, gpu_result = time_iterated_mm(x.cuda(), r.cuda())
print(f'time using the GPU alone: {gpu_time:.3g} seconds')

plt.figure(figsize=(4, 2), dpi=150)
plt.ylabel('iterations per sec')
plt.bar(['cpu', 'mixed', 'pinned', 'gpu'],
        [iterations / cpu_time,
         iterations / mixed_time,
         iterations / pinned_time,
         iterations / gpu_time])
plt.show()

print(f'Your GPU is {cpu_time / gpu_time:.3g}x faster than the CPU alone,'
      f' but only {cpu_time / mixed_time:.3g}x if data is repeatedly copied from the CPU.')
print(f'When copying from pinned memory, the speedup is {cpu_time / pinned_time:.3g}x.')
print(f'Relative numerical difference between GPU and CPU results: '
      f'{(cpu_result - gpu_result).norm() / cpu_result.norm()}')
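A follow-up sketch (my addition, assuming a CUDA device): pinned memory also enables asynchronous host-to-device copies via non_blocking=True, which can overlap a transfer with already-queued GPU work.

import torch

if torch.cuda.is_available():
    src = torch.randn(32768, 1024).pin_memory()
    # non_blocking=True only helps when the source is in pinned memory;
    # the copy is queued asynchronously instead of blocking the host.
    dst = src.to('cuda', non_blocking=True)
    torch.cuda.synchronize()  # ensure the copy has finished before the host inspects dst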
38 changes: 38 additions & 0 deletions pytorch/Basic/Tensor/dim.py
@@ -0,0 +1,38 @@
import torch
from matplotlib import pyplot as plt

a = torch.randn(2, 5)
print(a)
# Indexing with None inserts a new leading dimension of size 1: shape (1, 2, 5).
a = a[None]
print(a)

# Make a 2x5 matrix of the absolute values of normally distributed randoms.
m = torch.randn(2, 5).abs()
print(f'm is {m}, and m[1,2] is {m[1,2]}\n')
print(f'column zero, m[:,0] is {m[:,0]}')
print(f'row zero m[0,:] is {m[0,:]}\n')

dot_product = (m[0,:] * m[1,:]).sum()
print(f'The dot product of rows (m[0,:] * m[1,:]).sum() is {dot_product}\n')
outer_product = m[0,:][None,:] * m[1,:][:,None]
print(f'The outer product of rows m[0,:][None,:] * m[1,:][:,None] is:\n{outer_product}')

dot_product = torch.mm(m[0,:][None], m[1,:][None].t())
print(f'The same dot product via torch.mm(m[0,:][None], m[1,:][None].t()) is {dot_product}\n')
outer_product = torch.mm(m[0,:][None].t(), m[1,:][None]).t()
print(f'The same outer product via torch.mm(m[0,:][None].t(), m[1,:][None]).t() is:\n{outer_product}')

'''
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(5, 5), dpi=100)
def color_mat(ax, m, title):
    ax.set_title(title)
    ax.imshow(m, cmap='hot', vmax=1.5, interpolation='nearest')
    ax.get_xaxis().set_ticks(range(m.shape[1]))
    ax.get_yaxis().set_ticks(range(m.shape[0]))
color_mat(ax1, m, 'm[:,:]')
color_mat(ax2, m[0,:][None,:], 'm[0,:][None,:]')
color_mat(ax3, m[1,:][:,None], 'm[1,:][:,None]')
color_mat(ax4, outer_product, 'm[0,:][None,:] * m[1,:][:,None]')
fig.tight_layout()
fig.show()
'''
26 changes: 26 additions & 0 deletions pytorch/Basic/Tensor/tensor.py
@@ -3,3 +3,29 @@
import torch
from matplotlib import pyplot as plt

# Make a vector of 101 equally spaced numbers from 0 to 5.
x = torch.linspace(0, 5, 101)

# Print the first five things in x.
print(x[:5])

# Print the last five things in x.
print(x[-5:])

# Do some vector computations
y1, y2 = x.sin(), x ** x.cos()

# Elementwise difference of the two curves.
y3 = y2 - y1

# min() reduces over all elements to a zero-dimensional scalar tensor.
y4 = y3.min()

# Print and plot some answers
print(f'The shape of x is {x.shape}')
print(f'The shape of y1=x.sin() is {y1.shape}')
print(f'The shape of y2=x ** x.cos() is {y2.shape}')
print(f'The shape of y3=y2 - y1 is {y3.shape}')
print(f'The shape of y4=y3.min() is {y4.shape}, a zero-d scalar')

plt.plot(x, y1, 'red', x, y2, 'blue', x, y3, 'green')
plt.axhline(y4, color='green', linestyle='--')
plt.show()
15 changes: 15 additions & 0 deletions pytorch/Basic/Tensor/view.py
@@ -0,0 +1,15 @@
import torch

x = torch.randn((2, 3, 4, 5))
print(x.shape)
print(x)

# permute reorders dimensions without copying data; here the last two axes are swapped.
y = x.permute(0, 1, 3, 2)
print(y.shape)  # torch.Size([2, 3, 5, 4])
print(y)
print(x)  # x itself is unchanged

# view reinterprets the same storage with a new shape; -1 infers the size (here 60).
z = x.view(2, -1)
print(z.shape)  # torch.Size([2, 60])
print(z)
print(x)
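One caveat worth adding (my note, not part of the commit): view requires contiguous memory, and permute returns a non-contiguous tensor, so calling view on it fails. A minimal sketch:

import torch

x = torch.randn(2, 3, 4, 5)
y = x.permute(0, 1, 3, 2)       # non-contiguous: strides change, data does not move
# y.view(2, -1)                 # would raise a RuntimeError
z = y.contiguous().view(2, -1)  # copy into contiguous memory first
w = y.reshape(2, -1)            # reshape copies only when it has to
print(z.shape, w.shape)         # torch.Size([2, 60]) torch.Size([2, 60])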
