
Commit

Merge branch 'main' into replace_deepcopy
Svetlana Karslioglu authored Jun 8, 2023
2 parents 33db7a0 + 1fe4025 commit f1bf472
Showing 8 changed files with 45 additions and 19 deletions.
2 changes: 1 addition & 1 deletion advanced_source/rpc_ddp_tutorial.rst
@@ -1,6 +1,6 @@
Combining Distributed DataParallel with Distributed RPC Framework
=================================================================
-**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/SciPioneer>`_
+**Authors**: `Pritam Damania <https://github.com/pritamdamania87>`_ and `Yi Wang <https://github.com/wayi1>`_

.. note::
|edit| View and edit this tutorial in `github <https://github.com/pytorch/tutorials/blob/main/advanced_source/rpc_ddp_tutorial.rst>`__.
3 changes: 2 additions & 1 deletion beginner_source/Intro_to_TorchScript_tutorial.py
@@ -33,6 +33,7 @@

import torch # This is all you need to use both PyTorch and TorchScript!
print(torch.__version__)
+torch.manual_seed(191009) # set the seed for reproducibility


######################################################################
@@ -308,7 +309,7 @@ def forward(self, x, h):

# New inputs
x, h = torch.rand(3, 4), torch.rand(3, 4)
-traced_cell(x, h)
+print(scripted_cell(x, h))


######################################################################
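For context on the `traced_cell` → `print(scripted_cell(x, h))` change above: tracing bakes in whichever branch the example inputs take, while scripting preserves data-dependent control flow. A minimal sketch of the difference, using stand-in classes that mirror the tutorial's `MyDecisionGate`/`MyCell` structure (shapes and names here are illustrative):

    import torch

    class MyDecisionGate(torch.nn.Module):
        def forward(self, x):
            # Data-dependent branch: tracing records only the path taken,
            # scripting keeps both branches.
            if x.sum() > 0:
                return x
            else:
                return -x

    class MyCell(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.dg = MyDecisionGate()
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x, h):
            new_h = torch.tanh(self.dg(self.linear(x)) + h)
            return new_h, new_h

    x, h = torch.rand(3, 4), torch.rand(3, 4)
    traced_cell = torch.jit.trace(MyCell(), (x, h))   # may warn: branch baked in
    scripted_cell = torch.jit.script(MyCell())        # control flow preserved
    print(scripted_cell(x, h))
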
9 changes: 8 additions & 1 deletion conf.py
@@ -34,6 +34,7 @@
import pytorch_sphinx_theme
import torch
import glob
+import random
import shutil
from custom_directives import IncludeDirective, GalleryItemDirective, CustomGalleryItemDirective, CustomCalloutItemDirective, CustomCardItemDirective
import distutils.file_util
@@ -85,6 +86,11 @@

# -- Sphinx-gallery configuration --------------------------------------------

+def reset_seeds(gallery_conf, fname):
+    torch.manual_seed(42)
+    torch.set_default_device(None)
+    random.seed(10)

sphinx_gallery_conf = {
'examples_dirs': ['beginner_source', 'intermediate_source',
'advanced_source', 'recipes_source', 'prototype_source'],
@@ -94,7 +100,8 @@
'backreferences_dir': None,
'first_notebook_cell': ("# For tips on running notebooks in Google Colab, see\n"
"# https://pytorch.org/tutorials/beginner/colab\n"
-"%matplotlib inline")
+"%matplotlib inline"),
+'reset_modules': (reset_seeds)
}

if os.getenv('GALLERY_PATTERN'):
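Sphinx-Gallery calls every entry in `reset_modules` with the gallery configuration and the example's filename around each tutorial it executes, so the `reset_seeds` hook above re-seeds the RNGs and clears the default device between examples. A minimal sketch of exercising the hook by hand (the loop and filenames are placeholders for what Sphinx-Gallery does internally):

    import random
    import torch

    def reset_seeds(gallery_conf, fname):
        # Same resetter as in conf.py: fixed seeds and a cleared default
        # device keep tutorial outputs reproducible and independent.
        torch.manual_seed(42)
        torch.set_default_device(None)
        random.seed(10)

    for fname in ["beginner_source/example_a.py", "beginner_source/example_b.py"]:
        reset_seeds({}, fname)   # gallery_conf is a config dict in the real call
        # ... run the tutorial source ...
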
11 changes: 7 additions & 4 deletions intermediate_source/mario_rl_tutorial.py
@@ -53,6 +53,8 @@
# Super Mario environment for OpenAI Gym
import gym_super_mario_bros

+from tensordict import TensorDict
+from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage

######################################################################
# RL Definitions
@@ -348,7 +350,7 @@ def act(self, state):
class Mario(Mario): # subclassing for continuity
def __init__(self, state_dim, action_dim, save_dir):
super().__init__(state_dim, action_dim, save_dir)
-self.memory = deque(maxlen=100000)
+self.memory = TensorDictReplayBuffer(storage=LazyMemmapStorage(100000))
self.batch_size = 32

def cache(self, state, next_state, action, reward, done):
@@ -373,14 +375,15 @@ def first_if_tuple(x):
reward = torch.tensor([reward], device=self.device)
done = torch.tensor([done], device=self.device)

-self.memory.append((state, next_state, action, reward, done,))
+# self.memory.append((state, next_state, action, reward, done,))
+self.memory.add(TensorDict({"state": state, "next_state": next_state, "action": action, "reward": reward, "done": done}, batch_size=[]))

def recall(self):
"""
Retrieve a batch of experiences from memory
"""
-batch = random.sample(self.memory, self.batch_size)
-state, next_state, action, reward, done = map(torch.stack, zip(*batch))
+batch = self.memory.sample(self.batch_size)
+state, next_state, action, reward, done = (batch.get(key) for key in ("state", "next_state", "action", "reward", "done"))
return state, next_state, action.squeeze(), reward.squeeze(), done.squeeze()


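A self-contained sketch of the `TensorDictReplayBuffer` round trip that the new `cache`/`recall` code relies on, assuming `torchrl` and `tensordict` are installed; the tensor shapes below are toy values, not the tutorial's Mario observations:

    import torch
    from tensordict import TensorDict
    from torchrl.data import TensorDictReplayBuffer, LazyMemmapStorage

    # LazyMemmapStorage keeps the buffer in disk-backed memory maps
    # instead of holding every transition in RAM.
    buffer = TensorDictReplayBuffer(storage=LazyMemmapStorage(10_000))

    for _ in range(64):
        # batch_size=[] marks each TensorDict as a single, unbatched entry.
        buffer.add(TensorDict({
            "state": torch.randn(8),
            "next_state": torch.randn(8),
            "action": torch.tensor([1]),
            "reward": torch.tensor([0.5]),
            "done": torch.tensor([False]),
        }, batch_size=[]))

    # Sampling returns a stacked TensorDict with a leading batch dimension.
    batch = buffer.sample(32)
    state, action = batch.get("state"), batch.get("action")
    print(state.shape, action.squeeze().shape)  # [32, 8] and [32]
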
6 changes: 3 additions & 3 deletions intermediate_source/seq2seq_translation_tutorial.py
@@ -45,7 +45,7 @@
:alt:
To improve upon this model we'll use an `attention
-mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
+mechanism <https://arxiv.org/abs/1508.04025>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
@@ -66,8 +66,8 @@
Statistical Machine Translation <https://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <https://arxiv.org/abs/1409.3215>`__
-- `Neural Machine Translation by Jointly Learning to Align and
-  Translate <https://arxiv.org/abs/1409.0473>`__
+- `Effective Approaches to Attention-based Neural Machine
+  Translation <https://arxiv.org/abs/1508.04025>`__
- `A Neural Conversational Model <https://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
6 changes: 3 additions & 3 deletions intermediate_source/torch_compile_tutorial.py
@@ -69,7 +69,7 @@

def foo(x, y):
a = torch.sin(x)
-b = torch.cos(x)
+b = torch.cos(y)
return a + b
opt_foo1 = torch.compile(foo)
print(opt_foo1(torch.randn(10, 10), torch.randn(10, 10)))
@@ -80,7 +80,7 @@ def foo(x, y):
@torch.compile
def opt_foo2(x, y):
a = torch.sin(x)
-b = torch.cos(x)
+b = torch.cos(y)
return a + b
print(opt_foo2(torch.randn(10, 10), torch.randn(10, 10)))

@@ -105,7 +105,7 @@ def forward(self, x):
#
# Let's now demonstrate that using ``torch.compile`` can speed
# up real models. We will compare standard eager mode and
-# ``torch.compile`` by evaluating and training ResNet-18 on random data.
+# ``torch.compile`` by evaluating and training a ``torchvision`` model on random data.
#
# Before we start, we need to define some utility functions.

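A minimal sketch of the eager-versus-compiled comparison the tutorial text refers to, assuming `torchvision` is installed and using random inputs; actual speedups depend on the hardware and only show up after the first (compilation) call:

    import torch
    import torchvision.models as models

    model = models.resnet18().eval()
    opt_model = torch.compile(model)

    x = torch.randn(16, 3, 224, 224)

    with torch.no_grad():
        eager_out = model(x)
        compiled_out = opt_model(x)   # first call triggers compilation

    # The two paths should agree up to numerical tolerance.
    print(torch.allclose(eager_out, compiled_out, atol=1e-4, rtol=1e-4))
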
24 changes: 21 additions & 3 deletions prototype_source/fx_graph_mode_ptq_dynamic.py
@@ -239,9 +239,27 @@ def evaluate(model_, data_source):
.set_object_type(nn.LSTM, default_dynamic_qconfig)
.set_object_type(nn.Linear, default_dynamic_qconfig)
)
-# Deepcopying the original model because quantization api changes the model inplace and we want
+# Load model to create the original model because quantization api changes the model inplace and we want
# to keep the original model for future comparison
-model_to_quantize = copy.deepcopy(model)
+
+
+model_to_quantize = LSTMModel(
+    ntoken = ntokens,
+    ninp = 512,
+    nhid = 256,
+    nlayers = 5,
+)
+
+model_to_quantize.load_state_dict(
+    torch.load(
+        model_data_filepath + 'word_language_model_quantize.pth',
+        map_location=torch.device('cpu')
+    )
+)
+
+model_to_quantize.eval()
+
+
prepared_model = prepare_fx(model_to_quantize, qconfig_mapping, example_inputs)
print("prepared model:", prepared_model)
quantized_model = convert_fx(prepared_model)
@@ -289,4 +307,4 @@ def time_model_evaluation(model, test_data):
# 3. Conclusion
# -------------
# This tutorial introduces the api for post training dynamic quantization in FX Graph Mode,
-# which dynamically quantizes the same modules as Eager Mode Quantization.
\ No newline at end of file
+# which dynamically quantizes the same modules as Eager Mode Quantization.
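The replace-`deepcopy` pattern above in generic form: instead of `copy.deepcopy(model)`, build a fresh instance with the same hyperparameters and reload the saved weights, so the original model stays untouched when the quantization APIs mutate their input in place. A minimal sketch with a placeholder module (`TinyModel` and the checkpoint path are illustrative, not from the tutorial):

    import torch
    import torch.nn as nn

    class TinyModel(nn.Module):
        def __init__(self, hidden=16):
            super().__init__()
            self.fc = nn.Linear(8, hidden)

        def forward(self, x):
            return self.fc(x)

    model = TinyModel(hidden=16)
    torch.save(model.state_dict(), "checkpoint.pth")

    # Rebuild with the same hyperparameters and reload the weights,
    # rather than deep-copying the live module.
    model_to_quantize = TinyModel(hidden=16)
    model_to_quantize.load_state_dict(
        torch.load("checkpoint.pth", map_location=torch.device("cpu"))
    )
    model_to_quantize.eval()
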
3 changes: 0 additions & 3 deletions recipes_source/recipes/changing_default_device.py
@@ -43,9 +43,6 @@
print(mod.weight.device)
print(mod(torch.randn(128, 20)).device)

-# And then globally return it back to CPU
-torch.set_default_device('cpu')
-
################################################################
# This function imposes a slight performance cost on every Python
# call to the torch API (not just factory functions). If this
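Related to the lines removed above: when a tutorial only needs a different default device temporarily, the context-manager form scopes the change and restores the previous default on exit, instead of flipping the global default back by hand (the docs build now also resets it between examples via `reset_seeds`). A minimal sketch using CPU so it runs anywhere:

    import torch

    # Scoped: tensors created inside the block use the chosen device,
    # and the previous default is restored automatically on exit.
    with torch.device("cpu"):
        x = torch.randn(128, 20)
    print(x.device)

    # Global: stays in effect until changed again.
    torch.set_default_device("cpu")
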
