[5/N][Easy] fix typo for usort config in pyproject.toml (kown -> `known`): sort torch (pytorch#127126)

The `usort` config in `pyproject.toml` has no effect due to a typo. Fixing the typo makes `usort` do more and generates the changes in this PR. Except for `pyproject.toml`, all changes were generated by `lintrunner -a --take UFMT --all-files`.
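For reference, the corrected section plausibly looks like the sketch below. The section name is confirmed by the title (`kown` -> `known`), but the package list shown here is an assumption for illustration, since the `pyproject.toml` hunk itself is not included in this excerpt:

[tool.usort.known]
# Assumed reconstruction for illustration: registering torch as its own
# "first party" section makes usort sort it after third-party imports.
first_party = ["torch"]

With the section name spelled correctly, usort emits standard-library imports, then third-party packages, then a blank line and the `torch` group, which is exactly the reshuffling repeated in every hunk below.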

Pull Request resolved: pytorch#127126
Approved by: https://github.com/kit1980
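To reproduce the mechanical part of the change, a sketch of the workflow (assuming lintrunner is bootstrapped as in PyTorch's CONTRIBUTING guide):

pip install lintrunner
lintrunner init                        # install the UFMT linter's dependencies
lintrunner -a --take UFMT --all-files  # apply usort/black formatting repo-wide

The last command is the one quoted in the commit message; `-a` applies the suggested patches instead of just reporting them.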
XuehaiPan authored and pytorchmergebot committed May 27, 2024
1 parent c7f6fbf commit 26f4f10
Showing 296 changed files with 574 additions and 468 deletions.
3 changes: 2 additions & 1 deletion android/test_app/make_assets.py
@@ -1,6 +1,7 @@
-import torch
 from torchvision import models
 
+import torch
+
 print(torch.version.__version__)
 
 resnet18 = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
3 changes: 2 additions & 1 deletion android/test_app/make_assets_custom.py
@@ -4,10 +4,11 @@
 build script to create a tailored build which only contains these used ops.
 """
 
-import torch
 import yaml
 from torchvision import models
 
+import torch
+
 # Download and trace the model.
 model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
 model.eval()
3 changes: 2 additions & 1 deletion benchmarks/distributed/ddp/benchmark.py
@@ -18,11 +18,12 @@
 import time
 
 import numpy as np
+import torchvision
+
 import torch
 import torch.distributed as dist
 import torch.nn as nn
 import torch.optim as optim
-import torchvision
 
 
 def allgather_object(obj):
4 changes: 2 additions & 2 deletions benchmarks/distributed/pipeline/pipe.py
@@ -3,10 +3,10 @@
 import os
 import time
 
+from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
+
 import torch
 import torch.nn as nn
-
-from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
 from torch.distributed import rpc
 
 from torch.distributed.pipeline.sync import Pipe
16 changes: 8 additions & 8 deletions benchmarks/distributed/rpc/parameter_server/launcher.py
@@ -3,18 +3,10 @@
 import os
 from pathlib import Path
 
-import torch
-import torch.distributed as c10d
-import torch.distributed.rpc as rpc
-import torch.multiprocessing as mp
-
 from data import data_map
 from metrics.ProcessedMetricsPrinter import ProcessedMetricsPrinter
 from models import model_map
 from server import server_map
-from torch.distributed.rpc import TensorPipeRpcBackendOptions
-from torch.futures import wait_all
-from torch.utils.data import DataLoader
 from trainer import (
     criterion_map,
     ddp_hook_map,
@@ -25,6 +17,14 @@
     trainer_map,
 )
 
+import torch
+import torch.distributed as c10d
+import torch.distributed.rpc as rpc
+import torch.multiprocessing as mp
+from torch.distributed.rpc import TensorPipeRpcBackendOptions
+from torch.futures import wait_all
+from torch.utils.data import DataLoader
+
 
 def get_name(rank, args):
     r"""
6 changes: 3 additions & 3 deletions benchmarks/distributed/rpc/parameter_server/server/server.py
@@ -3,12 +3,12 @@
 import time
 from abc import ABC, abstractmethod
 
-import torch
-import torch.distributed.rpc as rpc
-
 from metrics.MetricsLogger import MetricsLogger
 from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
 
+import torch
+import torch.distributed.rpc as rpc
+
 
 class ParameterServerBase(ABC):
     PARAMETER_SERVER_BATCH_METRIC = "parameter_server_batch_metric"
3 changes: 2 additions & 1 deletion benchmarks/distributed/rpc/parameter_server/trainer/hooks.py
@@ -1,6 +1,7 @@
+from utils import process_bucket_with_remote_server
+
 import torch
 import torch.distributed as c10d
-from utils import process_bucket_with_remote_server
 
 
 def allreduce_hook(state, bucket):
@@ -2,10 +2,10 @@
 import time
 from abc import ABC, abstractmethod
 
-import torch
-
 from metrics.MetricsLogger import MetricsLogger
 
+import torch
+
 
 class TrainerBase(ABC):
     BATCH_LEVEL_METRIC = "batch_level_metric"
6 changes: 3 additions & 3 deletions benchmarks/distributed/rpc/rl/coordinator.py
@@ -2,12 +2,12 @@
 
 import numpy as np
 
-import torch
-import torch.distributed.rpc as rpc
-
 from agent import AgentBase
 from observer import ObserverBase
 
+import torch
+import torch.distributed.rpc as rpc
+
 COORDINATOR_NAME = "coordinator"
 AGENT_NAME = "agent"
 OBSERVER_NAME = "observer{}"
4 changes: 2 additions & 2 deletions benchmarks/distributed/rpc/rl/launcher.py
@@ -4,11 +4,11 @@
 import os
 import time
 
+from coordinator import CoordinatorBase
+
 import torch.distributed.rpc as rpc
 import torch.multiprocessing as mp
-
-from coordinator import CoordinatorBase
 
 COORDINATOR_NAME = "coordinator"
 AGENT_NAME = "agent"
 OBSERVER_NAME = "observer{}"
4 changes: 2 additions & 2 deletions benchmarks/distributed/rpc/rl/observer.py
@@ -1,10 +1,10 @@
 import random
 import time
 
+from agent import AgentBase
+
 import torch
 import torch.distributed.rpc as rpc
-
-from agent import AgentBase
 from torch.distributed.rpc import rpc_sync
 
 
20 changes: 9 additions & 11 deletions benchmarks/dynamo/common.py
@@ -36,20 +36,21 @@
     Type,
     TYPE_CHECKING,
 )
-
 from typing_extensions import Self
 from unittest.mock import MagicMock
 
 import numpy as np
 import pandas as pd
 import psutil
+from scipy.stats import gmean, ttest_ind
+from tqdm.auto import tqdm, trange
+
 import torch
 import torch._dynamo
 import torch._dynamo.utils
 import torch._export
 import torch.distributed
 import torch.multiprocessing as mp
-from scipy.stats import gmean, ttest_ind
 from torch._C import _has_cuda as HAS_CUDA, _has_xpu as HAS_XPU
 from torch._dynamo.profiler import fx_insert_profiling, Profiler
 from torch._dynamo.testing import (
@@ -59,8 +60,6 @@
     same,
 )
 
-from tqdm.auto import tqdm, trange
-
 try:
     from torch._dynamo.utils import (
         clone_inputs,
@@ -74,15 +73,14 @@
         graph_break_reasons,
         maybe_enable_compiled_autograd,
     )
+
     import torch._functorch.config
     from torch._functorch.aot_autograd import set_model_name
     from torch._inductor import config as inductor_config, metrics
     from torch._subclasses.fake_tensor import FakeTensorMode
-
     from torch.utils import _pytree as pytree
     from torch.utils._pytree import tree_map, tree_map_only
-
 
 try:
     import torch_xla
     import torch_xla.core.xla_model as xm
@@ -2343,17 +2341,17 @@ def get_benchmark_indices(self, length):
 
     def get_fsdp_auto_wrap_policy(self, model_name: str):
         from diffusers.models.transformer_2d import Transformer2DModel
+
+        from torch.distributed.fsdp.wrap import (
+            ModuleWrapPolicy,
+            size_based_auto_wrap_policy,
+        )
         from torchbenchmark.models.nanogpt.model import Block
         from transformers.models.llama.modeling_llama import LlamaDecoderLayer
-
         from transformers.models.t5.modeling_t5 import T5Block
         from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer
 
-        from torch.distributed.fsdp.wrap import (
-            ModuleWrapPolicy,
-            size_based_auto_wrap_policy,
-        )
 
         # handcrafted wrap policy
         MODEL_FSDP_WRAP = {
             "stable_diffusion_unet": (Transformer2DModel,),
3 changes: 2 additions & 1 deletion benchmarks/dynamo/huggingface.py
@@ -7,9 +7,10 @@
 import sys
 import warnings
 
-import torch
 from common import BenchmarkRunner, download_retry_decorator, main, reset_rng_state
 
+import torch
+
 from torch._dynamo.testing import collect_results
 from torch._dynamo.utils import clone_inputs
 
5 changes: 3 additions & 2 deletions benchmarks/dynamo/microbenchmarks/bench_mm_fusion.py
@@ -1,10 +1,11 @@
 # flake8: noqa
+import triton
+from prettytable import PrettyTable
+
 import torch
 
 import torch._dynamo
 import torch._inductor.config
-import triton
-from prettytable import PrettyTable
 
 # torch._inductor.config.debug = True
 torch._inductor.config.triton.dense_indexing = True
3 changes: 2 additions & 1 deletion benchmarks/dynamo/microbenchmarks/inductor_bmm.py
@@ -1,9 +1,10 @@
+from benchmark_helper import time_with_torch_timer
+
 import torch
 
 import torch._dynamo
 import torch._dynamo.config
 import torch._inductor.config as config
-from benchmark_helper import time_with_torch_timer
 
 
 @torch._dynamo.optimize("inductor", nopython=True)
3 changes: 2 additions & 1 deletion benchmarks/dynamo/microbenchmarks/inductor_cpu_atomic.py
@@ -1,8 +1,9 @@
 import itertools
 
+from benchmark_helper import time_with_torch_timer
+
 import torch
 import torch._dynamo
-from benchmark_helper import time_with_torch_timer
 
 
 @torch._dynamo.optimize("inductor", nopython=True)
5 changes: 3 additions & 2 deletions benchmarks/dynamo/microbenchmarks/inductor_mm.py
@@ -1,10 +1,11 @@
+import triton
+from benchmark_helper import time_with_torch_timer
+
 import torch
 
 import torch._dynamo
 import torch._dynamo.config
 import torch._inductor.config as config
-import triton
-from benchmark_helper import time_with_torch_timer
 
 # The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
 torch.backends.cuda.matmul.allow_tf32 = True
3 changes: 2 additions & 1 deletion benchmarks/dynamo/microbenchmarks/matmul_relu.py
@@ -1,8 +1,9 @@
+from benchmark_helper import time_with_torch_timer
+
 import torch
 
 import torch._dynamo
 import torch._inductor.config as inductor_config
-from benchmark_helper import time_with_torch_timer
 
 inductor_config.triton.mm = "triton"
 
1 change: 1 addition & 0 deletions benchmarks/dynamo/microbenchmarks/microbench.py
@@ -5,6 +5,7 @@
 
 import numpy as np
 import tabulate
+
 import torch
 
 import torch._inductor
3 changes: 2 additions & 1 deletion benchmarks/dynamo/microbenchmarks/operatorbench.py
@@ -1,9 +1,10 @@
 #!/usr/bin/env python3
 import click
 import numpy as np
-import torch
 from operator_inp_utils import OperatorInputsLoader
 
+import torch
+
 from torch._dynamo.backends.cudagraphs import cudagraphs_inner
 from torch._dynamo.testing import same
 from torch._inductor.compile_fx import compile_fx
7 changes: 4 additions & 3 deletions benchmarks/dynamo/runner.py
@@ -47,13 +47,14 @@
 
 import numpy as np
 import pandas as pd
-import torch
-
-import torch._dynamo
 from matplotlib import rcParams
 from scipy.stats import gmean
 from tabulate import tabulate
 
+import torch
+
+import torch._dynamo
+
 rcParams.update({"figure.autolayout": True})
 plt.rc("axes", axisbelow=True)
 
3 changes: 2 additions & 1 deletion benchmarks/dynamo/timm_models.py
@@ -7,9 +7,10 @@
 import sys
 import warnings
 
-import torch
 from common import BenchmarkRunner, download_retry_decorator, main
 
+import torch
+
 from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
 from torch._dynamo.utils import clone_inputs
 
3 changes: 2 additions & 1 deletion benchmarks/dynamo/torchbench.py
@@ -10,9 +10,10 @@
 from collections import namedtuple
 from os.path import abspath, exists
 
-import torch
 import yaml
 
+import torch
+
 try:
     from .common import BenchmarkRunner, main
 except ImportError:
5 changes: 3 additions & 2 deletions benchmarks/dynamo/training_loss.py
@@ -5,12 +5,13 @@
 import time
 from datetime import timedelta
 
+from datasets import load_dataset, load_metric
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
 import torch
 
 import torch._dynamo
-from datasets import load_dataset, load_metric
 from torch.utils.data import DataLoader
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
 torch.backends.cuda.matmul.allow_tf32 = True
 
