[CodeStyle][ruff] clean some F401 step: 6 #59584

Merged: 8 commits, Dec 5, 2023
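Background for the diff below: ruff's F401 rule ("imported but unused") flags imports that are never referenced inside the file that imports them. In re-export modules such as the __init__.py files touched here, those imports are intentional, so this step marks them with an explicit # noqa: F401 and regroups them with the import sorter instead of deleting them; imports that really are unused (for example the bare import paddle in python/paddle/device/xpu/__init__.py) are removed outright. A minimal, self-contained sketch of the pattern, using standard-library modules rather than Paddle code:

# f401_noqa_demo.py -- illustrative sketch only, not Paddle code.
# Without the markers, ruff would report F401 for every import below,
# because nothing in this file references the imported names.

from collections import OrderedDict  # noqa: F401  # intentional re-export, F401 suppressed
from json import (  # noqa: F401
    dumps,
    loads,
)

__all__ = []  # these packages keep __all__ empty; the noqa marks the re-exports instead

With a current ruff release, running ruff check --select F401 on a file like this reports nothing once the markers are in place, which is the state the changes below aim for across the listed Paddle packages.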
4 changes: 2 additions & 2 deletions python/paddle/base/backward.py
@@ -21,10 +21,10 @@
from collections.abc import Sequence

import paddle.base
from paddle.base import framework, program_guard

from . import core, log_helper, unique_name
from . import core, framework, log_helper, unique_name
from .data_feeder import check_type
from .framework import program_guard
from .proto import framework_pb2

__all__ = []
22 changes: 11 additions & 11 deletions python/paddle/base/dygraph/__init__.py
@@ -12,20 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import base
from .base import (
no_grad,
no_grad_,
grad,
guard,
enable_dygraph,
from . import ( # noqa: F401
base,
tracer,
)
from .base import ( # noqa: F401
disable_dygraph,
enable_dygraph,
enabled,
grad,
guard,
no_grad,
no_grad_,
to_variable,
)

from . import tracer
from .tracer import Tracer

from .tracer import Tracer # noqa: F401

__all__ = []
4 changes: 3 additions & 1 deletion python/paddle/base/executor.py
@@ -29,7 +29,7 @@
Value,
translate_to_pir,
)
from . import compiler, core, framework, get_flags, set_flags, unique_name
from . import compiler, core, framework, unique_name
from .data_feeder import convert_dtype
from .framework import (
Operator,
@@ -38,8 +38,10 @@
_apply_pass,
convert_np_dtype_to_dtype_,
default_main_program,
get_flags,
in_pir_mode,
paddle_type_to_proto_type,
set_flags,
)
from .incubate.checkpoint import auto_checkpoint as acp
from .trainer_factory import FetchHandlerMonitor, TrainerFactory
1 change: 0 additions & 1 deletion python/paddle/device/xpu/__init__.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
from paddle.base import core
from paddle.utils import deprecated

24 changes: 13 additions & 11 deletions python/paddle/distributed/auto_parallel/__init__.py
@@ -12,16 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .strategy import Strategy
from .process_mesh import ProcessMesh
from .static.engine import Engine
from .interface import shard_tensor
from .interface import shard_op
from .interface import recompute
from .interface import exclude_ops_in_recompute
from .interface import fetch
from .interface import create_mesh
from .interface import get_mesh
from .random import parallel_manual_seed
from .interface import ( # noqa: F401
create_mesh,
exclude_ops_in_recompute,
fetch,
get_mesh,
recompute,
shard_op,
shard_tensor,
)
from .process_mesh import ProcessMesh # noqa: F401
from .random import parallel_manual_seed # noqa: F401
from .static.engine import Engine # noqa: F401
from .strategy import Strategy # noqa: F401

__all__ = []
@@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .profiler import profiler
from .profiler import profiler # noqa: F401

__all__ = []
30 changes: 15 additions & 15 deletions python/paddle/distributed/communication/__init__.py
@@ -11,22 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .all_gather import all_gather, all_gather_object
from .all_reduce import all_reduce
from .broadcast import broadcast, broadcast_object_list
from .reduce import reduce, ReduceOp
from .send import send, isend
from .recv import recv, irecv
from .scatter import scatter, scatter_object_list
from .gather import gather
from .batch_isend_irecv import batch_isend_irecv, P2POp
from .reduce_scatter import reduce_scatter
from .all_to_all import alltoall, alltoall_single
from .group import (
is_initialized,
from .all_gather import all_gather, all_gather_object # noqa: F401
from .all_reduce import all_reduce # noqa: F401
from .all_to_all import alltoall, alltoall_single # noqa: F401
from .batch_isend_irecv import P2POp, batch_isend_irecv # noqa: F401
from .broadcast import broadcast, broadcast_object_list # noqa: F401
from .gather import gather # noqa: F401
from .group import ( # noqa: F401
barrier,
destroy_process_group,
get_backend,
get_group,
is_initialized,
wait,
barrier,
get_backend,
)
from .recv import irecv, recv # noqa: F401
from .reduce import ReduceOp, reduce # noqa: F401
from .reduce_scatter import reduce_scatter # noqa: F401
from .scatter import scatter, scatter_object_list # noqa: F401
from .send import isend, send # noqa: F401
23 changes: 13 additions & 10 deletions python/paddle/distributed/fleet/layers/mpu/__init__.py
@@ -12,13 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .mp_layers import VocabParallelEmbedding
from .mp_layers import ColumnParallelLinear
from .mp_layers import RowParallelLinear
from .mp_layers import ParallelCrossEntropy

from .random import RNGStatesTracker
from .random import get_rng_state_tracker
from .random import model_parallel_random_seed
from .random import determinate_seed
from .random import dropout
from .mp_layers import ( # noqa: F401
ColumnParallelLinear,
ParallelCrossEntropy,
RowParallelLinear,
VocabParallelEmbedding,
)
from .random import ( # noqa: F401
RNGStatesTracker,
determinate_seed,
dropout,
get_rng_state_tracker,
model_parallel_random_seed,
)
@@ -10,9 +10,9 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
from .hybrid_parallel_optimizer import HybridParallelOptimizer
from .hybrid_parallel_gradscaler import HybridParallelGradScaler
from .dygraph_sharding_optimizer import DygraphShardingOptimizer
from .heter_parallel_optimizer import HeterParallelOptimizer
from .dygraph_sharding_optimizer import DygraphShardingOptimizer # noqa: F401
from .heter_parallel_optimizer import HeterParallelOptimizer # noqa: F401
from .hybrid_parallel_gradscaler import HybridParallelGradScaler # noqa: F401
from .hybrid_parallel_optimizer import HybridParallelOptimizer # noqa: F401

__all__ = []
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/recompute/__init__.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .recompute import recompute, recompute_sequential
from .recompute_hybrid import recompute_hybrid
from .recompute import recompute, recompute_sequential # noqa: F401
from .recompute_hybrid import recompute_hybrid # noqa: F401

__all__ = []
2 changes: 1 addition & 1 deletion python/paddle/io/__init__.py
@@ -18,6 +18,7 @@
BatchSampler,
ChainDataset,
ComposeDataset,
ConcatDataset,
Dataset,
DistributedBatchSampler,
IterableDataset,
@@ -30,7 +31,6 @@
WeightedRandomSampler,
get_worker_info,
random_split,
ConcatDataset,
)
from .reader import DataLoader

36 changes: 17 additions & 19 deletions python/paddle/quantization/imperative/__init__.py
@@ -12,25 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import qat
from .qat import ImperativeQuantAware

from . import ptq
from .ptq import ImperativePTQ

from . import ptq_config
from .ptq_config import PTQConfig, default_ptq_config

from . import ptq_quantizer
from .ptq_quantizer import (
BaseQuantizer,
AbsmaxQuantizer,
PerChannelAbsmaxQuantizer,
KLQuantizer,
HistQuantizer,
from . import (
ptq, # noqa: F401
ptq_config, # noqa: F401
ptq_quantizer, # noqa: F401
ptq_registry, # noqa: F401
qat, # noqa: F401
)
from .ptq import ImperativePTQ # noqa: F401
from .ptq_config import PTQConfig, default_ptq_config # noqa: F401
from .ptq_quantizer import ( # noqa: F401
SUPPORT_ACT_QUANTIZERS,
SUPPORT_WT_QUANTIZERS,
AbsmaxQuantizer,
BaseQuantizer,
HistQuantizer,
KLQuantizer,
PerChannelAbsmaxQuantizer,
)

from . import ptq_registry
from .ptq_registry import PTQRegistry
from .ptq_registry import PTQRegistry # noqa: F401
from .qat import ImperativeQuantAware # noqa: F401