Bump ruff to 0.3.2 and black to 24 (microsoft#19878)
### Motivation and Context

Routine updates
justinchuby authored Mar 13, 2024
1 parent 9e0a0f0 commit faea42a
Showing 103 changed files with 702 additions and 764 deletions.
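
Most of the churn below is mechanical and falls into three patterns. The rule codes are my reading of the diff, not stated in the commit: ruff's pyupgrade rule (UP032) rewrites `str.format` calls as f-strings, its flake8-pie rule (PIE808) drops the redundant `0` start argument from `range`, and black 24's stabilized "dummy implementations" style collapses `...` bodies onto the `def` line. A small before/after sketch:

```python
# Before (black 23 / older ruff):
opset, n = 10, 3
msg = "opset {} is too old".format(opset)  # UP032: prefer an f-string
for i in range(0, n):  # PIE808: the 0 start argument is redundant
    print(i)

class OldStyle:
    def create_cases(self):  # black 23 kept stub bodies on their own line
        ...

# After (black 24 / ruff 0.3.2):
msg = f"opset {opset} is too old"
for i in range(n):
    print(i)

class NewStyle:
    def create_cases(self): ...  # black 24 "dummy implementations" style
```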
4 changes: 2 additions & 2 deletions cgmanifests/generate_cgmanifest.py
@@ -115,8 +115,8 @@ def normalize_path_separators(path):
 submodule_lines = proc.stdout.splitlines()
 for submodule_line in submodule_lines:
     (absolute_path, url, commit) = submodule_line.split(" ")
-    git_deps[GitDep(commit, url)] = "git submodule at {}".format(
-        normalize_path_separators(os.path.relpath(absolute_path, REPO_DIR))
+    git_deps[GitDep(commit, url)] = (
+        f"git submodule at {normalize_path_separators(os.path.relpath(absolute_path, REPO_DIR))}"
     )
 
 with open(os.path.join(SCRIPT_DIR, "..", "cmake", "deps.txt")) as f:
2 changes: 1 addition & 1 deletion docs/python/examples/plot_train_convert_predict.py
@@ -134,7 +134,7 @@ def loop(X_test, fct, n=None):
     nrow = X_test.shape[0]
     if n is None:
         n = nrow
-    for i in range(0, n):
+    for i in range(n):
         im = i % nrow
         fct(X_test[im : im + 1])

9 changes: 3 additions & 6 deletions onnxruntime/python/tools/microbench/benchmark.py
@@ -147,20 +147,17 @@ def __init__(self, args):
 
     @classmethod
     @abstractmethod
-    def create_inputs_outputs(cls, op_param):
-        ...
+    def create_inputs_outputs(cls, op_param): ...
 
     def add_case(self, op_param, model):
         self.cases += [(op_param, model)]
 
     @abstractmethod
-    def create_cases(self):
-        ...
+    def create_cases(self): ...
 
     @classmethod
     @abstractmethod
-    def case_profile(cls, op_param, time):
-        ...
+    def case_profile(cls, op_param, time): ...
 
     def benchmark(self):
         self.create_cases()
12 changes: 4 additions & 8 deletions onnxruntime/python/tools/quantization/base_quantizer.py
@@ -187,27 +187,23 @@ def check_opset_version(self):
 
         if opset_version == 10:
             logging.warning(
-                "The original model opset version is {}, which does not support node fusions. Please update the model to opset >= 11 for better performance.".format(
-                    opset_version
-                )
+                f"The original model opset version is {opset_version}, which does not support node fusions. Please update the model to opset >= 11 for better performance."
             )
             return 10
 
         if opset_version < 10:
             logging.warning(
-                "The original model opset version is {}, which does not support quantization. Please update the model to opset >= 11. Updating the model automatically to opset 11. Please verify the quantized model.".format(
-                    opset_version
-                )
+                f"The original model opset version is {opset_version}, which does not support quantization. Please update the model to opset >= 11. Updating the model automatically to opset 11. Please verify the quantized model."
             )
             self.model.model.opset_import.remove(ai_onnx_domain[0])
             self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 11)])
             opset_version = 11
 
         if opset_version < 19 and self.weight_qType == onnx.TensorProto.FLOAT8E4M3FN:
             logging.warning(
-                "The original model opset version is {}, which does not support quantization to float 8. "
+                f"The original model opset version is {opset_version}, which does not support quantization to float 8. "
                 "Please update the model to opset >= 19. Updating the model automatically to opset 19. "
-                "Please verify the quantized model.".format(opset_version)
+                "Please verify the quantized model."
             )
             self.model.model.opset_import.remove(ai_onnx_domain[0])
             self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 19)])
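Note the third warning above: with implicit string concatenation, only the segment that contains a placeholder needs the `f` prefix, so the multi-line message can drop `.format()` by converting just its first line. A minimal sketch of the same idiom:

```python
import logging

opset_version = 18
logging.warning(
    f"The original model opset version is {opset_version}. "  # f-prefixed: has a placeholder
    "Please update the model to opset >= 19."  # plain literal: concatenated at compile time
)
```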
6 changes: 1 addition & 5 deletions onnxruntime/python/tools/quantization/calibrate.py
@@ -918,11 +918,7 @@ def compute_entropy(self):
         thresholds_dict = {}  # per tensor thresholds
 
         print(f"Number of tensors : {len(histogram_dict)}")
-        print(
-            "Number of histogram bins : {} (The number may increase depends on the data it collects)".format(
-                self.num_bins
-            )
-        )
+        print(f"Number of histogram bins : {self.num_bins} (The number may increase depends on the data it collects)")
         print(f"Number of quantized bins : {self.num_quantized_bins}")
 
         for tensor, histogram in histogram_dict.items():
@@ -216,7 +216,7 @@ def pack_on_row_fast_248bit(pack_tensor, ori_int_tensor, bits):
     pack_tensor = pack_tensor.T
     if bits in [2, 4, 8]:
         compress_ratio = pack_tensor.element_size() * 8 // bits
-        for j in range(0, compress_ratio):
+        for j in range(compress_ratio):
             pack_tensor[0:] |= ori_int_tensor[j::compress_ratio] << (bits * (j))
     else:
         raise NotImplementedError("Only 2,4,8 bits are supported.")
6 changes: 1 addition & 5 deletions onnxruntime/python/tools/quantization/onnx_model.py
@@ -79,11 +79,7 @@ def _clean_initializers_helper(graph, model):
                 graph.input.remove(name_to_input[initializer.name])
             except StopIteration:
                 if model.ir_version < 4:
-                    print(
-                        "Warning: invalid weight name {} found in the graph (not a graph input)".format(
-                            initializer.name
-                        )
-                    )
+                    print(f"Warning: invalid weight name {initializer.name} found in the graph (not a graph input)")
 
     requesting_tensor_names.difference_update(input.name for input in graph.input)

4 changes: 2 additions & 2 deletions onnxruntime/python/tools/quantization/operators/concat.py
@@ -30,7 +30,7 @@ def quantize(self):
             zero_point_names,
             scale_names,
             nodes,
-        ) = self.quantizer.quantize_activation(node, [*range(0, len(node.input))])
+        ) = self.quantizer.quantize_activation(node, [*range(len(node.input))])
         if not data_found or q_input_names is None:
             return super().quantize()
 
@@ -52,7 +52,7 @@ def quantize(self):
         qnode_name = node.name + "_quant" if node.name else ""
 
         qlconcat_inputs = [output_scale_name, output_zp_name]
-        for i in range(0, len(q_input_names)):
+        for i in range(len(q_input_names)):
             qlconcat_inputs.extend([q_input_names[i], scale_names[i], zero_point_names[i]])
         qlconcat_node = onnx.helper.make_node(
             "QLinearConcat", qlconcat_inputs, [quantized_output_value.q_name], qnode_name, **kwargs
4 changes: 1 addition & 3 deletions onnxruntime/python/tools/quantization/operators/gemm.py
@@ -157,7 +157,5 @@ def quantize(self):
             set_default_beta(self.node)
         else:
             logging.warning(
-                "Bias of Gemm node '{}' is not constant. Please exclude this node for better performance.".format(
-                    self.node.name
-                )
+                f"Bias of Gemm node '{self.node.name}' is not constant. Please exclude this node for better performance."
             )
4 changes: 1 addition & 3 deletions onnxruntime/python/tools/quantization/qdq_quantizer.py
@@ -153,9 +153,7 @@ def _is_tensor_quantizable(self, tensor_name):
                 return True
         else:
             logging.warning(
-                "failed to infer the type of tensor: {}. Skip to quantize it. Please check if it is expected.".format(
-                    tensor_name
-                )
+                f"failed to infer the type of tensor: {tensor_name}. Skip to quantize it. Please check if it is expected."
             )
 
         return False
4 changes: 2 additions & 2 deletions onnxruntime/python/tools/quantization/quant_utils.py
@@ -276,7 +276,7 @@ def compute_scale_zp_float8(element_type, std):
     from onnx.reference.custom_element_types import float8e4m3fn
 
     zp_dtype = float8e4m3fn
-    all_values = [float8e4m3_to_float32(i) for i in range(0, 256)]
+    all_values = [float8e4m3_to_float32(i) for i in range(256)]
     values = numpy.array(
         [f for f in all_values if not numpy.isnan(f) and not numpy.isinf(f)], dtype=numpy.float32
     )
 
@@ -530,7 +530,7 @@ def get_elem_index(elem_name, elem_list):
     Helper function to return index of an item in a node list
     """
     elem_idx = -1
-    for i in range(0, len(elem_list)):
+    for i in range(len(elem_list)):
         if elem_list[i] == elem_name:
             elem_idx = i
     return elem_idx
58 changes: 17 additions & 41 deletions onnxruntime/python/tools/symbolic_shape_infer.py
@@ -282,7 +282,7 @@ def _add_suggested_merge(self, symbols, apply=False):
         # when nothing to map to, use the shorter one
         if map_to is None:
             if self.verbose_ > 0:
-                logger.warning("Potential unsafe merge between symbolic expressions: ({})".format(",".join(symbols)))
+                logger.warning("Potential unsafe merge between symbolic expressions: (%s)", ",".join(symbols))
             symbols_list = list(symbols)
             lens = [len(s) for s in symbols_list]
             map_to = symbols_list[lens.index(min(lens))]
@@ -335,10 +335,7 @@ def _merge_symbols(self, dims):
             int_dim = is_int.index(1)
             if self.verbose_ > 0:
                 logger.debug(
-                    "dim {} has been merged with value {}".format(
-                        unique_dims[:int_dim] + unique_dims[int_dim + 1 :],
-                        unique_dims[int_dim],
-                    )
+                    f"dim {unique_dims[:int_dim] + unique_dims[int_dim + 1 :]} has been merged with value {unique_dims[int_dim]}"
                 )
             self._check_merged_dims(unique_dims, allow_broadcast=False)
             return unique_dims[int_dim]
@@ -379,7 +376,7 @@ def _broadcast_shapes(self, shape1, shape2):
                 if self.auto_merge_:
                     self._add_suggested_merge([dim1, dim2], apply=True)
                 else:
-                    logger.warning("unsupported broadcast between " + str(dim1) + " " + str(dim2))
+                    logger.warning("unsupported broadcast between " + str(dim1) + " " + str(dim2))  # noqa: G003
             new_shape = [new_dim, *new_shape]
         return new_shape

@@ -663,12 +660,7 @@ def _new_symbolic_dim(self, prefix, dim):
 
     def _new_symbolic_dim_from_output(self, node, out_idx=0, dim=0):
         return self._new_symbolic_dim(
-            "{}{}_{}_o{}_".format(
-                node.op_type,
-                self.prefix_,
-                list(self.out_mp_.graph.node).index(node),
-                out_idx,
-            ),
+            f"{node.op_type}{self.prefix_}_{list(self.out_mp_.graph.node).index(node)}_o{out_idx}_",
             dim,
         )

@@ -1216,9 +1208,7 @@ def _infer_Loop(self, node):  # noqa: N802
         if need_second_infer:
             if self.verbose_ > 2:
                 logger.debug(
-                    "Rerun Loop: {}({}...), because of sequence in loop carried variables".format(
-                        node.name, node.output[0]
-                    )
+                    f"Rerun Loop: {node.name}({node.output[0]}...), because of sequence in loop carried variables"
                 )
             self._onnx_infer_subgraph(node, subgraph, inc_subgraph_id=False)

@@ -1843,7 +1833,7 @@ def handle_negative_index(index, bound):
         axes = self._try_get_value(node, 3)
         steps = self._try_get_value(node, 4)
         if axes is None and not (starts is None and ends is None):
-            axes = list(range(0, len(starts if starts is not None else ends)))
+            axes = list(range(len(starts if starts is not None else ends)))
         if steps is None and not (starts is None and ends is None):
             steps = [1] * len(starts if starts is not None else ends)
         axes = as_list(axes, keep_none=True)
@@ -2669,11 +2659,9 @@ def get_prereq(node):
                 break
 
             if self.verbose_ > 2:
-                logger.debug(node.op_type + ": " + node.name)
+                logger.debug(node.op_type + ": " + node.name)  # noqa: G003
                 for i, name in enumerate(node.input):
-                    logger.debug(
-                        "  Input {}: {} {}".format(i, name, "initializer" if name in self.initializers_ else "")
-                    )
+                    logger.debug("  Input %s: %s %s", i, name, "initializer" if name in self.initializers_ else "")
 
             # onnx automatically merge dims with value, i.e. Mul(['aaa', 'bbb'], [1000, 1]) -> [1000, 'bbb']
             # symbolic shape inference needs to apply merge of 'aaa' -> 1000 in this case
@@ -2722,7 +2710,7 @@ def get_prereq(node):
                     seq_cls_type = out_type.sequence_type.elem_type.WhichOneof("value")
                     if seq_cls_type == "tensor_type":
                         logger.debug(
-                            "  {}: sequence of {} {}".format(
+                            "  {}: sequence of {} {}".format(  # noqa: G001
                                 node.output[i_o],
                                 str(get_shape_from_value_info(vi)),
                                 onnx.TensorProto.DataType.Name(
@@ -2740,14 +2728,10 @@
                 out_type_undefined = out_type.tensor_type.elem_type == onnx.TensorProto.UNDEFINED
                 if self.verbose_ > 2:
                     logger.debug(
-                        "  {}: {} {}".format(
-                            node.output[i_o],
-                            str(out_shape),
-                            onnx.TensorProto.DataType.Name(vi.type.tensor_type.elem_type),
-                        )
+                        f"  {node.output[i_o]}: {out_shape!s} {onnx.TensorProto.DataType.Name(vi.type.tensor_type.elem_type)}"
                     )
                     if node.output[i_o] in self.sympy_data_:
-                        logger.debug("  Sympy Data: " + str(self.sympy_data_[node.output[i_o]]))
+                        logger.debug("  Sympy Data: " + str(self.sympy_data_[node.output[i_o]]))  # noqa: G003
 
                 # onnx >= 1.11.0, use unk__#index instead of None when the shape dim is uncertain
                 if (
@@ -2848,24 +2832,16 @@ def get_prereq(node):
                     if self.verbose_ > 0:
                         if is_unknown_op:
                             logger.debug(
-                                "Possible unknown op: {} node: {}, guessing {} shape".format(
-                                    node.op_type, node.name, vi.name
-                                )
+                                f"Possible unknown op: {node.op_type} node: {node.name}, guessing {vi.name} shape"
                             )
                         if self.verbose_ > 2:
-                            logger.debug(
-                                "  {}: {} {}".format(
-                                    node.output[i_o],
-                                    str(new_shape),
-                                    vi.type.tensor_type.elem_type,
-                                )
-                            )
+                            logger.debug(f"  {node.output[i_o]}: {new_shape!s} {vi.type.tensor_type.elem_type}")
 
                     self.run_ = True
                     continue  # continue the inference after guess, no need to stop as no merge is needed
 
                 if self.verbose_ > 0 or not self.auto_merge_ or out_type_undefined:
-                    logger.debug("Stopping at incomplete shape inference at " + node.op_type + ": " + node.name)
+                    logger.debug("Stopping at incomplete shape inference at %s: %s", node.op_type, node.name)
                     logger.debug("node inputs:")
                     for i in node.input:
                         if i in self.known_vi_:
@@ -2879,7 +2855,7 @@
                     else:
                         logger.debug(f"not in known_vi_ for {o}")
                 if self.auto_merge_ and not out_type_undefined:
-                    logger.debug("Merging: " + str(self.suggested_merge_))
+                    logger.debug("Merging: " + str(self.suggested_merge_))  # noqa: G003
                 return False
 
         self.run_ = False
@@ -2964,9 +2940,9 @@ def parse_arguments():
 
 if __name__ == "__main__":
     args = parse_arguments()
-    logger.info("input model: " + args.input)
+    logger.info("input model: " + args.input)  # noqa: G003
     if args.output:
-        logger.info("output model " + args.output)
+        logger.info("output model " + args.output)  # noqa: G003
     logger.info("Doing symbolic shape inference...")
     out_mp = SymbolicShapeInference.infer_shapes(
         onnx.load(args.input),
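The `# noqa: G001`/`G002`/`G003` suppressions added throughout this file correspond to ruff's flake8-logging-format rules, which flag `str.format` (G001), `%` interpolation (G002), and `+` concatenation (G003) inside logging calls: the message is built eagerly even when the log level filters it out. Where the commit rewrites a call instead of suppressing it, it uses lazy `%s` arguments. A short illustration of the difference:

```python
import logging

logger = logging.getLogger(__name__)
name = "Slice"

logger.debug("node: " + name)           # G003: concatenation happens even if DEBUG is off
logger.debug("node: {}".format(name))   # G001: same eager-formatting problem
logger.debug("node: %s" % name)         # G002: likewise
logger.debug("node: %s", name)          # preferred: formatted only if the record is emitted
```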
4 changes: 2 additions & 2 deletions onnxruntime/python/tools/tensorrt/perf/benchmark.py
@@ -790,7 +790,7 @@ def skip_ep(model_name, ep, model_to_fail_ep):
 
     # if ep in fail_ep_list and fail_ep_list[ep] == "runtime error":
     if ep in fail_ep_list:
-        logger.info("Skip testing " + model_name + " using " + ep + " since it has some issues.")
+        logger.info("Skip testing " + model_name + " using " + ep + " since it has some issues.")  # noqa: G003
         return True
 
     return False
 
@@ -925,7 +925,7 @@ def find_model_path(path):
 
     logger.info(target_model_path)
     if len(target_model_path) > 1:
-        logger.error("We expect to find only one model in " + path)
+        logger.error("We expect to find only one model in " + path)  # noqa: G003
         raise
 
     return target_model_path[0]
6 changes: 3 additions & 3 deletions onnxruntime/python/tools/tensorrt/perf/benchmark_wrapper.py
@@ -80,9 +80,9 @@ def main():
     benchmark = is_benchmark_mode(args.running_mode)  # noqa: F405
 
     for model, model_info in models.items():
-        logger.info("\n" + "=" * 40 + "=" * len(model))  # noqa: F405
-        logger.info("=" * 20 + model + "=" * 20)  # noqa: F405
-        logger.info("=" * 40 + "=" * len(model))  # noqa: F405
+        logger.info("\n" + "=" * 40 + "=" * len(model))  # noqa: F405, G003
+        logger.info("=" * 20 + model + "=" * 20)  # noqa: F405, G003
+        logger.info("=" * 40 + "=" * len(model))  # noqa: F405, G003
 
         model_info["model_name"] = model

4 changes: 2 additions & 2 deletions onnxruntime/python/tools/transformers/benchmark.py
@@ -802,7 +802,7 @@ def main():
         try:
             os.mkdir(args.cache_dir)
         except OSError:
-            logger.error("Creation of the directory %s failed" % args.cache_dir)
+            logger.error("Creation of the directory %s failed" % args.cache_dir)  # noqa: G002
 
     enable_torch = "torch" in args.engines
     enable_torch2 = "torch2" in args.engines
 
@@ -921,7 +921,7 @@ def main():
                     args,
                 )
             except Exception:
-                logger.error("Exception", exc_info=True)
+                logger.exception("Exception")
 
     time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
     if model_fusion_statistics:
2 changes: 1 addition & 1 deletion onnxruntime/python/tools/transformers/benchmark_helper.py
@@ -142,7 +142,7 @@ def create_onnxruntime_session(
 
         session = onnxruntime.InferenceSession(onnx_model_path, sess_options, providers=providers)
     except Exception:
-        logger.error("Exception", exc_info=True)
+        logger.error("Exception", exc_info=True)  # noqa: G201
 
     return session

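The last two files resolve ruff's G201 rule (`.exception(...)` should be used instead of `.error(..., exc_info=True)`) in opposite ways: `transformers/benchmark.py` switches to `logger.exception`, while `benchmark_helper.py` keeps the original call under a `# noqa: G201`. Inside an `except` block the two spellings produce the same ERROR-level record with a traceback:

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    raise ValueError("bad model path")
except Exception:
    logger.error("Exception", exc_info=True)  # flagged by G201
    logger.exception("Exception")             # equivalent, preferred spelling
```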
6 changes: 3 additions & 3 deletions onnxruntime/python/tools/transformers/bert_perf_test.py
@@ -232,9 +232,9 @@ def onnxruntime_inference(session, all_inputs, output_names):
 def to_string(model_path, session, test_setting):
     sess_options = session.get_session_options()
     option = f"model={os.path.basename(model_path)},"
-    option += "graph_optimization_level={},intra_op_num_threads={},".format(
-        sess_options.graph_optimization_level, sess_options.intra_op_num_threads
-    ).replace("GraphOptimizationLevel.ORT_", "")
+    option += f"graph_optimization_level={sess_options.graph_optimization_level},intra_op_num_threads={sess_options.intra_op_num_threads},".replace(
+        "GraphOptimizationLevel.ORT_", ""
+    )
 
     option += f"batch_size={test_setting.batch_size},sequence_length={test_setting.sequence_length},"
     option += f"test_cases={test_setting.test_cases},test_times={test_setting.test_times},"
(Diff truncated: only the first files of the 103 changed are shown above.)
