Commit
* Add ViT model
* Update the script based on zhiyuan's model
* Update script based on PR review
* Update ViT performance in README.md
Showing 8 changed files with 289 additions and 282 deletions.
5 changes: 5 additions & 0 deletions
inference/configs/vit_l_16/vendor_config/kunlunxin_configurations.yaml
@@ -0,0 +1,5 @@
+compiler: xtcl
+# skip validation (this also skips create_model and the ONNX export); asserts exist_onnx_path != null
+no_validation: true
+# set a real onnx_path to use an existing file, or set it to anything but null to avoid exporting ONNX manually (as with torch-tensorrt)
+exist_onnx_path: /home/FlagPerf/inference/onnxs/vit_l_16_bs32_pytorch_fp16False.onnx
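
Taken together, these keys tell the harness to compile with xtcl and to reuse a pre-exported ONNX file rather than exporting one itself: no_validation disables the validation path (and with it create_model and the ONNX export), so exist_onnx_path must point at a real file. Below is a minimal sketch of that consuming logic; load_vendor_config and the surrounding control flow are illustrative assumptions, not FlagPerf's actual API.

import yaml

def load_vendor_config(path):
    # Parse the vendor YAML shown above into a plain dict (hypothetical helper).
    with open(path) as f:
        return yaml.safe_load(f)

cfg = load_vendor_config(
    "inference/configs/vit_l_16/vendor_config/kunlunxin_configurations.yaml")

if cfg.get("no_validation"):
    # Validation is skipped, so create_model and the ONNX export never run;
    # an existing ONNX file is therefore mandatory, as the comment above warns.
    assert cfg.get("exist_onnx_path") is not None
    onnx_path = cfg["exist_onnx_path"]
    # cfg["compiler"] == "xtcl" would then select the backend that consumes onnx_path.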
@@ -1,23 +1,21 @@
 def analysis_log(logpath):
     logfile = open(logpath)

     max_usage = 0.0  ## usage_mem
     max_mem = 0.0
     for line in logfile.readlines():
         '''
         xpu_smi temp power mem w_mem use_rate
         '''
         if "xpu_smi" in line:
             line = line[:-1]
             usage = line.split(" ")[4]
             usage = float(usage)
             max_usage = max(max_usage, usage)
             max_mem = line.split(" ")[5]
             max_mem = float(max_mem)

     return round(max_usage / 1024.0,
                  2), round(max_mem / 1024.0, 2), eval("32e12"), eval("128e12")


-if __name__ == "__main__":
-    max1, max2, max2,max4 = analysis_log("/home/zhoujiamin01/workspace/zjm_flag/FlagPerf/inference/result/run20230809192313/resnet50:pytorch_1.13/127.0.0.1_noderank0/kunlunxin_monitor.log")
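
This diff drops the debug block with the developer's hardcoded log path, leaving only the analysis_log function that reads a kunlunxin monitor log and returns two memory figures in GiB plus two hardcoded constants. Two details may deserve a follow-up pass: max_mem is reassigned on every matching line instead of being folded with max(), so it holds the last sample rather than the peak, and eval("32e12") / eval("128e12") can simply be the float literals 32e12 and 128e12. A tidier equivalent is sketched below; the column indices come from the docstring, and reading the two constants as theoretical FLOPS figures is an assumption, not something the diff states.

def analysis_log(logpath):
    # Track the peaks of the two memory columns reported by xpu_smi.
    max_usage = 0.0  # column 4 of an "xpu_smi ..." line, in MiB
    max_mem = 0.0    # column 5 of an "xpu_smi ..." line, in MiB
    with open(logpath) as logfile:  # context manager closes the file, unlike the original
        for line in logfile:
            # Expected line format (from the original docstring):
            # xpu_smi temp power mem w_mem use_rate
            if "xpu_smi" in line:
                fields = line.rstrip("\n").split(" ")
                max_usage = max(max_usage, float(fields[4]))
                # max() keeps the peak; the original overwrote max_mem each pass.
                max_mem = max(max_mem, float(fields[5]))
    # MiB -> GiB; the trailing constants are assumed theoretical FLOPS, no eval() needed.
    return round(max_usage / 1024.0, 2), round(max_mem / 1024.0, 2), 32e12, 128e12

Incidentally, the removed debug block unpacked the result as max1, max2, max2, max4; repeating max2 is legal Python, but it means the tuple's second value is silently overwritten by the third.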