diff --git "a/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" "b/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" index 2143a6da86..e64a22e169 100644 --- "a/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" +++ "b/applications/\345\244\232\346\250\241\346\200\201\350\241\250\345\215\225\350\257\206\345\210\253.md" @@ -809,7 +809,7 @@ plt.imshow(img) ``` fout.write(img_path + "\t" + json.dumps( { - "ser_resule": result, + "ser_result": result, }, ensure_ascii=False) + "\n") ``` diff --git a/doc/doc_ch/add_new_algorithm.md b/doc/doc_ch/add_new_algorithm.md index 79c29249dd..bb97e00aa6 100644 --- a/doc/doc_ch/add_new_algorithm.md +++ b/doc/doc_ch/add_new_algorithm.md @@ -246,7 +246,7 @@ class MyMetric(object): def get_metric(self): """ - return metircs { + return metrics { 'acc': 0, 'norm_edit_dis': 0, } diff --git a/doc/doc_en/add_new_algorithm_en.md b/doc/doc_en/add_new_algorithm_en.md index db72fe7d4b..a8903b0a20 100644 --- a/doc/doc_en/add_new_algorithm_en.md +++ b/doc/doc_en/add_new_algorithm_en.md @@ -237,7 +237,7 @@ class MyMetric(object): def get_metric(self): """ - return metircs { + return metrics { 'acc': 0, 'norm_edit_dis': 0, } diff --git "a/notebook/notebook_ch/5.ppocrv2_inference_deployment/PP-OCRv2\351\242\204\346\265\213\351\203\250\347\275\262\345\256\236\346\210\230.ipynb" "b/notebook/notebook_ch/5.ppocrv2_inference_deployment/PP-OCRv2\351\242\204\346\265\213\351\203\250\347\275\262\345\256\236\346\210\230.ipynb" index c65627acc8..3b8550d339 100644 --- "a/notebook/notebook_ch/5.ppocrv2_inference_deployment/PP-OCRv2\351\242\204\346\265\213\351\203\250\347\275\262\345\256\236\346\210\230.ipynb" +++ "b/notebook/notebook_ch/5.ppocrv2_inference_deployment/PP-OCRv2\351\242\204\346\265\213\351\203\250\347\275\262\345\256\236\346\210\230.ipynb" @@ -1876,11 +1876,11 @@ " rec_res)\n", " filter_boxes, filter_rec_res = [], []\n", " # 根据识别得分的阈值对结果进行过滤,如果得分小于阈值,就过滤掉\n", - " for box, rec_reuslt in zip(dt_boxes, rec_res):\n", - " text, score = rec_reuslt\n", + " for box, rec_result in zip(dt_boxes, rec_res):\n", + " text, score = rec_result\n", " if score >= self.drop_score:\n", " filter_boxes.append(box)\n", - " filter_rec_res.append(rec_reuslt)\n", + " filter_rec_res.append(rec_result)\n", " return filter_boxes, filter_rec_res\n", "\n", "def sorted_boxes(dt_boxes):\n", diff --git a/notebook/notebook_en/5.ppocrv2_inference_deployment/ppocrv2_inference_deployment_practice.ipynb b/notebook/notebook_en/5.ppocrv2_inference_deployment/ppocrv2_inference_deployment_practice.ipynb index 61cd456151..780f948579 100644 --- a/notebook/notebook_en/5.ppocrv2_inference_deployment/ppocrv2_inference_deployment_practice.ipynb +++ b/notebook/notebook_en/5.ppocrv2_inference_deployment/ppocrv2_inference_deployment_practice.ipynb @@ -1886,11 +1886,11 @@ " rec_res)\n", " filter_boxes, filter_rec_res = [], []\n", " #Filter the results according to the threshold of the recognition score, if the score is less than the threshold, filter out\n", - " for box, rec_reuslt in zip(dt_boxes, rec_res):\n", - " text, score = rec_reuslt\n", + " for box, rec_result in zip(dt_boxes, rec_res):\n", + " text, score = rec_result\n", " if score >= self.drop_score:\n", " filter_boxes.append(box)\n", - " filter_rec_res.append(rec_reuslt)\n", + " filter_rec_res.append(rec_result)\n", " return filter_boxes, filter_rec_res\n", "\n", "def sorted_boxes(dt_boxes):\n", diff --git 
diff --git a/ppocr/metrics/det_metric.py b/ppocr/metrics/det_metric.py
index c9ec8dd2e9..dca94c0927 100644
--- a/ppocr/metrics/det_metric.py
+++ b/ppocr/metrics/det_metric.py
@@ -64,9 +64,9 @@ def get_metric(self):
             }
         """
-        metircs = self.evaluator.combine_results(self.results)
+        metrics = self.evaluator.combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics
 
     def reset(self):
         self.results = []  # clear results
@@ -127,20 +127,20 @@ def get_metric(self):
             'thr 0.9':'precision: 0 recall: 0 hmean: 0',
             }
         """
-        metircs = {}
+        metrics = {}
         hmean = 0
         for score_thr in self.results.keys():
-            metirc = self.evaluator.combine_results(self.results[score_thr])
-            # for key, value in metirc.items():
-            #     metircs['{}_{}'.format(key, score_thr)] = value
-            metirc_str = 'precision:{:.5f} recall:{:.5f} hmean:{:.5f}'.format(
-                metirc['precision'], metirc['recall'], metirc['hmean'])
-            metircs['thr {}'.format(score_thr)] = metirc_str
-            hmean = max(hmean, metirc['hmean'])
-            metircs['hmean'] = hmean
+            metric = self.evaluator.combine_results(self.results[score_thr])
+            # for key, value in metric.items():
+            #     metrics['{}_{}'.format(key, score_thr)] = value
+            metric_str = 'precision:{:.5f} recall:{:.5f} hmean:{:.5f}'.format(
+                metric['precision'], metric['recall'], metric['hmean'])
+            metrics['thr {}'.format(score_thr)] = metric_str
+            hmean = max(hmean, metric['hmean'])
+            metrics['hmean'] = hmean
 
         self.reset()
-        return metircs
+        return metrics
 
     def reset(self):
         self.results = {
diff --git a/ppocr/metrics/e2e_metric.py b/ppocr/metrics/e2e_metric.py
index 41b7ac2bad..2f8ba3b222 100644
--- a/ppocr/metrics/e2e_metric.py
+++ b/ppocr/metrics/e2e_metric.py
@@ -78,9 +78,9 @@ def __call__(self, preds, batch, **kwargs):
         self.results.append(result)
 
     def get_metric(self):
-        metircs = combine_results(self.results)
+        metrics = combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics
 
     def reset(self):
         self.results = []  # clear results
diff --git a/ppocr/metrics/kie_metric.py b/ppocr/metrics/kie_metric.py
index f3bce0411d..28ab22b807 100644
--- a/ppocr/metrics/kie_metric.py
+++ b/ppocr/metrics/kie_metric.py
@@ -61,9 +61,9 @@ def combine_results(self, results):
 
     def get_metric(self):
 
-        metircs = self.combine_results(self.results)
+        metrics = self.combine_results(self.results)
         self.reset()
-        return metircs
+        return metrics
 
     def reset(self):
         self.results = []  # clear results
diff --git a/ppocr/metrics/vqa_token_ser_metric.py b/ppocr/metrics/vqa_token_ser_metric.py
index 92d80d0970..286d8addaf 100644
--- a/ppocr/metrics/vqa_token_ser_metric.py
+++ b/ppocr/metrics/vqa_token_ser_metric.py
@@ -34,13 +34,13 @@ def __call__(self, preds, batch, **kwargs):
 
     def get_metric(self):
         from seqeval.metrics import f1_score, precision_score, recall_score
-        metircs = {
+        metrics = {
             "precision": precision_score(self.gt_list, self.pred_list),
             "recall": recall_score(self.gt_list, self.pred_list),
             "hmean": f1_score(self.gt_list, self.pred_list),
         }
         self.reset()
-        return metircs
+        return metrics
 
     def reset(self):
         self.pred_list = []
diff --git a/test_tipc/docs/jeston_test_train_inference_python.md b/test_tipc/docs/jeston_test_train_inference_python.md
index d96505985e..9e9d15fb67 100644
--- a/test_tipc/docs/jeston_test_train_inference_python.md
+++ b/test_tipc/docs/jeston_test_train_inference_python.md
@@ -1,6 +1,6 @@
-# Jeston端基础训练预测功能测试
+# Jetson端基础训练预测功能测试
 
-Jeston端基础训练预测功能测试的主程序为`test_inference_inference.sh`,由于Jeston端CPU较差,Jeston只需要测试TIPC关于GPU和TensorRT预测推理的部分即可。
+Jetson端基础训练预测功能测试的主程序为`test_inference_inference.sh`,由于Jetson端CPU较差,Jetson只需要测试TIPC关于GPU和TensorRT预测推理的部分即可。
 
 ## 1. 测试结论汇总
@@ -42,7 +42,7 @@ Jeston端基础训练预测功能测试的主程序为`test_inference_inference.
 
 先运行`prepare.sh`准备数据和模型,然后运行`test_inference_inference.sh`进行测试,最终在```test_tipc/output```目录下生成`python_infer_*.log`格式的日志文件。
 
-`test_inference_inference.sh`仅有一个模式`whole_infer`,在Jeston端,仅需要测试预测推理的模式即可:
+`test_inference_inference.sh`仅有一个模式`whole_infer`,在Jetson端,仅需要测试预测推理的模式即可:
 
 ```
 - 模式3:whole_infer,不训练,全量数据预测,走通开源模型评估、动转静,检查inference model预测时间和精度;
@@ -51,7 +51,7 @@ bash test_tipc/prepare.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # 用法1:
 bash test_tipc/test_inference_inference.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer'
 # 用法2: 指定GPU卡预测,第三个传入参数为GPU卡号
-bash test_tipc/test_inference_jeston.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
+bash test_tipc/test_inference_jetson.sh ./test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_infer_python_jetson.txt 'whole_infer' '1'
 ```
 
 运行相应指令后,在`test_tipc/output`文件夹下自动会保存运行日志。如`whole_infer`模式下,会运行训练+inference的链条,因此,在`test_tipc/output`文件夹有以下文件:
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index c92e8e152a..ce4e2d92c2 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -193,7 +193,7 @@ def create_predictor(args, mode, logger):
         gpu_id = get_infer_gpuid()
         if gpu_id is None:
             logger.warning(
-                "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jeston."
+                "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jetson."
             )
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
diff --git a/tools/infer_e2e.py b/tools/infer_e2e.py
index f3d5712fdd..d3e6b28fca 100755
--- a/tools/infer_e2e.py
+++ b/tools/infer_e2e.py
@@ -104,7 +104,7 @@ def main():
             preds = model(images)
             post_result = post_process_class(preds, shape_list)
             points, strs = post_result['points'], post_result['texts']
-            # write resule
+            # write result
             dt_boxes_json = []
             for poly, str in zip(points, strs):
                 tmp_json = {"transcription": str}
diff --git a/tools/infer_vqa_token_ser_re.py b/tools/infer_vqa_token_ser_re.py
index 2c7cb5e425..6210f7f3c2 100755
--- a/tools/infer_vqa_token_ser_re.py
+++ b/tools/infer_vqa_token_ser_re.py
@@ -193,7 +193,7 @@ def preprocess():
             result = result[0]
             fout.write(img_path + "\t" + json.dumps(
                 {
-                    "ser_resule": result,
+                    "ser_result": result,
                 }, ensure_ascii=False) + "\n")
             img_res = draw_re_results(img_path, result)
             cv2.imwrite(save_img_path, img_res)