From e5d983d92fe0046aa3211c591f73f50e522911db Mon Sep 17 00:00:00 2001
From: Adam Hines
Date: Mon, 25 Mar 2024 10:00:44 +1000
Subject: [PATCH 1/2] Fixed nn.Sequentials to remove ReLU which was unnecessary

---
 docs/.gitignore                                              | 5 -----
 docs/.gitkeep                                                | 0
 docs/README.md                                               | 1 -
 main.py                                                      | 2 +-
 vprtempo/VPRTempo.py                                         | 6 ++----
 vprtempo/VPRTempoQuant.py                                    | 4 +---
 vprtempo/models/springfall_VPRTempo_IN3136_FN6272_DB500.pth  | 2 +-
 7 files changed, 5 insertions(+), 15 deletions(-)
 delete mode 100644 docs/.gitignore
 delete mode 100644 docs/.gitkeep
 delete mode 100644 docs/README.md

diff --git a/docs/.gitignore b/docs/.gitignore
deleted file mode 100644
index f40fbd8..0000000
--- a/docs/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-_site
-.sass-cache
-.jekyll-cache
-.jekyll-metadata
-vendor
diff --git a/docs/.gitkeep b/docs/.gitkeep
deleted file mode 100644
index e69de29..0000000
diff --git a/docs/README.md b/docs/README.md
deleted file mode 100644
index 5e82132..0000000
--- a/docs/README.md
+++ /dev/null
@@ -1 +0,0 @@
-I am writing something as a test. Hello world.
diff --git a/main.py b/main.py
index 5300ae5..933b73b 100644
--- a/main.py
+++ b/main.py
@@ -226,7 +226,7 @@ def parse_network(use_quantize=False, train_new_model=False):
                         help="Ground truth tolerance for matching")
 
     # Define training parameters
-    parser.add_argument('--filter', type=int, default=1,
+    parser.add_argument('--filter', type=int, default=8,
                         help="Images to skip for training and/or inferencing")
     parser.add_argument('--epoch', type=int, default=4,
                         help="Number of epochs to train the model")
diff --git a/vprtempo/VPRTempo.py b/vprtempo/VPRTempo.py
index 691ffdf..f4e2480 100644
--- a/vprtempo/VPRTempo.py
+++ b/vprtempo/VPRTempo.py
@@ -131,11 +131,9 @@ def evaluate(self, models, test_loader):
         for model in models:
             self.inferences.append(nn.Sequential(
                 model.feature_layer.w,
-                nn.Hardtanh(0, 0.9),
-                nn.ReLU(),
+                nn.Hardtanh(0,1.0),
                 model.output_layer.w,
-                nn.Hardtanh(0, 0.9),
-                nn.ReLU()
+                nn.Hardtanh(0,1.0)
             ))
             self.inferences[-1].to(torch.device(self.device))
         # Initiliaze the output spikes variable
diff --git a/vprtempo/VPRTempoQuant.py b/vprtempo/VPRTempoQuant.py
index a752574..47e1591 100644
--- a/vprtempo/VPRTempoQuant.py
+++ b/vprtempo/VPRTempoQuant.py
@@ -134,10 +134,8 @@ def evaluate(self, models, test_loader, layers=None):
             self.inferences.append(nn.Sequential(
                 model.feature_layer.w,
                 nn.Hardtanh(0, maxSpike),
-                nn.ReLU(),
                 model.output_layer.w,
-                nn.Hardtanh(0, maxSpike),
-                nn.ReLU()
+                nn.Hardtanh(0, maxSpike)
             ))
         # Initialize the tqdm progress bar
         pbar = tqdm(total=self.query_places,
diff --git a/vprtempo/models/springfall_VPRTempo_IN3136_FN6272_DB500.pth b/vprtempo/models/springfall_VPRTempo_IN3136_FN6272_DB500.pth
index 1631641..7b0c057 100644
--- a/vprtempo/models/springfall_VPRTempo_IN3136_FN6272_DB500.pth
+++ b/vprtempo/models/springfall_VPRTempo_IN3136_FN6272_DB500.pth
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:220b84fbc7c1efdfd3243137ee60cc9b54c2e609b09c785deb517a950214902e
+oid sha256:aca23133a42c38d41488cc53af2456c7056f5a83d5f07a83744e7813a1cd4118
 size 91249119

From a42fada54ee34be3ac7b9d1c63c24eaa99f4281f Mon Sep 17 00:00:00 2001
From: Adam Hines
Date: Mon, 25 Mar 2024 10:03:30 +1000
Subject: [PATCH 2/2] Update readme

---
 README.md | 17 +++++++----------
 1 file changed, 7 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 3b13f4e..0bb8e92 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
 ![GitHub repo size](https://img.shields.io/github/repo-size/QVPR/VPRTempo.svg?style=flat-square)
 [![PyPI downloads](https://img.shields.io/pypi/dw/VPRTempo.svg)](https://pypistats.org/packages/vprtempo)
 
-This repository contains code for VPRTempo, a spiking neural network that uses temporally encoding to perform visual place recognition tasks. The network is based off of [BLiTNet](https://arxiv.org/pdf/2208.01204.pdf) and adapted to the [VPRSNN](https://github.com/QVPR/VPRSNN) framework.
+This repository contains code for [VPRTempo](https://vprtempo.github.io), a spiking neural network that uses temporal encoding to perform visual place recognition tasks. The network is based off of [BLiTNet](https://arxiv.org/pdf/2208.01204.pdf) and adapted to the [VPRSNN](https://github.com/QVPR/VPRSNN) framework.

 VPRTempo method diagram
@@ -31,15 +31,14 @@ To use VPRTempo, please follow the instructions below for installation and usage.
 
 ## License & Citation
 This repository is licensed under the [MIT License](./LICENSE)
-If you use our code, please cite the following [paper](https://arxiv.org/abs/2309.10225):
+If you use our code, please cite our IEEE ICRA [paper](https://arxiv.org/abs/2309.10225):
 ```
-@misc{hines2023vprtempo,
+@inproceedings{hines2024vprtempo,
       title={VPRTempo: A Fast Temporally Encoded Spiking Neural Network for Visual Place Recognition}, 
       author={Adam D. Hines and Peter G. Stratton and Michael Milford and Tobias Fischer},
-      year={2023},
-      eprint={2309.10225},
-      archivePrefix={arXiv},
-      primaryClass={cs.RO}
+      year={2024},
+      booktitle={2024 IEEE International Conference on Robotics and Automation (ICRA)}
+}
 ```
 
 ## Installation and setup
@@ -106,9 +105,7 @@ For convenience, all data should be organised in the `./dataset` folder in the f
 |--winter
 ```
 ### Custom Datasets
-To define your own custom dataset to use with VPRTempo, you will need to follow the conventions for [PyTorch Datasets & Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html). We provide a simple script `./dataset/custom_dataset.py` which will rename images in user defined directories and generate the necessary `.csv` file to load into VPRTempo.
-
-To learn how to use custom datasets, please see the [CustomDatasets.ipynb](https://github.com/AdamDHines/VPRTempo-quant/tree/main/tutorials) tutorial.
+To define your own custom dataset to use with VPRTempo, you will need to follow the conventions for [PyTorch Datasets & Dataloaders](https://pytorch.org/tutorials/beginner/basics/data_tutorial.html).
 
 ## Usage
 Running VPRTempo and VPRTempoQuant is handlded by `main.py`, which can be operated either through the command terminal or directly running the script. See below for more details.
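
Note on patch 1: `nn.Hardtanh` with `min_val=0` already clamps negative activations to zero, so the trailing `nn.ReLU()` modules in the inference `nn.Sequential` stacks were no-ops; removing them does not change the computed spike outputs (the only numerical change in `VPRTempo.py` comes from raising the upper clamp from 0.9 to 1.0). A minimal sketch of the equivalence and of the resulting inference stack, using hypothetical `nn.Linear` layers as stand-ins for the trained `feature_layer.w` and `output_layer.w` modules rather than the repository's actual code:

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
x = torch.randn(4, 8)

# Hardtanh(min_val=0, max_val=1.0) already restricts outputs to [0, 1],
# so a ReLU applied afterwards changes nothing.
clamp = nn.Hardtanh(0, 1.0)
assert torch.equal(nn.ReLU()(clamp(x)), clamp(x))

# Inference stack shaped like the patched evaluate(): weights -> clamp -> weights -> clamp.
feature_w = nn.Linear(8, 16, bias=False)   # stand-in for model.feature_layer.w
output_w = nn.Linear(16, 4, bias=False)    # stand-in for model.output_layer.w
inference = nn.Sequential(feature_w, nn.Hardtanh(0, 1.0), output_w, nn.Hardtanh(0, 1.0))
print(inference(x).shape)  # torch.Size([4, 4])
```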
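
The revised Custom Datasets section defers to the standard PyTorch `Dataset` and `DataLoader` conventions. The sketch below is a rough illustration of those conventions only; the CSV layout, column name, folder paths, and 56x56 image size are assumptions (56x56 matches the IN3136 input size in the bundled model's filename), not the repository's actual loader.

```python
import os

import pandas as pd
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms

class CustomPlaceDataset(Dataset):
    """Illustrative place-recognition dataset: one CSV row per image."""

    def __init__(self, csv_file, img_dir, transform=None):
        self.entries = pd.read_csv(csv_file)        # assumed column: image_name
        self.img_dir = img_dir
        self.transform = transform or transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize((56, 56)),            # assumed input size
            transforms.ToTensor(),
        ])

    def __len__(self):
        return len(self.entries)

    def __getitem__(self, idx):
        name = self.entries.iloc[idx, 0]
        image = Image.open(os.path.join(self.img_dir, name))
        return self.transform(image), idx           # index doubles as the place label

# Example usage (paths are illustrative):
# loader = DataLoader(CustomPlaceDataset("./dataset/custom.csv", "./dataset/custom"), batch_size=1)
```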