diff --git a/.gitignore b/.gitignore index 6056764..9a70cc7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,18 +1,10 @@ __pycache__/ -.ipynb_checkpoints/ -src/__pycache__/ -dataset/conv/ -dataset/Dusk/ -dataset/fall/ -dataset/model/ -dataset/output_database/ -dataset/output_query/ -dataset/Rain/ -dataset/spring/ -dataset/summer/ -dataset/Sun/ -dataset/winter/ -dataset/event.csv/ -models/VPRTempo78415685001.pth -models/VPRTempoQuant78415685001.pth -VPRTempo.egg-info/ +.pytest_cache/ +vprtempo/__pycache__/ +vprtempo/dataset/fall/ +vprtempo/dataset/spring/ +vprtempo/dataset/summer/ +vprtempo/dataset/winter/ +vprtempo/dataset/event.csv +vprtempo/output/ +vprtempo/src/__pycache__/ diff --git a/README.md b/README.md index 4244cda..a22dcc7 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Once downloaded, please install the required dependencies to run the network thr Dependencies for VPRTempo can downloaded from our [PyPi package](https://pypi.org/project/VPRTempo/). ```python -pip3 install VPRTempo +pip install vprtempo ``` If you wish to enable CUDA, please follow the instructions on the [PyTorch - Get Started](https://pytorch.org/get-started/locally/) page to install the required software versions for your hardware and operating system. 
diff --git a/dataset/event.csv b/dataset/event.csv deleted file mode 100644 index d39b74a..0000000 --- a/dataset/event.csv +++ /dev/null @@ -1,160 +0,0 @@ -Image_names,Index -match_0245.png,0 -match_0247.png,1 -match_0248.png,2 -match_0249.png,3 -match_0250.png,4 -match_0251.png,5 -match_0253.png,6 -match_0256.png,7 -match_0329.png,8 -match_0332.png,9 -match_0333.png,10 -match_0334.png,11 -match_0337.png,12 -match_0338.png,13 -match_0345.png,14 -match_0346.png,15 -match_0347.png,16 -match_0348.png,17 -match_0349.png,18 -match_0350.png,19 -match_0351.png,20 -match_0352.png,21 -match_0354.png,22 -match_0355.png,23 -match_0356.png,24 -match_0357.png,25 -match_0358.png,26 -match_0359.png,27 -match_0361.png,28 -match_0362.png,29 -match_0363.png,30 -match_0364.png,31 -match_0365.png,32 -match_0366.png,33 -match_0367.png,34 -match_0368.png,35 -match_0369.png,36 -match_0370.png,37 -match_0371.png,38 -match_0372.png,39 -match_0373.png,40 -match_0374.png,41 -match_0375.png,42 -match_0376.png,43 -match_0377.png,44 -match_0378.png,45 -match_0379.png,46 -match_0380.png,47 -match_0381.png,48 -match_0382.png,49 -match_0383.png,50 -match_0384.png,51 -match_0385.png,52 -match_0386.png,53 -match_0387.png,54 -match_0388.png,55 -match_0389.png,56 -match_0390.png,57 -match_0391.png,58 -match_0392.png,59 -match_0393.png,60 -match_0394.png,61 -match_0395.png,62 -match_0396.png,63 -match_0397.png,64 -match_0398.png,65 -match_0399.png,66 -match_0400.png,67 -match_0401.png,68 -match_0402.png,69 -match_0403.png,70 -match_0404.png,71 -match_0405.png,72 -match_0491.png,73 -match_0492.png,74 -match_0494.png,75 -match_0538.png,76 -match_0539.png,77 -match_0540.png,78 -match_0541.png,79 -match_0542.png,80 -match_0543.png,81 -match_0544.png,82 -match_0545.png,83 -match_0546.png,84 -match_0547.png,85 -match_0548.png,86 -match_0549.png,87 -match_0550.png,88 -match_0551.png,89 -match_0552.png,90 -match_0553.png,91 -match_0554.png,92 -match_0555.png,93 -match_0720.png,94 -match_0790.png,95 
-match_0792.png,96 -match_0793.png,97 -match_0794.png,98 -match_0795.png,99 -match_0796.png,100 -match_0797.png,101 -match_0798.png,102 -match_0799.png,103 -match_0800.png,104 -match_0801.png,105 -match_0802.png,106 -match_0803.png,107 -match_0804.png,108 -match_0805.png,109 -match_0806.png,110 -match_0807.png,111 -match_0808.png,112 -match_0809.png,113 -match_0810.png,114 -match_0811.png,115 -match_0812.png,116 -match_0813.png,117 -match_0814.png,118 -match_0815.png,119 -match_0816.png,120 -match_0817.png,121 -match_0818.png,122 -match_0819.png,123 -match_0820.png,124 -match_0821.png,125 -match_0822.png,126 -match_0823.png,127 -match_0824.png,128 -match_0825.png,129 -match_0826.png,130 -match_0827.png,131 -match_0828.png,132 -match_0829.png,133 -match_0830.png,134 -match_0831.png,135 -match_0832.png,136 -match_0833.png,137 -match_0834.png,138 -match_0835.png,139 -match_0840.png,140 -match_0880.png,141 -match_0881.png,142 -match_0882.png,143 -match_0883.png,144 -match_0884.png,145 -match_0885.png,146 -match_0886.png,147 -match_0887.png,148 -match_0888.png,149 -match_0889.png,150 -match_0890.png,151 -match_0911.png,152 -match_0914.png,153 -match_0919.png,154 -match_0921.png,155 -match_0922.png,156 -match_0924.png,157 -match_0925.png,158 diff --git a/dataset/test/images-00202.png b/dataset/test/images-00202.png deleted file mode 100644 index 56ae0ca..0000000 Binary files a/dataset/test/images-00202.png and /dev/null differ diff --git a/dataset/test/images-07028.png b/dataset/test/images-07028.png deleted file mode 100755 index 9d5bc63..0000000 Binary files a/dataset/test/images-07028.png and /dev/null differ diff --git a/main.py b/main.py index a5cb8c0..753163e 100644 --- a/main.py +++ b/main.py @@ -23,17 +23,16 @@ ''' Imports ''' -import argparse import sys -sys.path.append('./src') -sys.path.append('./vprtempo') +import argparse + import torch.quantization as quantization -from VPRTempoTrain import VPRTempoTrain, generate_model_name, check_pretrained_model, 
train_new_model -from VPRTempo import VPRTempo, run_inference -from VPRTempoQuantTrain import VPRTempoQuantTrain, generate_model_name_quant, train_new_model_quant -from VPRTempoQuant import VPRTempoQuant, run_inference_quant -from loggers import model_logger, model_logger_quant +from vprtempo.VPRTempo import VPRTempo, run_inference +from vprtempo.src.loggers import model_logger, model_logger_quant +from vprtempo.VPRTempoQuant import VPRTempoQuant, run_inference_quant +from vprtempo.VPRTempoQuantTrain import VPRTempoQuantTrain, generate_model_name_quant, train_new_model_quant +from vprtempo.VPRTempoTrain import VPRTempoTrain, generate_model_name, check_pretrained_model, train_new_model def initialize_and_run_model(args,dims): # If user wants to train a new network @@ -109,7 +108,7 @@ def parse_network(use_quantize=False, train_new_model=False): # Define the dataset arguments parser.add_argument('--dataset', type=str, default='nordland', help="Dataset to use for training and/or inferencing") - parser.add_argument('--data_dir', type=str, default='./dataset/', + parser.add_argument('--data_dir', type=str, default='./vprtempo/dataset/', help="Directory where dataset files are stored") parser.add_argument('--num_places', type=int, default=500, help="Number of places to use for training and/or inferencing") diff --git a/setup.py b/setup.py index 616f7b9..8552819 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ # define the setup setup( name="VPRTempo", - version="1.1.2", + version="1.1.3", description='VPRTempo: A Fast Temporally Encoded Spiking Neural Network for Visual Place Recognition', long_description=long_description, long_description_content_type='text/markdown', diff --git a/src/.DS_Store b/src/.DS_Store deleted file mode 100644 index 1e5c5df..0000000 Binary files a/src/.DS_Store and /dev/null differ diff --git a/vprtempo/VPRTempo.py b/vprtempo/VPRTempo.py index 18f4d17..6a0a2f0 100644 --- a/vprtempo/VPRTempo.py +++ b/vprtempo/VPRTempo.py @@ -26,22 +26,16 
@@ import os import torch -import gc -import sys -sys.path.append('./src') -sys.path.append('./models') -sys.path.append('./output') -sys.path.append('./dataset') - -import blitnet as bn + import numpy as np import torch.nn as nn +import vprtempo.src.blitnet as bn -from dataset import CustomImageDataset, ProcessImage -from torch.utils.data import DataLoader from tqdm import tqdm from prettytable import PrettyTable -from metrics import recallAtK +from torch.utils.data import DataLoader +from vprtempo.src.metrics import recallAtK +from vprtempo.src.dataset import CustomImageDataset, ProcessImage class VPRTempo(nn.Module): def __init__(self, dims, args=None, logger=None): @@ -60,7 +54,7 @@ def __init__(self, dims, args=None, logger=None): self.logger = logger # Set the dataset file - self.dataset_file = os.path.join('./dataset', self.dataset + '.csv') + self.dataset_file = os.path.join('./vprtempo/dataset', self.dataset + '.csv') # Layer dict to keep track of layer names and their order self.layer_dict = {} @@ -220,7 +214,7 @@ def run_inference(models, model_name): persistent_workers=True) # Load the model - models[0].load_model(models, os.path.join('./models', model_name)) + models[0].load_model(models, os.path.join('./vprtempo/models', model_name)) # Retrieve layer names for inference layer_names = list(models[0].layer_dict.keys()) diff --git a/vprtempo/VPRTempoQuant.py b/vprtempo/VPRTempoQuant.py index f616289..b667923 100644 --- a/vprtempo/VPRTempoQuant.py +++ b/vprtempo/VPRTempoQuant.py @@ -26,26 +26,17 @@ import os import torch -import subprocess -import sys -sys.path.append('./src') -sys.path.append('./models') -sys.path.append('./output') -sys.path.append('./dataset') - -import blitnet as bn + import numpy as np import torch.nn as nn -import torch.quantization as quantization +import vprtempo.src.blitnet as bn -from loggers import model_logger_quant -from VPRTempoQuantTrain import generate_model_name_quant -from dataset import CustomImageDataset, ProcessImage 
-from torch.utils.data import DataLoader -from torch.ao.quantization import QuantStub, DeQuantStub from tqdm import tqdm from prettytable import PrettyTable -from metrics import recallAtK +from torch.utils.data import DataLoader +from vprtempo.src.metrics import recallAtK +from torch.ao.quantization import QuantStub, DeQuantStub +from vprtempo.src.dataset import CustomImageDataset, ProcessImage #from main import parse_network @@ -59,7 +50,7 @@ def __init__(self, dims, args=None, logger=None): setattr(self, arg, getattr(args, arg)) setattr(self, 'dims', dims) # Set the dataset file - self.dataset_file = os.path.join('./dataset', self.dataset + '.csv') + self.dataset_file = os.path.join('./vprtempo/dataset', self.dataset + '.csv') # Set the model logger and return the device self.logger = logger @@ -240,7 +231,7 @@ def run_inference_quant(models, model_name, qconfig): persistent_workers=True) # Load the model - models[0].load_model(models, os.path.join('./models', model_name)) + models[0].load_model(models, os.path.join('./vprtempo/models', model_name)) # Use evaluate method for inference accuracy with torch.no_grad(): diff --git a/vprtempo/VPRTempoQuantTrain.py b/vprtempo/VPRTempoQuantTrain.py index 6c2448a..451420a 100644 --- a/vprtempo/VPRTempoQuantTrain.py +++ b/vprtempo/VPRTempoQuantTrain.py @@ -26,23 +26,17 @@ import os import torch -import gc -import sys -sys.path.append('./src') -sys.path.append('./models') -sys.path.append('./output') -sys.path.append('./dataset') -import blitnet as bn import numpy as np import torch.nn as nn +import vprtempo.src.blitnet as bn import torch.quantization as quantization import torchvision.transforms as transforms -from dataset import CustomImageDataset, ProcessImage +from tqdm import tqdm from torch.utils.data import DataLoader from torch.ao.quantization import QuantStub, DeQuantStub -from tqdm import tqdm +from vprtempo.src.dataset import CustomImageDataset, ProcessImage class VPRTempoQuantTrain(nn.Module): def __init__(self, 
args, dims, logger): @@ -61,7 +55,7 @@ def __init__(self, args, dims, logger): self.logger = logger # Set the dataset file - self.dataset_file = os.path.join('./dataset', self.dataset + '.csv') + self.dataset_file = os.path.join('./vprtempo/dataset', self.dataset + '.csv') # Add quantization stubs for Quantization Aware Training (QAT) self.quant = QuantStub() @@ -288,4 +282,4 @@ def train_new_model_quant(models, model_name, qconfig): # After training the current layer, add it to the list of trained layer # Save the model - model.save_model(trained_models,os.path.join('./models', model_name)) \ No newline at end of file + model.save_model(trained_models,os.path.join('./vprtempo/models', model_name)) \ No newline at end of file diff --git a/vprtempo/VPRTempoTrain.py b/vprtempo/VPRTempoTrain.py index 077ccf3..de3d449 100644 --- a/vprtempo/VPRTempoTrain.py +++ b/vprtempo/VPRTempoTrain.py @@ -25,23 +25,18 @@ ''' import os -import torch import gc -import sys -sys.path.append('./src') -sys.path.append('./models') -sys.path.append('./output') -sys.path.append('./dataset') +import torch -import blitnet as bn import numpy as np import torch.nn as nn +import vprtempo.src.blitnet as bn import torchvision.transforms as transforms -from loggers import model_logger -from dataset import CustomImageDataset, ProcessImage -from torch.utils.data import DataLoader from tqdm import tqdm +from torch.utils.data import DataLoader +from vprtempo.src.loggers import model_logger +from vprtempo.src.dataset import CustomImageDataset, ProcessImage class VPRTempoTrain(nn.Module): def __init__(self, args, dims, logger): @@ -59,7 +54,7 @@ def __init__(self, args, dims, logger): self.logger = logger # Set the dataset file - self.dataset_file = os.path.join('./dataset', self.dataset + '.csv') + self.dataset_file = os.path.join('./vprtempo/dataset', self.dataset + '.csv') # Layer dict to keep track of layer names and their order self.layer_dict = {} @@ -293,4 +288,4 @@ def train_new_model(models, 
model_name): for model in models: model.eval() # Save the model - model.save_model(models,os.path.join('./models', model_name)) \ No newline at end of file + model.save_model(models,os.path.join('./vprtempo/models', model_name)) \ No newline at end of file diff --git a/vprtempo/__init__.py b/vprtempo/__init__.py index 8aafc8b..d61c457 100644 --- a/vprtempo/__init__.py +++ b/vprtempo/__init__.py @@ -1,2 +1,6 @@ -from .VPRTempo import VPRTempo -from .VPRTempoQuantTrain import VPRTempoQuantTrain \ No newline at end of file +from . import dataset +from . import models +from . import output +from . import src + +__version__ = '1.1.3' \ No newline at end of file diff --git a/dataset/.DS_Store b/vprtempo/dataset/.DS_Store similarity index 100% rename from dataset/.DS_Store rename to vprtempo/dataset/.DS_Store diff --git a/dataset/nordland.csv b/vprtempo/dataset/nordland.csv similarity index 100% rename from dataset/nordland.csv rename to vprtempo/dataset/nordland.csv diff --git a/dataset/orc.csv b/vprtempo/dataset/orc.csv similarity index 100% rename from dataset/orc.csv rename to vprtempo/dataset/orc.csv diff --git a/models/.DS_Store b/vprtempo/models/.DS_Store similarity index 100% rename from models/.DS_Store rename to vprtempo/models/.DS_Store diff --git a/models/.gitkeep b/vprtempo/models/.gitkeep similarity index 100% rename from models/.gitkeep rename to vprtempo/models/.gitkeep diff --git a/output/.gitkeep b/vprtempo/src/__init__.py similarity index 100% rename from output/.gitkeep rename to vprtempo/src/__init__.py diff --git a/src/blitnet.py b/vprtempo/src/blitnet.py similarity index 100% rename from src/blitnet.py rename to vprtempo/src/blitnet.py diff --git a/src/dataset.py b/vprtempo/src/dataset.py similarity index 99% rename from src/dataset.py rename to vprtempo/src/dataset.py index 9ed9112..976dbf2 100644 --- a/src/dataset.py +++ b/vprtempo/src/dataset.py @@ -5,7 +5,6 @@ import pandas as pd import numpy as np import torch.nn.functional as F -import 
torch.quantization as tq from torchvision.io import read_image from torch.utils.data import Dataset diff --git a/src/loggers.py b/vprtempo/src/loggers.py similarity index 97% rename from src/loggers.py rename to vprtempo/src/loggers.py index eace9ef..3081a02 100644 --- a/src/loggers.py +++ b/vprtempo/src/loggers.py @@ -9,7 +9,7 @@ def model_logger(): Configure the model logger """ now = datetime.now() - output_folder = './output/' + now.strftime("%d%m%y-%H-%M-%S") + output_folder = './vprtempo/output/' + now.strftime("%d%m%y-%H-%M-%S") os.mkdir(output_folder) # Create the logger @@ -54,7 +54,7 @@ def model_logger_quant(): """ now = datetime.now() - output_folder = './output/' + now.strftime("%d%m%y-%H-%M-%S") + output_folder = './vprtempo/output/' + now.strftime("%d%m%y-%H-%M-%S") os.mkdir(output_folder) # Create the logger diff --git a/src/metrics.py b/vprtempo/src/metrics.py similarity index 100% rename from src/metrics.py rename to vprtempo/src/metrics.py diff --git a/src/nordland.py b/vprtempo/src/nordland.py similarity index 100% rename from src/nordland.py rename to vprtempo/src/nordland.py