
How to inference with weights finish_converted.hdf5 ? #16

Open · leoluopy opened this issue Dec 29, 2021 · 5 comments

leoluopy commented Dec 29, 2021

Hi @DingXiaoH,
Thanks for your great work.
I want to check whether the accuracy of finish_converted.hdf5 has dropped after folding the convs and fusing the BN layers, so I need to load finish_converted.hdf5 into an engine or model and run inference to evaluate it.
Do you have any idea how to do this?

leoluopy (Author) commented Jan 4, 2022

Hi, I am here again. A tip for anyone looking for the answer to this question:
the code below compares the evaluation results of the original weights and the folded weights:

import os

from tensorboardX import SummaryWriter

from base_config import get_baseconfig_by_epoch
from base_model.stagewise_resnet import create_SRC56
from builder import ConvBuilder
from constants import LRSchedule, rc_origin_deps_flattened, rc_succeeding_strategy, rc_pacesetter_dict, \
    rc_internal_layers
from data.data_factory import create_dataset
from model_map import get_dataset_name_by_model_name
from ndp_test import val_during_train
from rr.resrep_builder import ResRepBuilder
from rr.resrep_config import ResRepConfig
from rr.resrep_scripts import calculate_rc56_flops
from rr.resrep_train import get_optimizer, get_criterion
from utils.engine import Engine
from utils.lr_scheduler import get_lr_scheduler


def load_model_with_compactor(hdf5_file, with_compactor=True):
    """Build an SRC56 model and load `hdf5_file` from cfg.output_dir.

    with_compactor=True keeps the compactor layers (for the original weights);
    with_compactor=False builds the deploy architecture matching the folded weights.
    """
    network_type = "src56"
    weight_decay_strength = 1e-4
    batch_size = 64
    deps = rc_origin_deps_flattened(9)
    succeeding_strategy = rc_succeeding_strategy(9)
    pacesetter_dict = rc_pacesetter_dict(9)
    flops_func = calculate_rc56_flops
    lrs = LRSchedule(base_lr=0.01, max_epochs=480, lr_epoch_boundaries=None, lr_decay_factor=None,
                     linear_final_lr=None, cosine_minimum=0)
    target_layers = rc_internal_layers(9)

    weight_decay_bias = 0
    warmup_factor = 0

    cfg = get_baseconfig_by_epoch(network_type=network_type,
                                  dataset_name=get_dataset_name_by_model_name(network_type), dataset_subset='train',
                                  global_batch_size=batch_size, num_node=1,
                                  weight_decay=weight_decay_strength, optimizer_type='sgd', momentum=0.9,
                                  max_epochs=lrs.max_epochs, base_lr=lrs.base_lr,
                                  lr_epoch_boundaries=lrs.lr_epoch_boundaries, cosine_minimum=lrs.cosine_minimum,
                                  lr_decay_factor=lrs.lr_decay_factor,
                                  warmup_epochs=0, warmup_method='linear', warmup_factor=warmup_factor,
                                  ckpt_iter_period=40000, tb_iter_period=100, output_dir="compare_dir",
                                  tb_dir="compare_dir", save_weights=None, val_epoch_period=2,
                                  linear_final_lr=lrs.linear_final_lr,
                                  weight_decay_bias=weight_decay_bias, deps=deps)
    resrep_config = ResRepConfig(target_layers=target_layers, succeeding_strategy=succeeding_strategy,
                                 pacesetter_dict=pacesetter_dict, lasso_strength=1e-4,
                                 flops_func=flops_func, flops_target=0.471, mask_interval=200,
                                 compactor_momentum=0.99, before_mask_iters=5 * 50000 // batch_size,
                                 begin_granularity=4, weight_decay_on_compactor=False, num_at_least=1)
    if with_compactor:
        # Training-time architecture: convs are followed by compactor layers.
        builder = ResRepBuilder(cfg, resrep_config)
    else:
        # Deploy architecture: compactors folded away, matching the folded weights.
        builder = ResRepBuilder(cfg, resrep_config)
        builder.mode = 'deploy'
        # builder = ConvBuilder(cfg)

    model = create_SRC56(cfg, builder)

    with Engine(local_rank=0) as engine:
        optimizer = get_optimizer(cfg, resrep_config, model,
                                  no_l2_keywords=[], use_nesterov=False,
                                  keyword_to_lr_mult=None)
        scheduler = get_lr_scheduler(cfg, optimizer)
        # --------------------------------- done -------------------------------

        engine.register_state(
            scheduler=scheduler, model=model, optimizer=optimizer)

        engine.load_hdf5(os.path.join(cfg.output_dir, hdf5_file), load_weights_keyword=None)
        return engine, cfg


if __name__ == '__main__':
    # engine, cfg = load_model_with_compactor('finish.hdf5')  # original weights, with compactors
    engine, cfg = load_model_with_compactor('folded.hdf5', with_compactor=False)  # folded weights

    engine.setup_log(
        name='train', log_dir=cfg.output_dir, file_name='log.txt')
    engine.state.model.eval()
    val_data = create_dataset(cfg.dataset_name, 'val',
                              global_batch_size=100, distributed=False)

    tb_tags = ['Top1-Acc', 'Top5-Acc', 'Loss']
    descrip_str = 'Epoch-{}/{}'.format(0, cfg.max_epochs)
    tb_writer = SummaryWriter(cfg.tb_dir)
    criterion = get_criterion(cfg).cuda()
    model = engine.state.model.cuda()
    val_during_train(epoch=-1, iteration=0, tb_tags=tb_tags, engine=engine, model=model,
                     val_data=val_data, criterion=criterion, descrip_str=descrip_str,
                     dataset_name=cfg.dataset_name, test_batch_size=100, tb_writer=tb_writer)
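
As an extra sanity check, you can also load both checkpoints and compare raw outputs on a single batch. This is a minimal sketch of my own, not code from the repo; it assumes create_dataset returns a loader that yields (images, labels) batches:

import torch

def compare_logits(engine_a, engine_b, val_data):
    # Put both models on the GPU in eval mode.
    model_a = engine_a.state.model.cuda().eval()
    model_b = engine_b.state.model.cuda().eval()
    images, _ = next(iter(val_data))  # assumption: the loader yields (images, labels)
    images = images.cuda()
    with torch.no_grad():
        # Largest absolute difference between the two models' logits on this batch.
        diff = (model_a(images) - model_b(images)).abs().max().item()
    print('max |logit_a - logit_b| =', diff)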

optyang commented Feb 10, 2022

> (quoting @leoluopy's comment and script above)

Thank you for sharing! Is the inference meant for the unpruned model or the pruned model?

leoluopy (Author) commented Feb 10, 2022

If with_compactor=True, it's the unpruned model.
If with_compactor=False, the weights are folded, i.e. the pruned model.
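
So, with the script above and the file names used earlier in this thread, evaluating both looks like:

# Unpruned model: compactors present, weights from the end of ResRep training.
engine_full, cfg = load_model_with_compactor('finish.hdf5', with_compactor=True)

# Pruned (folded) model: deploy architecture, compactors fused into the convs.
engine_fold, cfg = load_model_with_compactor('folded.hdf5', with_compactor=False)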

optyang commented Feb 10, 2022

Thank you! Is the val acc of the model with the weights folded really the same as the val acc of the compressed model in which the filters are actually removed (rather than just folded)?

leoluopy (Author)

Yes, the accuracy is the same. After folding, there are lots of channels with all-zero weights, which can be removed with no harm. See the paper for details.
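
If you want to check this yourself, here is a rough sketch of my own (not from the repo) that counts all-zero output channels in a folded checkpoint. It assumes the .hdf5 stores conv kernels as 4-D datasets in (out_channels, in_channels, kh, kw) order:

import h5py
import numpy as np

def count_zero_channels(hdf5_path, eps=1e-5):
    # Walk every dataset in the file and report conv kernels whose output
    # channels have (near-)zero L1 norm, i.e. channels that folding zeroed out.
    def visit(name, obj):
        if isinstance(obj, h5py.Dataset):
            w = np.asarray(obj)
            if w.ndim == 4:  # conv kernel, assumed (out, in, kh, kw)
                norms = np.abs(w.reshape(w.shape[0], -1)).sum(axis=1)
                n_zero = int((norms < eps).sum())
                if n_zero > 0:
                    print('{}: {}/{} zero output channels'.format(name, n_zero, w.shape[0]))
    with h5py.File(hdf5_path, 'r') as f:
        f.visititems(visit)

count_zero_channels('folded.hdf5')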
