Commit

v0.0.22
ffiirree committed Nov 22, 2022
1 parent 3ab1fb5 commit 28256a1
Showing 63 changed files with 1,697 additions and 1,644 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -3,4 +3,5 @@ __pycache__/
.vscode
test.py
build/
-*.egg-info/
+*.egg-info/
+*.pth
108 changes: 108 additions & 0 deletions adversarial_attack.py
@@ -0,0 +1,108 @@
import json
import argparse
import torch

from tqdm import tqdm

from cvm.utils import *


def parse_args():
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
    parser.add_argument('--dataset', type=str, default='ImageNet', choices=list_datasets() + ['ImageNet'],
                        help='name of the dataset. (default: ImageNet)')
    parser.add_argument('--data-dir', type=str, default='/datasets/ILSVRC2012',
                        help='path to the ImageNet dataset.')
    parser.add_argument('--model', '-m', type=str, default='mobilenet_v1_x1_0', choices=list_models(),
                        help='type of model to use. (default: mobilenet_v1_x1_0)')
    parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
                        help='number of label classes')
    parser.add_argument('--model-path', type=str, default=None)
    parser.add_argument('--workers', '-j', type=int, default=8, metavar='N',
                        help='number of data loading workers per GPU. (default: 8)')
    parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                        help='mini-batch size, this is the total batch size of all GPUs. (default: 256)')
    parser.add_argument('--crop-size', type=int, default=224)
    parser.add_argument('--resize-size', type=int, default=232)
    parser.add_argument('--dali', action='store_true', help='use nvidia dali.')
    parser.add_argument('--dali-cpu', action='store_true',
                        help='runs CPU based version of DALI pipeline. (default: false)')
    parser.add_argument('--method', type=str, default='PGD', choices=['FGSM', 'PGD'])
    parser.add_argument('--attack-steps', type=int, default=3, metavar='N')
    parser.add_argument('--attack-eps', type=float, default=0.03, metavar='E')
    parser.add_argument('--attack-alpha', type=float, default=0.01, metavar='A')
    return parser.parse_args()


if __name__ == '__main__':
    assert torch.cuda.is_available(), 'CUDA IS NOT AVAILABLE!!'
    torch.backends.cudnn.benchmark = True

    args = parse_args()
    init_distributed_mode(args)

    if args.local_rank == 0:
        print(json.dumps(vars(args), indent=4))

    model = create_model(
        args.model,
        pretrained=True,
        thumbnail=(args.crop_size < 128),
        pth=args.model_path,
        distributed=args.distributed,
        local_rank=args.local_rank,
        num_classes=args.num_classes
    )

    val_loader = create_loader(
        args.dataset,
        root=args.data_dir,
        is_training=False,
        batch_size=args.batch_size,
        val_resize_size=args.resize_size,
        val_crop_size=args.crop_size,
        crop_size=args.crop_size,
        workers=args.workers,
        dali=args.dali,
        dali_cpu=args.dali_cpu,
        distributed=args.distributed,
        local_rank=args.local_rank
    )

    if args.local_rank == 0:
        if val_loader.type != "dali":
            print(f'Validation: \n{val_loader.dataset.transform}')

    attacker = None
    if args.method == 'FGSM':
        attacker = FGSM(model, args.attack_eps)
    elif args.method == 'PGD':
        attacker = PGD(model, args.attack_eps, args.attack_steps, args.attack_alpha)
    else:
        raise ValueError(f'Invalid attacker: {args.method}.')

    if args.local_rank == 0:
        print(f'Attacker: {attacker}')

    top1 = AverageMeter()
    top5 = AverageMeter()
    model.eval()
    for (images, target) in tqdm(val_loader, desc='validating', unit='batch'):

        if args.attack_target >= 0:
            target.fill_(args.attack_target)

        images = attacker.perturb(images, target, args.attack_target >= 0)

        with torch.inference_mode():
            output = model(images)

            acc1, acc5 = accuracy(output, target, topk=(1, 5))

            top1.update(acc1.item(), images.size(0))
            top5.update(acc5.item(), images.size(0))

    acc = f'\n -- top1={top1.avg:6.3f}, top5={top5.avg:6.3f}\n'
    if args.local_rank == 0:
        print(acc)
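
For orientation, here is a minimal sketch of what the FGSM and PGD attackers constructed above conventionally compute. The cvm.utils implementations are not part of this diff, so the class layout, the perturb(images, target, targeted) signature, and the clamp to [0, 1] are assumptions based on the standard formulations (Goodfellow et al. for FGSM, Madry et al. for PGD), not the repository's code; a loader that normalizes with mean/std would need a different projection range.

import torch
import torch.nn.functional as F


class FGSM:
    """One-step attack: x' = x + eps * sign(grad_x loss). (Sketch, not the cvm implementation.)"""

    def __init__(self, model, eps):
        self.model, self.eps = model, eps

    def perturb(self, images, target, targeted=False):
        images = images.clone().detach().requires_grad_(True)
        loss = F.cross_entropy(self.model(images), target)
        grad, = torch.autograd.grad(loss, images)
        # A targeted attack steps *against* the gradient, toward the target class.
        step = -self.eps if targeted else self.eps
        return (images + step * grad.sign()).clamp(0, 1).detach()


class PGD:
    """Iterated FGSM with projection back into the eps-ball around the clean input."""

    def __init__(self, model, eps, steps, alpha):
        self.model, self.eps, self.steps, self.alpha = model, eps, steps, alpha

    def perturb(self, images, target, targeted=False):
        x_adv = images.clone().detach()
        for _ in range(self.steps):
            x_adv = x_adv.detach().requires_grad_(True)
            loss = F.cross_entropy(self.model(x_adv), target)
            grad, = torch.autograd.grad(loss, x_adv)
            step = -self.alpha if targeted else self.alpha
            x_adv = x_adv.detach() + step * grad.sign()
            x_adv = images + (x_adv - images).clamp(-self.eps, self.eps)  # project into the eps-ball
            x_adv = x_adv.clamp(0, 1)  # assumes inputs in [0, 1]
        return x_adv.detach()

Note that the script calls perturb outside the torch.inference_mode() block, which is what lets these attackers backpropagate through the model.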
2 changes: 1 addition & 1 deletion cvm/models/alexnet.py
@@ -1,6 +1,6 @@
import torch
import torch.nn as nn
-from .core import export, load_from_local_or_url
+from .utils import export, load_from_local_or_url
from typing import Any


28 changes: 15 additions & 13 deletions cvm/models/convmixer.py
@@ -1,7 +1,9 @@
from functools import partial
import torch
import torch.nn as nn
-from .core import blocks, Conv2dBlock, Conv2d1x1Block, export, load_from_local_or_url
+
+from .ops import blocks
+from .utils import export, config, load_from_local_or_url
from typing import Any


@@ -29,13 +31,13 @@ def __init__(
        super().__init__()

        self.features = nn.Sequential(
-            Conv2dBlock(in_channels, h, patch_size, stride=patch_size),
+            blocks.Conv2dBlock(in_channels, h, patch_size, stride=patch_size),

            *[nn.Sequential(
                Residual(
-                    Conv2dBlock(h, h, kernel_size, groups=h, padding='same')
+                    blocks.Conv2dBlock(h, h, kernel_size, groups=h, padding='same')
                ),
-                Conv2d1x1Block(h, h)
+                blocks.Conv2d1x1Block(h, h)
            ) for _ in range(depth)]
        )

@@ -70,54 +72,54 @@ def _conv_mixer(


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_1536_20_k9_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(1536, 20, 9, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_1536_20_k3_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(1536, 20, 3, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_1024_20_k9_p14(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(1024, 20, 9, 14, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_1024_16_k9_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(1024, 16, 9, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_1024_12_k8_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(1024, 12, 8, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(partial(nn.ReLU, inplace=True))
+@blocks.activation(partial(nn.ReLU, inplace=True))
def conv_mixer_768_32_k7_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(768, 32, 7, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(partial(nn.ReLU, inplace=True))
+@blocks.activation(partial(nn.ReLU, inplace=True))
def conv_mixer_768_32_k3_p14(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(768, 32, 3, 14, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_512_16_k8_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(512, 16, 8, 7, pretrained, pth, progress, **kwargs)


@export
-@blocks.nonlinear(nn.GELU)
+@blocks.activation(nn.GELU)
def conv_mixer_512_12_k8_p7(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    return _conv_mixer(512, 12, 8, 7, pretrained, pth, progress, **kwargs)
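
The nonlinear → activation rename above changes the decorator that fixes the default nonlinearity used inside the conv blocks for each model variant. The decorator's source is not in this diff; the sketch below is one plausible mechanism (a module-level default swapped for the duration of the model builder), with all internal names assumed rather than taken from cvm:

import torch.nn as nn
from functools import wraps
from contextlib import contextmanager

_DEFAULT_ACTIVATION = nn.ReLU  # what blocks.Conv2dBlock would fall back to


@contextmanager
def _use_activation(fn):
    # Temporarily swap the module-level default, restoring it afterwards.
    global _DEFAULT_ACTIVATION
    saved, _DEFAULT_ACTIVATION = _DEFAULT_ACTIVATION, fn
    try:
        yield
    finally:
        _DEFAULT_ACTIVATION = saved


def activation(fn):
    """Decorator: run the wrapped model factory with `fn` as the default activation."""
    def decorator(builder):
        @wraps(builder)
        def wrapper(*args, **kwargs):
            with _use_activation(fn):
                return builder(*args, **kwargs)
        return wrapper
    return decorator

A mechanism like this would explain why @blocks.activation(nn.GELU) sits between @export and the def: the activation is fixed per variant instead of being threaded through every block constructor.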
14 changes: 8 additions & 6 deletions cvm/models/convnext.py
@@ -8,7 +8,9 @@
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.normalization import LayerNorm
-from .core import blocks, export, config, load_from_local_or_url
+
+from .ops import blocks
+from .utils import export, config, load_from_local_or_url
from typing import Any, OrderedDict, List


@@ -166,39 +168,39 @@ def forward(self, x):


@export
-def convnext_tiny(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
+def convnext_t(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    model = ConvNeXt(layers=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model


@export
-def convnext_small(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
+def convnext_s(pretrained: bool = False, pth: str = None, progress: bool = True, **kwargs: Any):
    model = ConvNeXt(layers=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model


@export
-def convnext_base(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
+def convnext_b(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
    model = ConvNeXt(layers=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model


@export
-def convnext_large(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
+def convnext_l(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
    model = ConvNeXt(layers=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model


@export
-def convnext_xlarge(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
+def convnext_xl(pretrained: bool = False, in_22k=False, pth: str = None, progress: bool = True, **kwargs: Any):
    model = ConvNeXt(layers=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
    if pretrained:
        load_from_local_or_url(model, pth, kwargs.get('url', None), progress)
    return model
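
Because @export registers each factory under its function name, the tiny/small/base/large/xlarge → t/s/b/l/xl renames are user-visible: any script that resolves ConvNeXt models by string needs the new names. A minimal usage sketch, assuming create_model and list_models behave as they do in adversarial_attack.py above:

from cvm.utils import create_model, list_models

# The long names are gone after this commit; the short ones resolve instead.
assert 'convnext_t' in list_models()

model = create_model('convnext_t', pretrained=False, num_classes=1000)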
4 changes: 0 additions & 4 deletions cvm/models/core/__init__.py

This file was deleted.
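
Taken together with the import edits above, deleting cvm/models/core/__init__.py completes a split of the old core package: building blocks move to .ops and the export/config/loader helpers to .utils. A hedged sketch of the migration for downstream code, with the absolute module paths inferred from the relative imports in this diff:

# Before this commit:
# from cvm.models.core import blocks, export, load_from_local_or_url

# After this commit:
from cvm.models.ops import blocks
from cvm.models.utils import export, load_from_local_or_url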

