[Feature] Add HyperBand and RandomSearch from HpBandSter #343

Merged 2 commits on Aug 23, 2022
6 changes: 3 additions & 3 deletions federatedscope/autotune/algos.py
@@ -70,9 +70,9 @@ def get_scheduler(init_cfg):
         init_cfg (federatedscope.core.configs.config.CN): configuration.
     """
 
-    if init_cfg.hpo.scheduler == 'rs':
-        scheduler = ModelFreeBase(init_cfg)
-    elif init_cfg.hpo.scheduler in ['sha', 'bo_kde', 'bohb', 'bo_gp', 'bo_rf']:
+    if init_cfg.hpo.scheduler in [
+            'sha', 'rs', 'bo_kde', 'bohb', 'hb', 'bo_gp', 'bo_rf'
+    ]:
         scheduler = SuccessiveHalvingAlgo(init_cfg)
     # elif init_cfg.hpo.scheduler == 'pbt':
     #     scheduler = PBT(init_cfg)
56 changes: 37 additions & 19 deletions federatedscope/autotune/hpbandster.py
@@ -7,7 +7,7 @@
 import ConfigSpace as CS
 import hpbandster.core.nameserver as hpns
 from hpbandster.core.worker import Worker
-from hpbandster.optimizers import BOHB
+from hpbandster.optimizers import BOHB, HyperBand, RandomSearch
 from hpbandster.optimizers.iterations import SuccessiveHalving
 
 from federatedscope.autotune.utils import eval_in_fs
@@ -16,31 +16,41 @@
 logger = logging.getLogger(__name__)
 
 
+def clear_cache(working_folder):
+    # Clear cached ckpt
+    for name in os.listdir(working_folder):
+        if name.endswith('.pth'):
+            os.remove(osp(working_folder, name))
+
+
+class MyRandomSearch(RandomSearch):
+    def __init__(self, working_folder, **kwargs):
+        self.working_folder = working_folder
+        super(MyRandomSearch, self).__init__(**kwargs)
+
+
 class MyBOHB(BOHB):
     def __init__(self, working_folder, **kwargs):
         self.working_folder = working_folder
         super(MyBOHB, self).__init__(**kwargs)
 
     def get_next_iteration(self, iteration, iteration_kwargs={}):
-        # number of 'SH rungs'
-        s = self.max_SH_iter - 1 - (iteration % self.max_SH_iter)
-        # number of configurations in that bracket
-        n0 = int(np.floor((self.max_SH_iter) / (s + 1)) * self.eta**s)
-        ns = [max(int(n0 * (self.eta**(-i))), 1) for i in range(s + 1)]
         if os.path.exists(self.working_folder):
-            self.clear_cache()
-        return (SuccessiveHalving(
-            HPB_iter=iteration,
-            num_configs=ns,
-            budgets=self.budgets[(-s - 1):],
-            config_sampler=self.config_generator.get_config,
-            **iteration_kwargs))
+            clear_cache(self.working_folder)
+        return super(MyBOHB, self).get_next_iteration(iteration,
+                                                      iteration_kwargs)
 
 
-    def clear_cache(self):
-        # Clear cached ckpt
-        for name in os.listdir(self.working_folder):
-            if name.endswith('.pth'):
-                os.remove(osp(self.working_folder, name))
+class MyHyperBand(HyperBand):
+    def __init__(self, working_folder, **kwargs):
+        self.working_folder = working_folder
+        super(MyHyperBand, self).__init__(**kwargs)
+
+    def get_next_iteration(self, iteration, iteration_kwargs={}):
+        if os.path.exists(self.working_folder):
+            clear_cache(self.working_folder)
+        return super(MyHyperBand,
+                     self).get_next_iteration(iteration, iteration_kwargs)
 
 
 class MyWorker(Worker):
@@ -102,7 +112,15 @@ def run_hpbandster(cfg, scheduler):
         'max_budget': cfg.hpo.sha.budgets[-1],
         'working_folder': cfg.hpo.working_folder
     }
-    optimizer = MyBOHB(**opt_kwargs)
+    if cfg.hpo.scheduler in ['rs', 'wrap_rs']:
+        optimizer = MyRandomSearch(**opt_kwargs)
+    elif cfg.hpo.scheduler in ['hb', 'wrap_hb']:
+        optimizer = MyHyperBand(**opt_kwargs)
+    elif cfg.hpo.scheduler in ['bo_kde', 'bohb', 'wrap_bo_kde', 'wrap_bohb']:
+        optimizer = MyBOHB(**opt_kwargs)
+    else:
+        raise ValueError
+
     if cfg.hpo.sha.iter != 0:
         n_iterations = cfg.hpo.sha.iter
     else:
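The three optimizer subclasses above (MyRandomSearch, MyHyperBand, MyBOHB) only add checkpoint-cache clearing between iterations; they are all driven through HpBandSter's usual nameserver/worker/master loop. Below is a minimal, self-contained sketch of that generic HpBandSter pattern, not the repository's run_hpbandster: the run_id, budgets, search space and DummyWorker are placeholders.

import ConfigSpace as CS
import hpbandster.core.nameserver as hpns
from hpbandster.core.worker import Worker
from hpbandster.optimizers import HyperBand


class DummyWorker(Worker):
    def compute(self, config, budget, **kwargs):
        # A real worker would train for `budget` federated rounds and report the
        # HPO metric; this stand-in just echoes a constant loss.
        return {'loss': 1.0, 'info': config}


cs = CS.ConfigurationSpace()
cs.add_hyperparameter(CS.UniformFloatHyperparameter('lr', 1e-4, 1e-1, log=True))

ns = hpns.NameServer(run_id='toy', host='127.0.0.1', port=None)
ns.start()

worker = DummyWorker(nameserver='127.0.0.1', run_id='toy')
worker.run(background=True)

opt = HyperBand(configspace=cs, run_id='toy', nameserver='127.0.0.1',
                eta=3, min_budget=9, max_budget=81)
res = opt.run(n_iterations=4)

opt.shutdown(shutdown_workers=True)
ns.shutdown()

In run_hpbandster, MyWorker plays the DummyWorker role, presumably evaluating a candidate configuration for the given budget through eval_in_fs (imported above), while the subclasses additionally wipe *.pth checkpoints from working_folder before each new iteration.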
5 changes: 3 additions & 2 deletions federatedscope/hpo.py
@@ -38,10 +38,11 @@
     # global_cfg.merge_from_list(args.opts)
 
     scheduler = get_scheduler(init_cfg)
-    if init_cfg.hpo.scheduler in ['rs', 'sha', 'wrap_sha']:
+    if init_cfg.hpo.scheduler in ['sha', 'wrap_sha']:
         _ = scheduler.optimize()
     elif init_cfg.hpo.scheduler in [
-            'bo_kde', 'bohb', 'wrap_bo_kde', 'wrap_bohb'
+            'rs', 'bo_kde', 'hb', 'bohb', 'wrap_rs', 'wrap_bo_kde', 'wrap_hb',
+            'wrap_bohb'
     ]:
         from federatedscope.autotune.hpbandster import run_hpbandster
         run_hpbandster(init_cfg, scheduler)
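With this change, 'rs' and 'hb' (and their wrap_* variants) leave the native scheduler.optimize() path and are executed by HpBandSter. A condensed sketch of the resulting flow follows; the module paths come from the diffs above, while the config-loading lines follow FederatedScope's yacs-style CN API and are an approximation rather than a verbatim excerpt of hpo.py.

from federatedscope.core.configs.config import global_cfg
from federatedscope.autotune.algos import get_scheduler

init_cfg = global_cfg.clone()
init_cfg.merge_from_file('scripts/example_configs/femnist/avg/hb.yaml')

# 'hb' now falls into the consolidated branch of get_scheduler (algos.py above)
scheduler = get_scheduler(init_cfg)

if init_cfg.hpo.scheduler in ['sha', 'wrap_sha']:
    _ = scheduler.optimize()                      # native successive halving
else:  # 'rs', 'hb', 'bo_kde', 'bohb' and their wrap_* variants
    from federatedscope.autotune.hpbandster import run_hpbandster
    run_hpbandster(init_cfg, scheduler)           # HpBandSter-backed path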
51 changes: 51 additions & 0 deletions scripts/example_configs/femnist/avg/hb.yaml
@@ -0,0 +1,51 @@
use_gpu: True
device: 3
outdir: hb_femnist_avg
early_stop:
  patience: 100
seed: 12345
federate:
  mode: standalone
  total_round_num: 500
  sample_client_rate: 1.0
  share_local_model: True
  online_aggr: True
data:
  root: data/
  type: femnist
  splits: [0.6,0.2,0.2]
  batch_size: 16
  subsample: 0.05
  transform: [['ToTensor'], ['Normalize', {'mean': [0.1307], 'std': [0.3081]}]]
  num_workers: 0
model:
  type: convnet2
  hidden: 2048
  out_channels: 62
  dropout: 0.5
train:
  batch_or_epoch: epoch
  local_update_steps: 1
  optimizer:
    lr: 0.01
    weight_decay: 0.0
grad:
  grad_clip: 5.0
criterion:
  type: CrossEntropyLoss
trainer:
  type: cvtrainer
eval:
  freq: 1
  metrics: ['acc', 'correct', 'f1']
  split: ['test', 'val', 'train']
hpo:
  scheduler: hb
  num_workers: 0
  ss: 'scripts/example_configs/femnist/avg/ss.yaml'
  sha:
    budgets: [ 9, 81 ]
    elim_rate: 3
    iter: 12
  metric: 'client_summarized_weighted_avg.val_avg_loss'
  working_folder: hb_femnist_avg
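For the budgets in this config: assuming run_hpbandster maps hpo.sha.budgets[0] and hpo.sha.budgets[-1] to HpBandSter's min_budget and max_budget (only max_budget is visible in the hunk above) and elim_rate to eta, HyperBand derives three rungs at 9, 27 and 81 federated rounds.

import numpy as np

# Rung derivation used by HpBandSter's HyperBand, evaluated for hb.yaml's values
# under the mapping assumed above.
eta, min_budget, max_budget = 3, 9, 81
max_SH_iter = -int(np.log(min_budget / max_budget) / np.log(eta)) + 1
budgets = max_budget * np.power(eta, -np.linspace(max_SH_iter - 1, 0, max_SH_iter))

print(max_SH_iter)  # 3 successive-halving rungs per bracket
print(budgets)      # [ 9. 27. 81.]

With hpo.sha.iter: 12, run_hpbandster passes n_iterations=12 (see the hunk above), i.e. twelve HyperBand iterations cycling over these rungs.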
61 changes: 61 additions & 0 deletions scripts/example_configs/femnist/avg/hb_wrap.yaml
@@ -0,0 +1,61 @@
use_gpu: True
device: 3
outdir: hb_wrap_femnist_avg
early_stop:
  patience: 100
seed: 12345
federate:
  mode: standalone
  total_round_num: 500
  sample_client_rate: 1.0
  share_local_model: True
  online_aggr: True
  use_diff: True
data:
  root: data/
  type: femnist
  splits: [0.6,0.2,0.2]
  batch_size: 16
  subsample: 0.05
  transform: [['ToTensor'], ['Normalize', {'mean': [0.1307], 'std': [0.3081]}]]
  num_workers: 0
model:
  type: convnet2
  hidden: 2048
  out_channels: 62
  dropout: 0.5
train:
  batch_or_epoch: epoch
  local_update_steps: 1
  optimizer:
    lr: 0.01
    weight_decay: 0.0
grad:
  grad_clip: 5.0
criterion:
  type: CrossEntropyLoss
trainer:
  type: cvtrainer
eval:
  freq: 1
  metrics: ['acc', 'correct', 'f1']
  split: ['test', 'val', 'train']
hpo:
  scheduler: wrap_hb
  num_workers: 0
  init_cand_num: 100
  ss: 'scripts/example_configs/femnist/avg/ss.yaml'
  sha:
    budgets: [ 9, 81 ]
    elim_rate: 3
    iter: 12
  table:
    num: 27
  fedex:
    sched: 'aggressive'
    use: True
    diff: False
    eta0: -1.0
    gamma: 0.0
  metric: 'client_summarized_weighted_avg.val_avg_loss'
  working_folder: hb_wrap_femnist_avg
7 changes: 4 additions & 3 deletions scripts/example_configs/femnist/avg/rs.yaml
@@ -40,12 +40,13 @@ eval:
   metrics: ['acc', 'correct', 'f1']
   split: ['test', 'val', 'train']
 hpo:
-  scheduler: sha
+  scheduler: rs
   num_workers: 0
   init_cand_num: 10
   ss: 'scripts/example_configs/femnist/avg/ss.yaml'
   sha:
-    budgets: [50]
-    elim_rate: 10
+    budgets: [ 50, 50 ]
+    elim_rate: 3
+    iter: 50
   metric: 'client_summarized_weighted_avg.val_avg_loss'
   working_folder: rs_femnist_avg
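Here the budgets change from [50] to [ 50, 50 ]: with min_budget equal to max_budget (under the same mapping assumed for hb.yaml above), HpBandSter degenerates to a single rung, so every sampled configuration is trained for the full 50 rounds, which is exactly plain random search with no early stopping of candidates.

import numpy as np

# With min_budget == max_budget the bracket degenerates to a single rung
# (assumed mapping: hpo.sha.budgets -> [min_budget, max_budget], elim_rate -> eta).
eta, min_budget, max_budget = 3, 50, 50
max_SH_iter = -int(np.log(min_budget / max_budget) / np.log(eta)) + 1
print(max_SH_iter)  # 1 rung: every candidate runs the full 50 rounds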
9 changes: 5 additions & 4 deletions scripts/example_configs/femnist/avg/rs_wrap.yaml
@@ -41,13 +41,14 @@ eval:
   metrics: ['acc', 'correct', 'f1']
   split: ['test', 'val', 'train']
 hpo:
-  scheduler: wrap_sha
+  scheduler: wrap_rs
   num_workers: 0
-  init_cand_num: 10
+  init_cand_num: 100
   ss: 'scripts/example_configs/femnist/avg/ss.yaml'
   sha:
-    budgets: [50]
-    elim_rate: 10
+    budgets: [ 50, 50 ]
+    elim_rate: 3
+    iter: 50
   table:
     num: 27
   fedex: