From 2b7923e09fdb2d32aba9a2fdd8814f2dcd80578e Mon Sep 17 00:00:00 2001 From: Achyudh Ram <7617287+achyudh@users.noreply.github.com> Date: Sat, 13 Apr 2019 23:25:30 -0400 Subject: [PATCH 01/22] Integrate BERT into Hedwig (#29) * Fix package imports * Update README.md * Fix bug due to TAR/AR attribute check * Add BERT models * Add BERT tokenizer * Return logits from the model.py * Remove unused classes in models/bert * Return logits from the model.py (#12) * Remove unused classes in models/bert (#13) * Add initial main file * Add args for BERT * Add partial support for BERT * Initialize training and optimization * Draft the structure of Trainers for BERT * Remove duplicate tokenizer * Add utils * Move optimization to utils * Add more structure for trainer * Refactor the trainer (#15) * Refactor the trainer * Add more edits * Add support for our datasets * Add evaluator * Split data4bert module into multiple processors * Refactor BERT tokenizer * Integrate BERT into Castor framework (#17) * Remove unused classes in models/bert * Split data4bert module into multiple processors * Refactor BERT tokenizer * Add multilabel support in BertTrainer * Add multilabel support in BertEvaluator * Add get_test_samples method in dataset processors * Fix args.py for BERT * Add support for Reuters, IMDB datasets for BERT * Revert "Integrate BERT into Castor framework (#17)" This reverts commit e4244ec73950d1efb15f706de6a4c77988c821ba. * Fix paths to datasets in dataset classes and args * Add SST dataset * Add hedwig-data instructions to README.md * Fix KimCNN README * Fix RegLSTM README * Fix typos in README * Remove trec_eval from README * Add tensorboardX to requirements.txt * Rename processors module to bert_processors * Add method to print metrics after training * Add model check-pointing and early stopping for BERT * Add logos * Update README.md * Fix code comments in classification trainer * Add support for AAPD, Sogou, AGNews and Yelp2014 * Fix bug that deleted saved models * Update README for HAN * Update README for XML-CNN * Remove redundant TODOs from the READMEs * Fix logo in README.md * Update README for Char-CNN * Fix all the READMEs * Resolve conflict * Fix Typos * Re-Add SST2 Processor * Add support for evaluating trained model * Update args.py * Resolve issues due to DataParallel wrapper on saved model * Remove redundant Yelp processor * Fix bug for safely creating the saving directory * Change checkpoint paths to timestamps * Remove unwanted string.strip() from tokenizer * Create save path if it doesn't exist * Decouple model checkpoints from code * Remove model choice restrictions for BERT * Remove model/distill driver * Simplify checkpoint directory creation --- README.md | 54 +- __init__.py | 4 +- common/evaluators/bert_evaluator.py | 80 ++ common/evaluators/classification_evaluator.py | 9 +- common/trainers/bert_trainer.py | 118 +++ common/trainers/classification_trainer.py | 23 +- datasets/aapd.py | 10 +- datasets/bert_processors/__init__.py | 0 datasets/bert_processors/aapd_processor.py | 33 + .../bert_processors/abstract_processor.py | 193 ++++ datasets/bert_processors/agnews_processor.py | 34 + datasets/bert_processors/imdb_processor.py | 34 + datasets/bert_processors/reuters_processor.py | 33 + datasets/bert_processors/sogou_processor.py | 34 + datasets/bert_processors/sst_processor.py | 39 + .../bert_processors/yelp2014_processor.py | 34 + datasets/imdb.py | 6 +- datasets/reuters.py | 17 +- datasets/sst.py | 92 ++ datasets/yelp2014.py | 8 +- docs/hedwig.png | Bin 0 -> 23564 bytes 
models/args.py | 2 +- models/bert/__init__.py | 0 models/bert/__main__.py | 169 ++++ models/bert/args.py | 43 + models/bert/model.py | 851 ++++++++++++++++++ models/char_cnn/README.md | 44 +- models/char_cnn/__main__.py | 5 + models/char_cnn/args.py | 4 +- models/han/README.md | 46 +- models/han/__main__.py | 7 +- models/han/args.py | 4 +- models/kim_cnn/README.md | 130 +-- models/kim_cnn/__main__.py | 5 + models/kim_cnn/args.py | 4 +- models/reg_lstm/README.md | 54 +- models/reg_lstm/__main__.py | 5 + models/reg_lstm/args.py | 4 +- models/xml_cnn/README.md | 32 +- models/xml_cnn/__main__.py | 11 +- models/xml_cnn/args.py | 5 +- requirements.txt | 1 + utils/io.py | 257 ++++++ utils/optimization.py | 179 ++++ utils/tokenization.py | 387 ++++++++ 45 files changed, 2808 insertions(+), 296 deletions(-) create mode 100644 common/evaluators/bert_evaluator.py create mode 100644 common/trainers/bert_trainer.py create mode 100644 datasets/bert_processors/__init__.py create mode 100644 datasets/bert_processors/aapd_processor.py create mode 100644 datasets/bert_processors/abstract_processor.py create mode 100644 datasets/bert_processors/agnews_processor.py create mode 100644 datasets/bert_processors/imdb_processor.py create mode 100644 datasets/bert_processors/reuters_processor.py create mode 100644 datasets/bert_processors/sogou_processor.py create mode 100644 datasets/bert_processors/sst_processor.py create mode 100644 datasets/bert_processors/yelp2014_processor.py create mode 100644 datasets/sst.py create mode 100644 docs/hedwig.png create mode 100644 models/bert/__init__.py create mode 100644 models/bert/__main__.py create mode 100644 models/bert/args.py create mode 100644 models/bert/model.py create mode 100644 utils/io.py create mode 100644 utils/optimization.py create mode 100644 utils/tokenization.py diff --git a/README.md b/README.md index e7048f6..3530ba6 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,21 @@ -# Hedwig +

+ +

This repo contains PyTorch deep learning models for document classification, implemented by the Data Systems Group at the University of Waterloo. ## Models + [Kim CNN](models/kim_cnn/): CNNs for sentence classification [(Kim, EMNLP 2014)](http://www.aclweb.org/anthology/D14-1181) -+ [Conv-RNN](models/conv_rnn/): Convolutional RNN [(Wang et al., KDD 2017)](https://dl.acm.org/citation.cfm?id=3098140) + [HAN](models/han/): Hierarchical Attention Networks [(Zichao, et al, NAACL 2016)](https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf) + [Reg-LSTM](models/reg_lstm/): Regularized LSTM for document classification [(Merity et al.)](https://arxiv.org/abs/1708.02182) + [XML-CNN](models/xml_cnn/): CNNs for extreme multi-label text classification [(Liu et al., SIGIR 2017)](http://nyc.lti.cs.cmu.edu/yiming/Publications/jliu-sigir17.pdf) -+ [Char-CNN](.//): Character-level Convolutional Network [(Zhang et al., NIPS 2015)](http://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf) ++ [Char-CNN](models/char_cnn/): Character-level Convolutional Network [(Zhang et al., NIPS 2015)](http://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf) Each model directory has a `README.md` with further details. ## Setting up PyTorch -**If you are an internal Hedwig contributor using GPU machines in the lab, follow the instructions [here](docs/internal-instructions.md).** - Hedwig is designed for Python 3.6 and [PyTorch](https://pytorch.org/) 0.4. PyTorch recommends [Anaconda](https://www.anaconda.com/distribution/) for managing your environment. We'd recommend creating a custom environment as follows: @@ -26,10 +25,10 @@ $ conda create --name castor python=3.6 $ source activate castor ``` -And installing the packages as follows: +And installing PyTorch as follows: ``` -$ conda install pytorch torchvision -c pytorch +$ conda install pytorch=0.4.1 cuda92 -c pytorch ``` Other Python packages we use can be installed via pip: @@ -38,49 +37,38 @@ Other Python packages we use can be installed via pip: $ pip install -r requirements.txt ``` -Code depends on data from NLTK (e.g., stopwords) so you'll have to download them. Run the Python interpreter and type the commands: +Code depends on data from NLTK (e.g., stopwords) so you'll have to download them. +Run the Python interpreter and type the commands: ```python >>> import nltk >>> nltk.download() ``` -Finally, run the following inside the `utils` directory to build the `trec_eval` tool for evaluating certain datasets. +## Datasets + +Download the Reuters, AAPD and IMDB datasets, along with word2vec embeddings from +[`hedwig-data`](https://git.uwaterloo.ca/jimmylin/hedwig-data). ```bash -$ ./get_trec_eval.sh +$ git clone https://github.com/castorini/hedwig.git +$ git clone https://git.uwaterloo.ca/jimmylin/hedwig-data.git ``` -## Data and Pre-Trained Models - -**If you are an internal Hedwig contributor using GPU machines in the lab, follow the instructions [here](docs/internal-instructions.md).** - -To fully take advantage of code here, clone these other two repos: - -+ [`Castor-data`](https://git.uwaterloo.ca/jimmylin/Castor-data): embeddings, datasets, etc. -+ [`Caster-models`](https://git.uwaterloo.ca/jimmylin/Castor-models): pre-trained models - Organize your directory structure as follows: ``` . 
-├── Hedwig -├── Castor-data -└── Castor-models +├── hedwig +└── hedwig-data ``` -For example (using HTTPS): +After cloning the hedwig-data repo, you need to unzip the embeddings and run the preprocessing script: ```bash -$ git clone https://github.com/castorini/hedwig.git -$ git clone https://git.uwaterloo.ca/jimmylin/Castor-data.git -$ git clone https://git.uwaterloo.ca/jimmylin/Castor-models.git +cd hedwig-data/embeddings/word2vec +gzip -d GoogleNews-vectors-negative300.bin.gz +python bin2txt.py GoogleNews-vectors-negative300.bin GoogleNews-vectors-negative300.txt ``` -After cloning the Hedwig-data repo, you need to unzip embeddings and run data pre-processing scripts. You can choose -to follow instructions under each dataset and embedding directory separately, or just run the following script in -Hedwig-data to do all of the steps for you: - -```bash -$ ./setup.sh -``` +**If you are an internal Hedwig contributor using the machines in the lab, follow the instructions [here](docs/internal-instructions.md).** diff --git a/__init__.py b/__init__.py index d4384a9..742ea25 100644 --- a/__init__.py +++ b/__init__.py @@ -1,3 +1,3 @@ -from models import reg_lstm, kim_cnn +from models import reg_lstm, kim_cnn, han, char_cnn, xml_cnn -__all__ = ['reg_lstm', 'kim_cnn'] +__all__ = ['reg_lstm', 'kim_cnn', 'char_cnn', 'xml_cnn', 'han'] diff --git a/common/evaluators/bert_evaluator.py b/common/evaluators/bert_evaluator.py new file mode 100644 index 0000000..122d3ae --- /dev/null +++ b/common/evaluators/bert_evaluator.py @@ -0,0 +1,80 @@ +import warnings + +import numpy as np +import torch +import torch.nn.functional as F +from sklearn import metrics +from torch.utils.data import DataLoader, SequentialSampler, TensorDataset +from tqdm import tqdm + +from datasets.bert_processors.abstract_processor import convert_examples_to_features +from utils.tokenization import BertTokenizer + +# Suppress warnings from sklearn.metrics +warnings.filterwarnings('ignore') + + +class BertEvaluator(object): + def __init__(self, model, processor, args, split='dev'): + self.args = args + self.model = model + self.processor = processor + self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) + if split == 'test': + self.eval_examples = self.processor.get_test_examples(args.data_dir) + else: + self.eval_examples = self.processor.get_dev_examples(args.data_dir) + + def get_scores(self, silent=False): + eval_features = convert_examples_to_features(self.eval_examples, self.args.max_seq_length, self.tokenizer) + + all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) + + eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size) + + self.model.eval() + + total_loss = 0 + nb_eval_steps, nb_eval_examples = 0, 0 + predicted_labels, target_labels = list(), list() + + for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent): + input_ids = input_ids.to(self.args.device) + input_mask = input_mask.to(self.args.device) + segment_ids = segment_ids.to(self.args.device) + label_ids = 
label_ids.to(self.args.device) + + with torch.no_grad(): + logits = self.model(input_ids, segment_ids, input_mask) + + if self.args.is_multilabel: + predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy()) + target_labels.extend(label_ids.cpu().detach().numpy()) + loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False) + else: + predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy()) + target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy()) + loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1)) + + if self.args.n_gpu > 1: + loss = loss.mean() + if self.args.gradient_accumulation_steps > 1: + loss = loss / self.args.gradient_accumulation_steps + total_loss += loss.item() + + nb_eval_examples += input_ids.size(0) + nb_eval_steps += 1 + + predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels) + accuracy = metrics.accuracy_score(target_labels, predicted_labels) + precision = metrics.precision_score(target_labels, predicted_labels, average='micro') + recall = metrics.recall_score(target_labels, predicted_labels, average='micro') + f1 = metrics.f1_score(target_labels, predicted_labels, average='micro') + avg_loss = total_loss / nb_eval_steps + + return [accuracy, precision, recall, f1, avg_loss], ['accuracy', 'precision', 'recall', 'f1', 'avg_loss'] diff --git a/common/evaluators/classification_evaluator.py b/common/evaluators/classification_evaluator.py index c51c5e3..7078488 100644 --- a/common/evaluators/classification_evaluator.py +++ b/common/evaluators/classification_evaluator.py @@ -18,14 +18,14 @@ def get_scores(self): self.data_loader.init_epoch() total_loss = 0 - # Temp Ave if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging old_params = self.model.get_params() self.model.load_ema_params() predicted_labels, target_labels = list(), list() for batch_idx, batch in enumerate(self.data_loader): - if hasattr(self.model, 'TAR') and self.model.TAR: # TAR condition + if hasattr(self.model, 'tar') and self.model.tar: if self.ignore_lengths: scores, rnn_outs = self.model(batch.text) else: @@ -46,7 +46,8 @@ def get_scores(self): target_labels.extend(torch.argmax(batch.label, dim=1).cpu().detach().numpy()) total_loss += F.cross_entropy(scores, torch.argmax(batch.label, dim=1), size_average=False).item() - if hasattr(self.model, 'TAR') and self.model.TAR: # TAR condition + if hasattr(self.model, 'tar') and self.model.tar: + # Temporal activation regularization total_loss += (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() predicted_labels = np.array(predicted_labels) @@ -57,8 +58,8 @@ def get_scores(self): f1 = metrics.f1_score(target_labels, predicted_labels, average='micro') avg_loss = total_loss / len(self.data_loader.dataset.examples) - # Temp Ave if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging self.model.load_params(old_params) return [accuracy, precision, recall, f1, avg_loss], ['accuracy', 'precision', 'recall', 'f1', 'cross_entropy_loss'] diff --git a/common/trainers/bert_trainer.py b/common/trainers/bert_trainer.py new file mode 100644 index 0000000..46dc1c5 --- /dev/null +++ b/common/trainers/bert_trainer.py @@ -0,0 +1,118 @@ +import datetime +import os + +import torch +import torch.nn.functional as F +from tensorboardX import SummaryWriter +from torch.utils.data import DataLoader, RandomSampler, TensorDataset +from torch.utils.data.distributed import DistributedSampler +from tqdm 
import tqdm +from tqdm import trange + +from common.evaluators.bert_evaluator import BertEvaluator +from datasets.bert_processors.abstract_processor import convert_examples_to_features +from utils.optimization import warmup_linear +from utils.tokenization import BertTokenizer + + +class BertTrainer(object): + def __init__(self, model, optimizer, processor, args): + self.args = args + self.model = model + self.optimizer = optimizer + self.processor = processor + self.train_examples = self.processor.get_train_examples(args.data_dir) + self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) + + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) + self.snapshot_path = os.path.join(self.args.save_path, self.processor.NAME, '%s.pt' % timestamp) + + self.num_train_optimization_steps = int( + len(self.train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs + if args.local_rank != -1: + self.num_train_optimization_steps = args.num_train_optimization_steps // torch.distributed.get_world_size() + + self.log_header = 'Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/Re. Dev/F1 Dev/Loss' + self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) + + self.iterations, self.nb_tr_steps, self.tr_loss = 0, 0, 0 + self.best_dev_f1, self.unimproved_iters = 0, 0 + self.early_stop = False + + def train_epoch(self, train_dataloader): + for step, batch in enumerate(tqdm(train_dataloader, desc="Training")): + batch = tuple(t.to(self.args.device) for t in batch) + input_ids, input_mask, segment_ids, label_ids = batch + logits = self.model(input_ids, segment_ids, input_mask) + + if self.args.is_multilabel: + loss = F.binary_cross_entropy_with_logits(logits, label_ids.float()) + else: + loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1)) + + if self.args.n_gpu > 1: + loss = loss.mean() + if self.args.gradient_accumulation_steps > 1: + loss = loss / self.args.gradient_accumulation_steps + + if self.args.fp16: + self.optimizer.backward(loss) + else: + loss.backward() + + self.tr_loss += loss.item() + self.nb_tr_steps += 1 + if (step + 1) % self.args.gradient_accumulation_steps == 0: + if self.args.fp16: + lr_this_step = self.args.learning_rate * warmup_linear(self.iterations / self.num_train_optimization_steps, self.args.warmup_proportion) + for param_group in self.optimizer.param_groups: + param_group['lr'] = lr_this_step + self.optimizer.step() + self.optimizer.zero_grad() + self.iterations += 1 + + def train(self): + train_features = convert_examples_to_features( + self.train_examples, self.args.max_seq_length, self.tokenizer) + + print("Number of examples: ", len(self.train_examples)) + print("Batch size:", self.args.batch_size) + print("Num of steps:", self.num_train_optimization_steps) + + all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + if self.args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + + train_dataloader = 
DataLoader(train_data, sampler=train_sampler, batch_size=self.args.batch_size) + + self.model.train() + + for epoch in trange(int(self.args.epochs), desc="Epoch"): + self.train_epoch(train_dataloader) + dev_evaluator = BertEvaluator(self.model, self.processor, self.args, split='dev') + dev_acc, dev_precision, dev_recall, dev_f1, dev_loss = dev_evaluator.get_scores()[0] + + # Print validation results + tqdm.write(self.log_header) + tqdm.write(self.log_template.format(epoch + 1, self.iterations, epoch + 1, self.args.epochs, + dev_acc, dev_precision, dev_recall, dev_f1, dev_loss)) + + # Update validation results + if dev_f1 > self.best_dev_f1: + self.unimproved_iters = 0 + self.best_dev_f1 = dev_f1 + torch.save(self.model, self.snapshot_path) + + else: + self.unimproved_iters += 1 + if self.unimproved_iters >= self.args.patience: + self.early_stop = True + tqdm.write("Early Stopping. Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_f1)) + break diff --git a/common/trainers/classification_trainer.py b/common/trainers/classification_trainer.py index c75f983..e43e0e7 100644 --- a/common/trainers/classification_trainer.py +++ b/common/trainers/classification_trainer.py @@ -7,7 +7,7 @@ import torch.nn.functional as F from tensorboardX import SummaryWriter -from .trainer import Trainer +from common.trainers.trainer import Trainer class ClassificationTrainer(Trainer): @@ -24,8 +24,10 @@ def __init__(self, model, embedding, train_loader, trainer_config, train_evaluat '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.0f}%,{:>8.6f},{:12.4f}'.split(',')) self.dev_log_template = ' '.join( '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.4f},{:>8.4f},{:8.4f},{:12.4f},{:12.4f}'.split(',')) - self.writer = SummaryWriter(log_dir="tensorboard_logs/" + datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) - self.snapshot_path = os.path.join(self.model_outfile, self.train_loader.dataset.NAME, 'best_model.pt') + + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) + self.snapshot_path = os.path.join(self.model_outfile, self.train_loader.dataset.NAME, '%s.pt' % timestamp) def train_epoch(self, epoch): self.train_loader.init_epoch() @@ -34,7 +36,7 @@ def train_epoch(self, epoch): self.iterations += 1 self.model.train() self.optimizer.zero_grad() - if hasattr(self.model, 'TAR') and self.model.TAR: + if hasattr(self.model, 'tar') and self.model.tar: if 'ignore_lengths' in self.config and self.config['ignore_lengths']: scores, rnn_outs = self.model(batch.text) else: @@ -57,18 +59,18 @@ def train_epoch(self, epoch): n_correct += 1 loss = F.cross_entropy(scores, torch.argmax(batch.label.data, dim=1)) - if hasattr(self.model, 'TAR') and self.model.TAR: - loss = loss + self.model.TAR*(rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() - if hasattr(self.model, 'AR') and self.model.AR: - loss = loss + self.model.AR*(rnn_outs[:]).pow(2).mean() + if hasattr(self.model, 'tar') and self.model.tar: + loss = loss + self.model.tar * (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() + if hasattr(self.model, 'ar') and self.model.ar: + loss = loss + self.model.ar * (rnn_outs[:]).pow(2).mean() n_total += batch.batch_size train_acc = 100. 
* n_correct / n_total loss.backward() self.optimizer.step() - # Temp Ave if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging self.model.update_ema() if self.iterations % self.log_interval == 1: @@ -83,7 +85,6 @@ def train(self, epochs): self.start = time.time() header = ' Time Epoch Iteration Progress (%Epoch) Loss Accuracy' dev_header = ' Time Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/Recall Dev/F1 Dev/Loss' - # model_outfile is actually a directory, using model_outfile to conform to Trainer naming convention os.makedirs(self.model_outfile, exist_ok=True) os.makedirs(os.path.join(self.model_outfile, self.train_loader.dataset.NAME), exist_ok=True) @@ -98,6 +99,8 @@ def train(self, epochs): self.writer.add_scalar('Dev/Precision', dev_precision, epoch) self.writer.add_scalar('Dev/Recall', dev_recall, epoch) self.writer.add_scalar('Dev/F-measure', dev_f1, epoch) + + # Print validation results print('\n' + dev_header) print(self.dev_log_template.format(time.time() - self.start, epoch, self.iterations, epoch, epochs, dev_acc, dev_precision, dev_recall, dev_f1, dev_loss)) diff --git a/datasets/aapd.py b/datasets/aapd.py index 2702559..10ee01c 100644 --- a/datasets/aapd.py +++ b/datasets/aapd.py @@ -1,5 +1,4 @@ import os -import re import numpy as np import torch @@ -7,7 +6,8 @@ from torchtext.data.iterator import BucketIterator from torchtext.vocab import Vectors -from datasets.reuters import clean_string, clean_string_fl, split_sents +from datasets.reuters import clean_string, split_sents + def char_quantize(string, max_length=1000): identity = np.identity(len(AAPDCharQuantized.ALPHABET)) @@ -40,9 +40,9 @@ def sort_key(ex): return len(ex.text) @classmethod - def splits(cls, path, train=os.path.join('AAPD', 'data', 'aapd_train.tsv'), - validation=os.path.join('AAPD', 'data', 'aapd_validation.tsv'), - test=os.path.join('AAPD', 'data','aapd_test.tsv'), **kwargs): + def splits(cls, path, train=os.path.join('AAPD', 'train.tsv'), + validation=os.path.join('AAPD', 'dev.tsv'), + test=os.path.join('AAPD', 'test.tsv'), **kwargs): return super(AAPD, cls).splits( path, train=train, validation=validation, test=test, format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] diff --git a/datasets/bert_processors/__init__.py b/datasets/bert_processors/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/datasets/bert_processors/aapd_processor.py b/datasets/bert_processors/aapd_processor.py new file mode 100644 index 0000000..91e78f8 --- /dev/null +++ b/datasets/bert_processors/aapd_processor.py @@ -0,0 +1,33 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class AAPDProcessor(BertProcessor): + NAME = 'AAPD' + NUM_CLASSES = 54 + IS_MULTILABEL = True + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir,'AAPD', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'AAPD', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'AAPD', 'test.tsv')), 'test') + + def _create_examples(self, lines, set_type): + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples diff --git 
a/datasets/bert_processors/abstract_processor.py b/datasets/bert_processors/abstract_processor.py new file mode 100644 index 0000000..4ef610a --- /dev/null +++ b/datasets/bert_processors/abstract_processor.py @@ -0,0 +1,193 @@ +import csv + +import sys +import numpy as np + + +class InputExample(object): + """A single training/test example for simple sequence classification.""" + + def __init__(self, guid, text_a, text_b=None, label=None): + """Constructs a InputExample. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + """ + self.guid = guid + self.text_a = text_a + self.text_b = text_b + self.label = label + + +class InputFeatures(object): + """A single set of features of data.""" + + def __init__(self, input_ids, input_mask, segment_ids, label_id): + self.input_ids = input_ids + self.input_mask = input_mask + self.segment_ids = segment_ids + self.label_id = label_id + + +class BertProcessor(object): + """Base class for data converters for sequence classification data sets.""" + + def get_train_examples(self, data_dir): + """ + Gets a collection of `InputExample`s for the train set + :param data_dir: + :return: + """ + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """ + Gets a collection of `InputExample`s for the dev set + :param data_dir: + :return: + """ + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """ + Gets a collection of `InputExample`s for the test set + :param data_dir: + :return: + """ + raise NotImplementedError() + + def get_labels(self): + """ + Gets a list of possible labels in the dataset + :return: + """ + raise NotImplementedError() + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """ + Reads a Tab Separated Values (TSV) file + :param input_file: + :param quotechar: + :return: + """ + with open(input_file, "r") as f: + reader = csv.reader(f, delimiter="\t", quotechar=quotechar) + lines = [] + for line in reader: + if sys.version_info[0] == 2: + line = list(str(cell, 'utf-8') for cell in line) + lines.append(line) + return lines + + +def convert_examples_to_features(examples, max_seq_length, tokenizer, print_examples=False): + """ + Loads a data file into a list of InputBatch objects + :param examples: + :param max_seq_length: + :param tokenizer: + :param print_examples: + :return: a list of InputBatch objects + """ + + features = [] + for (ex_index, example) in enumerate(examples): + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + if example.text_b: + tokens_b = tokenizer.tokenize(example.text_b) + # Modifies `tokens_a` and `tokens_b` in place so that the total + # length is less than the specified length. + # Account for [CLS], [SEP], [SEP] with "- 3" + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] + + # The convention in BERT is: + # (a) For sequence pairs: + # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] + # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 + # (b) For single sequences: + # tokens: [CLS] the dog is hairy . 
[SEP] + # type_ids: 0 0 0 0 0 0 0 + # + # Where "type_ids" are used to indicate whether this is the first + # sequence or the second sequence. The embedding vectors for `type=0` and + # `type=1` were learned during pre-training and are added to the wordpiece + # embedding vector (and position vector). This is not *strictly* necessary + # since the [SEP] token unambigiously separates the sequences, but it makes + # it easier for the model to learn the concept of sequences. + # + # For classification tasks, the first vector (corresponding to [CLS]) is + # used as as the "sentence vector". Note that this only makes sense because + # the entire model is fine-tuned. + tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + segment_ids = [0] * len(tokens) + + if tokens_b: + tokens += tokens_b + ["[SEP]"] + segment_ids += [1] * (len(tokens_b) + 1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length. + padding = [0] * (max_seq_length - len(input_ids)) + input_ids += padding + input_mask += padding + segment_ids += padding + + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + assert len(segment_ids) == max_seq_length + + label_id = [float(x) for x in example.label] + + if print_examples and ex_index < 5: + print("tokens: %s" % " ".join([str(x) for x in tokens])) + print("input_ids: %s" % " ".join([str(x) for x in input_ids])) + print("input_mask: %s" % " ".join([str(x) for x in input_mask])) + print("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + print("label: %s" % example.label) + + features.append(InputFeatures(input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id)) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """ + Truncates a sequence pair in place to the maximum length + :param tokens_a: + :param tokens_b: + :param max_length: + :return: + """ + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. 
+ while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() diff --git a/datasets/bert_processors/agnews_processor.py b/datasets/bert_processors/agnews_processor.py new file mode 100644 index 0000000..b1f65c2 --- /dev/null +++ b/datasets/bert_processors/agnews_processor.py @@ -0,0 +1,34 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class AGNewsProcessor(BertProcessor): + NAME = 'AGNews' + NUM_CLASSES = 4 + IS_MULTILABEL = False + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'AGNews', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'AGNews', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'AGNews', 'test.tsv')), 'test') + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples diff --git a/datasets/bert_processors/imdb_processor.py b/datasets/bert_processors/imdb_processor.py new file mode 100644 index 0000000..1d997dc --- /dev/null +++ b/datasets/bert_processors/imdb_processor.py @@ -0,0 +1,34 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class IMDBProcessor(BertProcessor): + NAME = 'IMDB' + NUM_CLASSES = 10 + IS_MULTILABEL = False + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'IMDB', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'IMDB', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'IMDB', 'test.tsv')), 'test') + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples \ No newline at end of file diff --git a/datasets/bert_processors/reuters_processor.py b/datasets/bert_processors/reuters_processor.py new file mode 100644 index 0000000..8ad9b33 --- /dev/null +++ b/datasets/bert_processors/reuters_processor.py @@ -0,0 +1,33 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class ReutersProcessor(BertProcessor): + NAME = 'Reuters' + NUM_CLASSES = 90 + IS_MULTILABEL = True + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Reuters', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Reuters', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Reuters', 'test.tsv')), 'test') + + def 
_create_examples(self, lines, set_type): + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = '%s-%s' % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples \ No newline at end of file diff --git a/datasets/bert_processors/sogou_processor.py b/datasets/bert_processors/sogou_processor.py new file mode 100644 index 0000000..6beae50 --- /dev/null +++ b/datasets/bert_processors/sogou_processor.py @@ -0,0 +1,34 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class SogouProcessor(BertProcessor): + NAME = 'Sogou' + NUM_CLASSES = 5 + IS_MULTILABEL = False + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Sogou', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Sogou', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Sogou', 'test.tsv')), 'test') + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples diff --git a/datasets/bert_processors/sst_processor.py b/datasets/bert_processors/sst_processor.py new file mode 100644 index 0000000..01c6079 --- /dev/null +++ b/datasets/bert_processors/sst_processor.py @@ -0,0 +1,39 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class SST2Processor(BertProcessor): + NAME = 'SST-2' + NUM_CLASSES = 2 + IS_MULTILABEL = False + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'SST-2', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'SST-2', 'dev.tsv')), 'dev') + + def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'SST-2', 'test.tsv')), 'test') + + @staticmethod + def _create_examples(lines, set_type): + """ + Creates examples for the training and dev sets + :param lines: + :param set_type: + :return: + """ + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = '%s-%s' % (set_type, i) + label = line[0] + text = line[1] + examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) + return examples diff --git a/datasets/bert_processors/yelp2014_processor.py b/datasets/bert_processors/yelp2014_processor.py new file mode 100644 index 0000000..c93eb6c --- /dev/null +++ b/datasets/bert_processors/yelp2014_processor.py @@ -0,0 +1,34 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample + + +class Yelp2014Processor(BertProcessor): + NAME = 'Yelp2014' + NUM_CLASSES = 5 + IS_MULTILABEL = False + + def get_train_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Yelp2014', 'train.tsv')), 'train') + + def get_dev_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Yelp2014', 'dev.tsv')), 'dev') + 
+ def get_test_examples(self, data_dir): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'Yelp2014', 'test.tsv')), 'test') + + def _create_examples(self, lines, set_type): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + guid = "%s-%s" % (set_type, i) + text_a = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples diff --git a/datasets/imdb.py b/datasets/imdb.py index 5cd60db..c82963c 100644 --- a/datasets/imdb.py +++ b/datasets/imdb.py @@ -40,9 +40,9 @@ def sort_key(ex): return len(ex.text) @classmethod - def splits(cls, path, train=os.path.join('IMDB', 'data', 'imdb_train.tsv'), - validation=os.path.join('IMDB', 'data', 'imdb_validation.tsv'), - test=os.path.join('IMDB', 'data', 'imdb_test.tsv'), **kwargs): + def splits(cls, path, train=os.path.join('IMDB', 'train.tsv'), + validation=os.path.join('IMDB', 'dev.tsv'), + test=os.path.join('IMDB', 'test.tsv'), **kwargs): return super(IMDB, cls).splits( path, train=train, validation=validation, test=test, format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] diff --git a/datasets/reuters.py b/datasets/reuters.py index abd7036..8bdab2c 100644 --- a/datasets/reuters.py +++ b/datasets/reuters.py @@ -31,17 +31,6 @@ def char_quantize(string, max_length=1000): return np.concatenate((quantized_string, np.zeros((max_length - len(quantized_string), len(ReutersCharQuantized.ALPHABET)), dtype=np.float32))) -def clean_string_fl(string): - """ - Returns only the title and first line (excluding the title) for every Reuters article, then calls clean_string - """ - split_string = string.split('.') - if len(split_string) > 1: - return clean_string(split_string[0] + ". 
" + split_string[1]) - else: - return clean_string(string) - - def process_labels(string): """ Returns the label string as a list of integers @@ -64,9 +53,9 @@ def sort_key(ex): return len(ex.text) @classmethod - def splits(cls, path, train=os.path.join('Reuters-21578', 'data', 'reuters_train.tsv'), - validation=os.path.join('Reuters-21578', 'data', 'reuters_validation.tsv'), - test=os.path.join('Reuters-21578', 'data','reuters_test.tsv'), **kwargs): + def splits(cls, path, train=os.path.join('Reuters', 'train.tsv'), + validation=os.path.join('Reuters', 'dev.tsv'), + test=os.path.join('Reuters', 'test.tsv'), **kwargs): return super(Reuters, cls).splits( path, train=train, validation=validation, test=test, format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] diff --git a/datasets/sst.py b/datasets/sst.py new file mode 100644 index 0000000..8b66383 --- /dev/null +++ b/datasets/sst.py @@ -0,0 +1,92 @@ +import os + +import numpy as np +import torch +from torchtext.data import NestedField, Field, TabularDataset +from torchtext.data.iterator import BucketIterator +from torchtext.vocab import Vectors + +from datasets.reuters import clean_string, split_sents + + +def char_quantize(string, max_length=500): + identity = np.identity(len(SSTCharQuantized.ALPHABET)) + quantized_string = np.array([identity[SSTCharQuantized.ALPHABET[char]] for char in list(string.lower()) if char in SSTCharQuantized.ALPHABET], dtype=np.float32) + if len(quantized_string) > max_length: + return quantized_string[:max_length] + else: + return np.concatenate((quantized_string, np.zeros((max_length - len(quantized_string), len(SSTCharQuantized.ALPHABET)), dtype=np.float32))) + + +def process_labels(string): + """ + Returns the label string as a list of integers + :param string: + :return: + """ + return [float(x) for x in string] + + +class SST(TabularDataset): + NAME = 'SST-2' + NUM_CLASSES = 2 + IS_MULTILABEL = False + + TEXT_FIELD = Field(batch_first=True, tokenize=clean_string, include_lengths=True) + LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_labels) + + @staticmethod + def sort_key(ex): + return len(ex.text) + + @classmethod + def splits(cls, path, train=os.path.join('SST-2', 'train.tsv'), + validation=os.path.join('SST-2', 'dev.tsv'), + test=os.path.join('SST-2', 'test.tsv'), **kwargs): + return super(SST, cls).splits( + path, train=train, validation=validation, test=test, + format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] + ) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, device=0, vectors=None, + unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param vectors_name: name of word vectors file + :param vectors_cache: path to directory containing word vectors file + :param batch_size: batch size + :param device: GPU device + :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes + :param unk_init: function used to generate vector for OOV words + :return: + """ + if vectors is None: + vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init) + + train, val, test = cls.splits(path) + cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, + sort_within_batch=True, device=device) + + +class SSTCharQuantized(SST): + ALPHABET = dict(map(lambda t: (t[1], 
t[0]), enumerate(list("""abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""")))) + TEXT_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=char_quantize) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, device=0, vectors=None, + unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param batch_size: batch size + :param device: GPU device + :return: + """ + train, val, test = cls.splits(path) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, device=device) + + +class SSTHierarchical(SST): + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) diff --git a/datasets/yelp2014.py b/datasets/yelp2014.py index 249c243..01655c4 100644 --- a/datasets/yelp2014.py +++ b/datasets/yelp2014.py @@ -7,7 +7,7 @@ from torchtext.data.iterator import BucketIterator from torchtext.vocab import Vectors -from datasets.reuters import clean_string, clean_string_fl, split_sents +from datasets.reuters import clean_string, split_sents def char_quantize(string, max_length=1000): @@ -41,9 +41,9 @@ def sort_key(ex): return len(ex.text) @classmethod - def splits(cls, path, train=os.path.join('Yelp-Reviews-2014', 'data', 'yelp2014_train.tsv'), - validation=os.path.join('Yelp-Reviews-2014', 'data', 'yelp2014_validation.tsv'), - test=os.path.join('Yelp-Reviews-2014', 'data', 'yelp2014_test.tsv'), **kwargs): + def splits(cls, path, train=os.path.join('Yelp2014', 'train.tsv'), + validation=os.path.join('Yelp2014', 'dev.tsv'), + test=os.path.join('Yelp2014', 'test.tsv'), **kwargs): return super(Yelp2014, cls).splits( path, train=train, validation=validation, test=test, format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] diff --git a/docs/hedwig.png b/docs/hedwig.png new file mode 100644 index 0000000000000000000000000000000000000000..26df0a6dc2d7494a48c623485f6938311bdef478 GIT binary patch literal 23564 zcmZ6ybzBo()G$6;S_SD)kPr}*?hugK+JI8<#Dj@Hx=WA|!sr^XDV+lb zjOHDDpZD{7-rxR!?c8(DJ@?c-w<2}4l*vgLNB{r;xvI)bT>yZP2>`%py+eqFaEw!a z!~Uc9Rx2bEJ@E}Fq&JYAbVhP9 zFMdZ3Rv&zIG=<3Dkzr>NMyc--1<<1{0 zHHRFH->**_t56+UVqFn%gmd^BMSbE0s$PtD{hpZaen$z%3@)92>uNv0Pmjr zXLgkF3;~^%UVs!G#3|Dim$C)$U!!m?Jg-LctWh;9e{rl(vw0>DbieU-8wlo z1ZBFR54=S?6V=8g$ECde?w$?0Z~q;i<@Sg8h%~XV%Kx-5*(c#M3cS_YWRP=@fxX3|L=FZ zexS(A5N(?LoMYL*C?{58ydrFKPJS}aK4*Q+;#P@@#+n9E` z_Y>Y5)R3`S&%CjIPOwI_4qH8w@VRCB;WdW>(M?tLcY`eh=Y^M~E%==5p9| z%zr)5MVvF_rRmACX*?vq!R3BY4isNjondlL;eE%G9AkL)Hon5%sn?LiCIG2L4r!1}yW ztv$jDZ(e*MFPIa`d#gnD(nrW=h`ej959B}D*KddcQT+a$Z`)N-q8q{7YA1OAjlxPl z7LNk$P2~*O<@z5qPRGu-5PU`})(;dnWS!h>giNcdKXD*0Ms8Wtnbn4LOB0KBKEh|d zK?qS$A`#DU9t-B8ecAove_&mKRLfqgR^1Vrl7{d1oaJ1`0%mWJ0f$dsbSnJBIrC{# zm3u7p9|M*|7WY1da3*K**ZL#8|D#?b4k9M36vkVcXZ#EJthdCFl_Dg}_eMAsb5Aq+ z8`wzvHP4~fidFY@zMqx*HB|JH^FPzAaX-J+K)Jn@Fo^55tc455e+`O>z7dL$6Ym~U zjio|Oe*wlDHX(`$)HgDeKAr*S^(QM||5Qz`z70dak~scv?J6J0pXFa%w4s@9ff3v* z6cK@{hu6AH&m@vnZtX?%l!feN;bL@s_nAeiklcD7ajNtVIEQbqKRz`|j-J1Lof=<(|XWtt>yTV6KT? 
z#Y=hSVy{pNUK4QcODR(hl^vW$arRnBB>D|b5eHSKhd zVVoXDl-6Y`{4tvGc()j&uxcyC;@p~{{?a))k5wu3NcT8}B?g#Io<8`_{F~39glR>5 zT_R)t8+eei^x6hq$fT=@Tw}UR-h4^r+E=i9=vyKe&z>iviTRzV&@bliav>Rz4+=G+ z<$7mxr}g}j!`yDOg(%|JH5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) + + +def evaluate_split(model, processor, args, split='dev'): + evaluator = BertEvaluator(model, processor, args, split) + accuracy, precision, recall, f1, avg_loss = evaluator.get_scores(silent=True)[0] + print('\n' + LOG_HEADER) + print(LOG_TEMPLATE.format(split.upper(), accuracy, precision, recall, f1, avg_loss)) + + +if __name__ == '__main__': + # Set default configuration in args.py + args = get_args() + + if args.local_rank == -1 or not args.cuda: + device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl') + + print('Device:', str(device).upper()) + print('Number of GPUs:', n_gpu) + print('Distributed training:', bool(args.local_rank != -1)) + print('FP16:', args.fp16) + + # Set random seed for reproducibility + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + if args.server_ip and args.server_port: + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + dataset_map = { + 'SST-2': SST2Processor, + 'Reuters': ReutersProcessor, + 'IMDB': IMDBProcessor, + 'AAPD': AAPDProcessor, + 'AGNews': AGNewsProcessor, + 'Yelp2014': Yelp2014Processor, + 'Sogou': SogouProcessor + } + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + + args.batch_size = args.batch_size // args.gradient_accumulation_steps + args.device = device + args.n_gpu = n_gpu + args.num_labels = dataset_map[args.dataset].NUM_CLASSES + args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL + + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + + processor = dataset_map[args.dataset]() + args.is_lowercase = 'uncased' in args.model + tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) + + train_examples = None + num_train_optimization_steps = None + if not args.trained_model: + train_examples = processor.get_train_examples(args.data_dir) + num_train_optimization_steps = int( + len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs + if args.local_rank != -1: + num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() + + cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)) + model = BertForSequenceClassification.from_pretrained(args.model, cache_dir=cache_dir, num_labels=args.num_labels) + + if args.fp16: + model.half() + model.to(device) + + if args.local_rank != -1: + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise 
ImportError("Install NVIDIA Apex to use distributed and FP16 training.") + + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + + if args.fp16: + try: + from apex.optimizers import FP16_Optimizer + from apex.optimizers import FusedAdam + except ImportError: + raise ImportError("Please install NVIDIA Apex for distributed and FP16 training") + + optimizer = FusedAdam(optimizer_grouped_parameters, + lr=args.lr, + bias_correction=False, + max_grad_norm=1.0) + if args.loss_scale == 0: + optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) + else: + optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) + + else: + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.lr, + warmup=args.warmup_proportion, + t_total=num_train_optimization_steps) + + trainer = BertTrainer(model, optimizer, processor, args) + + if not args.trained_model: + trainer.train() + model = torch.load(trainer.snapshot_path) + else: + model = BertForSequenceClassification.from_pretrained(args.model, num_labels=args.num_labels) + model_ = torch.load(args.trained_model, map_location=lambda storage, loc: storage) + state={} + for key in model_.state_dict().keys(): + new_key = key.replace("module.", "") + state[new_key] = model_.state_dict()[key] + model.load_state_dict(state) + model = model.to(device) + + evaluate_split(model, processor, args, split='dev') + evaluate_split(model, processor, args, split='test') + diff --git a/models/bert/args.py b/models/bert/args.py new file mode 100644 index 0000000..9171c1b --- /dev/null +++ b/models/bert/args.py @@ -0,0 +1,43 @@ +import os +from argparse import ArgumentParser + +import models.args + + +def get_args(): + parser = models.args.get_args() + + parser.add_argument('--model', default=None, type=str, required=True) + parser.add_argument('--dataset', type=str, default='SST-2', choices=['SST-2', 'AGNews', 'Reuters', 'AAPD', 'IMDB', 'Yelp2014']) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'bert')) + parser.add_argument('--cache-dir', default='cache', type=str) + + parser.add_argument('--max-seq-length', + default=128, + type=int, + help='The maximum total input sequence length after WordPiece tokenization. \n' + 'Sequences longer than this will be truncated, and sequences shorter \n' + 'than this will be padded.') + + parser.add_argument('--trained-model', default=None, type=str) + parser.add_argument('--local-rank', type=int, default=-1, help='local rank for distributed training') + parser.add_argument('--fp16', action='store_true', help='use 16-bit floating point precision') + + parser.add_argument('--warmup-proportion', + default=0.1, + type=float, + help='Proportion of training to perform linear learning rate warmup for') + + parser.add_argument('--gradient-accumulation-steps', type=int, default=1, + help='Number of updates steps to accumulate before performing a backward/update pass') + + parser.add_argument('--loss-scale', + type=float, default=0, + help='Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n' + '0 (default value): dynamic loss scaling.\n' + 'Positive power of 2: static loss scaling value.\n') + + parser.add_argument('--server-ip', type=str, default='', help='Can be used for distant debugging.') + parser.add_argument('--server-port', type=str, default='', help='Can be used for distant debugging.') + args = parser.parse_args() + return args diff --git a/models/bert/model.py b/models/bert/model.py new file mode 100644 index 0000000..5456fbd --- /dev/null +++ b/models/bert/model.py @@ -0,0 +1,851 @@ +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import json +import logging +import math +import os +import shutil +import sys +import tarfile +import tempfile +from io import open + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss + +from utils.io import cached_path + +logger = logging.getLogger(__name__) + +PRETRAINED_MODEL_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", +} +CONFIG_NAME = 'bert_config.json' +WEIGHTS_NAME = 'pytorch_model.bin' +TF_WEIGHTS_NAME = 'model.ckpt' + + +def load_tf_weights_in_bert(model, tf_checkpoint_path): + """ Load tf checkpoints in a pytorch model + """ + try: + import re + import numpy as np + import tensorflow as tf + except ImportError: + print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " + "https://www.tensorflow.org/install/ for installation instructions.") + raise + tf_path = os.path.abspath(tf_checkpoint_path) + print("Converting TensorFlow checkpoint from {}".format(tf_path)) + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + print("Loading TF weight {} with shape {}".format(name, shape)) + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split('/') + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any(n in ["adam_v", "adam_m"] for n in name): + print("Skipping {}".format("/".join(name))) + continue + pointer = model + for m_name in name: + if re.fullmatch(r'[A-Za-z]+_\d+', m_name): + l = re.split(r'_(\d+)', m_name) + else: + l = [m_name] + if l[0] == 'kernel' or l[0] == 'gamma': + pointer = getattr(pointer, 'weight') + elif l[0] == 'output_bias' or l[0] == 'beta': + pointer = getattr(pointer, 'bias') + elif l[0] == 'output_weights': + pointer = getattr(pointer, 'weight') + else: + pointer = getattr(pointer, l[0]) + if len(l) >= 2: + num = int(l[1]) + pointer = pointer[num] + if m_name[-11:] == '_embeddings': + pointer = getattr(pointer, 'weight') + elif m_name == 'kernel': + array = np.transpose(array) + try: + assert pointer.shape == array.shape + except AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + print("Initialize PyTorch weight {}".format(name)) + pointer.data = torch.from_numpy(array) + return model + + +def gelu(x): + """ + Implementation of the gelu activation function + OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +class BertConfig(object): + """Configuration class to store the configuration of a `BertModel`. + """ + def __init__(self, + vocab_size_or_config_json_file, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02): + """Constructs BertConfig. + + Args: + vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. + hidden_size: Size of the encoder layers and the pooler layer. + num_hidden_layers: Number of hidden layers in the Transformer encoder. + num_attention_heads: Number of attention heads for each attention layer in + the Transformer encoder. + intermediate_size: The size of the "intermediate" (i.e., feed-forward) + layer in the Transformer encoder. + hidden_act: The non-linear activation function (function or string) in the + encoder and pooler. If string, "gelu", "relu" and "swish" are supported. + hidden_dropout_prob: The dropout probabilitiy for all fully connected + layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob: The dropout ratio for the attention + probabilities. + max_position_embeddings: The maximum sequence length that this model might + ever be used with. Typically set this to something large just in case + (e.g., 512 or 1024 or 2048). 
+ type_vocab_size: The vocabulary size of the `token_type_ids` passed into + `BertModel`. + initializer_range: The sttdev of the truncated_normal_initializer for + initializing all weight matrices. + """ + if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2 + and isinstance(vocab_size_or_config_json_file, str)): + with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: + json_config = json.loads(reader.read()) + for key, value in json_config.items(): + self.__dict__[key] = value + elif isinstance(vocab_size_or_config_json_file, int): + self.vocab_size = vocab_size_or_config_json_file + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + else: + raise ValueError("First argument must be either a vocabulary size (int)" + "or the path to a pretrained model config file (str)") + + @classmethod + def from_dict(cls, json_object): + """Constructs a `BertConfig` from a Python dictionary of parameters.""" + config = BertConfig(vocab_size_or_config_json_file=-1) + for key, value in json_object.items(): + config.__dict__[key] = value + return config + + @classmethod + def from_json_file(cls, json_file): + """Constructs a `BertConfig` from a json file of parameters.""" + with open(json_file, "r", encoding='utf-8') as reader: + text = reader.read() + return cls.from_dict(json.loads(text)) + + def __repr__(self): + return str(self.to_json_string()) + + def to_dict(self): + """Serializes this instance to a Python dictionary.""" + output = copy.deepcopy(self.__dict__) + return output + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" + + +try: + from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm + +except ImportError: + logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .") + + class BertLayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-12): + """Construct a layernorm module in the TF style (epsilon inside the square root). + """ + super(BertLayerNorm, self).__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) + self.variance_epsilon = eps + + def forward(self, x): + u = x.mean(-1, keepdim=True) + s = (x - u).pow(2).mean(-1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.variance_epsilon) + return self.weight * x + self.bias + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings. 
+ """ + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None): + seq_length = input_ids.size(1) + position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + def __init__(self, config): + super(BertSelfAttention, self).__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + "The hidden size (%d) is not a multiple of the number of attention " + "heads (%d)" % (config.hidden_size, config.num_attention_heads)) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward(self, hidden_states, attention_mask): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
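+ # At this point attention_probs has shape [batch_size, num_heads, seq_length, seq_length];
+ # entries for masked (padding) key positions are already close to zero, since the additive
+ # attention_mask pushed their raw scores to large negative values before the softmax above.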
+ attention_probs = self.dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + return context_layer + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super(BertSelfOutput, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config): + super(BertAttention, self).__init__() + self.self = BertSelfAttention(config) + self.output = BertSelfOutput(config) + + def forward(self, input_tensor, attention_mask): + self_output = self.self(input_tensor, attention_mask) + attention_output = self.output(self_output, input_tensor) + return attention_output + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super(BertIntermediate, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, str)): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super(BertOutput, self).__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask): + attention_output = self.attention(hidden_states, attention_mask) + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + def __init__(self, config): + super(BertEncoder, self).__init__() + layer = BertLayer(config) + self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True): + all_encoder_layers = [] + for layer_module in self.layer: + hidden_states = layer_module(hidden_states, attention_mask) + if output_all_encoded_layers: + all_encoder_layers.append(hidden_states) + if not output_all_encoded_layers: + all_encoder_layers.append(hidden_states) + return all_encoder_layers + + +class BertPooler(nn.Module): + def __init__(self, config): + super(BertPooler, self).__init__() + self.dense 
= nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super(BertPredictionHeadTransform, self).__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, str)): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertLMPredictionHead, self).__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(bert_model_embedding_weights.size(1), + bert_model_embedding_weights.size(0), + bias=False) + self.decoder.weight = bert_model_embedding_weights + self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + self.bias + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertOnlyMLMHead, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertOnlyNSPHead(nn.Module): + def __init__(self, config): + super(BertOnlyNSPHead, self).__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +class BertPreTrainingHeads(nn.Module): + def __init__(self, config, bert_model_embedding_weights): + super(BertPreTrainingHeads, self).__init__() + self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class BertPreTrainedModel(nn.Module): + """ An abstract class to handle weights initialization and + a simple interface for dowloading and loading pretrained models. + """ + def __init__(self, config, *inputs, **kwargs): + super(BertPreTrainedModel, self).__init__() + if not isinstance(config, BertConfig): + raise ValueError( + "Parameter config in `{}(config)` should be an instance of class `BertConfig`. 
" + "To create a model from a Google pretrained model use " + "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( + self.__class__.__name__, self.__class__.__name__ + )) + self.config = config + + def init_bert_weights(self, module): + """ Initialize the weights. + """ + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + elif isinstance(module, BertLayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, + from_tf=False, *inputs, **kwargs): + """ + Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict. + Download and cache the pre-trained model file if needed. + + Params: + pretrained_model_name_or_path: either: + - a str with the name of a pre-trained model to load selected in the list of: + . `bert-base-uncased` + . `bert-large-uncased` + . `bert-base-cased` + . `bert-large-cased` + . `bert-base-multilingual-uncased` + . `bert-base-multilingual-cased` + . `bert-base-chinese` + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance + - a path or url to a pretrained model archive containing: + . `bert_config.json` a configuration file for the model + . `model.chkpt` a TensorFlow checkpoint + from_tf: should we load the weights from a locally saved TensorFlow checkpoint + cache_dir: an optional path to a folder in which the pre-trained models will be cached. + state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models + *inputs, **kwargs: additional input for the specific Bert class + (ex: num_labels for BertForSequenceClassification) + """ + if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP: + archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path] + else: + archive_file = pretrained_model_name_or_path + # redirect to the cache, if necessary + try: + resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). 
" + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), + archive_file)) + return None + if resolved_archive_file == archive_file: + logger.info("loading archive file {}".format(archive_file)) + else: + logger.info("loading archive file {} from cache at {}".format( + archive_file, resolved_archive_file)) + tempdir = None + if os.path.isdir(resolved_archive_file) or from_tf: + serialization_dir = resolved_archive_file + else: + # Extract archive to temp dir + tempdir = tempfile.mkdtemp() + logger.info("extracting archive file {} to temp dir {}".format( + resolved_archive_file, tempdir)) + with tarfile.open(resolved_archive_file, 'r:gz') as archive: + archive.extractall(tempdir) + serialization_dir = tempdir + # Load config + config_file = os.path.join(serialization_dir, CONFIG_NAME) + config = BertConfig.from_json_file(config_file) + logger.info("Model config {}".format(config)) + # Instantiate model. + model = cls(config, *inputs, **kwargs) + if state_dict is None and not from_tf: + weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) + state_dict = torch.load(weights_path, map_location='cpu' if not torch.cuda.is_available() else None) + if tempdir: + # Clean up temp dir + shutil.rmtree(tempdir) + if from_tf: + # Directly load from a TensorFlow checkpoint + weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME) + return load_tf_weights_in_bert(model, weights_path) + # Load from a PyTorch state_dict + old_keys = [] + new_keys = [] + for key in state_dict.keys(): + new_key = None + if 'gamma' in key: + new_key = key.replace('gamma', 'weight') + if 'beta' in key: + new_key = key.replace('beta', 'bias') + if new_key: + old_keys.append(key) + new_keys.append(new_key) + for old_key, new_key in zip(old_keys, new_keys): + state_dict[new_key] = state_dict.pop(old_key) + + missing_keys = [] + unexpected_keys = [] + error_msgs = [] + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, '_metadata', None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + def load(module, prefix=''): + local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) + module._load_from_state_dict( + state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) + for name, child in module._modules.items(): + if child is not None: + load(child, prefix + name + '.') + start_prefix = '' + if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()): + start_prefix = 'bert.' + load(model, prefix=start_prefix) + if len(missing_keys) > 0: + logger.info("Weights of {} not initialized from pretrained model: {}".format( + model.__class__.__name__, missing_keys)) + if len(unexpected_keys) > 0: + logger.info("Weights from pretrained model not used in {}: {}".format( + model.__class__.__name__, unexpected_keys)) + if len(error_msgs) > 0: + raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( + model.__class__.__name__, "\n\t".join(error_msgs))) + return model + + +class BertModel(BertPreTrainedModel): + """BERT model ("Bidirectional Embedding Representations from a Transformer"). 
+ + Params: + config: a BertConfig class instance with the configuration to build a new model + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. + + Outputs: Tuple of (encoded_layers, pooled_output) + `encoded_layers`: controled by `output_all_encoded_layers` argument: + - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end + of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each + encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], + - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding + to the last attention block of shape [batch_size, sequence_length, hidden_size], + `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a + classifier pretrained on top of the hidden state associated to the first character of the + input (`CLS`) to train on the Next-Sentence task (see BERT's paper). + + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = modeling.BertModel(config=config) + all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertModel, self).__init__(config) + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
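+ # Note that attention_mask defaults to torch.ones_like(input_ids) above, i.e. nothing is
+ # masked unless the caller passes an explicit 0/1 padding mask of shape [batch_size, seq_length].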
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + embedding_output = self.embeddings(input_ids, token_type_ids) + encoded_layers = self.encoder(embedding_output, + extended_attention_mask, + output_all_encoded_layers=output_all_encoded_layers) + sequence_output = encoded_layers[-1] + pooled_output = self.pooler(sequence_output) + if not output_all_encoded_layers: + encoded_layers = encoded_layers[-1] + return encoded_layers, pooled_output + + +class BertForPreTraining(BertPreTrainedModel): + """BERT model with pre-training heads. + This module comprises the BERT model followed by the two pre-training heads: + - the masked language modeling head, and + - the next sentence classification head. + + Params: + config: a BertConfig class instance with the configuration to build a new model. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] + with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss + is only computed for the labels set in [0, ..., vocab_size] + `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] + with indices selected in [0, 1]. + 0 => next sentence is the continuation, 1 => next sentence is a random sentence. + + Outputs: + if `masked_lm_labels` and `next_sentence_label` are not `None`: + Outputs the total_loss which is the sum of the masked language modeling loss and the next + sentence classification loss. + if `masked_lm_labels` or `next_sentence_label` is `None`: + Outputs a tuple comprising + - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and + - the next sentence classification logits of shape [batch_size, 2]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + model = BertForPreTraining(config) + masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config): + super(BertForPreTraining, self).__init__(config) + self.bert = BertModel(config) + self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): + sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, + output_all_encoded_layers=False) + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + if masked_lm_labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss(ignore_index=-1) + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + return total_loss + else: + return prediction_scores, seq_relationship_score + + +class BertForSequenceClassification(BertPreTrainedModel): + """BERT model for classification. + This module is composed of the BERT model with a linear layer on top of + the pooled output. + + Params: + `config`: a BertConfig class instance with the configuration to build a new model. + `num_labels`: the number of classes for the classifier. Default = 2. + + Inputs: + `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] + with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts + `extract_features.py`, `run_classifier.py` and `run_squad.py`) + `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token + types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to + a `sentence B` token (see BERT paper for more details). + `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices + selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max + input sequence length in the current batch. It's the mask that we typically use for attention when + a batch has varying length sentences. + `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] + with indices selected in [0, ..., num_labels]. + + Outputs: + if `labels` is not `None`: + Outputs the CrossEntropy classification loss of the output with the labels. + if `labels` is `None`: + Outputs the classification logits of shape [batch_size, num_labels]. 
+ + Example usage: + ```python + # Already been converted into WordPiece token ids + input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) + input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) + token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) + + config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) + + num_labels = 2 + + model = BertForSequenceClassification(config, num_labels) + logits = model(input_ids, token_type_ids, input_mask) + ``` + """ + def __init__(self, config, num_labels): + super(BertForSequenceClassification, self).__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, num_labels) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + return logits diff --git a/models/char_cnn/README.md b/models/char_cnn/README.md index d4a69c6..982c17e 100644 --- a/models/char_cnn/README.md +++ b/models/char_cnn/README.md @@ -1,53 +1,33 @@ -## Character-level Convolutional Network +## Character-level Convolutional Neural Network -Implementation of Char-CNN from Character-level Convolutional Networks for Text Classification (http://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf) +Implementation of [Char-CNN (2015)](http://papers.nips.cc/paper/5782-character-level-convolutional-networks-for-text-classification.pdf) ## Quick Start To run the model on Reuters dataset, just run the following from the Castor working directory: ``` -python -m char_cnn --dataset Reuters --gpu 1 --batch_size 128 --lr 0.001 +python -m models.char_cnn --dataset Reuters --batch-size 128 --lr 0.001 --seed 3435 ``` -To test the model, you can use the following command. +The best model weights will be saved in ``` -python -m char_cnn --trained_model kim_cnn/saves/Reuters/best_model.pt +models/char_cnn/saves/Reuters/best_model.pt ``` +To test the model, you can use the following command. + +``` +python -m models.char_cnn --dataset Reuters --batch_size 32 --trained-model models/char_cnn/saves/Reuters/best_model.pt --seed 3435 +``` ## Dataset We experiment the model on the following datasets. -- Reuters Newswire (RCV-1) -- Arxiv Academic Paper Dataset (AAPD) +- Reuters (ModApte) +- AAPD ## Settings Adam is used for training. - -## Dataset Results - -### RCV-1 -``` -python -m char_cnn --dataset Reuters --gpu 1 --batch_size 128 --lr 0.001 -``` -  | Accuracy | Avg. Precision | Avg. Recall | Avg. F1 --- | -- | -- | -- | -- -Char-CNN (Dev) | 0.585 | 0.702 | 0.569 | 0.628 -Char-CNN (Test) | 0.589 | 0.691 | 0.552 | 0.614 - -### AAPD -``` -python -m char_cnn --dataset AAPD --gpu 1 --batch_size 128 --lr 0.001 -``` -  | Accuracy | Avg. Precision | Avg. Recall | Avg. F1 --- | -- | -- | -- | -- -Char-CNN (Dev) | 0.305 | 0.681 | 0.537 | 0.600 -Char-CNN (Test) | 0.294 | 0.681 | 0.526 | 0.593 - -## TODO -- Support ONNX export. Currently throws a ONNX export failed (Couldn't export Python operator forward_flattened_wrapper) exception. 
-- Parameters tuning - diff --git a/models/char_cnn/__main__.py b/models/char_cnn/__main__.py index 4e39cfe..4344698 100644 --- a/models/char_cnn/__main__.py +++ b/models/char_cnn/__main__.py @@ -1,4 +1,5 @@ import logging +import os import random from copy import deepcopy @@ -115,6 +116,10 @@ def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_si if args.cuda: model.cuda() + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + parameter = filter(lambda p: p.requires_grad, model.parameters()) optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) diff --git a/models/char_cnn/args.py b/models/char_cnn/args.py index 2c12a87..62c517f 100644 --- a/models/char_cnn/args.py +++ b/models/char_cnn/args.py @@ -14,9 +14,9 @@ def get_args(): parser.add_argument('--epoch-decay', type=int, default=15) parser.add_argument('--weight-decay', type=float, default=0) - parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') - parser.add_argument('--save-path', type=str, default=os.path.join('char_cnn', 'saves')) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'char_cnn')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) diff --git a/models/han/README.md b/models/han/README.md index 87bea53..1999332 100644 --- a/models/han/README.md +++ b/models/han/README.md @@ -1,56 +1,42 @@ # Hierarchical Attention Networks -Implementation for Hierarchical Attention Networks for Documnet Classification of [HAN (2016)](https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf) with PyTorch and Torchtext. - -## Model Type - -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. -- non-static: Same as above but the pretrained vectors are fine-tuned for each task. - - +Implementation of Hierarchical Attention Networks for Documnet Classification [HAN (2016)](https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf) with PyTorch and Torchtext. ## Quick Start -To run the model on Reuters dataset on static, just run the following from the Castor working directory. +To run the model on Reuters dataset on static, just run the following from the project working directory. ``` -python -m han --dataset Reuters +python -m models.han --dataset Reuters --mode static --batch-size 32 --lr 0.01 --epochs 30 --seed 3435 ``` -The file will be saved in +The best model weights will be saved in ``` -han/saves/best_model.pt +models/han/saves/Reuters/best_model.pt ``` To test the model, you can use the following command. ``` -python -m han --trained_model han/saves/Reuters/static_best_model.pt +python -m models.han --dataset Reuters --mode static --batch-size 32 --trained-model models/han/saves/Reuters/best_model.pt --seed 3435 ``` +## Model Types + +- rand: All words are randomly initialized and then modified during training. 
+- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. + ## Dataset We experiment the model on the following datasets. -- Reuters-21578: Split the data into sentences for the sentence level attention model and split the sentences into words for the word level attention. The word2vec pretrained embeddings were used for the task. +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 ## Settings Adam is used for training. - -## Training Time - -For training time, when - -``` -torch.backends.cudnn.deterministic = True -``` - -is specified, the training will be ~10 min. Reuters-21578 is a relatively small dataset and the implementation is a vectorized one, hence the speed. - - - -## TODO -- a combined hyperparameter tuning on a few of the datasets and report results with the hyperparameters diff --git a/models/han/__main__.py b/models/han/__main__.py index f213234..6d77b7c 100644 --- a/models/han/__main__.py +++ b/models/han/__main__.py @@ -1,4 +1,5 @@ import logging +import os import random from copy import deepcopy @@ -12,8 +13,8 @@ from datasets.imdb import IMDBHierarchical as IMDB from datasets.reuters import ReutersHierarchical as Reuters from datasets.yelp2014 import Yelp2014Hierarchical as Yelp2014 -from models.han.model import HAN from models.han.args import get_args +from models.han.model import HAN class UnknownWordVecCache(object): @@ -117,6 +118,10 @@ def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_si if args.cuda: model.cuda() + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + parameter = filter(lambda p: p.requires_grad, model.parameters()) optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) diff --git a/models/han/args.py b/models/han/args.py index 76d062b..c803f71 100644 --- a/models/han/args.py +++ b/models/han/args.py @@ -17,9 +17,9 @@ def get_args(): parser.add_argument('--word-num-hidden', type=int, default=50) parser.add_argument('--sentence-num-hidden', type=int, default=50) - parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') - parser.add_argument('--save-path', type=str, default=os.path.join('han', 'saves')) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'han')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) diff --git a/models/kim_cnn/README.md b/models/kim_cnn/README.md index f360f77..1b53cac 100644 --- a/models/kim_cnn/README.md +++ b/models/kim_cnn/README.md @@ -1,130 +1,48 @@ -# kim_cnn +# KimCNN Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch and Torchtext. -## Model Type - -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). 
All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. -- non-static: Same as above but the pretrained vectors are fine-tuned for each task. -- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with word2vec.# text-classification-cnn -Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch. - ## Quick Start -To run the model on SST-1 dataset on multichannel, just run the following from the Castor working directory. +To run the model on the Reuters dataset, just run the following from the working directory: ``` -python -m kim_cnn --mode multichannel +python -m models.kim_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --seed 3435 ``` -The file will be saved in +The best model weights will be saved in ``` -kim_cnn/saves/best_model.pt +models/kim_cnn/saves/Reuters/best_model.pt ``` To test the model, you can use the following command. ``` -python -m kim_cnn --trained_model kim_cnn/saves/SST-1/multichannel_best_model.pt --mode multichannel -``` - -## Dataset - -We experiment the model on the following datasets. - -- SST-1: Keep the original splits and train with phrase level dataset and test on sentence level dataset. -- SST-2: Same as SST-1 but with neutral reviews removed and binary labels. - -## Settings - -Adadelta is used for training. - -## Training Time - -For training time, when - -``` -torch.backends.cudnn.deterministic = True -``` - -is specified, the training will be ~3h because deterministic cnn algorithm is used (accuracy v.s. speed). - -Other option is that - -``` -torch.backends.cudnn.enabled = False -``` -but this will take ~6-7x training time. - -## SST-1 Dataset Results - -**Random** - -``` -python -m kim_cnn --dataset SST-1 --mode rand --lr 0.5777 --weight_decay 0.0007 --dropout 0 -``` - -**Static** - -``` -python -m kim_cnn --dataset SST-1 --mode static --lr 0.3213 --weight_decay 0.0002 --dropout 0.4 -``` - -**Non-static** - -``` -python -m kim_cnn --dataset SST-1 --mode non-static --lr 0.388 --weight_decay 0.0004 --dropout 0.2 +python -m models.kim_cnn --dataset Reuters --mode static --batch-size 32 --trained-model models/kim_cnn/saves/Reuters/best_model.pt --seed 3435 ``` -**Multichannel** - -``` -python -m kim_cnn --dataset SST-1 --mode multichannel --lr 0.3782 --weight_decay 0.0002 --dropout 0.4 -``` - -Using deterministic algorithm for cuDNN. 
- -| Test Accuracy on SST-1 | rand | static | non-static | multichannel | -|:------------------------------:|:----------:|:------------:|:--------------:|:---------------:| -| Paper | 45.0 | 45.5 | 48.0 | 47.4 | -| PyTorch using above configs | 44.3 | 47.9 | 48.6 | 49.2 | - -## SST-2 Dataset Results - -**Random** - -``` -python -m kim_cnn --dataset SST-2 --mode rand --lr 0.564 --weight_decay 0.0007 --dropout 0.5 -``` +## Model Types -**Static** - -``` -python -m kim_cnn --dataset SST-2 --mode static --lr 0.5589 --weight_decay 0.0004 --dropout 0.5 -``` - -**Non-static** - -``` -python -m kim_cnn --dataset SST-2 --mode non-static --lr 0.5794 --weight_decay 0.0003 --dropout 0.3 -``` - -**Multichannel** +- rand: All words are randomly initialized and then modified during training. +- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). + All words, including the unknown ones that are initialized with zero, are kept static and only the other + parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. +- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each + filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the + model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with + word2vec. -``` -python -m kim_cnn --dataset SST-2 --mode multichannel --lr 0.7373 --weight_decay 0.0001 --dropout 0.1 -``` +## Dataset -Using deterministic algorithm for cuDNN. +We experiment the model on the following datasets: -| Test Accuracy on SST-2 | rand | static | non-static | multichannel | -|:------------------------------:|:----------:|:------------:|:--------------:|:---------------:| -| Paper | 82.7 | 86.8 | 87.2 | 88.1 | -| PyTorch using above configs | 83.0 | 86.4 | 87.3 | 87.4 | +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 -## TODO +## Settings -- More experiments on subjectivity -- Parameters tuning +Adam is used for training. 
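The four embedding modes described in the KimCNN README above come down to frozen versus trainable embedding layers. The sketch below is a minimal PyTorch illustration of that wiring, assuming pre-trained word2vec vectors are already loaded into a tensor; it is not the repository's KimCNN implementation, and all names and sizes are illustrative.

```python
# Minimal sketch of the rand/static/non-static/multichannel embedding modes.
# Assumes pre-trained vectors are already available as a FloatTensor.
import torch
import torch.nn as nn


def build_embedding_channels(mode, vocab_size, dim, pretrained=None):
    """Return (static_channel, non_static_channel); either may be None."""
    if mode == 'rand':
        return None, nn.Embedding(vocab_size, dim)  # trained from scratch
    static = nn.Embedding.from_pretrained(pretrained, freeze=True)       # kept fixed
    non_static = nn.Embedding.from_pretrained(pretrained, freeze=False)  # fine-tuned
    if mode == 'static':
        return static, None
    if mode == 'non-static':
        return None, non_static
    if mode == 'multichannel':
        # Gradients flow only through the non-static channel; the static copy stays frozen.
        return static, non_static
    raise ValueError('unknown mode: %s' % mode)


# Illustrative usage with random stand-in vectors (10k-word vocab, 300 dimensions)
vectors = torch.randn(10000, 300)
static_ch, non_static_ch = build_embedding_channels('multichannel', 10000, 300, vectors)
```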
diff --git a/models/kim_cnn/__main__.py b/models/kim_cnn/__main__.py index 742c5b1..5cf6720 100644 --- a/models/kim_cnn/__main__.py +++ b/models/kim_cnn/__main__.py @@ -1,4 +1,5 @@ import logging +import os import random from copy import deepcopy @@ -111,6 +112,10 @@ def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_si if args.cuda: model.cuda() + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + parameter = filter(lambda p: p.requires_grad, model.parameters()) optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) diff --git a/models/kim_cnn/args.py b/models/kim_cnn/args.py index a5c5ac1..10094f8 100644 --- a/models/kim_cnn/args.py +++ b/models/kim_cnn/args.py @@ -15,9 +15,9 @@ def get_args(): parser.add_argument('--epoch-decay', type=int, default=15) parser.add_argument('--weight-decay', type=float, default=0) - parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') - parser.add_argument('--save-path', type=str, default=os.path.join('kim_cnn', 'saves')) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'kim_cnn')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) diff --git a/models/reg_lstm/README.md b/models/reg_lstm/README.md index a31512a..60d176d 100644 --- a/models/reg_lstm/README.md +++ b/models/reg_lstm/README.md @@ -1,40 +1,52 @@ -# LSTM with Regularization +# RegLSTM -Implementation of a standard LSTM using PyTorch and Torchtext for text classification with Regularization. +Implementation of a standard LSTM with regularization using PyTorch for text classification. -## Model Type +## Quick Start -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. -- non-static: Same as above but the pretrained vectors are fine-tuned for each task. +To run the model on the Reuters dataset in static mode, just run the following from the project working directory. -## Quick Start +``` +python -m models.reg_lstm --dataset Reuters --mode static --batch-size 32 --lr 0.01 --epochs 30 --bidirectional --num-layers 1 --hidden-dim 512 --wdrop 0.1 --embed-droprate 0.2 --dropout 0.5 --beta-ema 0.99 --seed 3435 +``` -To run the model on Reuters dataset on static, just run the following from the Castor working directory. +The best model weights will be saved in ``` -python -m lstm_baseline --mode static +models/reg_lstm/saves/Reuters/best_model.pt ``` -## Dataset +To test the model, you can use the following command. -We experiment the model on the following datasets. +``` +python -m models.reg_lstm --dataset Reuters --mode static --batch-size 32 --trained-model models/reg_lstm/saves/Reuters/best_model.pt --seed 3435 +``` -- Reuters dataset - ModApte splits +## Model Types -## Settings +- rand: All words are randomly initialized and then modified during training.
+- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). + All words, including the unknown ones that are initialized with zero, are kept static and only the other + parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. -Adam is used for training with an option of temporal averaging. +## Regularization -## TODO -- Support ONNX export. Currently throws a ONNX export failed (Couldn't export Python operator forward_flattened_wrapper) exception. -- Add dataset results with different hyperparameters -- Parameters tuning +Regularization options like embedding dropout, weight drop, temporal activation regularization and temporal averaging are available +through command line args. + +## Dataset -## Regularization Module +We experiment the model on the following datasets: + +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 + +## Settings -- Regularization methods like Embedding dropout, Weight Dropped LSTM and Temporal Activation Regularization are implemented. -- Temporal Averaging is also an additional module +Adam is used for training with an option for temporal averaging. ## Acknowledgement - The additional modules have been heavily inspired by two open source repositories: diff --git a/models/reg_lstm/__main__.py b/models/reg_lstm/__main__.py index ecc37f6..c0dfc4a 100644 --- a/models/reg_lstm/__main__.py +++ b/models/reg_lstm/__main__.py @@ -1,4 +1,5 @@ import logging +import os import random from copy import deepcopy @@ -114,6 +115,10 @@ def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_si if args.cuda: model.cuda() + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + parameter = filter(lambda p: p.requires_grad, model.parameters()) optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) diff --git a/models/reg_lstm/args.py b/models/reg_lstm/args.py index 1b025d0..30761ae 100644 --- a/models/reg_lstm/args.py +++ b/models/reg_lstm/args.py @@ -24,9 +24,9 @@ def get_args(): parser.add_argument('--tar', type=float, default=0.0, help="temporal activation regularization") parser.add_argument('--ar', type=float, default=0.0, help="activation regularization") - parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') - parser.add_argument('--save-path', type=str, default=os.path.join('reg_lstm', 'saves')) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'reg_lstm')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) diff --git a/models/xml_cnn/README.md b/models/xml_cnn/README.md index 56f52b9..416e874 100644 --- a/models/xml_cnn/README.md +++ b/models/xml_cnn/README.md @@ -1,40 +1,40 @@ # XML_CNN -Implementation for XML Convolutional Neural Network for Document Classification of [XML-CNN (2014)](http://nyc.lti.cs.cmu.edu/yiming/Publications/jliu-sigir17.pdf) with PyTorch and Torchtext. +Implementation of XML Convolutional Neural Network for Document Classification [XML-CNN (2014)](http://nyc.lti.cs.cmu.edu/yiming/Publications/jliu-sigir17.pdf) with PyTorch and Torchtext. 
-## Model Type +## Quick Start -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. -- non-static: Same as above but the pretrained vectors are fine-tuned for each task. +To run the model on the Reuters dataset, just run the following from the working directory: -## Quick Start +``` +python -m models.xml_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --dynamic-pool-length 8 --seed 3435 +``` -To run the model on Reuters dataset on static just run the following from the Castor working directory. +The best model weights will be saved in ``` -python -m xml_cnn --dataset Reuters +models/xml_cnn/saves/Reuters/best_model.pt ``` -The file will be saved in +To test the model, you can use the following command. ``` -xml_cnn/saves/best_model.pt +python -m models.xml_cnn --dataset Reuters --mode static --batch-size 32 --dynamic-pool-length 8 --trained-model models/xml_cnn/saves/Reuters/best_model.pt --seed 3435 ``` +## Model Types +- rand: All words are randomly initialized and then modified during training. +- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). All words -- including the unknown ones that are initialized with zero -- are kept static and only the other parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. ## Dataset We experiment the model on the following datasets. -- Reuters: A multi-label document classification dataset. +- Reuters (ModApte) +- AAPD ## Settings Adam is used for training. - - -## TODO - -- Report hyperparameters and results after finetuning on other datasets like AAPD.
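The `--dynamic-pool-length` flag used in the XML-CNN commands above corresponds to the dynamic max-pooling step of XML-CNN, where each convolutional feature map is reduced to a fixed number of pooled values before a bottleneck layer produces one score per label. The sketch below is a minimal illustration of that idea, not the repository's XmlCNN model; all layer names and sizes are illustrative.

```python
# Minimal sketch of XML-CNN style dynamic max pooling followed by a bottleneck
# layer. Sizes are illustrative; this is not the repository's XmlCNN model.
import torch
import torch.nn as nn


class DynamicPoolBlock(nn.Module):
    def __init__(self, embed_dim=300, num_filters=100, kernel_size=3,
                 pool_length=8, bottleneck_dim=512, num_labels=90):
        super(DynamicPoolBlock, self).__init__()
        self.conv = nn.Conv1d(embed_dim, num_filters, kernel_size)
        self.pool = nn.AdaptiveMaxPool1d(pool_length)   # pool each feature map down to a fixed length
        self.bottleneck = nn.Linear(num_filters * pool_length, bottleneck_dim)
        self.output = nn.Linear(bottleneck_dim, num_labels)  # one logit per label

    def forward(self, x):
        # x: (batch, embed_dim, seq_len) -> (batch, num_labels) multi-label logits
        h = torch.relu(self.conv(x))
        h = self.pool(h)
        h = h.view(h.size(0), -1)
        return self.output(torch.relu(self.bottleneck(h)))


logits = DynamicPoolBlock()(torch.randn(2, 300, 500))  # shape: (2, 90)
```

Training such a block would pair the logits with a multi-label loss such as `nn.BCEWithLogitsLoss`, which matches the multi-label nature of Reuters and AAPD noted above.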
diff --git a/models/xml_cnn/__main__.py b/models/xml_cnn/__main__.py index 1f6c70f..5e30273 100644 --- a/models/xml_cnn/__main__.py +++ b/models/xml_cnn/__main__.py @@ -1,6 +1,7 @@ -from copy import deepcopy import logging +import os import random +from copy import deepcopy import numpy as np import torch @@ -8,10 +9,10 @@ from common.evaluate import EvaluatorFactory from common.train import TrainerFactory -from datasets.reuters import Reuters from datasets.aapd import AAPD -from datasets.yelp2014 import Yelp2014 from datasets.imdb import IMDB +from datasets.reuters import Reuters +from datasets.yelp2014 import Yelp2014 from models.xml_cnn.args import get_args from models.xml_cnn.model import XmlCNN @@ -111,6 +112,10 @@ def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_si if args.cuda: model.cuda() + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + parameter = filter(lambda p: p.requires_grad, model.parameters()) optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) diff --git a/models/xml_cnn/args.py b/models/xml_cnn/args.py index 1d02582..5d269ed 100644 --- a/models/xml_cnn/args.py +++ b/models/xml_cnn/args.py @@ -18,12 +18,11 @@ def get_args(): parser.add_argument('--num-bottleneck-hidden', type=int, default=512) parser.add_argument('--dynamic-pool-length', type=int, default=32) - parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'Castor-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') - parser.add_argument('--save-path', type=str, default=os.path.join('xml_cnn', 'saves')) + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'xml_cnn')) parser.add_argument('--resume-snapshot', type=str) parser.add_argument('--trained-model', type=str) - args = parser.parse_args() return args diff --git a/requirements.txt b/requirements.txt index 53da0ee..e30f873 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,5 +7,6 @@ Cython==0.28.2 pyjnius==1.1.1 scikit-learn==0.19.1 scipy==1.0.0 +tensorboardX==1.6 torchtext==0.2.3 tqdm>=4.23,<4.23.99 diff --git a/utils/io.py b/utils/io.py new file mode 100644 index 0000000..190c625 --- /dev/null +++ b/utils/io.py @@ -0,0 +1,257 @@ +# Copyright 2018 AllenNLP authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# This file is adapted from https://github.com/allenai/allennlp + +from __future__ import absolute_import, division, print_function, unicode_literals + +import json +import logging +import os +import shutil +import sys +import tempfile +from functools import wraps +from hashlib import sha256 +from io import open + +import boto3 +import requests +from botocore.exceptions import ClientError +from tqdm import tqdm + +from urllib.parse import urlparse + +try: + from pathlib import Path + PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + Path.home() / '.pytorch_pretrained_bert')) +except (AttributeError, ImportError): + PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', + os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert')) + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +def url_to_filename(url, etag=None): + """ + Convert `url` into a hashed filename in a repeatable way. + If `etag` is specified, append its hash to the url's, delimited + by a period. + """ + url_bytes = url.encode('utf-8') + url_hash = sha256(url_bytes) + filename = url_hash.hexdigest() + + if etag: + etag_bytes = etag.encode('utf-8') + etag_hash = sha256(etag_bytes) + filename += '.' + etag_hash.hexdigest() + + return filename + + +def filename_to_url(filename, cache_dir=None): + """ + Return the url and etag (which may be ``None``) stored for `filename`. + Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + cache_path = os.path.join(cache_dir, filename) + if not os.path.exists(cache_path): + raise EnvironmentError("file {} not found".format(cache_path)) + + meta_path = cache_path + '.json' + if not os.path.exists(meta_path): + raise EnvironmentError("file {} not found".format(meta_path)) + + with open(meta_path, encoding="utf-8") as meta_file: + metadata = json.load(meta_file) + url = metadata['url'] + etag = metadata['etag'] + + return url, etag + + +def cached_path(url_or_filename, cache_dir=None): + """ + Given something that might be a URL (or might be a local path), + determine which. If it's a URL, download the file and cache it, and + return the path to the cached file. If it's already a local path, + make sure the file exists and then return the path. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(url_or_filename, Path): + url_or_filename = str(url_or_filename) + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + parsed = urlparse(url_or_filename) + + if parsed.scheme in ('http', 'https', 's3'): + # URL, so get it from the cache (downloading if necessary) + return get_from_cache(url_or_filename, cache_dir) + elif os.path.exists(url_or_filename): + # File, and it exists. + return url_or_filename + elif parsed.scheme == '': + # File, but it doesn't exist. 
+ raise EnvironmentError("file {} not found".format(url_or_filename)) + else: + # Something unknown + raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) + + +def split_s3_path(url): + """Split a full s3 path into the bucket name and path.""" + parsed = urlparse(url) + if not parsed.netloc or not parsed.path: + raise ValueError("bad s3 path {}".format(url)) + bucket_name = parsed.netloc + s3_path = parsed.path + # Remove '/' at beginning of path. + if s3_path.startswith("/"): + s3_path = s3_path[1:] + return bucket_name, s3_path + + +def s3_request(func): + """ + Wrapper function for s3 requests in order to create more helpful error + messages. + """ + + @wraps(func) + def wrapper(url, *args, **kwargs): + try: + return func(url, *args, **kwargs) + except ClientError as exc: + if int(exc.response["Error"]["Code"]) == 404: + raise EnvironmentError("file {} not found".format(url)) + else: + raise + + return wrapper + + +@s3_request +def s3_etag(url): + """Check ETag on S3 object.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_object = s3_resource.Object(bucket_name, s3_path) + return s3_object.e_tag + + +@s3_request +def s3_get(url, temp_file): + """Pull a file directly from S3.""" + s3_resource = boto3.resource("s3") + bucket_name, s3_path = split_s3_path(url) + s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) + + +def http_get(url, temp_file): + req = requests.get(url, stream=True) + content_length = req.headers.get('Content-Length') + total = int(content_length) if content_length is not None else None + progress = tqdm(unit="B", total=total) + for chunk in req.iter_content(chunk_size=1024): + if chunk: # filter out keep-alive new chunks + progress.update(len(chunk)) + temp_file.write(chunk) + progress.close() + + +def get_from_cache(url, cache_dir=None): + """ + Given a URL, look for the corresponding dataset in the local cache. + If it's not there, download it. Then return the path to the cached file. + """ + if cache_dir is None: + cache_dir = PYTORCH_PRETRAINED_BERT_CACHE + if sys.version_info[0] == 3 and isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + + # Get eTag to add to filename, if it exists. + if url.startswith("s3://"): + etag = s3_etag(url) + else: + response = requests.head(url, allow_redirects=True) + if response.status_code != 200: + raise IOError("HEAD request failed for url {} with status code {}" + .format(url, response.status_code)) + etag = response.headers.get("ETag") + + filename = url_to_filename(url, etag) + + # get cache path to put the file + cache_path = os.path.join(cache_dir, filename) + + if not os.path.exists(cache_path): + # Download to temporary file, then copy to cache dir once finished. + # Otherwise you get corrupt cache entries if the download gets interrupted. 
+ with tempfile.NamedTemporaryFile() as temp_file: + logger.info("%s not found in cache, downloading to %s", url, temp_file.name) + + # GET file object + if url.startswith("s3://"): + s3_get(url, temp_file) + else: + http_get(url, temp_file) + + # we are copying the file before closing it, so flush to avoid truncation + temp_file.flush() + # shutil.copyfileobj() starts at the current position, so go to the start + temp_file.seek(0) + + logger.info("copying %s to cache at %s", temp_file.name, cache_path) + with open(cache_path, 'wb') as cache_file: + shutil.copyfileobj(temp_file, cache_file) + + logger.info("creating metadata file for %s", cache_path) + meta = {'url': url, 'etag': etag} + meta_path = cache_path + '.json' + with open(meta_path, 'w', encoding="utf-8") as meta_file: + json.dump(meta, meta_file) + + logger.info("removing temp file %s", temp_file.name) + + return cache_path + + +def read_set_from_file(filename): + """ + Extract a de-duped collection (set) of text from a file. + Expected file format is one item per line. + """ + + collection = set() + with open(filename, 'r', encoding='utf-8') as file_: + for line in file_: + collection.add(line.rstrip()) + return collection + + +def get_file_extension(path, dot=True, lower=True): + ext = os.path.splitext(path)[1] + ext = ext if dot else ext[1:] + return ext.lower() if lower else ext \ No newline at end of file diff --git a/utils/optimization.py b/utils/optimization.py new file mode 100644 index 0000000..fa911e5 --- /dev/null +++ b/utils/optimization.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch optimization for BERT model.""" + +import math +import torch +from torch.optim import Optimizer +from torch.optim.optimizer import required +from torch.nn.utils import clip_grad_norm_ +import logging + +logger = logging.getLogger(__name__) + +def warmup_cosine(x, warmup=0.002): + if x < warmup: + return x/warmup + return 0.5 * (1.0 + torch.cos(math.pi * x)) + +def warmup_constant(x, warmup=0.002): + """ Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps. + Learning rate is 1. afterwards. """ + if x < warmup: + return x/warmup + return 1.0 + +def warmup_linear(x, warmup=0.002): + """ Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step. + After `t_total`-th training step, learning rate is zero. """ + if x < warmup: + return x/warmup + return max((x-1.)/(warmup-1.), 0) + +SCHEDULES = { + 'warmup_cosine': warmup_cosine, + 'warmup_constant': warmup_constant, + 'warmup_linear': warmup_linear, +} + + +class BertAdam(Optimizer): + """Implements BERT version of Adam algorithm with weight decay fix. + Params: + lr: learning rate + warmup: portion of t_total for the warmup, -1 means no warmup. 
Default: -1 + t_total: total number of training steps for the learning + rate schedule, -1 means constant learning rate. Default: -1 + schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' + b1: Adams b1. Default: 0.9 + b2: Adams b2. Default: 0.999 + e: Adams epsilon. Default: 1e-6 + weight_decay: Weight decay. Default: 0.01 + max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0 + """ + def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', + b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, + max_grad_norm=1.0): + if lr is not required and lr < 0.0: + raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) + if schedule not in SCHEDULES: + raise ValueError("Invalid schedule parameter: {}".format(schedule)) + if not 0.0 <= warmup < 1.0 and not warmup == -1: + raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) + if not 0.0 <= b1 < 1.0: + raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) + if not 0.0 <= b2 < 1.0: + raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) + if not e >= 0.0: + raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e)) + defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, + b1=b1, b2=b2, e=e, weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + super(BertAdam, self).__init__(params, defaults) + + def get_lr(self): + lr = [] + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + if len(state) == 0: + return [0] + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup']) + else: + lr_scheduled = group['lr'] + lr.append(lr_scheduled) + return lr + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + warned_for_t_total = False + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['next_m'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['next_v'] = torch.zeros_like(p.data) + + next_m, next_v = state['next_m'], state['next_v'] + beta1, beta2 = group['b1'], group['b2'] + + # Add grad clipping + if group['max_grad_norm'] > 0: + clip_grad_norm_(p, group['max_grad_norm']) + + # Decay the first and second moment running average coefficient + # In-place operations to update the averages at the same time + next_m.mul_(beta1).add_(1 - beta1, grad) + next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) + update = next_m / (next_v.sqrt() + group['e']) + + # Just adding the square of the weights to the loss function is *not* + # the correct way of using L2 regularization/weight decay with Adam, + # since that will interact with the m and v parameters in strange ways. + # + # Instead we want to decay the weights in a manner that doesn't interact + # with the m/v parameters. 
This is equivalent to adding the square + # of the weights to the loss with plain (non-momentum) SGD. + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + + if group['t_total'] != -1: + schedule_fct = SCHEDULES[group['schedule']] + progress = state['step']/group['t_total'] + lr_scheduled = group['lr'] * schedule_fct(progress, group['warmup']) + # warning for exceeding t_total (only active with warmup_linear + if group['schedule'] == "warmup_linear" and progress > 1. and not warned_for_t_total: + logger.warning( + "Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. " + "Please set 't_total' of {} correctly.".format(group['schedule'], lr_scheduled, self.__class__.__name__)) + warned_for_t_total = True + # end warning + else: + lr_scheduled = group['lr'] + + update_with_lr = lr_scheduled * update + p.data.add_(-update_with_lr) + + state['step'] += 1 + + # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 + # No bias correction + # bias_correction1 = 1 - beta1 ** state['step'] + # bias_correction2 = 1 - beta2 ** state['step'] + + return loss diff --git a/utils/tokenization.py b/utils/tokenization.py new file mode 100644 index 0000000..8761998 --- /dev/null +++ b/utils/tokenization.py @@ -0,0 +1,387 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes.""" + +import collections +import logging +import os +import unicodedata +from io import open + +from utils.io import cached_path + +logger = logging.getLogger(__name__) + +PRETRAINED_VOCAB_ARCHIVE_MAP = { + 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", + 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", + 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", + 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", + 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", + 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", + 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", +} +PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { + 'bert-base-uncased': 512, + 'bert-large-uncased': 512, + 'bert-base-cased': 512, + 'bert-large-cased': 512, + 'bert-base-multilingual-uncased': 512, + 'bert-base-multilingual-cased': 512, + 'bert-base-chinese': 512, +} +VOCAB_NAME = 'vocab.txt' + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + with open(vocab_file, "r", encoding="utf-8") as reader: + while True: + token = reader.readline() + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + if not text: + return [] + tokens = text.split() + return tokens + + +class BertTokenizer(object): + """Runs end-to-end tokenization: punctuation splitting + wordpiece""" + + def __init__(self, vocab_file, is_lowercase=True, max_len=None, do_basic_tokenize=True, + never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): + """Constructs a BertTokenizer. + + Args: + vocab_file: Path to a one-wordpiece-per-line vocabulary file + is_lowercase: Whether to lower case the input + Only has an effect when do_wordpiece_only=False + do_basic_tokenize: Whether to do basic tokenization before wordpiece. + max_len: An artificial maximum length to truncate tokenized sequences to; + Effective maximum length is always the minimum of this + value (if specified) and the underlying BERT model's + sequence length. + never_split: List of tokens which will never be split during tokenization. + Only has an effect when do_wordpiece_only=False + """ + if not os.path.isfile(vocab_file): + raise ValueError( + "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " + "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict( + [(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer(is_lowercase=is_lowercase, + never_split=never_split) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) + self.max_len = max_len if max_len is not None else int(1e12) + + def tokenize(self, text): + if self.do_basic_tokenize: + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def convert_tokens_to_ids(self, tokens): + """Converts a sequence of tokens into ids using the vocab.""" + ids = [] + for token in tokens: + ids.append(self.vocab[token]) + if len(ids) > self.max_len: + logger.warning( + "Token indices sequence length is longer than the specified maximum " + " sequence length for this BERT model ({} > {}). Running this" + " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) + ) + return ids + + def convert_ids_to_tokens(self, ids): + """Converts a sequence of ids in wordpiece tokens using the vocab.""" + tokens = [] + for i in ids: + tokens.append(self.ids_to_tokens[i]) + return tokens + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs): + """ + Instantiate a PreTrainedBertModel from a pre-trained model file. + Download and cache the pre-trained model file if needed. + """ + if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP: + vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path] + else: + vocab_file = pretrained_model_name_or_path + if os.path.isdir(vocab_file): + vocab_file = os.path.join(vocab_file, VOCAB_NAME) + # redirect to the cache, if necessary + try: + resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) + except EnvironmentError: + logger.error( + "Model name '{}' was not found in model name list ({}). " + "We assumed '{}' was a path or url but couldn't find any file " + "associated to this path or url.".format( + pretrained_model_name_or_path, + ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), + vocab_file)) + return None + if resolved_vocab_file == vocab_file: + logger.info("loading vocabulary file {}".format(vocab_file)) + else: + logger.info("loading vocabulary file {} from cache at {}".format( + vocab_file, resolved_vocab_file)) + if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: + # if we're using a pretrained model, ensure the tokenizer wont index sequences longer + # than the number of positional embeddings + max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path] + kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) + # Instantiate tokenizer. + tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) + return tokenizer + + +class BasicTokenizer(object): + """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" + + def __init__(self, + is_lowercase=True, + never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): + """Constructs a BasicTokenizer. + + Args: + is_lowercase: Whether to lower case the input. 
+ """ + self.is_lowercase = is_lowercase + self.never_split = never_split + + def tokenize(self, text): + """Tokenizes a piece of text.""" + text = self._clean_text(text) + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if self.is_lowercase and token not in self.never_split: + token = token.lower() + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text): + """Splits punctuation on a piece of text.""" + if text in self.never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. 
+ if ((cp >= 0x4E00 and cp <= 0x9FFF) or # + (cp >= 0x3400 and cp <= 0x4DBF) or # + (cp >= 0x20000 and cp <= 0x2A6DF) or # + (cp >= 0x2A700 and cp <= 0x2B73F) or # + (cp >= 0x2B740 and cp <= 0x2B81F) or # + (cp >= 0x2B820 and cp <= 0x2CEAF) or + (cp >= 0xF900 and cp <= 0xFAFF) or # + (cp >= 0x2F800 and cp <= 0x2FA1F)): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xfffd or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """Tokenizes a piece of text into its word pieces. + + This uses a greedy longest-match-first algorithm to perform tokenization + using the given vocabulary. + + For example: + input = "unaffable" + output = ["un", "##aff", "##able"] + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through `BasicTokenizer`. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + +def _is_whitespace(char): + """Checks whether `chars` is a whitespace character.""" + # \t, \n, and \r are technically contorl characters but we treat them + # as whitespace since they are generally considered as such. + if char == " " or char == "\t" or char == "\n" or char == "\r": + return True + cat = unicodedata.category(char) + if cat == "Zs": + return True + return False + + +def _is_control(char): + """Checks whether `chars` is a control character.""" + # These are technically control characters but we count them as whitespace + # characters. + if char == "\t" or char == "\n" or char == "\r": + return False + cat = unicodedata.category(char) + if cat.startswith("C"): + return True + return False + + +def _is_punctuation(char): + """Checks whether `chars` is a punctuation character.""" + cp = ord(char) + # We treat all non-letter/number ASCII as punctuation. + # Characters such as "^", "$", and "`" are not in the Unicode + # Punctuation class but we treat them as punctuation anyways, for + # consistency. 
+ if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or + (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): + return True + cat = unicodedata.category(char) + if cat.startswith("P"): + return True + return False From 0979f77f214152742dff28e4d0f0de7d7b3f29b8 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:14:01 -0400 Subject: [PATCH 02/22] Add TREC relevance datasets --- common/evaluate.py | 6 ++- common/train.py | 6 ++- datasets/robust04.py | 65 ++++++++++++++++++++++++ datasets/robust05.py | 64 ++++++++++++++++++++++++ datasets/robust45.py | 115 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 254 insertions(+), 2 deletions(-) create mode 100644 datasets/robust04.py create mode 100644 datasets/robust05.py create mode 100644 datasets/robust45.py diff --git a/common/evaluate.py b/common/evaluate.py index 9d98a6c..a4ad6d8 100644 --- a/common/evaluate.py +++ b/common/evaluate.py @@ -1,4 +1,5 @@ from common.evaluators.classification_evaluator import ClassificationEvaluator +from common.evaluators.relevance_transfer_evaluator import RelevanceTransferEvaluator class EvaluatorFactory(object): @@ -9,7 +10,10 @@ class EvaluatorFactory(object): 'Reuters': ClassificationEvaluator, 'AAPD': ClassificationEvaluator, 'IMDB': ClassificationEvaluator, - 'Yelp2014': ClassificationEvaluator + 'Yelp2014': ClassificationEvaluator, + 'Robust04': RelevanceTransferEvaluator, + 'Robust05': RelevanceTransferEvaluator, + 'Robust45': RelevanceTransferEvaluator } @staticmethod diff --git a/common/train.py b/common/train.py index 4fd80fb..b53c8f7 100644 --- a/common/train.py +++ b/common/train.py @@ -1,4 +1,5 @@ from common.trainers.classification_trainer import ClassificationTrainer +from common.trainers.relevance_transfer_trainer import RelevanceTransferTrainer class TrainerFactory(object): @@ -9,7 +10,10 @@ class TrainerFactory(object): 'Reuters': ClassificationTrainer, 'AAPD': ClassificationTrainer, 'IMDB': ClassificationTrainer, - 'Yelp2014': ClassificationTrainer + 'Yelp2014': ClassificationTrainer, + 'Robust04': RelevanceTransferTrainer, + 'Robust05': RelevanceTransferTrainer, + 'Robust45': RelevanceTransferTrainer, } @staticmethod diff --git a/datasets/robust04.py b/datasets/robust04.py new file mode 100644 index 0000000..996fcd7 --- /dev/null +++ b/datasets/robust04.py @@ -0,0 +1,65 @@ +import csv +import os +import sys + +import torch +from torchtext.data import NestedField, Field, TabularDataset +from torchtext.data.iterator import BucketIterator +from torchtext.vocab import Vectors + +from datasets.robust45 import clean_string, split_sents, process_docids, process_labels + +csv.field_size_limit(sys.maxsize) + + +class Robust04(TabularDataset): + NAME = 'Robust04' + NUM_CLASSES = 2 + TEXT_FIELD = Field(batch_first=True, tokenize=clean_string, include_lengths=True) + LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_labels) + DOCID_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_docids) + TOPICS = ['307', '310', '321', '325', '330', '336', '341', '344', '345', '347', '350', '353', '354', '355', '356', + '362', '363', '367', '372', '375', '378', '379', '389', '393', '394', '397', '399', '400', '404', '408', + '414', '416', '419', '422', '423', '426', '427', '433', '435', '436', '439', '442', '443', '445', '614', + '620', '626', '646', '677', '690'] + + @staticmethod + def sort_key(ex): + return len(ex.text) + + @classmethod + def splits(cls, path, train, validation, test, **kwargs): + 
return super(Robust04, cls).splits( + path, train=train, validation=validation, test=test, + format='tsv', fields=[('label', cls.LABEL_FIELD), ('docid', cls.DOCID_FIELD), ('text', cls.TEXT_FIELD)] + ) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, topic, batch_size=64, shuffle=True, device=0, + vectors=None, unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param vectors_name: name of word vectors file + :param vectors_cache: path to directory containing word vectors file + :param topic: topic from which articles should be fetched + :param batch_size: batch size + :param device: GPU device + :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes + :param unk_init: function used to generate vector for OOV words + :return: + """ + if vectors is None: + vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init) + + train_path = os.path.join('TREC', 'robust04_train_%s.tsv' % topic) + dev_path = os.path.join('TREC', 'robust04_dev_%s.tsv' % topic) + test_path = os.path.join('TREC', 'core17_10k_%s.tsv' % topic) + train, val, test = cls.splits(path, train=train_path, validation=dev_path, test=test_path) + cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, + sort_within_batch=True, device=device) + + +class Robust04Hierarchical(Robust04): + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) \ No newline at end of file diff --git a/datasets/robust05.py b/datasets/robust05.py new file mode 100644 index 0000000..1c9064c --- /dev/null +++ b/datasets/robust05.py @@ -0,0 +1,64 @@ +import csv +import os +import sys + +import torch +from torchtext.data import NestedField, Field, TabularDataset +from torchtext.data.iterator import BucketIterator +from torchtext.vocab import Vectors + +from datasets.robust45 import clean_string, split_sents, process_docids, process_labels + +csv.field_size_limit(sys.maxsize) + + +class Robust05(TabularDataset): + NAME = 'Robust05' + NUM_CLASSES = 2 + TEXT_FIELD = Field(batch_first=True, tokenize=clean_string, include_lengths=True) + LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_labels) + DOCID_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_docids) + TOPICS = ['307', '310', '325', '330', '336', '341', '344', '345', '347', '353', '354', '362', '363', '367', '372', + '375', '378', '389', '393', '394', '397', '399', '404', '408', '416', '419', '426', '427', '433', '435', + '436', '439', '443'] + + @staticmethod + def sort_key(ex): + return len(ex.text) + + @classmethod + def splits(cls, path, train, validation, test, **kwargs): + return super(Robust05, cls).splits( + path, train=train, validation=validation, test=test, + format='tsv', fields=[('label', cls.LABEL_FIELD), ('docid', cls.DOCID_FIELD), ('text', cls.TEXT_FIELD)] + ) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, topic, batch_size=64, shuffle=True, device=0, + vectors=None, unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param vectors_name: name of word vectors file + :param vectors_cache: path to directory containing word vectors file + :param topic: topic from which articles should be fetched + :param batch_size: batch size + :param 
device: GPU device + :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes + :param unk_init: function used to generate vector for OOV words + :return: + """ + if vectors is None: + vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init) + + train_path = os.path.join('TREC', 'robust05_train_%s.tsv' % topic) + dev_path = os.path.join('TREC', 'robust05_dev_%s.tsv' % topic) + test_path = os.path.join('TREC', 'core17_%s.tsv' % topic) + train, val, test = cls.splits(path, train=train_path, validation=dev_path, test=test_path) + cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, + sort_within_batch=True, device=device) + + +class Robust05Hierarchical(Robust05): + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) \ No newline at end of file diff --git a/datasets/robust45.py b/datasets/robust45.py new file mode 100644 index 0000000..e9cf1a8 --- /dev/null +++ b/datasets/robust45.py @@ -0,0 +1,115 @@ +import csv +import os +import random +import re +import sys + +import torch +from nltk import tokenize +from torchtext.data import NestedField, Field, TabularDataset +from torchtext.data.iterator import BucketIterator +from torchtext.vocab import Vectors + +csv.field_size_limit(sys.maxsize) + + +def clean_string(string, sentence_droprate=0, max_length=5000): + """ + Performs tokenization and string cleaning + """ + if sentence_droprate > 0: + lines = [x for x in tokenize.sent_tokenize(string) if len(x) > 1] + lines_drop = [x for x in lines if random.randint(0, 100) > 100 * sentence_droprate] + string = ' '.join(lines_drop if len(lines_drop) > 0 else lines) + + string = re.sub(r'[^A-Za-z0-9]', ' ', string) + string = re.sub(r'\s{2,}', ' ', string) + tokenized_string = string.lower().strip().split() + return tokenized_string[:min(max_length, len(tokenized_string))] + + +def split_sents(string, max_length=50): + tokenized_string = [x for x in tokenize.sent_tokenize(string) if len(x) > 1] + return tokenized_string[:min(max_length, len(tokenized_string))] + + +def process_labels(string): + """ + Returns the label string as a list of integers + :param string: + :return: + """ + # return [float(x) for x in string] + return 0 if string == '01' else 1 + + +def process_docids(string): + """ + Returns the docid as an integer + :param string: + :return: + """ + try: + docid = int(string) + except ValueError: + # print("Error converting docid to integer:", string) + docid = 0 + return docid + + +class Robust45(TabularDataset): + NAME = 'Robust45' + NUM_CLASSES = 2 + TEXT_FIELD = Field(batch_first=True, tokenize=clean_string, include_lengths=True) + LABEL_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_labels) + DOCID_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=process_docids) + TOPICS = ['307', '310', '321', '325', '330', '336', '341', '344', '345', '347', '350', '353', '354', '355', '356', + '362', '363', '367', '372', '375', '378', '379', '389', '393', '394', '397', '399', '400', '404', '408', + '414', '416', '419', '422', '423', '426', '427', '433', '435', '436', '439', '442', '443', '445', '614', + '620', '626', '646', '677', '690'] + TOPICS = ['307', '310', '321', '325', '330'] + + @staticmethod + def sort_key(ex): + return len(ex.text) + + @classmethod + def splits(cls, 
path, train, validation, test, **kwargs): + return super(Robust45, cls).splits( + path, train=train, validation=validation, test=test, + format='tsv', fields=[('label', cls.LABEL_FIELD), ('docid', cls.DOCID_FIELD), ('text', cls.TEXT_FIELD)] + ) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, topic, batch_size=64, shuffle=True, device=0, + vectors=None, unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param vectors_name: name of word vectors file + :param vectors_cache: path to directory containing word vectors file + :param topic: topic from which articles should be fetched + :param batch_size: batch size + :param device: GPU device + :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes + :param unk_init: function used to generate vector for OOV words + :return: + """ + if vectors is None: + vectors = Vectors(name=vectors_name, cache=vectors_cache, unk_init=unk_init) + + train_path = os.path.join('TREC', 'robust45_aug_train_%s.tsv' % topic) + dev_path = os.path.join('TREC', 'robust45_dev_%s.tsv' % topic) + test_path = os.path.join('TREC', 'core17_10k_%s.tsv' % topic) + train, val, test = cls.splits(path, train=train_path, validation=dev_path, test=test_path) + cls.TEXT_FIELD.build_vocab(train, val, test, vectors=vectors) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, + sort_within_batch=True, device=device) + + +class Robust45Hierarchical(Robust45): + @staticmethod + def clean_sentence(string): + return clean_string(string, sentence_droprate=0, max_length=100) + + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) \ No newline at end of file From e5f2ee08d1edd8e73687c79a1272eae38614f085 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:15:16 -0400 Subject: [PATCH 03/22] Add relevance transfer trainer and evaluator --- .../relevance_transfer_evaluator.py | 76 ++++++++++ common/trainers/relevance_transfer_trainer.py | 133 ++++++++++++++++++ 2 files changed, 209 insertions(+) create mode 100644 common/evaluators/relevance_transfer_evaluator.py create mode 100644 common/trainers/relevance_transfer_trainer.py diff --git a/common/evaluators/relevance_transfer_evaluator.py b/common/evaluators/relevance_transfer_evaluator.py new file mode 100644 index 0000000..b9e3680 --- /dev/null +++ b/common/evaluators/relevance_transfer_evaluator.py @@ -0,0 +1,76 @@ +import warnings + +import numpy as np +import torch +import torch.nn.functional as F +from sklearn import metrics + +from common.evaluators.evaluator import Evaluator + +# Suppress warnings from sklearn.metrics +warnings.filterwarnings('ignore') + + +class RelevanceTransferEvaluator(Evaluator): + + def __init__(self, dataset_cls, model, embedding, data_loader, batch_size, device, keep_results=False): + super().__init__(dataset_cls, model, embedding, data_loader, batch_size, device, keep_results) + self.ignore_lengths = False + self.y_target = None + self.y_pred = None + self.docid = None + + def get_scores(self): + self.model.eval() + self.data_loader.init_epoch() + self.y_target = list() + self.y_pred = list() + self.docid = list() + total_loss = 0 + + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + old_params = self.model.get_params() + self.model.load_ema_params() + + for batch_idx, batch in enumerate(self.data_loader): + if 
hasattr(self.model, 'tar') and self.model.tar: + if self.ignore_lengths: + scores, rnn_outs = self.model(batch.text) + else: + scores, rnn_outs = self.model(batch.text[0], lengths=batch.text[1]) + else: + if self.ignore_lengths: + scores = self.model(batch.text) + else: + scores = self.model(batch.text[0], lengths=batch.text[1]) + + # Computing loss and storing predictions + predictions = torch.sigmoid(scores).squeeze(dim=1) + total_loss += F.binary_cross_entropy(predictions, batch.label.float()).item() + self.docid.extend(batch.docid.cpu().detach().numpy()) + self.y_pred.extend(predictions.cpu().detach().numpy()) + self.y_target.extend(batch.label.cpu().detach().numpy()) + + if hasattr(self.model, 'tar') and self.model.tar: + # Temporal activation regularization + total_loss += (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() + + predicted_labels = np.around(np.array(self.y_pred)) + target_labels = np.array(self.y_target) + accuracy = metrics.accuracy_score(target_labels, predicted_labels) + average_precision = metrics.average_precision_score(target_labels, predicted_labels, average=None) + f1 = metrics.f1_score(target_labels, predicted_labels, average='macro') + avg_loss = total_loss / len(self.data_loader.dataset.examples) + + try: + precision = metrics.precision_score(target_labels, predicted_labels, average=None)[1] + except IndexError: + # Handle cases without positive labels + precision = 0 + + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + self.model.load_params(old_params) + + return [accuracy, precision, average_precision, f1, avg_loss], ['accuracy', 'precision', 'average_precision', 'f1', 'cross_entropy_loss'] \ No newline at end of file diff --git a/common/trainers/relevance_transfer_trainer.py b/common/trainers/relevance_transfer_trainer.py new file mode 100644 index 0000000..fba44f0 --- /dev/null +++ b/common/trainers/relevance_transfer_trainer.py @@ -0,0 +1,133 @@ +import datetime +import os +import time + +import torch +import torch.nn.functional as F +from tensorboardX import SummaryWriter +from tqdm import trange, tqdm + +from common.trainers.trainer import Trainer +from tasks.relevance_transfer.resample import ImbalancedDatasetSampler + + +class RelevanceTransferTrainer(Trainer): + + def __init__(self, model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator): + super(RelevanceTransferTrainer, self).__init__(model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator) + self.config = trainer_config + self.early_stop = False + self.best_dev_ap = 0 + self.iterations = 0 + self.iters_not_improved = 0 + self.start = None + + self.log_header = 'Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/AP. 
Dev/F1 Dev/Loss' + self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) + + timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) + self.snapshot_path = os.path.join(self.model_outfile, self.train_loader.dataset.NAME, '%s.pt' % timestamp) + + def train_epoch(self, epoch): + self.train_loader.init_epoch() + n_correct, n_total = 0, 0 + + for batch_idx, batch in enumerate(tqdm(self.train_loader, desc="Training")): + self.iterations += 1 + self.model.train() + self.optimizer.zero_grad() + + # Clip gradients to address exploding gradients in LSTM + torch.nn.utils.clip_grad_norm_(self.model.parameters(), 25.0) + + # Randomly sample equal number of positive and negative documents + if 'ignore_lengths' in self.config and self.config['ignore_lengths']: + if 'resample' in self.config and self.config['resample']: + indices = ImbalancedDatasetSampler(batch.text, batch.label).get_indices() + batch_text = batch.text[indices] + batch_label = batch.label[indices] + else: + batch_text = batch.text + batch_label = batch.label + else: + if 'resample' in self.config and self.config['resample']: + indices = ImbalancedDatasetSampler(batch.text[0], batch.label).get_indices() + batch_text = batch.text[0][indices] + batch_lengths = batch.text[1][indices] + batch_label = batch.label + else: + batch_text = batch.text[0] + batch_lengths = batch.text[1] + batch_label = batch.label + + if hasattr(self.model, 'tar') and self.model.tar: + if 'ignore_lengths' in self.config and self.config['ignore_lengths']: + scores, rnn_outs = self.model(batch_text) + else: + scores, rnn_outs = self.model(batch_text, lengths=batch_lengths) + else: + if 'ignore_lengths' in self.config and self.config['ignore_lengths']: + scores = self.model(batch_text) + else: + scores = self.model(batch_text, lengths=batch_lengths) + + # Computing accuracy and loss + predictions = torch.sigmoid(scores).squeeze(dim=1) + for tensor1, tensor2 in zip(predictions.round(), batch_label): + try: + if int(tensor1.item()) == int(tensor2.item()): + n_correct += 1 + except ValueError: + # Ignore NaN/Inf values + pass + + loss = F.binary_cross_entropy(predictions, batch_label.float()) + + if hasattr(self.model, 'tar') and self.model.tar: + loss = loss + (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() + + n_total += batch.batch_size + train_acc = n_correct / n_total + loss.backward() + self.optimizer.step() + + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + self.model.update_ema() + + if self.iterations % self.log_interval == 1: + niter = epoch * len(self.train_loader) + batch_idx + self.writer.add_scalar('Train/Loss', loss.data.item(), niter) + self.writer.add_scalar('Train/Accuracy', train_acc, niter) + + def train(self, epochs): + self.start = time.time() + # model_outfile is actually a directory, using model_outfile to conform to Trainer naming convention + os.makedirs(self.model_outfile, exist_ok=True) + os.makedirs(os.path.join(self.model_outfile, self.train_loader.dataset.NAME), exist_ok=True) + + for epoch in trange(1, epochs + 1, desc="Epoch"): + self.train_epoch(epoch) + + # Evaluate performance on validation set + dev_acc, dev_precision, dev_ap, dev_f1, dev_loss = self.dev_evaluator.get_scores()[0] + self.writer.add_scalar('Dev/Loss', dev_loss, epoch) + self.writer.add_scalar('Dev/Accuracy', dev_acc, epoch) + self.writer.add_scalar('Dev/Precision', dev_precision, 
epoch) + self.writer.add_scalar('Dev/AP', dev_ap, epoch) + tqdm.write(self.log_header) + tqdm.write(self.log_template.format(epoch, self.iterations, epoch + 1, epochs, + dev_acc, dev_precision, dev_ap, dev_f1, dev_loss)) + + # Update validation results + if dev_f1 > self.best_dev_ap: + self.iters_not_improved = 0 + self.best_dev_ap = dev_f1 + torch.save(self.model, self.snapshot_path) + else: + self.iters_not_improved += 1 + if self.iters_not_improved >= self.patience: + self.early_stop = True + tqdm.write("Early Stopping. Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_ap)) + break From 57f068055a962c2b45fcb26cdc687148bd5bc22c Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:31:29 -0400 Subject: [PATCH 04/22] Add re-ranking module --- tasks/relevance_transfer/rerank.py | 71 ++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 tasks/relevance_transfer/rerank.py diff --git a/tasks/relevance_transfer/rerank.py b/tasks/relevance_transfer/rerank.py new file mode 100644 index 0000000..b9c54d5 --- /dev/null +++ b/tasks/relevance_transfer/rerank.py @@ -0,0 +1,71 @@ +import os + +import numpy as np + + +def load_ranks(rank_file): + score_dict = {} + with open(rank_file, 'r') as f: + for line in f: + topic, _, docid, _, score, _ = line.split() + if topic not in score_dict: + score_dict[topic] = dict() + score_dict[topic.strip()][docid.strip()] = float(score) + return score_dict + + +def merge_ranks(old_ranks, new_ranks, topics): + doc_ranks = dict() + for topic in topics: + missing_docids = list() + old_scores = old_ranks[topic] + new_scores = new_ranks[topic] + if topic not in doc_ranks: + doc_ranks[topic] = list(), list(), list() + print("Processing documents in topic", topic) + for docid, old_score in old_scores.items(): + try: + new_score = new_scores[docid] + doc_ranks[topic][0].append(docid) + doc_ranks[topic][1].append(old_score) + doc_ranks[topic][2].append(new_score) + except KeyError: + missing_docids.append(docid) + print("Number of missing documents in topic %s: %d" % (topic, len(missing_docids))) + return doc_ranks + + +def interpolate(old_scores, new_scores, alpha): + s_min, s_max = min(old_scores), max(old_scores) + old_score = (old_scores - s_min) / (s_max - s_min) + s_min, s_max = min(new_scores), max(new_scores) + new_score = (new_scores - s_min) / (s_max - s_min) + score = old_score * (1 - alpha) + new_score * alpha + return score + + +def rerank_alpha(doc_ranks, alpha, limit, filename, tag): + filename = '%s_rerank_%0.1f.txt' % (filename, alpha) + with open(os.path.join(filename), 'w') as f: + print('Writing output for alpha', alpha) + for topic in doc_ranks: + docids, old_scores, new_scores = doc_ranks[topic] + score = interpolate(np.array(old_scores), np.array(new_scores), alpha) + sorted_score = sorted(list(zip(docids, score)), key=lambda x: -x[1]) + + rank = 1 + for docids, score in sorted_score: + f.write(f'{topic} Q0 {docids} {rank} {score} castor_{tag}\n') + rank += 1 + if rank > limit: + break + + +def rerank(args, dataset): + ret_ranks = load_ranks(args.ret_ranks) + clf_ranks = load_ranks(args.clf_ranks) + doc_ranks = merge_ranks(ret_ranks, clf_ranks, topics=dataset.TOPICS) + + filename = os.path.splitext(args.clf_ranks)[0] + for alpha in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: + rerank_alpha(doc_ranks, alpha, 10000, filename, tag="achyudh") \ No newline at end of file From 7d26d717287d836624498c63365cc49f735364df Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:31:41 -0400 
Subject: [PATCH 05/22] Add ImbalancedDatasetSampler --- tasks/relevance_transfer/resample.py | 60 ++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 tasks/relevance_transfer/resample.py diff --git a/tasks/relevance_transfer/resample.py b/tasks/relevance_transfer/resample.py new file mode 100644 index 0000000..160b538 --- /dev/null +++ b/tasks/relevance_transfer/resample.py @@ -0,0 +1,60 @@ +# MIT License +# +# Copyright (c) 2018 Ming +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# Modified by: Achyudh Keshav Ram +# On: 7th Feb 2019 + +import torch +import torch.utils.data + + +class ImbalancedDatasetSampler: + """Samples elements randomly from a given list of indices for imbalanced dataset + Arguments: + indices (list, optional): a list of indices + num_samples (int, optional): number of samples to draw + """ + + def __init__(self, dataset, labels, indices=None, num_samples=None): + + # All elements in the dataset will be considered if indices is None + self.indices = list(range(len(dataset))) if indices is None else indices + self.num_samples = len(self.indices) if num_samples is None else num_samples + + # Compute distribution of classes in the dataset + self.labels = labels + label_to_count = dict() + for idx in self.indices: + label = self.labels[idx].item() + if label in label_to_count: + label_to_count[label] += 1 + else: + label_to_count[label] = 1 + + # Compute weight for each sample + weights = [1.0 / label_to_count[self.labels[idx].item()] + for idx in self.indices] + self.weights = torch.DoubleTensor(weights) + + def get_indices(self): + return list(self.indices[i] for i in torch.multinomial( + self.weights, self.num_samples, replacement=True)) From eab4fc29c96f444f06824bed9b25f5ba72b21e0f Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:32:05 -0400 Subject: [PATCH 06/22] Add relevance transfer package --- tasks/__init__.py | 0 tasks/relevance_transfer/__init__.py | 0 tasks/relevance_transfer/__main__.py | 239 +++++++++++++++++++++++++++ tasks/relevance_transfer/args.py | 69 ++++++++ tasks/relevance_transfer/config.json | 184 +++++++++++++++++++++ 5 files changed, 492 insertions(+) create mode 100644 tasks/__init__.py create mode 100644 tasks/relevance_transfer/__init__.py create mode 100644 tasks/relevance_transfer/__main__.py create mode 100644 tasks/relevance_transfer/args.py create mode 100644 tasks/relevance_transfer/config.json diff --git a/tasks/__init__.py b/tasks/__init__.py new file mode 100644 index 
0000000..e69de29
diff --git a/tasks/relevance_transfer/__init__.py b/tasks/relevance_transfer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tasks/relevance_transfer/__main__.py b/tasks/relevance_transfer/__main__.py
new file mode 100644
index 0000000..b9bf9d4
--- /dev/null
+++ b/tasks/relevance_transfer/__main__.py
@@ -0,0 +1,239 @@
+import json
+import logging
+import os
+import pickle
+import random
+from collections import defaultdict
+from copy import deepcopy
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+from common.evaluate import EvaluatorFactory
+from common.train import TrainerFactory
+from datasets.robust04 import Robust04, Robust04Hierarchical
+from datasets.robust05 import Robust05, Robust05Hierarchical
+from datasets.robust45 import Robust45, Robust45Hierarchical
+from models.han.model import HAN
+from models.kim_cnn.model import KimCNN
+from models.reg_lstm.model import RegLSTM
+from models.xml_cnn.model import XmlCNN
+from tasks.relevance_transfer.args import get_args
+from tasks.relevance_transfer.rerank import rerank
+
+
+# String templates for logging results
+LOG_HEADER = 'Topic  Dev/Acc.  Dev/Pr.  Dev/Re.   Dev/F1   Dev/Loss'
+LOG_TEMPLATE = ' '.join('{:>5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(','))
+
+
+class UnknownWordVecCache(object):
+    """
+    Caches the first randomly generated word vector for each size so that it is reused.
+    """
+    cache = {}
+
+    @classmethod
+    def unk(cls, tensor):
+        size_tup = tuple(tensor.size())
+        if size_tup not in cls.cache:
+            cls.cache[size_tup] = torch.Tensor(tensor.size())
+            cls.cache[size_tup].uniform_(-0.25, 0.25)
+        return cls.cache[size_tup]
+
+
+def get_logger():
+    logger = logging.getLogger(__name__)
+    logger.setLevel(logging.INFO)
+
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.DEBUG)
+    formatter = logging.Formatter('%(levelname)s - %(message)s')
+    ch.setFormatter(formatter)
+    logger.addHandler(ch)
+
+    return logger
+
+
+def evaluate_dataset(split, dataset_cls, model, embedding, loader, pred_scores, args, topic):
+    saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, args.batch_size, args.gpu)
+    if args.model in {'HAN', 'HR-CNN'}:
+        saved_model_evaluator.ignore_lengths = True
+    accuracy, precision, recall, f1, avg_loss = saved_model_evaluator.get_scores()[0]
+
+    if split == 'test':
+        pred_scores[topic] = (saved_model_evaluator.y_pred, saved_model_evaluator.docid)
+    else:
+        print('\n' + LOG_HEADER)
+        print(LOG_TEMPLATE.format(topic, accuracy, precision, recall, f1, avg_loss) + '\n')
+
+    return saved_model_evaluator.y_pred
+
+
+def save_ranks(pred_scores, output_path):
+    with open(output_path, 'w') as output_file:
+        for topic in tqdm(pred_scores, desc='Saving'):
+            scores, docid = pred_scores[topic]
+            max_scores = defaultdict(list)
+            for score, docid in zip(scores, docid):
+                max_scores[docid].append(score)
+            sorted_score = sorted(((sum(scores)/len(scores), docid) for docid, scores in max_scores.items()), reverse=True)
+            rank = 1  # Reset rank counter to one
+            for score, docid in sorted_score:
+                output_file.write(f'{topic} Q0 {docid} {rank} {score} Castor\n')
+                rank += 1
+
+
+if __name__ == '__main__':
+    # Set default configuration in args.py
+    args = get_args()
+    logger = get_logger()
+
+    # Set random seed for reproducibility
+    torch.manual_seed(args.seed)
+    torch.backends.cudnn.deterministic = True
+    np.random.seed(args.seed)
+    random.seed(args.seed)
+
+    if not args.cuda:
+        args.gpu = -1
+    if torch.cuda.is_available() and args.cuda:
+        
print('Note: You are using GPU for training') + torch.cuda.set_device(args.gpu) + torch.cuda.manual_seed(args.seed) + if torch.cuda.is_available() and not args.cuda: + print('Warning: Using CPU for training') + + dataset_map = { + 'Robust04': Robust04, + 'Robust45': Robust45, + 'Robust05': Robust05 + } + + dataset_map_hi = { + 'Robust04': Robust04Hierarchical, + 'Robust45': Robust45Hierarchical, + 'Robust05': Robust05Hierarchical + } + + model_map = { + 'RegLSTM': RegLSTM, + 'KimCNN': KimCNN, + 'HAN': HAN, + 'XML-CNN': XmlCNN, + } + + if args.model in {'HAN', 'HR-CNN'}: + dataset = dataset_map_hi[args.dataset] + else: + dataset = dataset_map[args.dataset] + print('Dataset:', args.dataset) + + if args.rerank: + rerank(args, dataset) + + else: + topic_iter = 0 + cache_path = os.path.splitext(args.output_path)[0] + '.pkl' + if args.resume_snapshot: + # Load previous cached run + with open(cache_path, 'rb') as cache_file: + pred_scores = pickle.load(cache_file) + else: + pred_scores = dict() + + with open(os.path.join('tasks', 'relevance_transfer', 'config.json'), 'r') as config_file: + topic_configs = json.load(config_file) + + for topic in dataset.TOPICS: + topic_iter += 1 + # Skip topics that have already been predicted + if args.resume_snapshot and topic in pred_scores: + continue + + print("Training on topic %d of %d..." % (topic_iter, len(dataset.TOPICS))) + train_iter, dev_iter, test_iter = dataset.iters(args.data_dir, args.word_vectors_file, args.word_vectors_dir, + topic, batch_size=args.batch_size, device=args.gpu, + unk_init=UnknownWordVecCache.unk) + + print('Vocabulary size:', len(train_iter.dataset.TEXT_FIELD.vocab)) + print('Target Classes:', train_iter.dataset.NUM_CLASSES) + print('Train Instances:', len(train_iter.dataset)) + print('Dev Instances:', len(dev_iter.dataset)) + print('Test Instances:', len(test_iter.dataset)) + + config = deepcopy(args) + config.target_class = 1 + config.dataset = train_iter.dataset + config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab) + + if args.variable_dynamic_pool: + # Set dynamic pool length based on topic configs + if args.model in topic_configs and topic in topic_configs[args.model]: + print("Setting dynamic_pool to", topic_configs[args.model][topic]["dynamic_pool"]) + config.dynamic_pool = topic_configs[args.model][topic]["dynamic_pool"] + if config.dynamic_pool: + print("Setting dynamic_pool_length to", topic_configs[args.model][topic]["dynamic_pool_length"]) + config.dynamic_pool_length = topic_configs[args.model][topic]["dynamic_pool_length"] + + model = model_map[args.model](config) + + if args.cuda: + model.cuda() + print('Shifting model to GPU...') + + parameter = filter(lambda p: p.requires_grad, model.parameters()) + optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + else: + train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, train_iter, + args.batch_size, args.gpu) + test_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, test_iter, + args.batch_size, args.gpu) + dev_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, dev_iter, + args.batch_size, args.gpu) + + trainer_config = { + 'optimizer': optimizer, + 'batch_size': args.batch_size, + 'log_interval': args.log_every, + 'dev_log_interval': args.dev_every, + 'patience': args.patience, + 'model_outfile': args.save_path, + 'logger': logger, + 'resample': 
args.resample + } + + if args.model in {'HAN', 'HR-CNN'}: + trainer_config['ignore_lengths'] = True + dev_evaluator.ignore_lengths = True + test_evaluator.ignore_lengths = True + + trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, + test_evaluator, dev_evaluator) + + trainer.train(args.epochs) + + # Calculate dev and test metrics + model = torch.load(trainer.snapshot_path) + + if hasattr(model, 'beta_ema') and model.beta_ema > 0: + old_params = model.get_params() + model.load_ema_params() + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + else: + evaluate_dataset('dev', dataset_map[args.dataset], model, None, dev_iter, pred_scores, args, topic) + evaluate_dataset('test', dataset_map[args.dataset], model, None, test_iter, pred_scores, args, topic) + + if hasattr(model, 'beta_ema') and model.beta_ema > 0: + model.load_params(old_params) + + with open(cache_path, 'wb') as cache_file: + pickle.dump(pred_scores, cache_file) + + save_ranks(pred_scores, args.output_path) diff --git a/tasks/relevance_transfer/args.py b/tasks/relevance_transfer/args.py new file mode 100644 index 0000000..bcf2273 --- /dev/null +++ b/tasks/relevance_transfer/args.py @@ -0,0 +1,69 @@ +import os + +from argparse import ArgumentParser + + +def get_args(): + parser = ArgumentParser(description="Deep learning models for relevance transfer") + parser.add_argument('--no-cuda', action='store_false', dest='cuda') + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--epochs', type=int, default=50) + parser.add_argument('--batch-size', type=int, default=1024) + parser.add_argument('--mode', type=str, default='static', choices=['rand', 'static', 'non-static', 'multichannel']) + parser.add_argument('--lr', type=float, default=0.001) + parser.add_argument('--seed', type=int, default=3435) + parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN']) + parser.add_argument('--dataset', type=str, default='Robust04', choices=['Robust04', 'Robust05', 'Robust45']) + parser.add_argument('--dev_every', type=int, default=30) + parser.add_argument('--log_every', type=int, default=10) + parser.add_argument('--patience', type=int, default=5) + parser.add_argument('--save_path', type=str, default=os.path.join('model_checkpoints', 'relevance_transfer')) + parser.add_argument('--words_dim', type=int, default=300) + parser.add_argument('--embed_dim', type=int, default=300) + parser.add_argument('--dropout', type=float, default=0.5) + parser.add_argument('--epoch_decay', type=int, default=15) + parser.add_argument('--data_dir', default=os.path.join(os.pardir, 'hedwig-data', 'datasets')) + parser.add_argument('--word_vectors_dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) + parser.add_argument('--word_vectors_file', help='word vectors filename', default='GoogleNews-vectors-negative300.txt') + parser.add_argument("--output-path", type=str, default="run.core17.lstm.topics.robust00.txt") + parser.add_argument('--resume-snapshot', action='store_true') + parser.add_argument('--resample', action='store_true') + + # RegLSTM parameters + parser.add_argument('--num-layers', type=int, default=2) + parser.add_argument('--hidden-dim', type=int, default=256) + parser.add_argument('--bidirectional', action='store_true') + parser.add_argument('--tar', action='store_true') + parser.add_argument('--weight-decay', type=float, default=0) + parser.add_argument('--beta-ema', 
type=float, default = 0, help="for temporal averaging") + parser.add_argument('--wdrop', type=float, default=0.0, help="for weight-drop") + parser.add_argument('--embed-droprate', type=float, default=0.0, help="for embedded dropout") + + # KimCNN parameters + parser.add_argument('--dropblock', type=float, default=0.0) + parser.add_argument('--dropblock-size', type=int, default=7) + parser.add_argument('--batchnorm', action='store_true') + parser.add_argument('--attention', action='store_true') + parser.add_argument('--output-channel', type=int, default=100) + + # HAN parameters + parser.add_argument('--word-num-hidden', type=int, default=50) + parser.add_argument('--sentence-num-hidden', type=int, default=50) + + # XML-CNN parameters + parser.add_argument('--bottleneck-layer', action='store_true') + parser.add_argument('--dynamic-pool', action='store_true') + parser.add_argument('--variable-dynamic-pool', action='store_true') + parser.add_argument('--bottleneck-units', type=int, default=100) + parser.add_argument('--dynamic-pool-length', type=int, default=8) + + # HR-CNN parameters + parser.add_argument('--sentence-channel', type=int, default=100) + + # Re-ranking parameters + parser.add_argument('--rerank', action='store_true') + parser.add_argument("--ret-ranks", type=str, help='retrieval rank file', default="run.core17.bm25+rm3.wcro0405.hits10000.txt") + parser.add_argument("--clf-ranks", type=str, help='classification rank file', default="run.core17.lstm.topics.robust45.txt") + + args = parser.parse_args() + return args diff --git a/tasks/relevance_transfer/config.json b/tasks/relevance_transfer/config.json new file mode 100644 index 0000000..db00a55 --- /dev/null +++ b/tasks/relevance_transfer/config.json @@ -0,0 +1,184 @@ +{ + "KimCNN": { + "307": { + "dynamic_pool": false + }, + "310": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "321": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "325": { + "dynamic_pool": false + }, + "330": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "336": { + "dynamic_pool": false + }, + "341": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "344": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "345": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "347": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "350": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "353": { + "dynamic_pool": false + }, + "354": { + "dynamic_pool": false + }, + "355": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "356": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "362": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "363": { + "dynamic_pool": false + }, + "367": { + "dynamic_pool": false + }, + "372": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "375": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "378": { + "dynamic_pool": false + }, + "379": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "389": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "393": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "394": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "397": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "399": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "400": { + "dynamic_pool": false + }, + "404": { + "dynamic_pool": false + }, + "408": { + "dynamic_pool": false + }, + "414": { + "dynamic_pool": false + }, + "416": { + 
"dynamic_pool": false + }, + "419": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "422": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "423": { + "dynamic_pool": false + }, + "426": { + "dynamic_pool": false + }, + "427": { + "dynamic_pool": false + }, + "433": { + "dynamic_pool": false + }, + "435": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "436": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "439": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "442": { + "dynamic_pool": false + }, + "443": { + "dynamic_pool": false + }, + "445": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "614": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "620": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "626": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "646": { + "dynamic_pool": true, + "dynamic_pool_length": 8 + }, + "677": { + "dynamic_pool": true, + "dynamic_pool_length": 4 + }, + "690": { + "dynamic_pool": false + } + } +} From a08b2d1b842ac7f22dec52ae981d4af58d5ed0ea Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Fri, 19 Apr 2019 17:32:21 -0400 Subject: [PATCH 07/22] Fix import in classification trainer --- common/evaluators/classification_evaluator.py | 6 +++--- common/trainers/classification_trainer.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/common/evaluators/classification_evaluator.py b/common/evaluators/classification_evaluator.py index 7078488..0ff89d9 100644 --- a/common/evaluators/classification_evaluator.py +++ b/common/evaluators/classification_evaluator.py @@ -1,9 +1,9 @@ +import numpy as np import torch import torch.nn.functional as F -import numpy as np - from sklearn import metrics -from .evaluator import Evaluator + +from common.evaluators.evaluator import Evaluator class ClassificationEvaluator(Evaluator): diff --git a/common/trainers/classification_trainer.py b/common/trainers/classification_trainer.py index e43e0e7..a6e0da0 100644 --- a/common/trainers/classification_trainer.py +++ b/common/trainers/classification_trainer.py @@ -1,8 +1,8 @@ +import datetime +import os import time -import datetime import numpy as np -import os import torch import torch.nn.functional as F from tensorboardX import SummaryWriter From 0890eae1eedc47971f6d9d2f1ea4ea5e9f20810a Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Mon, 29 Apr 2019 02:12:08 -0400 Subject: [PATCH 08/22] Remove unwanted args from models/bert --- models/bert/__main__.py | 6 ------ models/bert/args.py | 17 ++++++++--------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/models/bert/__main__.py b/models/bert/__main__.py index 43932a5..daf94fb 100644 --- a/models/bert/__main__.py +++ b/models/bert/__main__.py @@ -57,12 +57,6 @@ def evaluate_split(model, processor, args, split='dev'): if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) - if args.server_ip and args.server_port: - import ptvsd - print("Waiting for debugger attach") - ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) - ptvsd.wait_for_attach() - dataset_map = { 'SST-2': SST2Processor, 'Reuters': ReutersProcessor, diff --git a/models/bert/args.py b/models/bert/args.py index 9171c1b..5819765 100644 --- a/models/bert/args.py +++ b/models/bert/args.py @@ -1,5 +1,4 @@ import os -from argparse import ArgumentParser import models.args @@ -11,6 +10,9 @@ def get_args(): parser.add_argument('--dataset', type=str, default='SST-2', choices=['SST-2', 'AGNews', 'Reuters', 'AAPD', 
'IMDB', 'Yelp2014']) parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'bert')) parser.add_argument('--cache-dir', default='cache', type=str) + parser.add_argument('--trained-model', default=None, type=str) + parser.add_argument('--local-rank', type=int, default=-1, help='local rank for distributed training') + parser.add_argument('--fp16', action='store_true', help='use 16-bit floating point precision') parser.add_argument('--max-seq-length', default=128, @@ -19,25 +21,22 @@ def get_args(): 'Sequences longer than this will be truncated, and sequences shorter \n' 'than this will be padded.') - parser.add_argument('--trained-model', default=None, type=str) - parser.add_argument('--local-rank', type=int, default=-1, help='local rank for distributed training') - parser.add_argument('--fp16', action='store_true', help='use 16-bit floating point precision') - parser.add_argument('--warmup-proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for') - parser.add_argument('--gradient-accumulation-steps', type=int, default=1, + parser.add_argument('--gradient-accumulation-steps', + type=int, + default=1, help='Number of updates steps to accumulate before performing a backward/update pass') parser.add_argument('--loss-scale', - type=float, default=0, + type=float, + default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n' '0 (default value): dynamic loss scaling.\n' 'Positive power of 2: static loss scaling value.\n') - parser.add_argument('--server-ip', type=str, default='', help='Can be used for distant debugging.') - parser.add_argument('--server-port', type=str, default='', help='Can be used for distant debugging.') args = parser.parse_args() return args From 1116c64337d2fe3941ae54f8ac460a8203bec65f Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Thu, 2 May 2019 05:03:02 -0400 Subject: [PATCH 09/22] Fix bug where model wasn't in training mode every epoch --- common/trainers/bert_trainer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/common/trainers/bert_trainer.py b/common/trainers/bert_trainer.py index 46dc1c5..7c25e25 100644 --- a/common/trainers/bert_trainer.py +++ b/common/trainers/bert_trainer.py @@ -42,6 +42,7 @@ def __init__(self, model, optimizer, processor, args): def train_epoch(self, train_dataloader): for step, batch in enumerate(tqdm(train_dataloader, desc="Training")): + self.model.train() batch = tuple(t.to(self.args.device) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch logits = self.model(input_ids, segment_ids, input_mask) @@ -92,8 +93,6 @@ def train(self): train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=self.args.batch_size) - self.model.train() - for epoch in trange(int(self.args.epochs), desc="Epoch"): self.train_epoch(train_dataloader) dev_evaluator = BertEvaluator(self.model, self.processor, self.args, split='dev') From 0f34aa002cff547d56ec351e3dc615b4e4a91c73 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 5 May 2019 02:28:17 -0400 Subject: [PATCH 10/22] Add Robust45 preprocessor for BERT --- .../bert_processors/robust45_processor.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 datasets/bert_processors/robust45_processor.py diff --git a/datasets/bert_processors/robust45_processor.py b/datasets/bert_processors/robust45_processor.py new file mode 100644 index 0000000..4780bf1 --- /dev/null +++ 
b/datasets/bert_processors/robust45_processor.py @@ -0,0 +1,97 @@ +import os + +from datasets.bert_processors.abstract_processor import BertProcessor, InputExample, InputFeatures + + +class RelevanceFeatures(InputFeatures): + """A single set of features for relevance tasks.""" + + def __init__(self, input_ids, input_mask, segment_ids, label_id, guid): + super().__init__(input_ids, input_mask, segment_ids, label_id) + self.guid = guid + + +class Robust45Processor(BertProcessor): + NAME = 'Robust45' + NUM_CLASSES = 2 + TOPICS = ['307', '310', '321', '325', '330', '336', '341', '344', '345', '347', '350', '353', '354', '355', '356', + '362', '363', '367', '372', '375', '378', '379', '389', '393', '394', '397', '399', '400', '404', '408', + '414', '416', '419', '422', '423', '426', '427', '433', '435', '436', '439', '442', '443', '445', '614', + '620', '626', '646', '677', '690'] + TOPICS = ['307', '310', '321', '325', '330'] + + def get_train_examples(self, data_dir, **kwargs): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'TREC', 'robust45_aug_train_%s.tsv' % kwargs['topic'])), 'train') + + def get_dev_examples(self, data_dir, **kwargs): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'TREC', 'robust45_dev_%s.tsv' % kwargs['topic'])), 'dev') + + def get_test_examples(self, data_dir, **kwargs): + return self._create_examples( + self._read_tsv(os.path.join(data_dir, 'TREC', 'core17_10k_%s.tsv' % kwargs['topic'])), 'test') + + @staticmethod + def _create_examples(lines, split): + """Creates examples for the training and dev sets.""" + examples = [] + for (i, line) in enumerate(lines): + if i == 0: + continue + text_a = line[2] + guid = line[1] + label = line[0] + examples.append( + InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +def convert_examples_to_features(examples, max_seq_length, tokenizer): + """ + Loads a data file into a list of InputBatch objects + :param examples: + :param max_seq_length: + :param tokenizer: + :return: a list of InputBatch objects + """ + + features = [] + for (ex_index, example) in enumerate(examples): + tokens_a = tokenizer.tokenize(example.text_a) + + tokens_b = None + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] + + tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + segment_ids = [0] * len(tokens) + + if tokens_b: + tokens += tokens_b + ["[SEP]"] + segment_ids += [1] * (len(tokens_b) + 1) + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # The mask has 1 for real tokens and 0 for padding tokens + input_mask = [1] * len(input_ids) + + # Zero-pad up to the sequence length + padding = [0] * (max_seq_length - len(input_ids)) + input_ids += padding + input_mask += padding + segment_ids += padding + + try: + docid = int(example.guid) + except ValueError: + # print("Error converting docid to integer:", string) + docid = 0 + + features.append(RelevanceFeatures(input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=0 if example.label == '01' else 1, + guid=docid)) + return features From 7bed0f1678c078ee1afedd46fabc11a7b4efe976 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 5 May 2019 02:29:44 -0400 Subject: [PATCH 11/22] Add support for BERT for relevance transfer --- .../relevance_transfer_evaluator.py | 119 ++++-- common/trainers/relevance_transfer_trainer.py | 213 +++++----- tasks/relevance_transfer/__main__.py | 382 ++++++++++++------ 
tasks/relevance_transfer/args.py | 12 +- 4 files changed, 464 insertions(+), 262 deletions(-) diff --git a/common/evaluators/relevance_transfer_evaluator.py b/common/evaluators/relevance_transfer_evaluator.py index b9e3680..aa561f3 100644 --- a/common/evaluators/relevance_transfer_evaluator.py +++ b/common/evaluators/relevance_transfer_evaluator.py @@ -4,8 +4,12 @@ import torch import torch.nn.functional as F from sklearn import metrics +from torch.utils.data import TensorDataset, SequentialSampler, DataLoader +from tqdm import tqdm from common.evaluators.evaluator import Evaluator +from datasets.bert_processors.robust45_processor import convert_examples_to_features +from utils.tokenization import BertTokenizer # Suppress warnings from sklearn.metrics warnings.filterwarnings('ignore') @@ -13,55 +17,105 @@ class RelevanceTransferEvaluator(Evaluator): - def __init__(self, dataset_cls, model, embedding, data_loader, batch_size, device, keep_results=False): - super().__init__(dataset_cls, model, embedding, data_loader, batch_size, device, keep_results) - self.ignore_lengths = False + def __init__(self, model, config, **kwargs): + super().__init__(kwargs['dataset'], model, kwargs['embedding'], kwargs['data_loader'], + batch_size=config['batch_size'], device=config['device']) + + if config['model'] in {'BERT-Base', 'BERT-Large'}: + variant = 'bert-large-uncased' if config['model'] == 'BERT-Large' else 'bert-base-uncased' + self.tokenizer = BertTokenizer.from_pretrained(variant, is_lowercase=config['is_lowercase']) + self.processor = kwargs['processor'] + if config['split'] == 'test': + self.eval_examples = self.processor.get_test_examples(config['data_dir'], topic=config['topic']) + else: + self.eval_examples = self.processor.get_dev_examples(config['data_dir'], topic=config['topic']) + + self.config = config + self.ignore_lengths = config['ignore_lengths'] self.y_target = None self.y_pred = None self.docid = None - def get_scores(self): + def get_scores(self, silent=False): self.model.eval() - self.data_loader.init_epoch() self.y_target = list() self.y_pred = list() self.docid = list() total_loss = 0 - if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: - # Temporal averaging - old_params = self.model.get_params() - self.model.load_ema_params() + if self.config['model'] in {'BERT-Base', 'BERT-Large'}: + eval_features = convert_examples_to_features(self.eval_examples, self.config['max_seq_length'], self.tokenizer) - for batch_idx, batch in enumerate(self.data_loader): - if hasattr(self.model, 'tar') and self.model.tar: - if self.ignore_lengths: - scores, rnn_outs = self.model(batch.text) - else: - scores, rnn_outs = self.model(batch.text[0], lengths=batch.text[1]) - else: - if self.ignore_lengths: - scores = self.model(batch.text) + all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) + all_document_ids = torch.tensor([f.guid for f in eval_features], dtype=torch.long) + + eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_document_ids) + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.config['batch_size']) + + for input_ids, input_mask, segment_ids, label_ids, document_ids in 
tqdm(eval_dataloader, desc="Evaluating", disable=silent): + input_ids = input_ids.to(self.config['device']) + input_mask = input_mask.to(self.config['device']) + segment_ids = segment_ids.to(self.config['device']) + label_ids = label_ids.to(self.config['device']) + + with torch.no_grad(): + logits = torch.sigmoid(self.model(input_ids, segment_ids, input_mask)).squeeze(dim=1) + + # Computing loss and storing predictions + self.docid.extend(document_ids.cpu().detach().numpy()) + self.y_pred.extend(logits.cpu().detach().numpy()) + self.y_target.extend(label_ids.cpu().detach().numpy()) + loss = F.binary_cross_entropy(logits, label_ids.float()) + + if self.config['n_gpu'] > 1: + loss = loss.mean() + if self.config['gradient_accumulation_steps'] > 1: + loss = loss / self.config['gradient_accumulation_steps'] + total_loss += loss.item() + + else: + self.data_loader.init_epoch() + + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + old_params = self.model.get_params() + self.model.load_ema_params() + + for batch in tqdm(self.data_loader, desc="Evaluating", disable=silent): + if hasattr(self.model, 'tar') and self.model.tar: + if self.ignore_lengths: + logits, rnn_outs = torch.sigmoid(self.model(batch.text)).squeeze(dim=1) + else: + logits, rnn_outs = torch.sigmoid(self.model(batch.text[0], lengths=batch.text[1])).squeeze(dim=1) else: - scores = self.model(batch.text[0], lengths=batch.text[1]) + if self.ignore_lengths: + logits = torch.sigmoid(self.model(batch.text)).squeeze(dim=1) + else: + logits = torch.sigmoid(self.model(batch.text[0], lengths=batch.text[1])).squeeze(dim=1) + + total_loss += F.binary_cross_entropy(logits, batch.label.float()).item() + if hasattr(self.model, 'tar') and self.model.tar: + # Temporal activation regularization + total_loss += (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() - # Computing loss and storing predictions - predictions = torch.sigmoid(scores).squeeze(dim=1) - total_loss += F.binary_cross_entropy(predictions, batch.label.float()).item() - self.docid.extend(batch.docid.cpu().detach().numpy()) - self.y_pred.extend(predictions.cpu().detach().numpy()) - self.y_target.extend(batch.label.cpu().detach().numpy()) + self.docid.extend(batch.docid.cpu().detach().numpy()) + self.y_pred.extend(logits.cpu().detach().numpy()) + self.y_target.extend(batch.label.cpu().detach().numpy()) - if hasattr(self.model, 'tar') and self.model.tar: - # Temporal activation regularization - total_loss += (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + self.model.load_params(old_params) predicted_labels = np.around(np.array(self.y_pred)) target_labels = np.array(self.y_target) accuracy = metrics.accuracy_score(target_labels, predicted_labels) average_precision = metrics.average_precision_score(target_labels, predicted_labels, average=None) f1 = metrics.f1_score(target_labels, predicted_labels, average='macro') - avg_loss = total_loss / len(self.data_loader.dataset.examples) + avg_loss = total_loss / len(predicted_labels) try: precision = metrics.precision_score(target_labels, predicted_labels, average=None)[1] @@ -69,8 +123,5 @@ def get_scores(self): # Handle cases without positive labels precision = 0 - if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: - # Temporal averaging - self.model.load_params(old_params) - - return [accuracy, precision, average_precision, f1, avg_loss], ['accuracy', 'precision', 'average_precision', 'f1', 'cross_entropy_loss'] \ 
No newline at end of file + return [accuracy, precision, average_precision, f1, avg_loss], \ + ['accuracy', 'precision', 'average_precision', 'f1', 'cross_entropy_loss'] diff --git a/common/trainers/relevance_transfer_trainer.py b/common/trainers/relevance_transfer_trainer.py index fba44f0..8a0e592 100644 --- a/common/trainers/relevance_transfer_trainer.py +++ b/common/trainers/relevance_transfer_trainer.py @@ -1,133 +1,154 @@ import datetime import os -import time import torch import torch.nn.functional as F from tensorboardX import SummaryWriter +from torch.utils.data import TensorDataset, RandomSampler, DataLoader from tqdm import trange, tqdm from common.trainers.trainer import Trainer +from datasets.bert_processors.robust45_processor import convert_examples_to_features from tasks.relevance_transfer.resample import ImbalancedDatasetSampler +from utils.tokenization import BertTokenizer class RelevanceTransferTrainer(Trainer): - - def __init__(self, model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator): - super(RelevanceTransferTrainer, self).__init__(model, embedding, train_loader, trainer_config, train_evaluator, test_evaluator, dev_evaluator) - self.config = trainer_config + def __init__(self, model, config, **kwargs): + super().__init__(model, kwargs['embedding'], kwargs['train_loader'], config, None, kwargs['test_evaluator'], kwargs['dev_evaluator']) + + if config['model'] in {'BERT-Base', 'BERT-Large'}: + variant = 'bert-large-uncased' if config['model'] == 'BERT-Large' else 'bert-base-uncased' + self.tokenizer = BertTokenizer.from_pretrained(variant, is_lowercase=config['is_lowercase']) + self.processor = kwargs['processor'] + self.optimizer = config['optimizer'] + self.train_examples = self.processor.get_train_examples(config['data_dir'], topic=config['topic']) + self.num_train_optimization_steps = int(len(self.train_examples) / + config['batch_size'] / + config['gradient_accumulation_steps'] + ) * config['epochs'] + self.config = config self.early_stop = False self.best_dev_ap = 0 self.iterations = 0 - self.iters_not_improved = 0 - self.start = None + self.unimproved_iters = 0 self.log_header = 'Epoch Iteration Progress Dev/Acc. Dev/Pr. Dev/AP. 
Dev/F1 Dev/Loss' self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) - self.snapshot_path = os.path.join(self.model_outfile, self.train_loader.dataset.NAME, '%s.pt' % timestamp) - - def train_epoch(self, epoch): - self.train_loader.init_epoch() - n_correct, n_total = 0, 0 + self.snapshot_path = os.path.join(self.model_outfile, config['dataset'].NAME, '%s.pt' % timestamp) - for batch_idx, batch in enumerate(tqdm(self.train_loader, desc="Training")): - self.iterations += 1 + def train_epoch(self): + for step, batch in enumerate(tqdm(self.train_loader, desc="Training")): self.model.train() - self.optimizer.zero_grad() - # Clip gradients to address exploding gradients in LSTM - torch.nn.utils.clip_grad_norm_(self.model.parameters(), 25.0) + if self.config['model'] in {'BERT-Base', 'BERT-Large'}: + batch = tuple(t.to(self.config['device']) for t in batch) + input_ids, input_mask, segment_ids, label_ids = batch + logits = torch.sigmoid(self.model(input_ids, segment_ids, input_mask)).squeeze(dim=1) + loss = F.binary_cross_entropy(logits, label_ids.float()) - # Randomly sample equal number of positive and negative documents - if 'ignore_lengths' in self.config and self.config['ignore_lengths']: - if 'resample' in self.config and self.config['resample']: - indices = ImbalancedDatasetSampler(batch.text, batch.label).get_indices() - batch_text = batch.text[indices] - batch_label = batch.label[indices] - else: - batch_text = batch.text - batch_label = batch.label + if self.config['n_gpu'] > 1: + loss = loss.mean() + if self.config['gradient_accumulation_steps'] > 1: + loss = loss / self.config['gradient_accumulation_steps'] + + loss.backward() + + if (step + 1) % self.config['gradient_accumulation_steps'] == 0: + self.optimizer.step() + self.optimizer.zero_grad() + self.iterations += 1 else: - if 'resample' in self.config and self.config['resample']: - indices = ImbalancedDatasetSampler(batch.text[0], batch.label).get_indices() - batch_text = batch.text[0][indices] - batch_lengths = batch.text[1][indices] - batch_label = batch.label - else: - batch_text = batch.text[0] - batch_lengths = batch.text[1] - batch_label = batch.label + # Clip gradients to address exploding gradients in LSTM + torch.nn.utils.clip_grad_norm_(self.model.parameters(), 25.0) - if hasattr(self.model, 'tar') and self.model.tar: + # Randomly sample equal number of positive and negative documents + self.train_loader.init_epoch() if 'ignore_lengths' in self.config and self.config['ignore_lengths']: - scores, rnn_outs = self.model(batch_text) + if 'resample' in self.config and self.config['resample']: + indices = ImbalancedDatasetSampler(batch.text, batch.label).get_indices() + batch_text = batch.text[indices] + batch_label = batch.label[indices] + else: + batch_text = batch.text + batch_label = batch.label else: - scores, rnn_outs = self.model(batch_text, lengths=batch_lengths) - else: - if 'ignore_lengths' in self.config and self.config['ignore_lengths']: - scores = self.model(batch_text) + if 'resample' in self.config and self.config['resample']: + indices = ImbalancedDatasetSampler(batch.text[0], batch.label).get_indices() + batch_text = batch.text[0][indices] + batch_lengths = batch.text[1][indices] + batch_label = batch.label + else: + batch_text = batch.text[0] + batch_lengths = batch.text[1] + batch_label = batch.label + + if 
hasattr(self.model, 'tar') and self.model.tar: + if 'ignore_lengths' in self.config and self.config['ignore_lengths']: + logits, rnn_outs = torch.sigmoid(self.model(batch_text)).squeeze(dim=1) + else: + logits, rnn_outs = torch.sigmoid(self.model(batch_text, lengths=batch_lengths)).squeeze(dim=1) else: - scores = self.model(batch_text, lengths=batch_lengths) - - # Computing accuracy and loss - predictions = torch.sigmoid(scores).squeeze(dim=1) - for tensor1, tensor2 in zip(predictions.round(), batch_label): - try: - if int(tensor1.item()) == int(tensor2.item()): - n_correct += 1 - except ValueError: - # Ignore NaN/Inf values - pass - - loss = F.binary_cross_entropy(predictions, batch_label.float()) + if 'ignore_lengths' in self.config and self.config['ignore_lengths']: + logits = torch.sigmoid(self.model(batch_text)).squeeze(dim=1) + else: + logits = torch.sigmoid(self.model(batch_text, lengths=batch_lengths)).squeeze(dim=1) - if hasattr(self.model, 'tar') and self.model.tar: - loss = loss + (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() + loss = F.binary_cross_entropy(logits, batch_label.float()) + if hasattr(self.model, 'tar') and self.model.tar: + loss = loss + (rnn_outs[1:] - rnn_outs[:-1]).pow(2).mean() - n_total += batch.batch_size - train_acc = n_correct / n_total - loss.backward() - self.optimizer.step() + loss.backward() + self.optimizer.step() + self.iterations += 1 + self.optimizer.zero_grad() - if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: - # Temporal averaging - self.model.update_ema() - - if self.iterations % self.log_interval == 1: - niter = epoch * len(self.train_loader) + batch_idx - self.writer.add_scalar('Train/Loss', loss.data.item(), niter) - self.writer.add_scalar('Train/Accuracy', train_acc, niter) + if hasattr(self.model, 'beta_ema') and self.model.beta_ema > 0: + # Temporal averaging + self.model.update_ema() def train(self, epochs): - self.start = time.time() - # model_outfile is actually a directory, using model_outfile to conform to Trainer naming convention os.makedirs(self.model_outfile, exist_ok=True) - os.makedirs(os.path.join(self.model_outfile, self.train_loader.dataset.NAME), exist_ok=True) - - for epoch in trange(1, epochs + 1, desc="Epoch"): - self.train_epoch(epoch) - - # Evaluate performance on validation set - dev_acc, dev_precision, dev_ap, dev_f1, dev_loss = self.dev_evaluator.get_scores()[0] - self.writer.add_scalar('Dev/Loss', dev_loss, epoch) - self.writer.add_scalar('Dev/Accuracy', dev_acc, epoch) - self.writer.add_scalar('Dev/Precision', dev_precision, epoch) - self.writer.add_scalar('Dev/AP', dev_ap, epoch) - tqdm.write(self.log_header) - tqdm.write(self.log_template.format(epoch, self.iterations, epoch + 1, epochs, - dev_acc, dev_precision, dev_ap, dev_f1, dev_loss)) - - # Update validation results - if dev_f1 > self.best_dev_ap: - self.iters_not_improved = 0 - self.best_dev_ap = dev_f1 - torch.save(self.model, self.snapshot_path) - else: - self.iters_not_improved += 1 - if self.iters_not_improved >= self.patience: - self.early_stop = True - tqdm.write("Early Stopping. 
Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_ap)) - break + os.makedirs(os.path.join(self.model_outfile, self.config['dataset'].NAME), exist_ok=True) + + if self.config['model'] in {'BERT-Base', 'BERT-Large'}: + train_features = convert_examples_to_features( + self.train_examples, self.config['max_seq_length'], self.tokenizer) + + all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) + all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) + all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) + all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + train_sampler = RandomSampler(train_data) + self.train_loader = DataLoader(train_data, sampler=train_sampler, batch_size=self.config['batch_size']) + + with trange(1, epochs + 1, desc="Epoch") as t_epochs: + for epoch in t_epochs: + self.train_epoch() + + # Evaluate performance on validation set + dev_acc, dev_precision, dev_ap, dev_f1, dev_loss = self.dev_evaluator.get_scores()[0] + self.writer.add_scalar('Dev/Loss', dev_loss, epoch) + self.writer.add_scalar('Dev/Accuracy', dev_acc, epoch) + self.writer.add_scalar('Dev/Precision', dev_precision, epoch) + self.writer.add_scalar('Dev/AP', dev_ap, epoch) + tqdm.write(self.log_header) + tqdm.write(self.log_template.format(epoch, self.iterations, epoch, epochs, + dev_acc, dev_precision, dev_ap, dev_f1, dev_loss)) + + # Update validation results + if dev_f1 > self.best_dev_ap: + self.unimproved_iters = 0 + self.best_dev_ap = dev_f1 + torch.save(self.model, self.snapshot_path) + else: + self.unimproved_iters += 1 + if self.unimproved_iters >= self.patience: + self.early_stop = True + tqdm.write("Early Stopping. Epoch: {}, Best Dev F1: {}".format(epoch, self.best_dev_ap)) + t_epochs.close() + break diff --git a/tasks/relevance_transfer/__main__.py b/tasks/relevance_transfer/__main__.py index b9bf9d4..9b3b9bb 100644 --- a/tasks/relevance_transfer/__main__.py +++ b/tasks/relevance_transfer/__main__.py @@ -1,5 +1,4 @@ import json -import logging import os import pickle import random @@ -10,21 +9,23 @@ import torch from tqdm import tqdm -from common.evaluate import EvaluatorFactory -from common.train import TrainerFactory +from common.evaluators.relevance_transfer_evaluator import RelevanceTransferEvaluator +from common.trainers.relevance_transfer_trainer import RelevanceTransferTrainer +from datasets.bert_processors.robust45_processor import Robust45Processor from datasets.robust04 import Robust04, Robust04Hierarchical from datasets.robust05 import Robust05, Robust05Hierarchical from datasets.robust45 import Robust45, Robust45Hierarchical +from models.bert.model import BertForSequenceClassification as Bert from models.han.model import HAN from models.kim_cnn.model import KimCNN from models.reg_lstm.model import RegLSTM from models.xml_cnn.model import XmlCNN from tasks.relevance_transfer.args import get_args from tasks.relevance_transfer.rerank import rerank - +from utils.optimization import BertAdam # String templates for logging results -LOG_HEADER = 'Topic Dev/Acc. Dev/Pr. Dev/Re. Dev/F1 Dev/Loss' +LOG_HEADER = 'Topic Dev/Acc. Dev/Pr. Dev/AP. 
Dev/F1 Dev/Loss' LOG_TEMPLATE = ' '.join('{:>5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) @@ -43,32 +44,41 @@ def unk(cls, tensor): return cls.cache[size_tup] -def get_logger(): - logger = logging.getLogger(__name__) - logger.setLevel(logging.INFO) - - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(levelname)s - %(message)s') - ch.setFormatter(formatter) - logger.addHandler(ch) +def evaluate_split(model, topic, split, config, **kwargs): + evaluator_config = { + 'model': config.model, + 'topic': topic, + 'split': split, + 'dataset': kwargs['dataset'], + 'batch_size': config.batch_size, + 'ignore_lengths': False, + 'is_lowercase': True, + 'gradient_accumulation_steps': config.gradient_accumulation_steps, + 'max_seq_length': config.max_seq_length, + 'data_dir': config.data_dir, + 'n_gpu': n_gpu, + 'device': config.device + } - return logger + if config.model in {'HAN', 'HR-CNN'}: + trainer_config['ignore_lengths'] = True + evaluator_config['ignore_lengths'] = True + evaluator = RelevanceTransferEvaluator(model, evaluator_config, + processor=kwargs['processor'], + embedding=kwargs['embedding'], + data_loader=kwargs['loader'], + dataset=kwargs['dataset']) -def evaluate_dataset(split, dataset_cls, model, embedding, loader, pred_scores, args, topic): - saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, args.batch_size, args.gpu) - if args.model in {'HAN', 'HR-CNN'}: - saved_model_evaluator.ignore_lengths = True - accuracy, precision, recall, f1, avg_loss = saved_model_evaluator.get_scores()[0] + accuracy, precision, recall, f1, avg_loss = evaluator.get_scores()[0] if split == 'test': - pred_scores[topic] = (saved_model_evaluator.y_pred, saved_model_evaluator.docid) + pred_scores[topic] = (evaluator.y_pred, evaluator.docid) else: print('\n' + LOG_HEADER) print(LOG_TEMPLATE.format(topic, accuracy, precision, recall, f1, avg_loss) + '\n') - return saved_model_evaluator.y_pred + return evaluator.y_pred def save_ranks(pred_scores, output_path): @@ -78,7 +88,8 @@ def save_ranks(pred_scores, output_path): max_scores = defaultdict(list) for score, docid in zip(scores, docid): max_scores[docid].append(score) - sorted_score = sorted(((sum(scores)/len(scores), docid) for docid, scores in max_scores.items()), reverse=True) + sorted_score = sorted(((sum(scores) / len(scores), docid) for docid, scores in max_scores.items()), + reverse=True) rank = 1 # Reset rank counter to one for score, docid in sorted_score: output_file.write(f'{topic} Q0 {docid} {rank} {score} Castor\n') @@ -88,7 +99,17 @@ def save_ranks(pred_scores, output_path): if __name__ == '__main__': # Set default configuration in args.py args = get_args() - logger = get_logger() + + if torch.cuda.is_available() and not args.cuda: + print('Warning: Using CPU for training') + + device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") + n_gpu = torch.cuda.device_count() + args.device = device + args.n_gpu = n_gpu + + print('Device:', str(device).upper()) + print('Number of GPUs:', n_gpu) # Set random seed for reproducibility torch.manual_seed(args.seed) @@ -96,27 +117,24 @@ def save_ranks(pred_scores, output_path): np.random.seed(args.seed) random.seed(args.seed) - if not args.cuda: - args.gpu = -1 - if torch.cuda.is_available() and args.cuda: - print('Note: You are using GPU for training') - torch.cuda.set_device(args.gpu) - torch.cuda.manual_seed(args.seed) - if torch.cuda.is_available() and not args.cuda: - 
print('Warning: Using CPU for training') - dataset_map = { 'Robust04': Robust04, 'Robust45': Robust45, 'Robust05': Robust05 } - dataset_map_hi = { + dataset_map_hier = { 'Robust04': Robust04Hierarchical, 'Robust45': Robust45Hierarchical, 'Robust05': Robust05Hierarchical } + dataset_map_bert = { + 'Robust45': Robust45Processor, + 'Robust04': None, + 'Robust05': None + } + model_map = { 'RegLSTM': RegLSTM, 'KimCNN': KimCNN, @@ -124,11 +142,17 @@ def save_ranks(pred_scores, output_path): 'XML-CNN': XmlCNN, } - if args.model in {'HAN', 'HR-CNN'}: - dataset = dataset_map_hi[args.dataset] + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') else: - dataset = dataset_map[args.dataset] - print('Dataset:', args.dataset) + print('Dataset:', args.dataset) + + if args.model in {'HAN', 'HR-CNN'}: + dataset = dataset_map_hier[args.dataset] + elif args.model in {'BERT-Base', 'BERT-Large'}: + dataset = dataset_map_bert[args.dataset] + else: + dataset = dataset_map[args.dataset] if args.rerank: rerank(args, dataset) @@ -136,6 +160,7 @@ def save_ranks(pred_scores, output_path): else: topic_iter = 0 cache_path = os.path.splitext(args.output_path)[0] + '.pkl' + if args.resume_snapshot: # Load previous cached run with open(cache_path, 'rb') as cache_file: @@ -143,97 +168,194 @@ def save_ranks(pred_scores, output_path): else: pred_scores = dict() - with open(os.path.join('tasks', 'relevance_transfer', 'config.json'), 'r') as config_file: - topic_configs = json.load(config_file) - - for topic in dataset.TOPICS: - topic_iter += 1 - # Skip topics that have already been predicted - if args.resume_snapshot and topic in pred_scores: - continue - - print("Training on topic %d of %d..." % (topic_iter, len(dataset.TOPICS))) - train_iter, dev_iter, test_iter = dataset.iters(args.data_dir, args.word_vectors_file, args.word_vectors_dir, - topic, batch_size=args.batch_size, device=args.gpu, - unk_init=UnknownWordVecCache.unk) - - print('Vocabulary size:', len(train_iter.dataset.TEXT_FIELD.vocab)) - print('Target Classes:', train_iter.dataset.NUM_CLASSES) - print('Train Instances:', len(train_iter.dataset)) - print('Dev Instances:', len(dev_iter.dataset)) - print('Test Instances:', len(test_iter.dataset)) - - config = deepcopy(args) - config.target_class = 1 - config.dataset = train_iter.dataset - config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab) - - if args.variable_dynamic_pool: - # Set dynamic pool length based on topic configs - if args.model in topic_configs and topic in topic_configs[args.model]: - print("Setting dynamic_pool to", topic_configs[args.model][topic]["dynamic_pool"]) - config.dynamic_pool = topic_configs[args.model][topic]["dynamic_pool"] - if config.dynamic_pool: - print("Setting dynamic_pool_length to", topic_configs[args.model][topic]["dynamic_pool_length"]) - config.dynamic_pool_length = topic_configs[args.model][topic]["dynamic_pool_length"] - - model = model_map[args.model](config) - - if args.cuda: - model.cuda() - print('Shifting model to GPU...') - - parameter = filter(lambda p: p.requires_grad, model.parameters()) - optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) - - if args.dataset not in dataset_map: - raise ValueError('Unrecognized dataset') - else: - train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, train_iter, - args.batch_size, args.gpu) - test_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, test_iter, - args.batch_size, args.gpu) - 
dev_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, dev_iter, - args.batch_size, args.gpu) - - trainer_config = { - 'optimizer': optimizer, - 'batch_size': args.batch_size, - 'log_interval': args.log_every, - 'dev_log_interval': args.dev_every, - 'patience': args.patience, - 'model_outfile': args.save_path, - 'logger': logger, - 'resample': args.resample - } - - if args.model in {'HAN', 'HR-CNN'}: - trainer_config['ignore_lengths'] = True - dev_evaluator.ignore_lengths = True - test_evaluator.ignore_lengths = True - - trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, - test_evaluator, dev_evaluator) - - trainer.train(args.epochs) - - # Calculate dev and test metrics - model = torch.load(trainer.snapshot_path) - - if hasattr(model, 'beta_ema') and model.beta_ema > 0: - old_params = model.get_params() - model.load_ema_params() - - if args.dataset not in dataset_map: - raise ValueError('Unrecognized dataset') - else: - evaluate_dataset('dev', dataset_map[args.dataset], model, None, dev_iter, pred_scores, args, topic) - evaluate_dataset('test', dataset_map[args.dataset], model, None, test_iter, pred_scores, args, topic) - - if hasattr(model, 'beta_ema') and model.beta_ema > 0: - model.load_params(old_params) - - with open(cache_path, 'wb') as cache_file: - pickle.dump(pred_scores, cache_file) + if args.model in {'BERT-Base', 'BERT-Large'}: + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter:", args.gradient_accumulation_steps) + + args.batch_size = args.batch_size // args.gradient_accumulation_steps + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + + processor = dataset_map_bert[args.dataset]() + args.is_lowercase = 'uncased' in args.model + variant = 'bert-large-uncased' if args.model == 'BERT-Large' else 'bert-base-uncased' + + for topic in dataset.TOPICS: + topic_iter += 1 + # Skip topics that have already been predicted + if args.resume_snapshot and topic in pred_scores: + continue + + print("Training on topic %d of %d..." 
% (topic_iter, len(dataset.TOPICS))) + train_examples = processor.get_train_examples(args.data_dir, topic=topic) + num_train_optimization_steps = int( + len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs + model = Bert.from_pretrained(variant, cache_dir=args.cache_dir, num_labels=1) + model.to(device) + if n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], + 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + + optimizer = BertAdam(optimizer_grouped_parameters, + lr=args.lr, + warmup=args.warmup_proportion, + t_total=num_train_optimization_steps) + + trainer_config = { + 'model': args.model, + 'topic': topic, + 'dataset': dataset, + 'optimizer': optimizer, + 'batch_size': args.batch_size, + 'patience': args.patience, + 'epochs': args.epochs, + 'is_lowercase': True, + 'gradient_accumulation_steps': args.gradient_accumulation_steps, + 'max_seq_length': args.max_seq_length, + 'data_dir': args.data_dir, + 'model_outfile': args.save_path, + 'n_gpu': n_gpu, + 'device': args.device + } + + evaluator_config = { + 'model': args.model, + 'topic': topic, + 'dataset': dataset, + 'split': 'dev', + 'batch_size': args.batch_size, + 'ignore_lengths': True, + 'is_lowercase': True, + 'gradient_accumulation_steps': args.gradient_accumulation_steps, + 'max_seq_length': args.max_seq_length, + 'data_dir': args.data_dir, + 'n_gpu': n_gpu, + 'device': args.device + } + + dev_evaluator = RelevanceTransferEvaluator(model, evaluator_config, dataset=dataset, embedding=None, + processor=processor, data_loader=None) + trainer = RelevanceTransferTrainer(model, trainer_config, processor=processor, train_loader=None, + embedding=None, test_evaluator=None, dev_evaluator=dev_evaluator) + + trainer.train(args.epochs) + model = torch.load(trainer.snapshot_path) + + # Calculate dev and test metrics + evaluate_split(model, topic, 'dev', args, embedding=None, dataset=dataset, loader=None, processor=processor) + evaluate_split(model, topic, 'test', args, embedding=None, dataset=dataset, loader=None, processor=processor) + + with open(cache_path, 'wb') as cache_file: + pickle.dump(pred_scores, cache_file) + + else: + if not args.cuda: + args.gpu = -1 + if torch.cuda.is_available() and args.cuda: + torch.cuda.set_device(args.gpu) + torch.cuda.manual_seed(args.seed) + + with open(os.path.join('tasks', 'relevance_transfer', 'config.json'), 'r') as config_file: + topic_configs = json.load(config_file) + + for topic in dataset.TOPICS: + topic_iter += 1 + # Skip topics that have already been predicted + if args.resume_snapshot and topic in pred_scores: + continue + + print("Training on topic %d of %d..." 
% (topic_iter, len(dataset.TOPICS))) + train_iter, dev_iter, test_iter = dataset.iters(args.data_dir, args.word_vectors_file, + args.word_vectors_dir, topic, + batch_size=args.batch_size, device=args.gpu, + unk_init=UnknownWordVecCache.unk) + + print('Vocabulary size:', len(train_iter.dataset.TEXT_FIELD.vocab)) + print('Target Classes:', train_iter.dataset.NUM_CLASSES) + print('Train Instances:', len(train_iter.dataset)) + print('Dev Instances:', len(dev_iter.dataset)) + print('Test Instances:', len(test_iter.dataset)) + + config = deepcopy(args) + config.target_class = 1 + config.dataset = train_iter.dataset + config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab) + + if args.variable_dynamic_pool: + # Set dynamic pool length based on topic configs + if args.model in topic_configs and topic in topic_configs[args.model]: + print("Setting dynamic_pool to", topic_configs[args.model][topic]["dynamic_pool"]) + config.dynamic_pool = topic_configs[args.model][topic]["dynamic_pool"] + if config.dynamic_pool: + print("Setting dynamic_pool_length to", + topic_configs[args.model][topic]["dynamic_pool_length"]) + config.dynamic_pool_length = topic_configs[args.model][topic]["dynamic_pool_length"] + + model = model_map[args.model](config) + + if args.cuda: + model.cuda() + print('Shifting model to GPU...') + + parameter = filter(lambda p: p.requires_grad, model.parameters()) + optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) + + trainer_config = { + 'model': args.model, + 'dataset': dataset, + 'optimizer': optimizer, + 'batch_size': args.batch_size, + 'patience': args.patience, + 'resample': args.resample, + 'epochs': args.epochs, + 'is_lowercase': True, + 'gradient_accumulation_steps': args.gradient_accumulation_steps, + 'data_dir': args.data_dir, + 'model_outfile': args.save_path, + 'device': args.gpu + } + + evaluator_config = { + 'topic': topic, + 'model': args.model, + 'dataset': dataset, + 'batch_size': args.batch_size, + 'ignore_lengths': False, + 'data_dir': args.data_dir, + 'device': args.gpu + } + + if args.model in {'HAN', 'HR-CNN'}: + trainer_config['ignore_lengths'] = True + evaluator_config['ignore_lengths'] = True + + test_evaluator = RelevanceTransferEvaluator(model, evaluator_config, dataset=dataset, embedding=None, data_loader=test_iter) + dev_evaluator = RelevanceTransferEvaluator(model, evaluator_config, dataset=dataset, embedding=None, data_loader=dev_iter) + trainer = RelevanceTransferTrainer(model, trainer_config, embedding=None, train_loader=train_iter, + test_evaluator=test_evaluator, dev_evaluator=dev_evaluator) + + trainer.train(args.epochs) + model = torch.load(trainer.snapshot_path) + + if hasattr(model, 'beta_ema') and model.beta_ema > 0: + old_params = model.get_params() + model.load_ema_params() + + # Calculate dev and test metrics model, topic, split, config + evaluate_split(model, topic, 'dev', args, embedding=None, dataset=dataset, loader=dev_iter, processor=None) + evaluate_split(model, topic, 'test', args, embedding=None, dataset=dataset, loader=test_iter, processor=None) + + if hasattr(model, 'beta_ema') and model.beta_ema > 0: + model.load_params(old_params) + + with open(cache_path, 'wb') as cache_file: + pickle.dump(pred_scores, cache_file) save_ranks(pred_scores, args.output_path) diff --git a/tasks/relevance_transfer/args.py b/tasks/relevance_transfer/args.py index bcf2273..ed3a3f7 100644 --- a/tasks/relevance_transfer/args.py +++ b/tasks/relevance_transfer/args.py @@ -12,7 +12,7 @@ def get_args(): 
parser.add_argument('--mode', type=str, default='static', choices=['rand', 'static', 'non-static', 'multichannel']) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--seed', type=int, default=3435) - parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN']) + parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN', 'BERT-Base', 'BERT-Large']) parser.add_argument('--dataset', type=str, default='Robust04', choices=['Robust04', 'Robust05', 'Robust45']) parser.add_argument('--dev_every', type=int, default=30) parser.add_argument('--log_every', type=int, default=10) @@ -35,7 +35,7 @@ def get_args(): parser.add_argument('--bidirectional', action='store_true') parser.add_argument('--tar', action='store_true') parser.add_argument('--weight-decay', type=float, default=0) - parser.add_argument('--beta-ema', type=float, default = 0, help="for temporal averaging") + parser.add_argument('--beta-ema', type=float, default=0, help="for temporal averaging") parser.add_argument('--wdrop', type=float, default=0.0, help="for weight-drop") parser.add_argument('--embed-droprate', type=float, default=0.0, help="for embedded dropout") @@ -60,6 +60,14 @@ def get_args(): # HR-CNN parameters parser.add_argument('--sentence-channel', type=int, default=100) + # BERT parameters + parser.add_argument('--cache-dir', default='cache', type=str) + parser.add_argument('--variant', type=str, choices=['bert-base-uncased', 'bert-large-uncased', 'bert-base-cased', 'bert-large-cased']) + parser.add_argument('--max-seq-length', default=128, type=int) + parser.add_argument('--warmup-proportion', default=0.1, type=float) + parser.add_argument('--gradient-accumulation-steps', type=int, default=1) + parser.add_argument('--loss-scale', type=float, default=0) + # Re-ranking parameters parser.add_argument('--rerank', action='store_true') parser.add_argument("--ret-ranks", type=str, help='retrieval rank file', default="run.core17.bm25+rm3.wcro0405.hits10000.txt") From 6c8c7284ab57b505d5cad263a6570506407373d4 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Wed, 3 Jul 2019 02:51:08 -0400 Subject: [PATCH 12/22] Add hierarchical BERT model --- common/evaluators/bert_evaluator.py | 30 +++- common/trainers/bert_trainer.py | 33 +++- .../bert_processors/abstract_processor.py | 64 +++++++ models/bert/__main__.py | 5 +- models/hbert/README.md | 48 +++++ models/hbert/__init__.py | 0 models/hbert/__main__.py | 166 ++++++++++++++++++ models/hbert/args.py | 42 +++++ models/hbert/model.py | 75 ++++++++ models/hbert/sentence_encoder.py | 17 ++ utils/preprocessing.py | 17 ++ 11 files changed, 480 insertions(+), 17 deletions(-) create mode 100644 models/hbert/README.md create mode 100644 models/hbert/__init__.py create mode 100644 models/hbert/__main__.py create mode 100644 models/hbert/args.py create mode 100644 models/hbert/model.py create mode 100644 models/hbert/sentence_encoder.py create mode 100644 utils/preprocessing.py diff --git a/common/evaluators/bert_evaluator.py b/common/evaluators/bert_evaluator.py index 122d3ae..88c4660 100644 --- a/common/evaluators/bert_evaluator.py +++ b/common/evaluators/bert_evaluator.py @@ -7,7 +7,9 @@ from torch.utils.data import DataLoader, SequentialSampler, TensorDataset from tqdm import tqdm -from datasets.bert_processors.abstract_processor import convert_examples_to_features +from datasets.bert_processors.abstract_processor import convert_examples_to_features, \ + 
convert_examples_to_hierarchical_features +from utils.preprocessing import pad_input_matrix from utils.tokenization import BertTokenizer # Suppress warnings from sklearn.metrics @@ -26,14 +28,28 @@ def __init__(self, model, processor, args, split='dev'): self.eval_examples = self.processor.get_dev_examples(args.data_dir) def get_scores(self, silent=False): - eval_features = convert_examples_to_features(self.eval_examples, self.args.max_seq_length, self.tokenizer) + if self.args.is_hierarchical: + eval_features = convert_examples_to_hierarchical_features( + self.eval_examples, self.args.max_seq_length, self.tokenizer) + else: + eval_features = convert_examples_to_features( + self.eval_examples, self.args.max_seq_length, self.tokenizer) + + unpadded_input_ids = [f.input_ids for f in eval_features] + unpadded_input_mask = [f.input_mask for f in eval_features] + unpadded_segment_ids = [f.segment_ids for f in eval_features] + + if self.args.is_hierarchical: + pad_input_matrix(unpadded_input_ids, self.args.max_doc_length) + pad_input_matrix(unpadded_input_mask, self.args.max_doc_length) + pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length) - all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) + padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) + padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) + padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) + label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) - eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size) diff --git a/common/trainers/bert_trainer.py b/common/trainers/bert_trainer.py index 7c25e25..359dc91 100644 --- a/common/trainers/bert_trainer.py +++ b/common/trainers/bert_trainer.py @@ -3,7 +3,6 @@ import torch import torch.nn.functional as F -from tensorboardX import SummaryWriter from torch.utils.data import DataLoader, RandomSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm @@ -11,7 +10,9 @@ from common.evaluators.bert_evaluator import BertEvaluator from datasets.bert_processors.abstract_processor import convert_examples_to_features +from datasets.bert_processors.abstract_processor import convert_examples_to_hierarchical_features from utils.optimization import warmup_linear +from utils.preprocessing import pad_input_matrix from utils.tokenization import BertTokenizer @@ -25,7 +26,6 @@ def __init__(self, model, optimizer, processor, args): self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) self.snapshot_path = os.path.join(self.args.save_path, self.processor.NAME, '%s.pt' % timestamp) self.num_train_optimization_steps = int( @@ -74,18 +74,33 @@ def train_epoch(self, train_dataloader): self.iterations += 1 def train(self): - train_features = 
convert_examples_to_features( - self.train_examples, self.args.max_seq_length, self.tokenizer) + if self.args.is_hierarchical: + train_features = convert_examples_to_hierarchical_features( + self.train_examples, self.args.max_seq_length, self.tokenizer) + else: + train_features = convert_examples_to_features( + self.train_examples, self.args.max_seq_length, self.tokenizer) + + unpadded_input_ids = [f.input_ids for f in train_features] + unpadded_input_mask = [f.input_mask for f in train_features] + unpadded_segment_ids = [f.segment_ids for f in train_features] + + if self.args.is_hierarchical: + pad_input_matrix(unpadded_input_ids, self.args.max_doc_length) + pad_input_matrix(unpadded_input_mask, self.args.max_doc_length) + pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length) print("Number of examples: ", len(self.train_examples)) print("Batch size:", self.args.batch_size) print("Num of steps:", self.num_train_optimization_steps) - all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) - train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) + padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) + padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) + label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) + + train_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids) + if self.args.local_rank == -1: train_sampler = RandomSampler(train_data) else: diff --git a/datasets/bert_processors/abstract_processor.py b/datasets/bert_processors/abstract_processor.py index 4ef610a..0f0eee0 100644 --- a/datasets/bert_processors/abstract_processor.py +++ b/datasets/bert_processors/abstract_processor.py @@ -2,6 +2,7 @@ import sys import numpy as np +from nltk.tokenize import sent_tokenize class InputExample(object): @@ -170,6 +171,69 @@ def convert_examples_to_features(examples, max_seq_length, tokenizer, print_exam return features +def convert_examples_to_hierarchical_features(examples, max_seq_length, tokenizer, print_examples=False): + """ + Loads a data file into a list of InputBatch objects + :param examples: + :param max_seq_length: + :param tokenizer: + :param print_examples: + :return: a list of InputBatch objects + """ + + features = [] + for (ex_index, example) in enumerate(examples): + tokens_a = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_a)] + tokens_b = None + + if example.text_b: + tokens_b = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_b)] + # Modifies `tokens_a` and `tokens_b` in place so that the total length is less than the specified length + # Account for [CLS], [SEP], [SEP] + _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) + else: + # Account for [CLS] and [SEP] + for i0 in range(len(tokens_a)): + if len(tokens_a[i0]) > max_seq_length - 2: + tokens_a[i0] = tokens_a[i0][:(max_seq_length - 2)] + + tokens = [["[CLS]"] + line + ["[SEP]"] for line in tokens_a] + segment_ids = [[0] * len(line) for line in tokens] + + if tokens_b: + tokens += tokens_b + ["[SEP]"] + segment_ids += [1] * (len(tokens_b) + 1) + + input_ids = list() + 
for line in tokens: + input_ids.append(tokenizer.convert_tokens_to_ids(line)) + + # Input mask has 1 for real tokens and 0 for padding tokens + input_mask = [[1] * len(line_ids) for line_ids in input_ids] + + # Zero-pad up to the sequence length. + padding = [[0] * (max_seq_length - len(line_ids)) for line_ids in input_ids] + for i0 in range(len(input_ids)): + input_ids[i0] += padding[i0] + input_mask[i0] += padding[i0] + segment_ids[i0] += padding[i0] + + label_id = [float(x) for x in example.label] + + if print_examples and ex_index < 5: + print("tokens: %s" % " ".join([str(x) for x in tokens])) + print("input_ids: %s" % " ".join([str(x) for x in input_ids])) + print("input_mask: %s" % " ".join([str(x) for x in input_mask])) + print("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) + print("label: %s" % example.label) + + features.append(InputFeatures(input_ids=input_ids, + input_mask=input_mask, + segment_ids=segment_ids, + label_id=label_id)) + return features + + def _truncate_seq_pair(tokens_a, tokens_b, max_length): """ Truncates a sequence pair in place to the maximum length diff --git a/models/bert/__main__.py b/models/bert/__main__.py index daf94fb..3241db7 100644 --- a/models/bert/__main__.py +++ b/models/bert/__main__.py @@ -1,5 +1,6 @@ import os import random +import time import numpy as np import torch @@ -26,7 +27,9 @@ def evaluate_split(model, processor, args, split='dev'): evaluator = BertEvaluator(model, processor, args, split) + start_time = time.time() accuracy, precision, recall, f1, avg_loss = evaluator.get_scores(silent=True)[0] + print("Inference time", time.time() - start_time) print('\n' + LOG_HEADER) print(LOG_TEMPLATE.format(split.upper(), accuracy, precision, recall, f1, avg_loss)) @@ -86,6 +89,7 @@ def evaluate_split(model, processor, args, split='dev'): processor = dataset_map[args.dataset]() args.is_lowercase = 'uncased' in args.model + args.is_hierarchical = False tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) train_examples = None @@ -109,7 +113,6 @@ def evaluate_split(model, processor, args, split='dev'): from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Install NVIDIA Apex to use distributed and FP16 training.") - model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) diff --git a/models/hbert/README.md b/models/hbert/README.md new file mode 100644 index 0000000..5d9337e --- /dev/null +++ b/models/hbert/README.md @@ -0,0 +1,48 @@ +# Hierarchical BERT + +A wrapper around pre-trained [BERT](https://arxiv.org/abs/1810.04805) models for finetuning on Document Classification tasks. + +## Quick start + +For fine-tuning the pre-trained BERT-base model on Reuters dataset, just run the following from the project working directory. + +``` +python -m models.hbert --dataset Reuters --model bert-base-uncased --max-seq-length 256 --batch-size 16 --lr 2e-5 --epochs 30 +``` + +The best model weights will be saved in + +``` +models/hbert/saves/Reuters/best_model.pt +``` + +To test the model, you can use the following command. 
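+(Note: when `--trained-model` points to a saved checkpoint, training is skipped and the loaded model is only evaluated on the dev and test splits.)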
+ +``` +python -m models.hbert --dataset Reuters --model bert-base-uncased --max-seq-length 256 --batch-size 16 --lr 2e-5 --epochs 30 --trained-model models/hbert/saves/Reuters/best_model.pt +``` + +## Model Types + +We follow the same types of models as in [huggingface's implementation](https://github.com/huggingface/pytorch-pretrained-BERT.git) +- bert-base-uncased +- bert-large-uncased +- bert-base-cased +- bert-large-cased + +## Dataset + +We experiment the model on the following datasets: +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 + +## Settings + +Finetuning procedure can be found in : +- [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) +- [DocBERT: BERT for Document Classification](https://arxiv.org/abs/1904.08398v1) + +## Acknowledgement +- Our implementation is inspired from [huggingface's implementation](https://github.com/huggingface/pytorch-pretrained-BERT.git) diff --git a/models/hbert/__init__.py b/models/hbert/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/hbert/__main__.py b/models/hbert/__main__.py new file mode 100644 index 0000000..b7de67c --- /dev/null +++ b/models/hbert/__main__.py @@ -0,0 +1,166 @@ +import os +import random +import time + +import numpy as np +import torch + +from common.evaluators.bert_evaluator import BertEvaluator +from common.trainers.bert_trainer import BertTrainer +from datasets.bert_processors.aapd_processor import AAPDProcessor +from datasets.bert_processors.agnews_processor import AGNewsProcessor +from datasets.bert_processors.imdb_processor import IMDBProcessor +from datasets.bert_processors.reuters_processor import ReutersProcessor +from datasets.bert_processors.sogou_processor import SogouProcessor +from datasets.bert_processors.sst_processor import SST2Processor +from datasets.bert_processors.yelp2014_processor import Yelp2014Processor +from models.hbert.args import get_args +from models.hbert.model import HierarchicalBert +from utils.io import PYTORCH_PRETRAINED_BERT_CACHE +from utils.optimization import BertAdam +from utils.tokenization import BertTokenizer + +# String templates for logging results +LOG_HEADER = 'Split Dev/Acc. Dev/Pr. Dev/Re. 
Dev/F1 Dev/Loss' +LOG_TEMPLATE = ' '.join('{:>5s},{:>9.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) + + +def evaluate_split(model, processor, args, split='dev'): + evaluator = BertEvaluator(model, processor, args, split) + start_time = time.time() + accuracy, precision, recall, f1, avg_loss = evaluator.get_scores(silent=True)[0] + print("Inference time", time.time() - start_time) + print('\n' + LOG_HEADER) + print(LOG_TEMPLATE.format(split.upper(), accuracy, precision, recall, f1, avg_loss)) + + +if __name__ == '__main__': + # Set default configuration in args.py + args = get_args() + + if args.local_rank == -1 or not args.cuda: + device = torch.device("cuda" if torch.cuda.is_available() and args.cuda else "cpu") + n_gpu = torch.cuda.device_count() + else: + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + n_gpu = 1 + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.distributed.init_process_group(backend='nccl') + + print('Device:', str(device).upper()) + print('Number of GPUs:', n_gpu) + print('Distributed training:', bool(args.local_rank != -1)) + print('FP16:', args.fp16) + + # Set random seed for reproducibility + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + dataset_map = { + 'SST-2': SST2Processor, + 'Reuters': ReutersProcessor, + 'IMDB': IMDBProcessor, + 'AAPD': AAPDProcessor, + 'AGNews': AGNewsProcessor, + 'Yelp2014': Yelp2014Processor, + 'Sogou': SogouProcessor + } + + if args.gradient_accumulation_steps < 1: + raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( + args.gradient_accumulation_steps)) + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + + args.batch_size = args.batch_size // args.gradient_accumulation_steps + args.device = device + args.n_gpu = n_gpu + args.num_labels = dataset_map[args.dataset].NUM_CLASSES + args.is_multilabel = dataset_map[args.dataset].IS_MULTILABEL + + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + + processor = dataset_map[args.dataset]() + args.is_lowercase = 'uncased' in args.model + args.is_hierarchical = True + tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase) + + train_examples = None + num_train_optimization_steps = None + if not args.trained_model: + train_examples = processor.get_train_examples(args.data_dir) + num_train_optimization_steps = int( + len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs + if args.local_rank != -1: + num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size() + + cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)) + model = HierarchicalBert(args, cache_dir=cache_dir) + + if args.fp16: + model.half() + model.to(device) + + if args.local_rank != -1: + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise ImportError("Install NVIDIA Apex to use distributed and FP16 training.") + model = DDP(model) + elif n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Prepare optimizer + param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + 
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
+        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}]
+
+    if args.fp16:
+        try:
+            from apex.optimizers import FP16_Optimizer
+            from apex.optimizers import FusedAdam
+        except ImportError:
+            raise ImportError("Please install NVIDIA Apex for distributed and FP16 training")
+
+        optimizer = FusedAdam(optimizer_grouped_parameters,
+                              lr=args.lr,
+                              bias_correction=False,
+                              max_grad_norm=1.0)
+        if args.loss_scale == 0:
+            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
+        else:
+            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
+
+    else:
+        optimizer = BertAdam(optimizer_grouped_parameters,
+                             lr=args.lr,
+                             warmup=args.warmup_proportion,
+                             t_total=num_train_optimization_steps)
+
+    trainer = BertTrainer(model, optimizer, processor, args)
+
+    if not args.trained_model:
+        trainer.train()
+        model = torch.load(trainer.snapshot_path)
+    else:
+        model = HierarchicalBert(args, cache_dir=cache_dir)
+        model_ = torch.load(args.trained_model, map_location=lambda storage, loc: storage)
+        state = {}
+        for key in model_.state_dict().keys():
+            new_key = key.replace("module.", "")
+            state[new_key] = model_.state_dict()[key]
+        model.load_state_dict(state)
+        model = model.to(device)
+
+    evaluate_split(model, processor, args, split='dev')
+    evaluate_split(model, processor, args, split='test')
+
diff --git a/models/hbert/args.py b/models/hbert/args.py
new file mode 100644
index 0000000..021364f
--- /dev/null
+++ b/models/hbert/args.py
@@ -0,0 +1,42 @@
+import os
+
+import models.args
+
+
+def get_args():
+    parser = models.args.get_args()
+
+    parser.add_argument('--model', default=None, type=str, required=True)
+    parser.add_argument('--dataset', type=str, default='SST-2', choices=['SST-2', 'AGNews', 'Reuters', 'AAPD', 'IMDB', 'Yelp2014'])
+    parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'bert'))
+    parser.add_argument('--cache-dir', default='cache', type=str)
+    parser.add_argument('--trained-model', default=None, type=str)
+    parser.add_argument('--local-rank', type=int, default=-1, help='local rank for distributed training')
+    parser.add_argument('--fp16', action='store_true', help='enable 16-bit floating point precision')
+    parser.add_argument('--loss-scale', type=float, default=0, help='loss scaling to improve fp16 numeric stability')
+
+    parser.add_argument('--dropout', type=float, default=0.5)
+    parser.add_argument('--dropblock', type=float, default=0.0)
+    parser.add_argument('--dropblock-size', type=int, default=7)
+    parser.add_argument('--beta-ema', type=float, default=0)
+    parser.add_argument('--embed-droprate', type=float, default=0.0)
+    parser.add_argument('--batchnorm', action='store_true')
+    parser.add_argument('--dynamic-pool', action='store_true')
+    parser.add_argument('--dynamic-pool-length', type=int, default=8)
+    parser.add_argument('--conv-output-channels', type=int, default=100)
+
+    parser.add_argument('--max-seq-length', default=128, type=int,
+                        help='maximum total input sequence length after tokenization')
+
+    parser.add_argument('--max-doc-length', default=128, type=int,
+                        help='maximum number of lines processed in one document')
+
+    parser.add_argument('--warmup-proportion', default=0.1, type=float,
+                        help='proportion of training to perform linear learning rate warmup for')
+
+    parser.add_argument('--gradient-accumulation-steps', type=int, default=1,
+                        help='number of updates steps to accumulate
before performing a backward/update pass') + + + args = parser.parse_args() + return args diff --git a/models/hbert/model.py b/models/hbert/model.py new file mode 100644 index 0000000..9196d1a --- /dev/null +++ b/models/hbert/model.py @@ -0,0 +1,75 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from models.hbert.sentence_encoder import BertSentenceEncoder + + +class HierarchicalBert(nn.Module): + + def __init__(self, args, cache_dir): + super().__init__() + self.args =args + ks = 3 + input_channels = 1 + + self.sentence_encoder = BertSentenceEncoder.from_pretrained( + args.model, + cache_dir=cache_dir, + num_labels=args.num_labels) + + self.conv1 = nn.Conv2d(input_channels, + args.conv_output_channels, + (3, self.sentence_encoder.config.hidden_size), + padding=(2, 0)) + self.conv2 = nn.Conv2d(input_channels, + args.conv_output_channels, + (4, self.sentence_encoder.config.hidden_size), + padding=(3, 0)) + self.conv3 = nn.Conv2d(input_channels, + args.conv_output_channels, + (5, self.sentence_encoder.config.hidden_size), + padding=(4, 0)) + + self.dropout = nn.Dropout(args.dropout) + self.fc1 = nn.Linear(ks * args.conv_output_channels, args.num_labels) + + def forward(self, input_ids, segment_ids=None, input_mask=None): + """ + a batch is a tensor of shape [batch_size, #file_in_commit, #line_in_file] + and each element is a line, i.e., a bert_batch, + which consists of input_ids, input_mask, segment_ids, label_ids + """ + input_ids = input_ids.permute(1, 0, 2) # (sentences, batch_size, words) + segment_ids = segment_ids.permute(1, 0, 2) + input_mask = input_mask.permute(1, 0, 2) + + x_encoded = [] + for i0 in range(len(input_ids)): + x_encoded.append(self.sentence_encoder(input_ids[i0], input_mask[i0], segment_ids[i0])) + + x = torch.stack(x_encoded) # (sentences, batch_size, hidden_size) + x = x.permute(1, 0, 2) # (batch_size, sentences, hidden_size) + x = x.unsqueeze(1) # (batch_size, input_channels, sentences, hidden_size) + + if self.args.batchnorm: + x = [F.relu(self.batchnorm1(self.conv1(x))).squeeze(3), + F.relu(self.batchnorm2(self.conv2(x))).squeeze(3), + F.relu(self.batchnorm3(self.conv3(x))).squeeze(3)] + else: + x = [F.relu(self.conv1(x)).squeeze(3), + F.relu(self.conv2(x)).squeeze(3), + F.relu(self.conv3(x)).squeeze(3)] + + if self.args.dynamic_pool: + x = [self.dynamic_pool(i).squeeze(2) for i in x] # (batch, output_channels) * ks + x = torch.cat(x, 1) # (batch, output_channels * ks) + x = x.view(-1, self.filter_widths * self.output_channel * self.dynamic_pool_length) + else: + x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # (batch, output_channels, num_sentences) * ks + x = torch.cat(x, 1) # (batch, channel_output * ks) + + x = self.dropout(x) + logit = self.fc1(x) # (batch, num_labels) + + return logit diff --git a/models/hbert/sentence_encoder.py b/models/hbert/sentence_encoder.py new file mode 100644 index 0000000..bda242a --- /dev/null +++ b/models/hbert/sentence_encoder.py @@ -0,0 +1,17 @@ +from torch import nn + +from models.bert.model import BertPreTrainedModel, BertModel + + +class BertSentenceEncoder(BertPreTrainedModel): + def __init__(self, config, num_labels): + super().__init__(config) + self.num_labels = num_labels + self.bert = BertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.apply(self.init_bert_weights) + + def forward(self, input_ids, token_type_ids=None, attention_mask=None): + _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) + 
pooled_output = self.dropout(pooled_output) + return pooled_output diff --git a/utils/preprocessing.py b/utils/preprocessing.py new file mode 100644 index 0000000..3ed55c2 --- /dev/null +++ b/utils/preprocessing.py @@ -0,0 +1,17 @@ +import numpy as np + + +def pad_input_matrix(unpadded_matrix, max_doc_length): + """ + Returns a zero-padded matrix for a given jagged list + :param unpadded_matrix: jagged list to be padded + :return: zero-padded matrix + """ + max_doc_length = min(max_doc_length, max(len(x) for x in unpadded_matrix)) + zero_padding_array = [0 for i0 in range(len(unpadded_matrix[0][0]))] + + for i0 in range(len(unpadded_matrix)): + if len(unpadded_matrix[i0]) < max_doc_length: + unpadded_matrix[i0] += [zero_padding_array for i1 in range(max_doc_length - len(unpadded_matrix[i0]))] + elif len(unpadded_matrix[i0]) > max_doc_length: + unpadded_matrix[i0] = unpadded_matrix[i0][:max_doc_length] From 615fa274907105abefee2bf89540672982a39743 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 7 Jul 2019 15:13:16 -0400 Subject: [PATCH 13/22] Remove tensorboardX logging --- common/trainers/classification_trainer.py | 9 --------- common/trainers/trainer.py | 4 ---- requirements.txt | 1 - 3 files changed, 14 deletions(-) diff --git a/common/trainers/classification_trainer.py b/common/trainers/classification_trainer.py index a6e0da0..0de3d51 100644 --- a/common/trainers/classification_trainer.py +++ b/common/trainers/classification_trainer.py @@ -5,7 +5,6 @@ import numpy as np import torch import torch.nn.functional as F -from tensorboardX import SummaryWriter from common.trainers.trainer import Trainer @@ -26,7 +25,6 @@ def __init__(self, model, embedding, train_loader, trainer_config, train_evaluat '{:>6.0f},{:>5.0f},{:>9.0f},{:>5.0f}/{:<5.0f} {:>7.4f},{:>8.4f},{:8.4f},{:12.4f},{:12.4f}'.split(',')) timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) self.snapshot_path = os.path.join(self.model_outfile, self.train_loader.dataset.NAME, '%s.pt' % timestamp) def train_epoch(self, epoch): @@ -75,8 +73,6 @@ def train_epoch(self, epoch): if self.iterations % self.log_interval == 1: niter = epoch * len(self.train_loader) + batch_idx - self.writer.add_scalar('Train/Loss', loss.data.item(), niter) - self.writer.add_scalar('Train/Accuracy', train_acc, niter) print(self.log_template.format(time.time() - self.start, epoch, self.iterations, 1 + batch_idx, len(self.train_loader), 100.0 * (1 + batch_idx) / len(self.train_loader), loss.item(), train_acc)) @@ -94,11 +90,6 @@ def train(self, epochs): # Evaluate performance on validation set dev_acc, dev_precision, dev_recall, dev_f1, dev_loss = self.dev_evaluator.get_scores()[0] - self.writer.add_scalar('Dev/Loss', dev_loss, epoch) - self.writer.add_scalar('Dev/Accuracy', dev_acc, epoch) - self.writer.add_scalar('Dev/Precision', dev_precision, epoch) - self.writer.add_scalar('Dev/Recall', dev_recall, epoch) - self.writer.add_scalar('Dev/F-measure', dev_f1, epoch) # Print validation results print('\n' + dev_header) diff --git a/common/trainers/trainer.py b/common/trainers/trainer.py index 4be2da1..51d96ef 100644 --- a/common/trainers/trainer.py +++ b/common/trainers/trainer.py @@ -14,12 +14,8 @@ def __init__(self, model, embedding, train_loader, trainer_config, train_evaluat self.model_outfile = trainer_config.get('model_outfile') self.lr_reduce_factor = trainer_config.get('lr_reduce_factor') self.patience = trainer_config.get('patience') - self.use_tensorboard = 
trainer_config.get('tensorboard') self.clip_norm = trainer_config.get('clip_norm') - if self.use_tensorboard: - from tensorboardX import SummaryWriter - self.writer = SummaryWriter(log_dir=None, comment='' if trainer_config['run_label'] is None else trainer_config['run_label']) self.logger = trainer_config.get('logger') self.train_evaluator = train_evaluator diff --git a/requirements.txt b/requirements.txt index a22f217..fe8e9b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,5 +3,4 @@ numpy==1.14.0 Cython==0.28.2 scikit-learn==0.19.1 scipy==1.0.0 -tensorboardX==1.6 torchtext==0.2.3 From b40cccbce09545cec9e2172b40e762cc1ef5d7ad Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 7 Jul 2019 15:14:53 -0400 Subject: [PATCH 14/22] Add hierarchical BERT for relevance transfer --- .../relevance_transfer_evaluator.py | 37 +++++++---- common/trainers/relevance_transfer_trainer.py | 42 ++++++++----- .../bert_processors/robust45_processor.py | 63 +++++++++++++------ datasets/robust45.py | 2 - models/hbert/args.py | 4 +- models/hbert/model.py | 26 ++++---- tasks/relevance_transfer/__main__.py | 28 +++++++-- tasks/relevance_transfer/args.py | 5 +- 8 files changed, 136 insertions(+), 71 deletions(-) diff --git a/common/evaluators/relevance_transfer_evaluator.py b/common/evaluators/relevance_transfer_evaluator.py index aa561f3..a5baf10 100644 --- a/common/evaluators/relevance_transfer_evaluator.py +++ b/common/evaluators/relevance_transfer_evaluator.py @@ -9,6 +9,7 @@ from common.evaluators.evaluator import Evaluator from datasets.bert_processors.robust45_processor import convert_examples_to_features +from utils.preprocessing import pad_input_matrix from utils.tokenization import BertTokenizer # Suppress warnings from sklearn.metrics @@ -21,7 +22,7 @@ def __init__(self, model, config, **kwargs): super().__init__(kwargs['dataset'], model, kwargs['embedding'], kwargs['data_loader'], batch_size=config['batch_size'], device=config['device']) - if config['model'] in {'BERT-Base', 'BERT-Large'}: + if config['model'] in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: variant = 'bert-large-uncased' if config['model'] == 'BERT-Large' else 'bert-base-uncased' self.tokenizer = BertTokenizer.from_pretrained(variant, is_lowercase=config['is_lowercase']) self.processor = kwargs['processor'] @@ -43,16 +44,30 @@ def get_scores(self, silent=False): self.docid = list() total_loss = 0 - if self.config['model'] in {'BERT-Base', 'BERT-Large'}: - eval_features = convert_examples_to_features(self.eval_examples, self.config['max_seq_length'], self.tokenizer) - - all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) - all_document_ids = torch.tensor([f.guid for f in eval_features], dtype=torch.long) - - eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_document_ids) + if self.config['model'] in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: + eval_features = convert_examples_to_features( + self.eval_examples, + self.config['max_seq_length'], + self.tokenizer, + self.config['is_hierarchical'] + ) + + unpadded_input_ids = [f.input_ids for f in eval_features] + unpadded_input_mask = [f.input_mask for f in eval_features] + unpadded_segment_ids = [f.segment_ids for 
f in eval_features] + + if self.config['is_hierarchical']: + pad_input_matrix(unpadded_input_ids, self.config['max_doc_length']) + pad_input_matrix(unpadded_input_mask, self.config['max_doc_length']) + pad_input_matrix(unpadded_segment_ids, self.config['max_doc_length']) + + padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) + padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) + padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) + label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) + document_ids = torch.tensor([f.guid for f in eval_features], dtype=torch.long) + + eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids, document_ids) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.config['batch_size']) diff --git a/common/trainers/relevance_transfer_trainer.py b/common/trainers/relevance_transfer_trainer.py index 8a0e592..7057f9f 100644 --- a/common/trainers/relevance_transfer_trainer.py +++ b/common/trainers/relevance_transfer_trainer.py @@ -3,13 +3,13 @@ import torch import torch.nn.functional as F -from tensorboardX import SummaryWriter from torch.utils.data import TensorDataset, RandomSampler, DataLoader from tqdm import trange, tqdm from common.trainers.trainer import Trainer from datasets.bert_processors.robust45_processor import convert_examples_to_features from tasks.relevance_transfer.resample import ImbalancedDatasetSampler +from utils.preprocessing import pad_input_matrix from utils.tokenization import BertTokenizer @@ -17,7 +17,7 @@ class RelevanceTransferTrainer(Trainer): def __init__(self, model, config, **kwargs): super().__init__(model, kwargs['embedding'], kwargs['train_loader'], config, None, kwargs['test_evaluator'], kwargs['dev_evaluator']) - if config['model'] in {'BERT-Base', 'BERT-Large'}: + if config['model'] in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: variant = 'bert-large-uncased' if config['model'] == 'BERT-Large' else 'bert-base-uncased' self.tokenizer = BertTokenizer.from_pretrained(variant, is_lowercase=config['is_lowercase']) self.processor = kwargs['processor'] @@ -37,14 +37,13 @@ def __init__(self, model, config, **kwargs): self.log_template = ' '.join('{:>5.0f},{:>9.0f},{:>6.0f}/{:<5.0f} {:>6.4f},{:>8.4f},{:8.4f},{:8.4f},{:10.4f}'.split(',')) timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - self.writer = SummaryWriter(log_dir="tensorboard_logs/" + timestamp) self.snapshot_path = os.path.join(self.model_outfile, config['dataset'].NAME, '%s.pt' % timestamp) def train_epoch(self): for step, batch in enumerate(tqdm(self.train_loader, desc="Training")): self.model.train() - if self.config['model'] in {'BERT-Base', 'BERT-Large'}: + if self.config['model'] in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: batch = tuple(t.to(self.config['device']) for t in batch) input_ids, input_mask, segment_ids, label_ids = batch logits = torch.sigmoid(self.model(input_ids, segment_ids, input_mask)).squeeze(dim=1) @@ -61,6 +60,7 @@ def train_epoch(self): self.optimizer.step() self.optimizer.zero_grad() self.iterations += 1 + else: # Clip gradients to address exploding gradients in LSTM torch.nn.utils.clip_grad_norm_(self.model.parameters(), 25.0) @@ -114,15 +114,29 @@ def train(self, epochs): os.makedirs(self.model_outfile, exist_ok=True) os.makedirs(os.path.join(self.model_outfile, self.config['dataset'].NAME), exist_ok=True) - if 
self.config['model'] in {'BERT-Base', 'BERT-Large'}: + if self.config['model'] in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: train_features = convert_examples_to_features( - self.train_examples, self.config['max_seq_length'], self.tokenizer) - - all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) - all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) - all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) - all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) - train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) + self.train_examples, + self.config['max_seq_length'], + self.tokenizer, + self.config['is_hierarchical'] + ) + + unpadded_input_ids = [f.input_ids for f in train_features] + unpadded_input_mask = [f.input_mask for f in train_features] + unpadded_segment_ids = [f.segment_ids for f in train_features] + + if self.config['is_hierarchical']: + pad_input_matrix(unpadded_input_ids, self.config['max_doc_length']) + pad_input_matrix(unpadded_input_mask, self.config['max_doc_length']) + pad_input_matrix(unpadded_segment_ids, self.config['max_doc_length']) + + padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long) + padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long) + padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long) + label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) + + train_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, label_ids) train_sampler = RandomSampler(train_data) self.train_loader = DataLoader(train_data, sampler=train_sampler, batch_size=self.config['batch_size']) @@ -132,10 +146,6 @@ def train(self, epochs): # Evaluate performance on validation set dev_acc, dev_precision, dev_ap, dev_f1, dev_loss = self.dev_evaluator.get_scores()[0] - self.writer.add_scalar('Dev/Loss', dev_loss, epoch) - self.writer.add_scalar('Dev/Accuracy', dev_acc, epoch) - self.writer.add_scalar('Dev/Precision', dev_precision, epoch) - self.writer.add_scalar('Dev/AP', dev_ap, epoch) tqdm.write(self.log_header) tqdm.write(self.log_template.format(epoch, self.iterations, epoch, epochs, dev_acc, dev_precision, dev_ap, dev_f1, dev_loss)) diff --git a/datasets/bert_processors/robust45_processor.py b/datasets/bert_processors/robust45_processor.py index 4780bf1..8349c08 100644 --- a/datasets/bert_processors/robust45_processor.py +++ b/datasets/bert_processors/robust45_processor.py @@ -1,5 +1,7 @@ import os +from nltk import sent_tokenize + from datasets.bert_processors.abstract_processor import BertProcessor, InputExample, InputFeatures @@ -18,7 +20,6 @@ class Robust45Processor(BertProcessor): '362', '363', '367', '372', '375', '378', '379', '389', '393', '394', '397', '399', '400', '404', '408', '414', '416', '419', '422', '423', '426', '427', '433', '435', '436', '439', '442', '443', '445', '614', '620', '626', '646', '677', '690'] - TOPICS = ['307', '310', '321', '325', '330'] def get_train_examples(self, data_dir, **kwargs): return self._create_examples( @@ -47,9 +48,10 @@ def _create_examples(lines, split): return examples -def convert_examples_to_features(examples, max_seq_length, tokenizer): +def convert_examples_to_features(examples, max_seq_length, tokenizer, is_hierarchical=False): """ Loads a data file into a list of InputBatch objects + :param is_hierarchical: :param examples: :param 
max_seq_length: :param tokenizer: @@ -58,30 +60,51 @@ def convert_examples_to_features(examples, max_seq_length, tokenizer): features = [] for (ex_index, example) in enumerate(examples): - tokens_a = tokenizer.tokenize(example.text_a) + if is_hierarchical: + tokens_a = [tokenizer.tokenize(line) for line in sent_tokenize(example.text_a)] + + # Account for [CLS] and [SEP] + for i0 in range(len(tokens_a)): + if len(tokens_a[i0]) > max_seq_length - 2: + tokens_a[i0] = tokens_a[i0][:(max_seq_length - 2)] + + tokens = [["[CLS]"] + line + ["[SEP]"] for line in tokens_a] + segment_ids = [[0] * len(line) for line in tokens] + + input_ids = list() + for line in tokens: + input_ids.append(tokenizer.convert_tokens_to_ids(line)) + + # Input mask has 1 for real tokens and 0 for padding tokens + input_mask = [[1] * len(line_ids) for line_ids in input_ids] + + # Zero-pad up to the sequence length. + padding = [[0] * (max_seq_length - len(line_ids)) for line_ids in input_ids] + for i0 in range(len(input_ids)): + input_ids[i0] += padding[i0] + input_mask[i0] += padding[i0] + segment_ids[i0] += padding[i0] - tokens_b = None - # Account for [CLS] and [SEP] with "- 2" - if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[:(max_seq_length - 2)] + else: + tokens_a = tokenizer.tokenize(example.text_a) - tokens = ["[CLS]"] + tokens_a + ["[SEP]"] - segment_ids = [0] * len(tokens) + # Account for [CLS] and [SEP] with "- 2" + if len(tokens_a) > max_seq_length - 2: + tokens_a = tokens_a[:(max_seq_length - 2)] - if tokens_b: - tokens += tokens_b + ["[SEP]"] - segment_ids += [1] * (len(tokens_b) + 1) + tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + segment_ids = [0] * len(tokens) - input_ids = tokenizer.convert_tokens_to_ids(tokens) + input_ids = tokenizer.convert_tokens_to_ids(tokens) - # The mask has 1 for real tokens and 0 for padding tokens - input_mask = [1] * len(input_ids) + # The mask has 1 for real tokens and 0 for padding tokens + input_mask = [1] * len(input_ids) - # Zero-pad up to the sequence length - padding = [0] * (max_seq_length - len(input_ids)) - input_ids += padding - input_mask += padding - segment_ids += padding + # Zero-pad up to the sequence length + padding = [0] * (max_seq_length - len(input_ids)) + input_ids += padding + input_mask += padding + segment_ids += padding try: docid = int(example.guid) diff --git a/datasets/robust45.py b/datasets/robust45.py index e9cf1a8..cf51328 100644 --- a/datasets/robust45.py +++ b/datasets/robust45.py @@ -39,7 +39,6 @@ def process_labels(string): :param string: :return: """ - # return [float(x) for x in string] return 0 if string == '01' else 1 @@ -67,7 +66,6 @@ class Robust45(TabularDataset): '362', '363', '367', '372', '375', '378', '379', '389', '393', '394', '397', '399', '400', '404', '408', '414', '416', '419', '422', '423', '426', '427', '433', '435', '436', '439', '442', '443', '445', '614', '620', '626', '646', '677', '690'] - TOPICS = ['307', '310', '321', '325', '330'] @staticmethod def sort_key(ex): diff --git a/models/hbert/args.py b/models/hbert/args.py index 021364f..9c2771f 100644 --- a/models/hbert/args.py +++ b/models/hbert/args.py @@ -23,12 +23,12 @@ def get_args(): parser.add_argument('--batchnorm', action='store_true') parser.add_argument('--dynamic-pool', action='store_true') parser.add_argument('--dynamic-pool-length', type=int, default=8) - parser.add_argument('--conv-output-channels', type=int, default=100) + parser.add_argument('--output-channel', type=int, default=100) parser.add_argument('--max-seq-length', default=128, 
type=int, help='maximum total input sequence length after tokenization') - parser.add_argument('--max-doc-length', default=128, type=int, + parser.add_argument('--max-doc-length', default=16, type=int, help='maximum number of lines processed in one document') parser.add_argument('--warmup-proportion', default=0.1, type=float, diff --git a/models/hbert/model.py b/models/hbert/model.py index 9196d1a..3cf13d8 100644 --- a/models/hbert/model.py +++ b/models/hbert/model.py @@ -7,32 +7,32 @@ class HierarchicalBert(nn.Module): - def __init__(self, args, cache_dir): + def __init__(self, args, cache_dir, **kwargs): super().__init__() self.args =args - ks = 3 input_channels = 1 + ks = 3 self.sentence_encoder = BertSentenceEncoder.from_pretrained( - args.model, + kwargs['variant'] if 'variant' in kwargs else args.model, cache_dir=cache_dir, num_labels=args.num_labels) self.conv1 = nn.Conv2d(input_channels, - args.conv_output_channels, + args.output_channel, (3, self.sentence_encoder.config.hidden_size), padding=(2, 0)) self.conv2 = nn.Conv2d(input_channels, - args.conv_output_channels, + args.output_channel, (4, self.sentence_encoder.config.hidden_size), padding=(3, 0)) self.conv3 = nn.Conv2d(input_channels, - args.conv_output_channels, + args.output_channel, (5, self.sentence_encoder.config.hidden_size), padding=(4, 0)) self.dropout = nn.Dropout(args.dropout) - self.fc1 = nn.Linear(ks * args.conv_output_channels, args.num_labels) + self.fc1 = nn.Linear(ks * args.output_channel, args.num_labels) def forward(self, input_ids, segment_ids=None, input_mask=None): """ @@ -62,14 +62,14 @@ def forward(self, input_ids, segment_ids=None, input_mask=None): F.relu(self.conv3(x)).squeeze(3)] if self.args.dynamic_pool: - x = [self.dynamic_pool(i).squeeze(2) for i in x] # (batch, output_channels) * ks - x = torch.cat(x, 1) # (batch, output_channels * ks) + x = [self.dynamic_pool(i).squeeze(2) for i in x] # (batch_size, output_channels) * ks + x = torch.cat(x, 1) # (batch_size, output_channels * ks) x = x.view(-1, self.filter_widths * self.output_channel * self.dynamic_pool_length) else: - x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # (batch, output_channels, num_sentences) * ks - x = torch.cat(x, 1) # (batch, channel_output * ks) + x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # (batch_size, output_channels, num_sentences) * ks + x = torch.cat(x, 1) # (batch_size, channel_output * ks) x = self.dropout(x) - logit = self.fc1(x) # (batch, num_labels) + logits = self.fc1(x) # (batch_size, num_labels) - return logit + return logits diff --git a/tasks/relevance_transfer/__main__.py b/tasks/relevance_transfer/__main__.py index 9b3b9bb..a2aef49 100644 --- a/tasks/relevance_transfer/__main__.py +++ b/tasks/relevance_transfer/__main__.py @@ -16,6 +16,7 @@ from datasets.robust05 import Robust05, Robust05Hierarchical from datasets.robust45 import Robust45, Robust45Hierarchical from models.bert.model import BertForSequenceClassification as Bert +from models.hbert.model import HierarchicalBert from models.han.model import HAN from models.kim_cnn.model import KimCNN from models.reg_lstm.model import RegLSTM @@ -55,9 +56,11 @@ def evaluate_split(model, topic, split, config, **kwargs): 'is_lowercase': True, 'gradient_accumulation_steps': config.gradient_accumulation_steps, 'max_seq_length': config.max_seq_length, + 'max_doc_length': args.max_doc_length, 'data_dir': config.data_dir, 'n_gpu': n_gpu, - 'device': config.device + 'device': config.device, + 'is_hierarchical': True if args.model in {'HBERT-Base', 
'HBERT-Large'} else False } if config.model in {'HAN', 'HR-CNN'}: @@ -107,6 +110,7 @@ def save_ranks(pred_scores, output_path): n_gpu = torch.cuda.device_count() args.device = device args.n_gpu = n_gpu + args.num_labels = 1 print('Device:', str(device).upper()) print('Number of GPUs:', n_gpu) @@ -140,6 +144,10 @@ def save_ranks(pred_scores, output_path): 'KimCNN': KimCNN, 'HAN': HAN, 'XML-CNN': XmlCNN, + 'BERT-Base': Bert, + 'BERT-Large': Bert, + 'HBERT-Base': HierarchicalBert, + 'HBERT-Large': HierarchicalBert } if args.dataset not in dataset_map: @@ -149,7 +157,7 @@ def save_ranks(pred_scores, output_path): if args.model in {'HAN', 'HR-CNN'}: dataset = dataset_map_hier[args.dataset] - elif args.model in {'BERT-Base', 'BERT-Large'}: + elif args.model in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: dataset = dataset_map_bert[args.dataset] else: dataset = dataset_map[args.dataset] @@ -168,7 +176,7 @@ def save_ranks(pred_scores, output_path): else: pred_scores = dict() - if args.model in {'BERT-Base', 'BERT-Large'}: + if args.model in {'BERT-Base', 'BERT-Large', 'HBERT-Base', 'HBERT-Large'}: if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter:", args.gradient_accumulation_steps) @@ -190,7 +198,11 @@ def save_ranks(pred_scores, output_path): train_examples = processor.get_train_examples(args.data_dir, topic=topic) num_train_optimization_steps = int( len(train_examples) / args.batch_size / args.gradient_accumulation_steps) * args.epochs - model = Bert.from_pretrained(variant, cache_dir=args.cache_dir, num_labels=1) + + if args.model in {'BERT-Base', 'BERT-Large'}: + model = model_map[args.model].from_pretrained(variant, cache_dir=args.cache_dir, num_labels=1) + else: + model = model_map[args.model](args, variant=variant, cache_dir=args.cache_dir) model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) @@ -219,10 +231,12 @@ def save_ranks(pred_scores, output_path): 'is_lowercase': True, 'gradient_accumulation_steps': args.gradient_accumulation_steps, 'max_seq_length': args.max_seq_length, + 'max_doc_length': args.max_doc_length, 'data_dir': args.data_dir, 'model_outfile': args.save_path, 'n_gpu': n_gpu, - 'device': args.device + 'device': args.device, + 'is_hierarchical': True if args.model in {'HBERT-Base', 'HBERT-Large'} else False } evaluator_config = { @@ -235,9 +249,11 @@ def save_ranks(pred_scores, output_path): 'is_lowercase': True, 'gradient_accumulation_steps': args.gradient_accumulation_steps, 'max_seq_length': args.max_seq_length, + 'max_doc_length': args.max_doc_length, 'data_dir': args.data_dir, 'n_gpu': n_gpu, - 'device': args.device + 'device': args.device, + 'is_hierarchical': True if args.model in {'HBERT-Base', 'HBERT-Large'} else False } dev_evaluator = RelevanceTransferEvaluator(model, evaluator_config, dataset=dataset, embedding=None, diff --git a/tasks/relevance_transfer/args.py b/tasks/relevance_transfer/args.py index ed3a3f7..8b1c65c 100644 --- a/tasks/relevance_transfer/args.py +++ b/tasks/relevance_transfer/args.py @@ -12,8 +12,10 @@ def get_args(): parser.add_argument('--mode', type=str, default='static', choices=['rand', 'static', 'non-static', 'multichannel']) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--seed', type=int, default=3435) - parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN', 'BERT-Base', 'BERT-Large']) parser.add_argument('--dataset', type=str, default='Robust04', choices=['Robust04', 
'Robust05', 'Robust45']) + parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN', 'BERT-Base', + 'BERT-Large', 'HBERT-Base', 'HBERT-Large']) + parser.add_argument('--dev_every', type=int, default=30) parser.add_argument('--log_every', type=int, default=10) parser.add_argument('--patience', type=int, default=5) @@ -64,6 +66,7 @@ def get_args(): parser.add_argument('--cache-dir', default='cache', type=str) parser.add_argument('--variant', type=str, choices=['bert-base-uncased', 'bert-large-uncased', 'bert-base-cased', 'bert-large-cased']) parser.add_argument('--max-seq-length', default=128, type=int) + parser.add_argument('--max-doc-length', default=16, type=int) parser.add_argument('--warmup-proportion', default=0.1, type=float) parser.add_argument('--gradient-accumulation-steps', type=int, default=1) parser.add_argument('--loss-scale', type=float, default=0) From 1b031a86021e86cdaf91446805c596251770bd3f Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 1 Sep 2019 07:44:07 -0400 Subject: [PATCH 15/22] Add learning rate multiplier --- models/hbert/__main__.py | 9 +++++++-- models/hbert/args.py | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/models/hbert/__main__.py b/models/hbert/__main__.py index b7de67c..2815558 100644 --- a/models/hbert/__main__.py +++ b/models/hbert/__main__.py @@ -119,10 +119,15 @@ def evaluate_split(model, processor, args, split='dev'): # Prepare optimizer param_optimizer = list(model.named_parameters()) + no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + {'params': [p for n, p in param_optimizer if 'sentence_encoder' not in n], + 'lr': args.lr * args.lr_mult, 'weight_decay': 0.0}, + {'params': [p for n, p in param_optimizer if 'sentence_encoder' in n and not any(nd in n for nd in no_decay)], + 'weight_decay': 0.01}, + {'params': [p for n, p in param_optimizer if 'sentence_encoder' in n and any(nd in n for nd in no_decay)], + 'weight_decay': 0.0}] if args.fp16: try: diff --git a/models/hbert/args.py b/models/hbert/args.py index 9c2771f..1040f8f 100644 --- a/models/hbert/args.py +++ b/models/hbert/args.py @@ -15,6 +15,7 @@ def get_args(): parser.add_argument('--fp16', action='store_true', help='enable 16-bit floating point precision') parser.add_argument('--loss-scale', type=float, default=0, help='loss scaling to improve fp16 numeric stability') + parser.add_argument('--lr-mult', type=float, default=1) parser.add_argument('--dropout', type=float, default=0.5) parser.add_argument('--dropblock', type=float, default=0.0) parser.add_argument('--dropblock-size', type=int, default=7) From e81cfff25f0ef2dd56c507bf2ce1c66037c8d37e Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 1 Sep 2019 23:10:52 -0400 Subject: [PATCH 16/22] Add lr multiplier for relevance transfer --- models/hbert/args.py | 1 - tasks/relevance_transfer/__main__.py | 10 ++++++++-- tasks/relevance_transfer/args.py | 1 + 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/models/hbert/args.py b/models/hbert/args.py index 1040f8f..f1da692 100644 --- a/models/hbert/args.py +++ b/models/hbert/args.py @@ -38,6 +38,5 @@ def get_args(): parser.add_argument('--gradient-accumulation-steps', type=int, default=1, help='number of updates steps to accumulate before performing a 
backward/update pass') - args = parser.parse_args() return args diff --git a/tasks/relevance_transfer/__main__.py b/tasks/relevance_transfer/__main__.py index a2aef49..dcf777c 100644 --- a/tasks/relevance_transfer/__main__.py +++ b/tasks/relevance_transfer/__main__.py @@ -211,9 +211,15 @@ def save_ranks(pred_scores, output_path): param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ - {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], + {'params': [p for n, p in param_optimizer if + 'sentence_encoder' not in n], + 'lr': args.lr * args.lr_mult, 'weight_decay': 0.0}, + {'params': [p for n, p in param_optimizer if + 'sentence_encoder' in n and not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, - {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}] + {'params': [p for n, p in param_optimizer if + 'sentence_encoder' in n and any(nd in n for nd in no_decay)], + 'weight_decay': 0.0}] optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, diff --git a/tasks/relevance_transfer/args.py b/tasks/relevance_transfer/args.py index 8b1c65c..8fd55b1 100644 --- a/tasks/relevance_transfer/args.py +++ b/tasks/relevance_transfer/args.py @@ -11,6 +11,7 @@ def get_args(): parser.add_argument('--batch-size', type=int, default=1024) parser.add_argument('--mode', type=str, default='static', choices=['rand', 'static', 'non-static', 'multichannel']) parser.add_argument('--lr', type=float, default=0.001) + parser.add_argument('--lr-mult', type=float, default=1) parser.add_argument('--seed', type=int, default=3435) parser.add_argument('--dataset', type=str, default='Robust04', choices=['Robust04', 'Robust05', 'Robust45']) parser.add_argument('--model', type=str, default='KimCNN', choices=['RegLSTM', 'KimCNN', 'HAN', 'XML-CNN', 'BERT-Base', From 47586075576aac7ba2672892c91c7f33f782c5a7 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sat, 7 Sep 2019 15:03:01 -0400 Subject: [PATCH 17/22] Add MLP model --- datasets/reuters.py | 40 ++++++++++++ models/args.py | 2 +- models/mlp/README.md | 48 ++++++++++++++ models/mlp/__init__.py | 0 models/mlp/__main__.py | 141 +++++++++++++++++++++++++++++++++++++++++ models/mlp/args.py | 20 ++++++ models/mlp/model.py | 18 ++++++ 7 files changed, 268 insertions(+), 1 deletion(-) create mode 100644 models/mlp/README.md create mode 100644 models/mlp/__init__.py create mode 100644 models/mlp/__main__.py create mode 100644 models/mlp/args.py create mode 100644 models/mlp/model.py diff --git a/datasets/reuters.py b/datasets/reuters.py index 8bdab2c..82a3ff6 100644 --- a/datasets/reuters.py +++ b/datasets/reuters.py @@ -1,5 +1,8 @@ +import json import os import re +import sys +import csv import numpy as np import torch @@ -7,6 +10,8 @@ from torchtext.data.iterator import BucketIterator from torchtext.vocab import Vectors +csv.field_size_limit(sys.maxsize) + def clean_string(string): """ @@ -22,6 +27,11 @@ def split_sents(string): return string.strip().split('.') +def load_json(string): + split_val = json.loads(string) + return np.asarray(split_val, dtype=np.float32) + + def char_quantize(string, max_length=1000): identity = np.identity(len(ReutersCharQuantized.ALPHABET)) quantized_string = np.array([identity[ReutersCharQuantized.ALPHABET[char]] for char in list(string.lower()) if char in ReutersCharQuantized.ALPHABET], dtype=np.float32) @@ -100,6 +110,36 @@ def iters(cls, path, vectors_name, vectors_cache, 
batch_size=64, shuffle=True, d return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, device=device) +class ReutersTFIDF(Reuters): + VOCAB_SIZE = 30485 + TEXT_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=load_json, dtype=torch.float) + + @classmethod + def splits(cls, path, train=os.path.join('Reuters', 'tfidf_train.tsv'), + validation=os.path.join('Reuters', 'tfidf_dev.tsv'), + test=os.path.join('Reuters', 'tfidf_test.tsv'), **kwargs): + return super(Reuters, cls).splits( + path, train=train, validation=validation, test=test, + format='tsv', fields=[('label', cls.LABEL_FIELD), ('text', cls.TEXT_FIELD)] + ) + + @classmethod + def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, device=0, vectors=None, + unk_init=torch.Tensor.zero_): + """ + :param path: directory containing train, test, dev files + :param vectors_name: name of word vectors file + :param vectors_cache: path to directory containing word vectors file + :param batch_size: batch size + :param device: GPU device + :param vectors: custom vectors - either predefined torchtext vectors or your own custom Vector classes + :param unk_init: function used to generate vector for OOV words + :return: + """ + train, val, test = cls.splits(path) + return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, device=device) + + class ReutersHierarchical(Reuters): NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) diff --git a/models/args.py b/models/args.py index e80ae2d..d1c9ad1 100644 --- a/models/args.py +++ b/models/args.py @@ -8,7 +8,7 @@ def get_args(): parser.add_argument('--no-cuda', action='store_false', dest='cuda') parser.add_argument('--gpu', type=int, default=0) parser.add_argument('--epochs', type=int, default=50) - parser.add_argument('--batch-size', type=int, default=1024) + parser.add_argument('--batch-size', type=int, default=32) parser.add_argument('--lr', type=float, default=0.001) parser.add_argument('--seed', type=int, default=3435) parser.add_argument('--patience', type=int, default=5) diff --git a/models/mlp/README.md b/models/mlp/README.md new file mode 100644 index 0000000..1b53cac --- /dev/null +++ b/models/mlp/README.md @@ -0,0 +1,48 @@ +# KimCNN + +Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch and Torchtext. + +## Quick Start + +To run the model on the Reuters dataset, just run the following from the working directory: + +``` +python -m models.kim_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --seed 3435 +``` + +The best model weights will be saved in + +``` +models/kim_cnn/saves/Reuters/best_model.pt +``` + +To test the model, you can use the following command. + +``` +python -m models.kim_cnn --dataset Reuters --mode static --batch-size 32 --trained-model models/kim_cnn/saves/Reuters/best_model.pt --seed 3435 +``` + +## Model Types + +- rand: All words are randomly initialized and then modified during training. +- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). + All words, including the unknown ones that are initialized with zero, are kept static and only the other + parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. 
+- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each + filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the + model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with + word2vec. + +## Dataset + +We experiment the model on the following datasets: + +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 + +## Settings + +Adam is used for training. diff --git a/models/mlp/__init__.py b/models/mlp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/mlp/__main__.py b/models/mlp/__main__.py new file mode 100644 index 0000000..c0eb28d --- /dev/null +++ b/models/mlp/__main__.py @@ -0,0 +1,141 @@ +import os +import random +from copy import deepcopy + +import numpy as np +import torch.onnx + +from common.evaluate import EvaluatorFactory +from common.train import TrainerFactory +from datasets.aapd import AAPD +from datasets.imdb import IMDB +from datasets.reuters import ReutersTFIDF +from datasets.yelp2014 import Yelp2014 +from models.mlp.args import get_args +from models.mlp.model import MLP + + +def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device, is_multilabel): + saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, batch_size, device) + if hasattr(saved_model_evaluator, 'is_multilabel'): + saved_model_evaluator.is_multilabel = is_multilabel + if hasattr(saved_model_evaluator, 'ignore_lengths'): + saved_model_evaluator.ignore_lengths = True + + scores, metric_names = saved_model_evaluator.get_scores() + print('Evaluation metrics for', split_name) + print(metric_names) + print(scores) + + +if __name__ == '__main__': + # Set default configuration in args.py + args = get_args() + + # Set random seed for reproducibility + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.backends.cudnn.deterministic = True + + if not args.cuda: + args.gpu = -1 + + if torch.cuda.is_available() and args.cuda: + print('Note: You are using GPU for training') + torch.cuda.set_device(args.gpu) + torch.cuda.manual_seed(args.seed) + args.gpu = torch.device('cuda:%d' % args.gpu) + + if torch.cuda.is_available() and not args.cuda: + print('Warning: Using CPU for training') + + dataset_map = { + 'Reuters': ReutersTFIDF, + 'AAPD': AAPD, + 'IMDB': IMDB, + 'Yelp2014': Yelp2014 + } + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + else: + dataset_class = dataset_map[args.dataset] + train_iter, dev_iter, test_iter = dataset_map[args.dataset].iters(args.data_dir, None, None, + batch_size=args.batch_size, + device=args.gpu, + unk_init=None) + + config = deepcopy(args) + config.dataset = train_iter.dataset + config.target_class = train_iter.dataset.NUM_CLASSES + config.words_num = train_iter.dataset.VOCAB_SIZE + + print('Dataset:', args.dataset) + print('No. of target classes:', train_iter.dataset.NUM_CLASSES) + print('No. of train instances', len(train_iter.dataset)) + print('No. of dev instances', len(dev_iter.dataset)) + print('No. 
of test instances', len(test_iter.dataset)) + + if args.resume_snapshot: + if args.cuda: + model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu)) + else: + model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage) + else: + model = MLP(config) + if args.cuda: + model.cuda() + + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + + parameter = filter(lambda p: p.requires_grad, model.parameters()) + optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) + + train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, train_iter, args.batch_size, args.gpu) + test_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, test_iter, args.batch_size, args.gpu) + dev_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, dev_iter, args.batch_size, args.gpu) + + if hasattr(train_evaluator, 'is_multilabel'): + train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + if hasattr(test_evaluator, 'is_multilabel'): + test_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + if hasattr(dev_evaluator, 'is_multilabel'): + dev_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + if hasattr(dev_evaluator, 'ignore_lengths'): + dev_evaluator.ignore_lengths = True + if hasattr(test_evaluator, 'ignore_lengths'): + test_evaluator.ignore_lengths = True + + trainer_config = { + 'optimizer': optimizer, + 'batch_size': args.batch_size, + 'log_interval': args.log_every, + 'patience': args.patience, + 'model_outfile': args.save_path, + 'is_multilabel': dataset_class.IS_MULTILABEL, + 'ignore_lengths': True + } + + trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, test_evaluator, dev_evaluator) + + if not args.trained_model: + trainer.train(args.epochs) + else: + if args.cuda: + model = torch.load(args.trained_model, map_location=lambda storage, location: storage.cuda(args.gpu)) + else: + model = torch.load(args.trained_model, map_location=lambda storage, location: storage) + + # Calculate dev and test metrics + if hasattr(trainer, 'snapshot_path'): + model = torch.load(trainer.snapshot_path) + + evaluate_dataset('dev', dataset_map[args.dataset], model, None, dev_iter, args.batch_size, + is_multilabel=dataset_class.IS_MULTILABEL, + device=args.gpu) + evaluate_dataset('test', dataset_map[args.dataset], model, None, test_iter, args.batch_size, + is_multilabel=dataset_class.IS_MULTILABEL, + device=args.gpu) diff --git a/models/mlp/args.py b/models/mlp/args.py new file mode 100644 index 0000000..b05c95d --- /dev/null +++ b/models/mlp/args.py @@ -0,0 +1,20 @@ +import os + +import models.args + + +def get_args(): + parser = models.args.get_args() + + parser.add_argument('--dataset', type=str, default='Reuters', choices=['Reuters', 'AAPD', 'IMDB', 'Yelp2014']) + parser.add_argument('--embed-dim', type=int, default=300) + parser.add_argument('--dropout', type=float, default=0.5) + parser.add_argument('--epoch-decay', type=int, default=15) + parser.add_argument('--weight-decay', type=float, default=0) + + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'mlp')) + parser.add_argument('--resume-snapshot', type=str) + parser.add_argument('--trained-model', type=str) + + args = parser.parse_args() + return args diff --git a/models/mlp/model.py 
b/models/mlp/model.py new file mode 100644 index 0000000..dcfae1b --- /dev/null +++ b/models/mlp/model.py @@ -0,0 +1,18 @@ +import torch +import torch.nn as nn + + +class MLP(nn.Module): + + def __init__(self, config): + super().__init__() + dataset = config.dataset + target_class = config.target_class + # self.dropout = nn.Dropout(config.dropout) + self.fc1 = nn.Linear(dataset.VOCAB_SIZE, target_class) + + def forward(self, x, **kwargs): + x = torch.squeeze(x) # (batch, vocab_size) + # x = self.dropout(x) + logit = self.fc1(x) # (batch, target_size) + return logit From 289cde0d013842a1dfcfd24e53fa227929164456 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 8 Sep 2019 17:34:11 -0400 Subject: [PATCH 18/22] Add fastText model --- models/fasttext/README.md | 48 ++++++++++++ models/fasttext/__init__.py | 0 models/fasttext/__main__.py | 150 ++++++++++++++++++++++++++++++++++++ models/fasttext/args.py | 23 ++++++ models/fasttext/model.py | 44 +++++++++++ 5 files changed, 265 insertions(+) create mode 100644 models/fasttext/README.md create mode 100644 models/fasttext/__init__.py create mode 100644 models/fasttext/__main__.py create mode 100644 models/fasttext/args.py create mode 100644 models/fasttext/model.py diff --git a/models/fasttext/README.md b/models/fasttext/README.md new file mode 100644 index 0000000..1b53cac --- /dev/null +++ b/models/fasttext/README.md @@ -0,0 +1,48 @@ +# KimCNN + +Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch and Torchtext. + +## Quick Start + +To run the model on the Reuters dataset, just run the following from the working directory: + +``` +python -m models.kim_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --seed 3435 +``` + +The best model weights will be saved in + +``` +models/kim_cnn/saves/Reuters/best_model.pt +``` + +To test the model, you can use the following command. + +``` +python -m models.kim_cnn --dataset Reuters --mode static --batch-size 32 --trained-model models/kim_cnn/saves/Reuters/best_model.pt --seed 3435 +``` + +## Model Types + +- rand: All words are randomly initialized and then modified during training. +- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). + All words, including the unknown ones that are initialized with zero, are kept static and only the other + parameters of the model are learned. +- non-static: Same as above but the pretrained vectors are fine-tuned for each task. +- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each + filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the + model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with + word2vec. + +## Dataset + +We experiment the model on the following datasets: + +- Reuters (ModApte) +- AAPD +- IMDB +- Yelp 2014 + +## Settings + +Adam is used for training. 
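Both bag-of-words baselines added in PATCH 17/22 and PATCH 18/22 collapse a document into a single dense vector before a one-layer linear classifier: the MLP consumes pre-computed TF-IDF vectors stored as JSON arrays in TSV files (ReutersTFIDF / load_json above), while fastText averages word embeddings (models/fasttext/model.py below). A minimal sketch of the TF-IDF path, using a hypothetical four-term row purely for illustration (the real Reuters files use VOCAB_SIZE = 30485):

```
import json

import numpy as np
import torch

# Hypothetical tfidf_train.tsv row: a multi-hot label string and a JSON array of TF-IDF weights.
label, text = '0101', '[0.0, 0.37, 0.0, 0.81]'

# Same logic as process_labels and load_json in datasets/reuters.py.
features = np.asarray(json.loads(text), dtype=np.float32)  # shape: (vocab_size,)
labels = [float(x) for x in label]                          # one float per class

# The MLP added in PATCH 17/22 is a single linear layer over the dense TF-IDF vector.
fc1 = torch.nn.Linear(features.shape[0], len(labels))
logits = fc1(torch.from_numpy(features))                    # shape: (num_classes,)
```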
diff --git a/models/fasttext/__init__.py b/models/fasttext/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/fasttext/__main__.py b/models/fasttext/__main__.py new file mode 100644 index 0000000..c348bbb --- /dev/null +++ b/models/fasttext/__main__.py @@ -0,0 +1,150 @@ +import os +import random +from copy import deepcopy + +import numpy as np +import torch +import torch.onnx + +from common.evaluate import EvaluatorFactory +from common.train import TrainerFactory +from datasets.aapd import AAPD +from datasets.imdb import IMDB +from datasets.reuters import ReutersBOW +from datasets.yelp2014 import Yelp2014 +from models.fasttext.args import get_args +from models.fasttext.model import FastText + + +class UnknownWordVecCache(object): + """ + Caches the first randomly generated word vector for a certain size to make it is reused. + """ + cache = {} + + @classmethod + def unk(cls, tensor): + size_tup = tuple(tensor.size()) + if size_tup not in cls.cache: + cls.cache[size_tup] = torch.Tensor(tensor.size()) + cls.cache[size_tup].uniform_(-0.25, 0.25) + return cls.cache[size_tup] + + +def evaluate_dataset(split_name, dataset_cls, model, embedding, loader, batch_size, device, is_multilabel): + saved_model_evaluator = EvaluatorFactory.get_evaluator(dataset_cls, model, embedding, loader, batch_size, device) + if hasattr(saved_model_evaluator, 'is_multilabel'): + saved_model_evaluator.is_multilabel = is_multilabel + + scores, metric_names = saved_model_evaluator.get_scores() + print('Evaluation metrics for', split_name) + print(metric_names) + print(scores) + + +if __name__ == '__main__': + # Set default configuration in args.py + args = get_args() + + # Set random seed for reproducibility + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + torch.backends.cudnn.deterministic = True + + if not args.cuda: + args.gpu = -1 + + if torch.cuda.is_available() and args.cuda: + print('Note: You are using GPU for training') + torch.cuda.set_device(args.gpu) + torch.cuda.manual_seed(args.seed) + args.gpu = torch.device('cuda:%d' % args.gpu) + + if torch.cuda.is_available() and not args.cuda: + print('Warning: Using CPU for training') + + dataset_map = { + 'Reuters': ReutersBOW, + 'AAPD': AAPD, + 'IMDB': IMDB, + 'Yelp2014': Yelp2014 + } + + if args.dataset not in dataset_map: + raise ValueError('Unrecognized dataset') + else: + dataset_class = dataset_map[args.dataset] + train_iter, dev_iter, test_iter = dataset_map[args.dataset].iters(args.data_dir, args.word_vectors_file, + args.word_vectors_dir, + batch_size=args.batch_size, device=args.gpu, + unk_init=UnknownWordVecCache.unk) + + config = deepcopy(args) + config.dataset = train_iter.dataset + config.target_class = train_iter.dataset.NUM_CLASSES + config.words_num = len(train_iter.dataset.TEXT_FIELD.vocab) + + print('Dataset:', args.dataset) + print('No. of target classes:', train_iter.dataset.NUM_CLASSES) + print('No. of train instances', len(train_iter.dataset)) + print('No. of dev instances', len(dev_iter.dataset)) + print('No. 
of test instances', len(test_iter.dataset)) + + if args.resume_snapshot: + if args.cuda: + model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage.cuda(args.gpu)) + else: + model = torch.load(args.resume_snapshot, map_location=lambda storage, location: storage) + else: + model = FastText(config) + if args.cuda: + model.cuda() + + if not args.trained_model: + save_path = os.path.join(args.save_path, dataset_map[args.dataset].NAME) + os.makedirs(save_path, exist_ok=True) + + parameter = filter(lambda p: p.requires_grad, model.parameters()) + optimizer = torch.optim.Adam(parameter, lr=args.lr, weight_decay=args.weight_decay) + + train_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, train_iter, args.batch_size, args.gpu) + test_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, test_iter, args.batch_size, args.gpu) + dev_evaluator = EvaluatorFactory.get_evaluator(dataset_map[args.dataset], model, None, dev_iter, args.batch_size, args.gpu) + + if hasattr(train_evaluator, 'is_multilabel'): + train_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + if hasattr(test_evaluator, 'is_multilabel'): + test_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + if hasattr(dev_evaluator, 'is_multilabel'): + dev_evaluator.is_multilabel = dataset_class.IS_MULTILABEL + + trainer_config = { + 'optimizer': optimizer, + 'batch_size': args.batch_size, + 'log_interval': args.log_every, + 'patience': args.patience, + 'model_outfile': args.save_path, + 'is_multilabel': dataset_class.IS_MULTILABEL + } + + trainer = TrainerFactory.get_trainer(args.dataset, model, None, train_iter, trainer_config, train_evaluator, test_evaluator, dev_evaluator) + + if not args.trained_model: + trainer.train(args.epochs) + else: + if args.cuda: + model = torch.load(args.trained_model, map_location=lambda storage, location: storage.cuda(args.gpu)) + else: + model = torch.load(args.trained_model, map_location=lambda storage, location: storage) + + # Calculate dev and test metrics + if hasattr(trainer, 'snapshot_path'): + model = torch.load(trainer.snapshot_path) + + evaluate_dataset('dev', dataset_map[args.dataset], model, None, dev_iter, args.batch_size, + is_multilabel=dataset_class.IS_MULTILABEL, + device=args.gpu) + evaluate_dataset('test', dataset_map[args.dataset], model, None, test_iter, args.batch_size, + is_multilabel=dataset_class.IS_MULTILABEL, + device=args.gpu) diff --git a/models/fasttext/args.py b/models/fasttext/args.py new file mode 100644 index 0000000..85c7ce5 --- /dev/null +++ b/models/fasttext/args.py @@ -0,0 +1,23 @@ +import os + +import models.args + + +def get_args(): + parser = models.args.get_args() + + parser.add_argument('--dataset', type=str, default='Reuters', choices=['Reuters', 'AAPD', 'IMDB', 'Yelp2014']) + parser.add_argument('--mode', type=str, default='rand', choices=['rand', 'static', 'non-static']) + parser.add_argument('--words-dim', type=int, default=300) + parser.add_argument('--dropout', type=float, default=0.5) + parser.add_argument('--epoch-decay', type=int, default=15) + parser.add_argument('--weight-decay', type=float, default=0) + + parser.add_argument('--word-vectors-dir', default=os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')) + parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt') + parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'kim_cnn')) + parser.add_argument('--resume-snapshot', type=str) + 
parser.add_argument('--trained-model', type=str) + + args = parser.parse_args() + return args diff --git a/models/fasttext/model.py b/models/fasttext/model.py new file mode 100644 index 0000000..450561a --- /dev/null +++ b/models/fasttext/model.py @@ -0,0 +1,44 @@ +import torch +import torch.nn as nn + +import torch.nn.functional as F + + +class FastText(nn.Module): + + def __init__(self, config): + super().__init__() + dataset = config.dataset + target_class = config.target_class + words_num = config.words_num + words_dim = config.words_dim + self.mode = config.mode + + if config.mode == 'rand': + rand_embed_init = torch.Tensor(words_num, words_dim).uniform_(-0.25, 0.25) + self.embed = nn.Embedding.from_pretrained(rand_embed_init, freeze=False) + elif config.mode == 'static': + self.static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=True) + elif config.mode == 'non-static': + self.non_static_embed = nn.Embedding.from_pretrained(dataset.TEXT_FIELD.vocab.vectors, freeze=False) + else: + print("Unsupported Mode") + exit() + + self.dropout = nn.Dropout(config.dropout) + self.fc1 = nn.Linear(words_dim, target_class) + + def forward(self, x, **kwargs): + if self.mode == 'rand': + x = self.embed(x) # (batch, sent_len, embed_dim) + elif self.mode == 'static': + x = self.static_embed(x) # (batch, sent_len, embed_dim) + elif self.mode == 'non-static': + x = self.non_static_embed(x) # (batch, sent_len, embed_dim) + + x = F.avg_pool2d(x, (x.shape[1], 1)).squeeze(1) # (batch, embed_dim) + + logit = self.fc1(x) # (batch, target_size) + return logit + + From 12a09daa8c9197df679dd789e4433b2db5713f2a Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 8 Sep 2019 17:38:04 -0400 Subject: [PATCH 19/22] Add Reuters bag-of-words dataset class --- datasets/imdb.py | 21 ++++++--------------- datasets/reuters.py | 22 +++++++++++++++------- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/datasets/imdb.py b/datasets/imdb.py index c82963c..c4e354c 100644 --- a/datasets/imdb.py +++ b/datasets/imdb.py @@ -6,7 +6,7 @@ from torchtext.data.iterator import BucketIterator from torchtext.vocab import Vectors -from datasets.reuters import clean_string, split_sents +from datasets.reuters import clean_string, split_sents, process_labels, generate_ngrams def char_quantize(string, max_length=500): @@ -18,15 +18,6 @@ def char_quantize(string, max_length=500): return np.concatenate((quantized_string, np.zeros((max_length - len(quantized_string), len(IMDBCharQuantized.ALPHABET)), dtype=np.float32))) -def process_labels(string): - """ - Returns the label string as a list of integers - :param string: - :return: - """ - return [float(x) for x in string] - - class IMDB(TabularDataset): NAME = 'IMDB' NUM_CLASSES = 10 @@ -70,6 +61,11 @@ def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, d sort_within_batch=True, device=device) +class IMDBHierarchical(IMDB): + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) + + class IMDBCharQuantized(IMDB): ALPHABET = dict(map(lambda t: (t[1], t[0]), enumerate(list("""abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""")))) TEXT_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=char_quantize) @@ -85,8 +81,3 @@ def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, d """ train, val, test = cls.splits(path) return BucketIterator.splits((train, val, test), 
batch_size=batch_size, repeat=False, shuffle=shuffle, device=device) - - -class IMDBHierarchical(IMDB): - NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) - TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) diff --git a/datasets/reuters.py b/datasets/reuters.py index 82a3ff6..40c1c16 100644 --- a/datasets/reuters.py +++ b/datasets/reuters.py @@ -27,6 +27,12 @@ def split_sents(string): return string.strip().split('.') +def generate_ngrams(tokens, n=2): + n_grams = zip(*[tokens[i:] for i in range(n)]) + tokens.extend(['-'.join(x) for x in n_grams]) + return tokens + + def load_json(string): split_val = json.loads(string) return np.asarray(split_val, dtype=np.float32) @@ -44,8 +50,6 @@ def char_quantize(string, max_length=1000): def process_labels(string): """ Returns the label string as a list of integers - :param string: - :return: """ return [float(x) for x in string] @@ -93,6 +97,15 @@ def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, d sort_within_batch=True, device=device) +class ReutersBOW(Reuters): + TEXT_FIELD = Field(batch_first=True, tokenize=clean_string, preprocessing=generate_ngrams, include_lengths=True) + + +class ReutersHierarchical(Reuters): + NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) + TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) + + class ReutersCharQuantized(Reuters): ALPHABET = dict(map(lambda t: (t[1], t[0]), enumerate(list("""abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}""")))) TEXT_FIELD = Field(sequential=False, use_vocab=False, batch_first=True, preprocessing=char_quantize) @@ -138,8 +151,3 @@ def iters(cls, path, vectors_name, vectors_cache, batch_size=64, shuffle=True, d """ train, val, test = cls.splits(path) return BucketIterator.splits((train, val, test), batch_size=batch_size, repeat=False, shuffle=shuffle, device=device) - - -class ReutersHierarchical(Reuters): - NESTING_FIELD = Field(batch_first=True, tokenize=clean_string) - TEXT_FIELD = NestedField(NESTING_FIELD, tokenize=split_sents) From bcf1dca89d455e77be0f399aefc4c9d305a52d88 Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 8 Sep 2019 18:07:43 -0400 Subject: [PATCH 20/22] Add input dropout for MLP --- models/mlp/args.py | 2 +- models/mlp/model.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/models/mlp/args.py b/models/mlp/args.py index b05c95d..f04fc17 100644 --- a/models/mlp/args.py +++ b/models/mlp/args.py @@ -8,7 +8,7 @@ def get_args(): parser.add_argument('--dataset', type=str, default='Reuters', choices=['Reuters', 'AAPD', 'IMDB', 'Yelp2014']) parser.add_argument('--embed-dim', type=int, default=300) - parser.add_argument('--dropout', type=float, default=0.5) + parser.add_argument('--dropout', type=float, default=0) parser.add_argument('--epoch-decay', type=int, default=15) parser.add_argument('--weight-decay', type=float, default=0) diff --git a/models/mlp/model.py b/models/mlp/model.py index dcfae1b..0ed1805 100644 --- a/models/mlp/model.py +++ b/models/mlp/model.py @@ -8,11 +8,11 @@ def __init__(self, config): super().__init__() dataset = config.dataset target_class = config.target_class - # self.dropout = nn.Dropout(config.dropout) + self.dropout = nn.Dropout(config.dropout) self.fc1 = nn.Linear(dataset.VOCAB_SIZE, target_class) def forward(self, x, **kwargs): x = torch.squeeze(x) # (batch, vocab_size) - # x = self.dropout(x) + x = self.dropout(x) logit = self.fc1(x) # (batch, target_size) return logit From 448b087d123871aa3d78534499e343cba4554d84 
Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Sun, 8 Sep 2019 18:15:50 -0400 Subject: [PATCH 21/22] Remove duplicate README files --- models/fasttext/README.md | 48 --------------------------------------- models/mlp/README.md | 48 --------------------------------------- 2 files changed, 96 deletions(-) delete mode 100644 models/fasttext/README.md delete mode 100644 models/mlp/README.md diff --git a/models/fasttext/README.md b/models/fasttext/README.md deleted file mode 100644 index 1b53cac..0000000 --- a/models/fasttext/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# KimCNN - -Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch and Torchtext. - -## Quick Start - -To run the model on the Reuters dataset, just run the following from the working directory: - -``` -python -m models.kim_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --seed 3435 -``` - -The best model weights will be saved in - -``` -models/kim_cnn/saves/Reuters/best_model.pt -``` - -To test the model, you can use the following command. - -``` -python -m models.kim_cnn --dataset Reuters --mode static --batch-size 32 --trained-model models/kim_cnn/saves/Reuters/best_model.pt --seed 3435 -``` - -## Model Types - -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). - All words, including the unknown ones that are initialized with zero, are kept static and only the other - parameters of the model are learned. -- non-static: Same as above but the pretrained vectors are fine-tuned for each task. -- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each - filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the - model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with - word2vec. - -## Dataset - -We experiment the model on the following datasets: - -- Reuters (ModApte) -- AAPD -- IMDB -- Yelp 2014 - -## Settings - -Adam is used for training. diff --git a/models/mlp/README.md b/models/mlp/README.md deleted file mode 100644 index 1b53cac..0000000 --- a/models/mlp/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# KimCNN - -Implementation for Convolutional Neural Networks for Sentence Classification of [Kim (2014)](https://arxiv.org/abs/1408.5882) with PyTorch and Torchtext. - -## Quick Start - -To run the model on the Reuters dataset, just run the following from the working directory: - -``` -python -m models.kim_cnn --mode static --dataset Reuters --batch-size 32 --lr 0.01 --epochs 30 --dropout 0.5 --seed 3435 -``` - -The best model weights will be saved in - -``` -models/kim_cnn/saves/Reuters/best_model.pt -``` - -To test the model, you can use the following command. - -``` -python -m models.kim_cnn --dataset Reuters --mode static --batch-size 32 --trained-model models/kim_cnn/saves/Reuters/best_model.pt --seed 3435 -``` - -## Model Types - -- rand: All words are randomly initialized and then modified during training. -- static: A model with pre-trained vectors from [word2vec](https://code.google.com/archive/p/word2vec/). - All words, including the unknown ones that are initialized with zero, are kept static and only the other - parameters of the model are learned. 
-- non-static: Same as above but the pretrained vectors are fine-tuned for each task. -- multichannel: A model with two sets of word vectors. Each set of vectors is treated as a 'channel' and each - filter is applied to both channels, but gradients are back-propagated only through one of the channels. Hence the - model is able to fine-tune one set of vectors while keeping the other static. Both channels are initialized with - word2vec. - -## Dataset - -We experiment the model on the following datasets: - -- Reuters (ModApte) -- AAPD -- IMDB -- Yelp 2014 - -## Settings - -Adam is used for training. From 71a2df3173c20aff8b8a0cc77510ee0b75f41ffa Mon Sep 17 00:00:00 2001 From: Achyudh Ram Date: Thu, 31 Oct 2019 22:01:03 -0400 Subject: [PATCH 22/22] Remove model caching mechanism for bert and hbert Fixes issue #9 --- models/bert/model.py | 13 ++- utils/io.py | 180 +----------------------------------------- utils/tokenization.py | 13 ++- 3 files changed, 13 insertions(+), 193 deletions(-) diff --git a/models/bert/model.py b/models/bert/model.py index 5456fbd..f2519a0 100644 --- a/models/bert/model.py +++ b/models/bert/model.py @@ -33,13 +33,12 @@ logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", + 'bert-base-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-uncased.tar.gz'), + 'bert-large-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-large-uncased.tar.gz'), + 'bert-base-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-cased.tar.gz'), + 'bert-large-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-large-cased.tar.gz'), + 'bert-base-multilingual-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-multilingual-uncased.tar.gz'), + 'bert-base-multilingual-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-multilingual-cased.tar.gz') } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' diff --git a/utils/io.py b/utils/io.py index 190c625..d531a50 100644 --- a/utils/io.py +++ b/utils/io.py @@ -15,21 +15,11 @@ from __future__ import absolute_import, division, print_function, unicode_literals -import json import logging import os -import shutil import sys -import tempfile -from functools import wraps -from hashlib import sha256 from io import open -import boto3 -import requests -from botocore.exceptions import ClientError -from tqdm import tqdm - from urllib.parse import urlparse try: @@ -43,50 +33,6 @@ logger = logging.getLogger(__name__) # pylint: disable=invalid-name -def url_to_filename(url, etag=None): - """ - Convert `url` into a hashed filename in a repeatable way. 
- If `etag` is specified, append its hash to the url's, delimited - by a period. - """ - url_bytes = url.encode('utf-8') - url_hash = sha256(url_bytes) - filename = url_hash.hexdigest() - - if etag: - etag_bytes = etag.encode('utf-8') - etag_hash = sha256(etag_bytes) - filename += '.' + etag_hash.hexdigest() - - return filename - - -def filename_to_url(filename, cache_dir=None): - """ - Return the url and etag (which may be ``None``) stored for `filename`. - Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BERT_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - cache_path = os.path.join(cache_dir, filename) - if not os.path.exists(cache_path): - raise EnvironmentError("file {} not found".format(cache_path)) - - meta_path = cache_path + '.json' - if not os.path.exists(meta_path): - raise EnvironmentError("file {} not found".format(meta_path)) - - with open(meta_path, encoding="utf-8") as meta_file: - metadata = json.load(meta_file) - url = metadata['url'] - etag = metadata['etag'] - - return url, etag - - def cached_path(url_or_filename, cache_dir=None): """ Given something that might be a URL (or might be a local path), @@ -103,10 +49,7 @@ def cached_path(url_or_filename, cache_dir=None): parsed = urlparse(url_or_filename) - if parsed.scheme in ('http', 'https', 's3'): - # URL, so get it from the cache (downloading if necessary) - return get_from_cache(url_or_filename, cache_dir) - elif os.path.exists(url_or_filename): + if os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': @@ -117,127 +60,6 @@ def cached_path(url_or_filename, cache_dir=None): raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) -def split_s3_path(url): - """Split a full s3 path into the bucket name and path.""" - parsed = urlparse(url) - if not parsed.netloc or not parsed.path: - raise ValueError("bad s3 path {}".format(url)) - bucket_name = parsed.netloc - s3_path = parsed.path - # Remove '/' at beginning of path. - if s3_path.startswith("/"): - s3_path = s3_path[1:] - return bucket_name, s3_path - - -def s3_request(func): - """ - Wrapper function for s3 requests in order to create more helpful error - messages. 
- """ - - @wraps(func) - def wrapper(url, *args, **kwargs): - try: - return func(url, *args, **kwargs) - except ClientError as exc: - if int(exc.response["Error"]["Code"]) == 404: - raise EnvironmentError("file {} not found".format(url)) - else: - raise - - return wrapper - - -@s3_request -def s3_etag(url): - """Check ETag on S3 object.""" - s3_resource = boto3.resource("s3") - bucket_name, s3_path = split_s3_path(url) - s3_object = s3_resource.Object(bucket_name, s3_path) - return s3_object.e_tag - - -@s3_request -def s3_get(url, temp_file): - """Pull a file directly from S3.""" - s3_resource = boto3.resource("s3") - bucket_name, s3_path = split_s3_path(url) - s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) - - -def http_get(url, temp_file): - req = requests.get(url, stream=True) - content_length = req.headers.get('Content-Length') - total = int(content_length) if content_length is not None else None - progress = tqdm(unit="B", total=total) - for chunk in req.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache(url, cache_dir=None): - """ - Given a URL, look for the corresponding dataset in the local cache. - If it's not there, download it. Then return the path to the cached file. - """ - if cache_dir is None: - cache_dir = PYTORCH_PRETRAINED_BERT_CACHE - if sys.version_info[0] == 3 and isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - # Get eTag to add to filename, if it exists. - if url.startswith("s3://"): - etag = s3_etag(url) - else: - response = requests.head(url, allow_redirects=True) - if response.status_code != 200: - raise IOError("HEAD request failed for url {} with status code {}" - .format(url, response.status_code)) - etag = response.headers.get("ETag") - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - if not os.path.exists(cache_path): - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. - with tempfile.NamedTemporaryFile() as temp_file: - logger.info("%s not found in cache, downloading to %s", url, temp_file.name) - - # GET file object - if url.startswith("s3://"): - s3_get(url, temp_file) - else: - http_get(url, temp_file) - - # we are copying the file before closing it, so flush to avoid truncation - temp_file.flush() - # shutil.copyfileobj() starts at the current position, so go to the start - temp_file.seek(0) - - logger.info("copying %s to cache at %s", temp_file.name, cache_path) - with open(cache_path, 'wb') as cache_file: - shutil.copyfileobj(temp_file, cache_file) - - logger.info("creating metadata file for %s", cache_path) - meta = {'url': url, 'etag': etag} - meta_path = cache_path + '.json' - with open(meta_path, 'w', encoding="utf-8") as meta_file: - json.dump(meta, meta_file) - - logger.info("removing temp file %s", temp_file.name) - - return cache_path - - def read_set_from_file(filename): """ Extract a de-duped collection (set) of text from a file. 
diff --git a/utils/tokenization.py b/utils/tokenization.py index 8761998..d589a2d 100644 --- a/utils/tokenization.py +++ b/utils/tokenization.py @@ -25,13 +25,12 @@ logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { - 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", - 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", - 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", - 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", - 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", - 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", - 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", + 'bert-base-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-uncased-vocab.txt'), + 'bert-large-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-large-uncased-vocab.txt'), + 'bert-base-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-cased-vocab.txt'), + 'bert-large-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-large-cased-vocab.txt'), + 'bert-base-multilingual-uncased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-multilingual-uncased-vocab.txt'), + 'bert-base-multilingual-cased': os.path.join(os.pardir, 'hedwig-data', 'models', 'bert_pretrained', 'bert-base-multilingual-cased-vocab.txt') } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512,