Commit 7d6128ca authored by Benoit Favre

initial commit
Copyright 2020 Benoit Favre
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Topic classifier for biomedical articles
========================================
Installing
----------
```
virtualenv -p python3 env
source env/bin/activate
pip install -r requirements.txt -f https://download.pytorch.org/whl/torch_stable.html
```
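
Optionally, check that PyTorch installed correctly and can see a GPU (only relevant if you plan to pass `--gpus`); this is just a quick sanity check, not part of the project code:

```
import torch
print(torch.__version__)          # installed PyTorch version
print(torch.cuda.is_available())  # True if a CUDA device is visible
```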
Running
-------
```
python trainer.py --gpus=-1 --name test1 --train_filename ../scrappers/data/20200529/litcovid.json
```
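
All hyperparameters declared in `model.py` can be overridden on the same command line; for example (hypothetical values chosen for a smaller GPU):

```
python trainer.py --gpus=-1 --name test2 --train_filename ../scrappers/data/20200529/litcovid.json \
    --batch_size 8 --max_len 256 --learning_rate 2e-5
```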
data.py
-------
```
import collections
import json
import random

import torch
from torch.utils.data import Dataset


class CustomDataset(Dataset):
    def __init__(self, texts, labels):
        self.texts = texts
        self.labels = labels

    def __getitem__(self, index):
        return self.texts[index], self.labels[index]

    def __len__(self):
        return len(self.labels)


def bert_text_to_ids(tokenizer, sentence):
    return torch.tensor(tokenizer.encode(sentence, add_special_tokens=True))


def load(tokenizer, hparams):
    with open(hparams.train_filename) as fp:
        articles = json.loads(fp.read())

    # build the label vocabulary from all topics seen in the corpus
    label_vocab = collections.defaultdict(lambda: len(label_vocab))
    for article in articles:
        if 'topics' in article:
            for topic in article['topics']:
                label_vocab[topic]
    label_vocab = dict(label_vocab)

    # keep only articles with topic annotations; fall back to an empty abstract
    dataset = [article for article in articles if 'topics' in article]  # and 'abstract' in article]
    for article in dataset:
        if 'abstract' not in article or article['abstract'] == []:
            article['abstract'] = ['']
    random.shuffle(dataset)

    sorted_labels = list(sorted(label_vocab.keys(), key=label_vocab.get))
    texts = []
    int_texts = []
    int_labels = []
    for article in dataset:
        # join the selected features (e.g. title and abstract) into a single input string
        text = ' | '.join([''.join(article[feature]) for feature in hparams.selected_features])
        texts.append(text)
        int_texts.append(bert_text_to_ids(tokenizer, text)[:hparams.max_len])
        int_labels.append([1 if label in article['topics'] else 0 for label in sorted_labels])

    # the first valid_size examples form the validation set, the rest the training set
    train_set = CustomDataset(int_texts[hparams.valid_size:], int_labels[hparams.valid_size:])
    valid_set = CustomDataset(int_texts[:hparams.valid_size], int_labels[:hparams.valid_size])
    return train_set, valid_set, label_vocab
```
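
The loader does not document the input format, so here is a hypothetical minimal record inferred from `load()` above: the file is a JSON list of article objects, only articles with a `topics` key are kept, and each selected feature (`title`, `abstract`) is passed through `''.join(...)`, so lists of strings and plain strings both work.

```
# Hypothetical example article, inferred from data.load() (not a documented schema)
example_article = {
    "title": ["Clinical characteristics of hospitalized COVID-19 patients"],
    "abstract": ["We report a retrospective cohort of ..."],
    "topics": ["Diagnosis", "Treatment"],  # the label vocabulary is built from these strings
}
# litcovid.json is expected to be a JSON list of such objects
```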
model.py
--------
```
from argparse import ArgumentParser

import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader

from pytorch_lightning.core.lightning import LightningModule
from transformers import AutoTokenizer
from transformers import AutoModel

import data


class Model(LightningModule):
    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams
        self.tokenizer = AutoTokenizer.from_pretrained(hparams.bert_flavor)
        self.train_set, self.valid_set, self.label_vocab = data.load(self.tokenizer, hparams)
        hparams.num_labels = len(self.label_vocab)

        # BERT encoder followed by a linear layer producing one logit per topic
        self.bert = AutoModel.from_pretrained(hparams.bert_flavor)
        self.decision = nn.Linear(self.bert.config.hidden_size, hparams.num_labels)

    def forward(self, x):
        # mask out padding tokens and classify from the pooled [CLS] representation
        _, output = self.bert(x, attention_mask=(x != self.tokenizer.pad_token_id).long())
        return self.decision(output)

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy_with_logits(y_hat, y)
        return {'loss': loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.binary_cross_entropy_with_logits(y_hat, y)
        return {'val_loss': loss}

    def validation_epoch_end(self, outputs):
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean().item()
        return {'val_loss': avg_loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

    def collate_fn(self, inputs):
        # pad token id sequences to the length of the longest example in the batch
        text_len = max([len(x[0]) for x in inputs])
        x_text = torch.full((len(inputs), text_len), self.tokenizer.pad_token_id).long()
        for i, x in enumerate(inputs):
            x_text[i, :len(x[0])] = torch.LongTensor(x[0])
        y = torch.tensor([x[-1] for x in inputs]).float()
        return x_text, y

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.hparams.batch_size, shuffle=True, pin_memory=True, collate_fn=self.collate_fn)

    def val_dataloader(self):
        return DataLoader(self.valid_set, batch_size=self.hparams.batch_size, pin_memory=True, collate_fn=self.collate_fn)

    @staticmethod
    def add_model_specific_args(parent_parser):
        parser = ArgumentParser(parents=[parent_parser])
        parser.add_argument('--train_filename', default='litcovid.json', type=str)
        parser.add_argument('--learning_rate', default=2e-5, type=float)
        parser.add_argument('--batch_size', default=16, type=int)
        parser.add_argument('--epochs', default=10, type=int)
        parser.add_argument('--valid_size', default=300, type=int)
        parser.add_argument('--max_len', default=384, type=int)
        parser.add_argument('--bert_flavor', default='monologg/biobert_v1.1_pubmed', type=str)
        parser.add_argument('--selected_features', default=['title', 'abstract'], type=list)
        return parser
```
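
The decision layer produces one logit per topic and training uses `binary_cross_entropy_with_logits`, so this is a multi-label setup: at prediction time each logit goes through a sigmoid independently. A minimal inference sketch, assuming a trained `Model` instance; the helper name and the 0.5 threshold are illustrative, not part of this commit:

```
import torch
import data

def predict_topics(model, text, threshold=0.5):
    # encode the text the same way as data.load(), truncated to max_len
    ids = data.bert_text_to_ids(model.tokenizer, text)[:model.hparams.max_len].unsqueeze(0)
    model.eval()
    with torch.no_grad():
        probs = torch.sigmoid(model(ids))[0]
    # label_vocab maps topic name -> index; recover the index -> name order
    labels = sorted(model.label_vocab, key=model.label_vocab.get)
    return [label for label, p in zip(labels, probs) if p.item() >= threshold]
```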
trainer.py
----------
```
from argparse import ArgumentParser
from pytorch_lightning import Trainer
import os
import json
import sys
import warnings

warnings.filterwarnings('ignore', message='Displayed epoch numbers in the progress bar start from.*')
warnings.filterwarnings('ignore', message='.*does not have many workers which may be a bottleneck.*')

from model import Model


def main(hparams):
    model = Model(hparams)
    trainer = Trainer(
        max_nb_epochs=hparams.epochs,
        gpus=hparams.gpus,
        nb_gpu_nodes=hparams.nodes,
        check_val_every_n_epoch=1,
        progress_bar_refresh_rate=10,
        num_sanity_val_steps=0,
        fast_dev_run=hparams.fast_dev_run,
    )
    trainer.fit(model)


if __name__ == '__main__':
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--gpus', type=str, default=None)
    parser.add_argument('--nodes', type=int, default=1)
    parser.add_argument('--name', type=str, required=True)
    parser.add_argument('--fast_dev_run', default=False, action='store_true')
    parser = Model.add_model_specific_args(parser)

    # keep a copy of the full command line alongside the hyperparameters
    command_line = 'python ' + ' '.join(sys.argv)
    hparams = parser.parse_args()
    hparams.cmd = command_line
    main(hparams)
```
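
For a quick smoke test of the data loading and model wiring before a full run, the `--fast_dev_run` flag makes Lightning run a single training and validation batch (the `--name` value below is just an example):

```
python trainer.py --fast_dev_run --name smoke --train_filename ../scrappers/data/20200529/litcovid.json
```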