From 9b8cfc4ecefb96696ca673e0775dbc46930ae84e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=BF=8E=E8=88=AA?=
Date: Thu, 20 Oct 2022 22:32:41 +0800
Subject: [PATCH 1/2] modify ofatrainer

---
 .../trainers/multi_modal/ofa/ofa_trainer.py |  15 ++++--
 tests/trainers/test_ofa_trainer.py          |  35 +++++++++++++++++--
 2 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/modelscope/trainers/multi_modal/ofa/ofa_trainer.py b/modelscope/trainers/multi_modal/ofa/ofa_trainer.py
index 3daadf43..474a6772 100644
--- a/modelscope/trainers/multi_modal/ofa/ofa_trainer.py
+++ b/modelscope/trainers/multi_modal/ofa/ofa_trainer.py
@@ -24,12 +24,13 @@ from .ofa_trainer_utils import (AdjustLabelSmoothedCrossEntropyCriterion,
 @TRAINERS.register_module(module_name=Trainers.ofa_tasks)
 class OFATrainer(EpochBasedTrainer):
 
-    def __init__(self, model: str, *args, **kwargs):
+    def __init__(self, model: str, cfg_file, work_dir, train_dataset,
+                 eval_dataset, *args, **kwargs):
         model = Model.from_pretrained(model)
         model_dir = model.model_dir
-        cfg_file = os.path.join(model_dir, ModelFile.CONFIGURATION)
+        # cfg_file = os.path.join(model_dir, ModelFile.CONFIGURATION)
         cfg = Config.from_file(cfg_file)
-        dataset = self._build_dataset_with_config(cfg)
+        # dataset = self._build_dataset_with_config(cfg)
         preprocessor = {
             ConfigKeys.train:
             OfaPreprocessor(
@@ -41,7 +42,7 @@ class OFATrainer(EpochBasedTrainer):
         # use torchrun launch
         world_size = int(os.environ.get('WORLD_SIZE', 1))
         epoch_steps = math.ceil(
-            len(dataset['train']) /  # noqa
+            len(train_dataset) /  # noqa
             (cfg.train.dataloader.batch_size_per_gpu * world_size))  # noqa
         cfg.train.lr_scheduler.num_train_steps = epoch_steps * cfg.train.max_epochs
         cfg.train.criterion.tokenizer = model.tokenizer
@@ -68,11 +69,11 @@ class OFATrainer(EpochBasedTrainer):
             cfg_file=cfg_file,
             model=model,
             data_collator=collator,
-            train_dataset=dataset['train'],
-            eval_dataset=dataset['valid'],
+            train_dataset=train_dataset,
+            eval_dataset=eval_dataset,
             preprocessor=preprocessor,
             optimizers=(optimizer, lr_scheduler),
-            work_dir=cfg.train.work_dir,
+            work_dir=work_dir,
             *args,
             **kwargs,
         )
diff --git a/tests/trainers/test_ofa_trainer.py b/tests/trainers/test_ofa_trainer.py
index 8aab3544..3322271d 100644
--- a/tests/trainers/test_ofa_trainer.py
+++ b/tests/trainers/test_ofa_trainer.py
@@ -3,22 +3,51 @@ import glob
 import os
 import os.path as osp
 import shutil
+import tempfile
 import unittest
 
 from modelscope.metainfo import Trainers
+from modelscope.msdatasets import MsDataset
 from modelscope.trainers import build_trainer
+from modelscope.utils.constant import DownloadMode
 from modelscope.utils.test_utils import test_level
 
 
 class TestOfaTrainer(unittest.TestCase):
 
+    def setUp(self):
+        column_map = {'premise': 'text', 'hypothesis': 'text2'}
+        data_train = MsDataset.load(
+            dataset_name='glue',
+            subset_name='mnli',
+            namespace='modelscope',
+            split='train[:100]',
+            download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
+        self.train_dataset = MsDataset.from_hf_dataset(
+            data_train._hf_ds.rename_columns(column_map))
+        data_eval = MsDataset.load(
+            dataset_name='glue',
+            subset_name='mnli',
+            namespace='modelscope',
+            split='validation_matched[:8]',
+            download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS)
+        self.test_dataset = MsDataset.from_hf_dataset(
+            data_eval._hf_ds.rename_columns(column_map))
+
     @unittest.skipUnless(test_level() >= 1, 'skip test in current test level')
     def test_trainer(self):
         os.environ['LOCAL_RANK'] = '0'
         model_id = 'damo/ofa_text-classification_mnli_large_en'
-        default_args = {'model': model_id}
-        trainer = build_trainer(
-            name=Trainers.ofa_tasks, default_args=default_args)
+
+        kwargs = dict(
+            model=model_id,
+            cfg_file=
+            '/Users/running_you/.cache/modelscope/hub/damo/ofa_text-classification_mnli_large_en//configuration.json',
+            train_dataset=self.train_dataset,
+            eval_dataset=self.test_dataset,
+            work_dir='/Users/running_you/.cache/modelscope/hub/work/mnli')
+
+        trainer = build_trainer(name=Trainers.ofa_tasks, default_args=kwargs)
         os.makedirs(trainer.work_dir, exist_ok=True)
         trainer.train()
         assert len(
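Note on the interface change in PATCH 1/2: OFATrainer no longer derives cfg_file from the model directory or builds its datasets from the configuration; callers now pass cfg_file, work_dir, train_dataset and eval_dataset explicitly. Below is a minimal usage sketch of the new signature. It is illustrative only: the snapshot_download call and the tempfile work dir are assumptions chosen to avoid the hard-coded /Users/... paths in the test above, not part of this patch.

    import os
    import tempfile

    from modelscope.hub.snapshot_download import snapshot_download
    from modelscope.metainfo import Trainers
    from modelscope.msdatasets import MsDataset
    from modelscope.trainers import build_trainer
    from modelscope.utils.constant import ModelFile

    # MNLI columns renamed to the text/text2 pair, as in the test's setUp().
    column_map = {'premise': 'text', 'hypothesis': 'text2'}
    train_dataset = MsDataset.from_hf_dataset(
        MsDataset.load('glue', subset_name='mnli', namespace='modelscope',
                       split='train[:100]')._hf_ds.rename_columns(column_map))
    eval_dataset = MsDataset.from_hf_dataset(
        MsDataset.load('glue', subset_name='mnli', namespace='modelscope',
                       split='validation_matched[:8]')._hf_ds.rename_columns(
                           column_map))

    model_id = 'damo/ofa_text-classification_mnli_large_en'
    model_dir = snapshot_download(model_id)  # resolve model to a local cache dir
    trainer = build_trainer(
        name=Trainers.ofa_tasks,
        default_args=dict(
            model=model_id,
            # the configuration.json shipped with the model
            cfg_file=os.path.join(model_dir, ModelFile.CONFIGURATION),
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            work_dir=tempfile.mkdtemp()))  # portable scratch dir
    trainer.train()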
From 1bb1eeec775c6bf63c440a1a148e34400959136a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=BF=8E=E8=88=AA?=
Date: Tue, 25 Oct 2022 10:55:24 +0800
Subject: [PATCH 2/2] fix ut

---
 tests/trainers/test_ofa_trainer.py                        | 7 +++----
 tests/trainers/workspace/ckpts/caption/configuration.json | 1 +
 2 files changed, 4 insertions(+), 4 deletions(-)
 create mode 100644 tests/trainers/workspace/ckpts/caption/configuration.json

diff --git a/tests/trainers/test_ofa_trainer.py b/tests/trainers/test_ofa_trainer.py
index ac2e0678..9a8a7d90 100644
--- a/tests/trainers/test_ofa_trainer.py
+++ b/tests/trainers/test_ofa_trainer.py
@@ -86,9 +86,8 @@ class TestOfaTrainer(unittest.TestCase):
             model=pretrained_model,
             work_dir=WORKSPACE,
             train_dataset=MsDataset.load(
-                'coco_2014_caption',
-                namespace='modelscope',
-                split='train[:12]'),
+                'coco_2014_caption', namespace='modelscope',
+                split='train[:4]'),
             eval_dataset=MsDataset.load(
                 'coco_2014_caption',
                 namespace='modelscope',
@@ -99,7 +98,7 @@ class TestOfaTrainer(unittest.TestCase):
 
         trainer.train()
 
         self.assertIn(ModelFile.TORCH_MODEL_BIN_FILE,
-                      os.path.join(WORKSPACE, 'output'))
+                      os.listdir(os.path.join(WORKSPACE, 'output')))
 
         shutil.rmtree(WORKSPACE)
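The assertion change above is the substantive fix: assertIn against os.path.join(...) only tests whether the file name is a substring of a path string, which never holds here, while the intent is to check that the saved checkpoint appears in the output directory listing. A self-contained sketch of the difference; the literal 'pytorch_model.bin' stands in for ModelFile.TORCH_MODEL_BIN_FILE, and the paths are hypothetical:

    import os
    import tempfile

    workspace = tempfile.mkdtemp()
    output = os.path.join(workspace, 'output')
    os.makedirs(output)
    # Create a fake checkpoint file in the output directory.
    open(os.path.join(output, 'pytorch_model.bin'), 'w').close()

    # Before: substring test against the path string; always False here.
    assert 'pytorch_model.bin' not in os.path.join(workspace, 'output')
    # After: membership test against the directory listing, which is
    # what the test actually means.
    assert 'pytorch_model.bin' in os.listdir(output)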
"metrics": [{"type": "bleu", "eval_tokenized_bleu": false, "ref_name": "labels", "hyp_name": "caption"}]}, "preprocessor": []}