# Copyright (c) Alibaba, Inc. and its affiliates.

import os.path
import shutil
import tempfile
import unittest

# 'nlp' is imported for its side effect of registering the nlp preprocessors
# with the builder used below.
from modelscope.preprocessors import Preprocessor, build_preprocessor, nlp
from modelscope.utils.constant import Fields, InputFields
from modelscope.utils.logger import get_logger

logger = get_logger()


@unittest.skip('skip since huggingface model download may fail.')
class NLPPreprocessorTest(unittest.TestCase):
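    """Unit tests for the ModelScope nlp preprocessors.

    Covers config-driven construction via build_preprocessor, hub round-trips
    via from_pretrained/save_pretrained, and the token-classification
    tokenizer outputs for BERT- and XLM-R-style models.
    """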
    def setUp(self):
        print('Testing %s.%s' % (type(self).__name__, self._testMethodName))
        # TemporaryDirectory().name only yields a fresh path: the object is
        # released immediately (removing the directory), so the directory is
        # created by hand here and cleaned up in tearDown.
        self.tmp_dir = tempfile.TemporaryDirectory().name
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)
        super().tearDown()
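    # 'Tokenize' wraps a HuggingFace tokenizer ('bert-base-cased' here); ids
    # 101 and 102 in the expectation below are the [CLS] and [SEP] markers,
    # and token_type_ids/attention_mask carry one entry per token.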
    def test_tokenize(self):
        cfg = dict(type='Tokenize', tokenizer_name='bert-base-cased')
        preprocessor = build_preprocessor(cfg, Fields.nlp)
        input = {
            InputFields.text:
            'Do not meddle in the affairs of wizards, '
            'for they are subtle and quick to anger.'
        }
        output = preprocessor(input)
        self.assertIn(InputFields.text, output)
        self.assertEqual(output['input_ids'], [
            101, 2091, 1136, 1143, 13002, 1107, 1103, 5707, 1104, 16678, 1116,
            117, 1111, 1152, 1132, 11515, 1105, 3613, 1106, 4470, 119, 102
        ])
        self.assertEqual(
            output['token_type_ids'],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        self.assertEqual(
            output['attention_mask'],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
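    # For a quick cross-check outside ModelScope, the same ids can be
    # reproduced with the underlying HuggingFace tokenizer (a sanity-check
    # sketch, not part of the test suite):
    #
    #   from transformers import AutoTokenizer
    #   tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    #   ids = tokenizer(text)['input_ids']  # should match the list above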
    def test_save_pretrained(self):
        # Saves a hub-loaded preprocessor to disk and checks that its
        # configuration file is written alongside it.
        preprocessor = Preprocessor.from_pretrained(
            'damo/nlp_structbert_sentence-similarity_chinese-tiny')
        save_path = os.path.join(self.tmp_dir, 'test_save_pretrained')
        preprocessor.save_pretrained(save_path)
        self.assertTrue(
            os.path.isfile(os.path.join(save_path, 'configuration.json')))
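    # Assuming save_pretrained writes a self-contained directory (which the
    # configuration.json check above suggests), the preprocessor should be
    # reloadable from that path:
    #
    #   reloaded = Preprocessor.from_pretrained(save_path)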
    def test_preprocessor_download(self):
        # Loading by hub id should download the model files and build the
        # matching preprocessor; snapshot_download returns the local model
        # dir, where the downloaded weights should be present.
        from modelscope.preprocessors.nlp.token_classification_preprocessor import TokenClassificationPreprocessorBase
        preprocessor: TokenClassificationPreprocessorBase = \
            Preprocessor.from_pretrained(
                'damo/nlp_raner_named-entity-recognition_chinese-base-news')
        self.assertIsNotNone(preprocessor)
        from modelscope.utils.hub import snapshot_download
        model_dir = snapshot_download(
            'damo/nlp_raner_named-entity-recognition_chinese-base-news')
        self.assertTrue(
            os.path.isfile(os.path.join(model_dir, 'pytorch_model.bin')))
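    # snapshot_download caches downloads locally, so a second call with the
    # same id is typically served from the cache rather than re-fetched.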
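    # The token-cls tokenizer returns tensors (hence the .tolist() calls).
    # label_mask is True only at positions that receive a label: the first
    # sub-token of each word; [CLS]/[SEP] and continuation sub-tokens are
    # False. offset_mapping holds the character span of each labeled word,
    # which is why it has fewer entries than input_ids.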
    def test_token_classification_tokenize_bert(self):
        cfg = dict(
            type='token-cls-tokenizer',
            padding=False,
            label_all_tokens=False,
            model_dir='bert-base-cased',
            label2id={
                'O': 0,
                'B': 1,
                'I': 2
            })
        preprocessor = build_preprocessor(cfg, Fields.nlp)
        input = 'Do not meddle in the affairs of wizards, ' \
                'for they are subtle and quick to anger.'
        output = preprocessor(input)
        self.assertIn(InputFields.text, output)
        self.assertEqual(output['input_ids'].tolist()[0], [
            101, 2091, 1136, 1143, 13002, 1107, 1103, 5707, 1104, 16678, 1116,
            117, 1111, 1152, 1132, 11515, 1105, 3613, 1106, 4470, 119, 102
        ])
        self.assertEqual(
            output['attention_mask'].tolist()[0],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        self.assertEqual(output['label_mask'].tolist()[0], [
            False, True, True, True, False, True, True, True, True, True,
            False, True, True, True, True, True, True, True, True, True, True,
            False
        ])
        self.assertEqual(
            output['offset_mapping'].tolist()[0],
            [[0, 2], [3, 6], [7, 13], [14, 16], [17, 20], [21, 28], [29, 31],
             [32, 39], [39, 40], [41, 44], [45, 49], [50, 53], [54, 60],
             [61, 64], [65, 70], [71, 73], [74, 79], [79, 80]])
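    # The same checks with a SentencePiece tokenizer ('xlm-roberta-base');
    # sub-word segmentation differs from WordPiece, so input_ids, label_mask
    # and the word-level offset_mapping change accordingly.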
    def test_token_classification_tokenize_roberta(self):
        cfg = dict(
            type='token-cls-tokenizer',
            padding=False,
            label_all_tokens=False,
            model_dir='xlm-roberta-base',
            label2id={
                'O': 0,
                'B': 1,
                'I': 2
            })
        preprocessor = build_preprocessor(cfg, Fields.nlp)
        input = 'Do not meddle in the affairs of wizards, ' \
                'for they are subtle and quick to anger.'
        output = preprocessor(input)
        self.assertIn(InputFields.text, output)
        self.assertEqual(output['input_ids'].tolist()[0], [
            0, 984, 959, 128, 19298, 23, 70, 103086, 7, 111, 6, 44239, 99397,
            4, 100, 1836, 621, 1614, 17991, 136, 63773, 47, 348, 56, 5, 2
        ])
        self.assertEqual(output['attention_mask'].tolist()[0], [
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
            1, 1, 1, 1
        ])
        self.assertEqual(output['label_mask'].tolist()[0], [
            False, True, True, True, False, True, True, True, False, True,
            True, False, False, False, True, True, True, True, False, True,
            True, True, True, False, False, False
        ])
        self.assertEqual(
            output['offset_mapping'].tolist()[0],
            [[0, 2], [3, 6], [7, 13], [14, 16], [17, 20], [21, 28], [29, 31],
             [32, 40], [41, 44], [45, 49], [50, 53], [54, 60], [61, 64],
             [65, 70], [71, 73], [74, 80]])


if __name__ == '__main__':
    unittest.main()