Files
modelscope/examples/pytorch/token_classification/run_train_structbert.sh

17 lines
640 B
Bash
Raw Normal View History

# Fine-tune StructBERT (damo/nlp_structbert_backbone_base_std) for token
# classification (NER) on the GeoGLUE / GeoETA dataset via ModelScope's
# nlp-base-trainer.
#
# Run from the repository root: PYTHONPATH=. makes repo-local modules
# importable by the finetune script; torchrun handles the (multi-)GPU launch.
# Checkpoints and logs are written to ./tmp.
#
# NOTE(review): the original ended with a trailing '\' after the last
# argument, which would silently continue the command onto whatever line
# follows — removed here.
PYTHONPATH=. torchrun examples/pytorch/token_classification/finetune_token_classification.py \
  --trainer 'nlp-base-trainer' \
  --work_dir './tmp' \
  --model 'damo/nlp_structbert_backbone_base_std' \
  --dataset_name 'GeoGLUE' \
  --subset_name 'GeoETA' \
  --train_dataset_params 'first_sequence=tokens,label=ner_tags,sequence_length=128' \
  --preprocessor 'token-cls-tokenizer' \
  --preprocessor_padding 'max_length' \
  --max_epochs 2 \
  --per_device_train_batch_size 32 \
  --lr 3e-5 \
  --save_ckpt_strategy 'by_epoch' \
  --logging_interval 1 \
  --eval_strategy 'by_step' \
  --eval_interval 20