PYTHONPATH=. python examples/pytorch/token_classification/finetune_token_classification.py \
    --task 'token-classification' \
    --trainer 'nlp-base-trainer' \
    --work_dir './tmp' \
    --model 'damo/nlp_structbert_backbone_base_std' \
    --train_dataset_name 'GeoGLUE' \
    --train_subset_name 'GeoETA' \
    --train_dataset_namespace 'damo' \
    --first_sequence 'tokens' \
    --eval_strategy by_step \
    --eval_interval 20 \
    --label 'ner_tags' \
    --sequence_length 128 \
    --preprocessor 'token-cls-tokenizer' \
    --preprocessor_padding 'max_length' \
    --max_epochs 2 \
    --mode 'inference' \
    --use_model_config True \
    --per_device_train_batch_size 32 \
    --train_data_worker 0 \
    --eval_data_worker 0 \
    --lr 3e-5
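
# Optional follow-up (not part of the original example): a minimal sketch of
# loading the fine-tuned checkpoint with the ModelScope pipeline API for a
# quick sanity check. The path './tmp/output' is an assumption derived from
# --work_dir above; point it at wherever the trainer actually saved its
# output, and uncomment to run after training completes.
# PYTHONPATH=. python -c "
# from modelscope.pipelines import pipeline
# from modelscope.utils.constant import Tasks
# ner = pipeline(Tasks.token_classification, model='./tmp/output')  # assumed checkpoint path
# print(ner('some input text to tag'))  # placeholder input, replace with real data
# "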