PYTHONPATH=. python examples/pytorch/token_classification/finetune_token_classification.py \
    --task 'token-classification' \
    --trainer 'nlp-base-trainer' \
    --work_dir './tmp' \
    --model 'damo/nlp_structbert_backbone_base_std' \
    --train_dataset_name 'GeoGLUE' \
    --train_subset_name 'GeoETA' \
    --train_dataset_namespace 'damo' \
    --first_sequence 'tokens' \
    --eval_strategy by_step \
    --eval_interval 20 \
    --label 'ner_tags' \
    --sequence_length 128 \
    --preprocessor 'token-cls-tokenizer' \
    --preprocessor_padding 'max_length' \
    --max_epochs 2 \
    --mode 'inference' \
    --use_model_config True \
    --per_device_train_batch_size 32 \
    --train_data_worker 0 \
    --eval_data_worker 0 \
    --lr 3e-5 \