From 4e8374e850962c17fac32e8066882b822d4380ed Mon Sep 17 00:00:00 2001
From: "xingjun.wang"
Date: Sat, 1 Jul 2023 23:05:03 +0800
Subject: [PATCH 01/87] merge internal master 0701

---
 =0.0.4                | 0
 modelscope/version.py | 2 +-
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 =0.0.4

diff --git a/=0.0.4 b/=0.0.4
new file mode 100644
index 00000000..e69de29b
diff --git a/modelscope/version.py b/modelscope/version.py
index 32823e7b..e4028ca2 100644
--- a/modelscope/version.py
+++ b/modelscope/version.py
@@ -1,5 +1,5 @@
 # Make sure to modify __release_datetime__ to release time when making official release.
-__version__ = '1.6.2'
+__version__ = '1.7.0'
 # default release datetime for branches under active development is set
 # to be a time far-far-away-into-the-future
 __release_datetime__ = '2099-10-13 08:56:12'

From 4f548ae8660d44d35221ae2f755a069fcaf0fae1 Mon Sep 17 00:00:00 2001
From: "xingjun.wang"
Date: Sat, 1 Jul 2023 23:16:54 +0800
Subject: [PATCH 02/87] del =0.0.4 file

---
 =0.0.4 | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 =0.0.4

diff --git a/=0.0.4 b/=0.0.4
deleted file mode 100644
index e69de29b..00000000

From a50342ed5d50caf959d054894bfa0334dda17d4b Mon Sep 17 00:00:00 2001
From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com>
Date: Mon, 3 Jul 2023 13:52:35 +0800
Subject: [PATCH 03/87] Add stable diffusion tutorial ipynb (#346)

---
 .../finetune_stable_diffusion_dreambooth.py |  2 +
 .../lora/finetune_stable_diffusion_lora.py  |  2 +
 .../pytorch/stable_diffusion/tutorial.ipynb | 83 +++++++++++++++++++
 3 files changed, 87 insertions(+)
 create mode 100644 examples/pytorch/stable_diffusion/tutorial.ipynb

diff --git a/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py b/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py
index 1f38cff7..f852d752 100644
--- a/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py
+++ b/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py
@@ -113,4 +113,6 @@ pipe = pipeline(
     model_revision=args.model_revision)
 
 output = pipe({'text': args.prompt})
+# visualize the result on ipynb and save it
+output
 cv2.imwrite('./dreambooth_result.png', output['output_imgs'][0])
diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py
index 183e817d..2561a6a7 100644
--- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py
+++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py
@@ -66,4 +66,6 @@ pipe = pipeline(
     model_revision=args.model_revision)
 
 output = pipe({'text': args.prompt})
+# visualize the result on ipynb and save it
+output
 cv2.imwrite('./lora_result.png', output['output_imgs'][0])
diff --git a/examples/pytorch/stable_diffusion/tutorial.ipynb b/examples/pytorch/stable_diffusion/tutorial.ipynb
new file mode 100644
index 00000000..941b4e76
{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Fine-tuning Stable Diffusion with ModelScope\n",
    "## How it works\n",
    "\n",
    "Training Stable Diffusion from scratch costs hundreds of thousands of dollars and more than a month of compute, which puts it out of reach for most users. The practical approach is therefore to start from an open-source Stable Diffusion model and customize it through fine-tuning. Several effective fine-tuning methods have emerged in recent years, such as [Textual Inversion](https://arxiv.org/abs/2208.01618), [Dreambooth](https://arxiv.org/pdf/2208.12242.pdf), [Lora](https://arxiv.org/abs/2106.09685) and [Custom Diffusion](https://arxiv.org/pdf/2302.05543.pdf); ModelScope currently supports the Dreambooth and Lora methods.\n",
    "\n",
    "### Dreambooth\n",
    "Fine-tuning Stable Diffusion directly on a handful of images easily leads to overfitting: the generated results become homogeneous and the model loses its ability to generalize. It is also prone to language drift, which severely degrades model quality. Dreambooth addresses this by combining a reconstruction loss with a class-specific prior-preservation loss.\n",
    "\n",
    "### Lora\n",
    "Lora stands for Low-Rank Adaptation. The technique originated in fine-tuning large language models and also works very well on Stable Diffusion. Large models are generally over-parameterized and have a much smaller intrinsic dimension, and Lora relies on this low intrinsic dimension for task adaptation. It models the weight update with a low-rank decomposition (project down, then project back up), so the large model can be trained indirectly with only a tiny number of extra parameters.\n",
    "\n",
    "As shown in the figure below, Lora inserts trainable rank-decomposition matrix layers in parallel with the original model layers; each layer consists of a down-projection matrix A and an up-projection matrix B. A is initialized from a Gaussian distribution and B is initialized to all zeros, so the bypass branch is a zero matrix when training starts. During training the original model is frozen and only A and B are updated; at inference time the decomposition is added back onto the original weights. Extensive experiments show that for Stable Diffusion, fine-tuning the attention layers of the UNet with Lora gives good results.\n",
    "\n",
    "## Hands-on practice\n",
    "\n",
    "First, download the code and set up the environment."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "git clone https://github.com/modelscope/modelscope.git\n",
    "cd modelscope"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Then run the scripts below to start Dreambooth and Lora training and inference."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "bash examples/pytorch/stable_diffusion/dreambooth/run_train_dreambooth.sh"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "outputs": [],
   "source": [
    "bash examples/pytorch/stable_diffusion/lora/run_train_lora.sh"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}

From 432f0ee20faf73d995679144450b8afa2ba6e518 Mon Sep 17 00:00:00 2001
From: Jintao
Date: Mon, 3 Jul 2023 16:19:00 +0800
Subject: [PATCH 04/87] add baichuan/chatglm2 +lora+agent examples (#350)

---
 examples/pytorch/llm_agent/_common.py         |  382 ++++
 .../pytorch/llm_agent/baichuan_infer.ipynb    |  492 +++++
 examples/pytorch/llm_agent/baichuan_sft.ipynb | 1824 ++++++++++++++++
 .../pytorch/llm_agent/chatglm2_infer.ipynb    |  526 +++++
 examples/pytorch/llm_agent/chatglm2_sft.ipynb | 1931 +++++++++++++++++
 5 files changed, 5155 insertions(+)
 create mode 100644 examples/pytorch/llm_agent/_common.py
 create mode 100644 examples/pytorch/llm_agent/baichuan_infer.ipynb
 create mode 100644 examples/pytorch/llm_agent/baichuan_sft.ipynb
 create mode 100644 examples/pytorch/llm_agent/chatglm2_infer.ipynb
 create mode 100644 examples/pytorch/llm_agent/chatglm2_sft.ipynb

diff --git a/examples/pytorch/llm_agent/_common.py b/examples/pytorch/llm_agent/_common.py
new file mode 100644
index 00000000..12e57eab
import os
import random
import re
import sys
import math
import json
import ast
import datetime as dt
from typing import List, Tuple, Dict, Callable, Optional, Union, Any
from functools import partial
#
from tqdm import tqdm
import numpy as np
from numpy import ndarray
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.figure import Figure
#
import torch
import torch.nn as nn
import torch.optim as optim
from torch import Tensor, device as Device, dtype as Dtype
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data
import Dataset +from torch.nn.parameter import Parameter +from torch.optim import lr_scheduler as lrs +from torch.optim.lr_scheduler import _LRScheduler as LRScheduler +from torch.nn.utils.rnn import pad_sequence +# +from torchmetrics import Accuracy, MeanMetric +from tensorboard.backend.event_processing.event_accumulator import EventAccumulator +# +from modelscope import get_logger +from modelscope import MsDataset, snapshot_download, Model, read_config +from modelscope.utils.config import Config, ConfigDict +from modelscope.msdatasets.dataset_cls.custom_datasets import TorchCustomDataset +from modelscope.trainers import EpochBasedTrainer +from modelscope.swift import Swift, LoRAConfig +from modelscope.metrics.base import Metric +from modelscope.metrics.builder import METRICS +from modelscope.utils.registry import default_group +from modelscope.models.nlp.chatglm2 import ChatGLM2Tokenizer + +# +SYSTEM_TEXT = """{system}""" +USER_TEXT = """\n\n### 用户 +{user}""" +ASSISTANT_PROMPT = """\n\n### 助手 +""" +MAX_LENGTH = 2048 +TEST_MAX_LENGTH = MAX_LENGTH + +COLOR, COLOR_S = "#FFE2D9", "#FF7043" +logger = get_logger() +# + + +def get_model_dir(model_id: str, model_revision: Optional[str] = None) -> str: + model_dir = snapshot_download(model_id, model_revision) + return model_dir + + +def _get_version(work_dir: str) -> int: + if os.path.isdir(work_dir): + fnames = os.listdir(work_dir) + else: + fnames = [] + v_list = [-1] + for fname in fnames: + m = re.match(r"v(\d+)", fname) + if m is None: + continue + v = m.group(1) + v_list.append(int(v)) + return max(v_list) + 1 + + +def get_work_dir(work_dir: str) -> str: + """add version""" + work_dir = os.path.abspath(work_dir) + version = _get_version(work_dir) + time = dt.datetime.now().strftime("%Y%m%d-%H%M%S") + # + work_dir = os.path.join(work_dir, f"v{version}-{time}") + logger.info(f"work_dir: {work_dir}") + return work_dir + + +def select_device(device_ids: List[int]) -> Device: + """Call this function before cuda is initialized. + Return: master device + """ + if torch.cuda.is_initialized(): + logger.warning("CUDA has been initialized! Device selection fails!") + return torch.device("cuda:0") + # + log_s = "Using device: " + if len(device_ids) == 0: # cpu + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + device: str = "cpu" + log_s += device + else: + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(d) for d in device_ids]) + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device_ids) + log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 
"cuda:1,7,8" + device = "cuda:0" + logger.info(log_s) + return torch.device(device) + + +def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: + if seed is None: + seed_max = np.iinfo(np.int32).max + seed = random.randint(0, seed_max) + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + logger.info(f"Global seed set to {seed}") + if gpu_dtm: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + logger.info(f"Setting deterministic: {True}, benchmark: {False}") + return seed + + +def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, drop_last: bool) -> int: + """Calculate T_max in CosineAnnealingLR""" + if drop_last: + T_max = dataset_len // batch_size + else: + T_max = math.ceil(dataset_len / batch_size) + T_max *= max_epochs + return T_max + + +def tokenize_function(system: str, user: str, assistant: Optional[str], tokenizer) -> Dict[str, Any]: + """Only applicable to baichuan and chatglm2. Other models need to be tested""" + system_text = SYSTEM_TEXT.format(system=system) + user_text = USER_TEXT.format(user=user) + system_text_ids: List[int] = tokenizer(system_text, return_attention_mask=False, + add_special_tokens=True)["input_ids"] + user_text_ids: List[int] = tokenizer(user_text, return_attention_mask=False, + add_special_tokens=False)["input_ids"] + assistant_p_input_ids: List[int] = tokenizer(ASSISTANT_PROMPT, return_attention_mask=False, + add_special_tokens=False)["input_ids"] + + # tokenizer.bos_token_id: Avoid `assistant` being empty + assistant_input_ids: List[int] = [tokenizer.bos_token_id] + if assistant is not None: + assistant_input_ids += tokenizer(assistant, return_attention_mask=False, add_special_tokens=False)["input_ids"] + assistant_input_ids += [tokenizer.eos_token_id] + # + input_ids = system_text_ids + user_text_ids + assistant_p_input_ids + assistant_input_ids + if assistant is not None: # train, val + if len(input_ids) > MAX_LENGTH: + return {} + len_mask = len(input_ids) - len(assistant_input_ids) + labels = [-100] * len_mask + assistant_input_ids + else: # test + input_ids = input_ids[-TEST_MAX_LENGTH:] + labels = None + + # + return {"input_ids": input_ids, "labels": labels} + + +class MyDataset(TorchCustomDataset): + def __init__(self, system: List[str], user: List[str], assistant: List[str], + tokenize_function) -> None: + self._data = [] + for i in tqdm(range(len(system))): + _d = tokenize_function(system[i], user[i], assistant[i]) + if len(_d) == 0: + continue + self._data.append(_d) + + def __getitem__(self, idx: int) -> Dict[str, Any]: + return self._data[idx] + + def __len__(self) -> int: + return len(self._data) + + +def stat_dataset(dataset: "MyDataset") -> None: + """Statistical analysis was performed on the data set""" + _token_len = [] + for d in dataset: + _token_len.append(len(d["input_ids"])) + _token_len = np.array(_token_len) + mean = _token_len.mean().item() + std = _token_len.std().item() + min_ = _token_len.min().item() + max_ = _token_len.max().item() + logger.info( + f"Dataset Token Length: {mean:.6f}±{std:.6f}, min={min_:.6f}, max={max_:.6f}, size={_token_len.shape[0]}") + + +def print_examples(examples: Dict[str, Any], tokenizer) -> None: + input_ids, labels = examples["input_ids"], examples["labels"] + print(f"[INPUT_IDS] {tokenizer.decode(input_ids)}") + print() + print(f"[LABLES] {tokenizer.decode([l if l != -100 else 0 for l in labels])}") + + +def data_collate_fn(batch: List[Dict[str, Any]], tokenizer) -> 
Dict[str, Any]: + input_ids = [torch.tensor(b["input_ids"]) for b in batch] + labels = [torch.tensor(b["labels"]) for b in batch] + attention_mask = [torch.ones(len(input_ids[i]), dtype=torch.int64) for i in range(len(input_ids))] + # + input_ids = pad_sequence(input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) + attention_mask = pad_sequence(attention_mask, batch_first=True, padding_value=0) + labels = pad_sequence(labels, batch_first=True, padding_value=-100) + return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels} + + +def print_model_info(model: Module, name: Optional[str] = None) -> None: + if name is None: + name = model.__class__.__name__ + # + n_params = sum(p.numel() for p in model.parameters()) + n_grads = sum(p.numel() for p in model.parameters() if p.requires_grad) + n_buffers = sum(p.numel() for p in model.buffers()) + # + n_params /= 1e6 + n_grads /= 1e6 + n_buffers /= 1e6 + s = [ + f"{name}: ", + f"{n_params:.4f}M Params ({n_grads:.4f}M Trainable), ", + f"{n_buffers:.4f}M Buffers", + ] + s += "." + logger.info("".join(s)) + + +def show_freeze_layers(model: Module, max_lines: int = 20) -> None: + named_p = list(model.named_parameters()) + for i, (n, p) in enumerate(named_p): + if i >= max_lines: + logger.info("...") + break + logger.info(f"{n}: requires_grad={p.requires_grad}") + + +@METRICS.register_module(group_key=default_group, module_name='my_metric') +class MyMetric(Metric): + def __init__(self, vocab_size: int): + self.acc = Accuracy("multiclass", num_classes=vocab_size) + self.loss = MeanMetric() + + def add(self, outputs: Dict[str, Any], inputs: Dict[str, Any]) -> None: + loss: Tensor = outputs.loss + self.loss.update(loss) + # + labels: Tensor = inputs["labels"] + labels = labels[:, 1:] + labels_mask = labels != -100 + logits: Tensor = outputs.logits[:, :-1] + logits = logits[labels_mask].contiguous().view(-1, logits.shape[-1]) + pred = logits.argmax(dim=-1) + labels = labels[labels_mask].to(logits.device) + self.acc.update(pred, labels) + + def evaluate(self): + return { + "acc": self.acc.compute().item(), + "loss": self.loss.compute().item() + } + + def merge(self, other: "MyMetric") -> None: + """This script does not support ddp""" + raise NotImplementedError + + +def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, load_model: bool = True): + if model_dir is None: + model_id = "baichuan-inc/baichuan-7B" + model_dir = get_model_dir(model_id, None) + # + sys.path.insert(0, model_dir) + from configuration_baichuan import BaiChuanConfig + from tokenization_baichuan import BaiChuanTokenizer + from modeling_baichuan import BaiChuanForCausalLM + model_config = BaiChuanConfig.from_pretrained(model_dir) + model_config.torch_dtype = torch.float16 + logger.info(f"model_config: {model_config}") + tokenizer = BaiChuanTokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = BaiChuanForCausalLM.from_pretrained(model_dir, config=model_config, + device_map="auto", torch_dtype=torch.float16) + # + return model, tokenizer + + +def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, load_model: bool = True): + if model_dir is None: + model_id = "ZhipuAI/chatglm2-6b" + model_revision = "v1.0.3" + model_dir = snapshot_download(model_id, model_revision) + # + config = read_config(model_dir) + config["model"] = ConfigDict({ + "type": "chatglm2-6b" + }) + tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = Model.from_pretrained( + model_dir, 
cfg_dict=config, device_map='auto', torch_dtype=torch.float16) + return model, tokenizer + + +def make_dataset(split: str, + tokenize_function: Callable[[str, str, Optional[str]], Dict[str, Any]]) -> MyDataset: + """ + split: Literal["train", "validation"] + """ + dataset = MsDataset.load('modelscope/ms_hackathon_23_agent_train_dev', split=split) + system = [] + user = [] + assistant = [] + for d in dataset: + content = ast.literal_eval(d["conversations"]) + s = content[0]["value"] + assert len(content) % 2 == 1 + for i in range(len(content) // 2): + system.append(s) + user.append(content[2 * i + 1]["value"]) + assistant.append(content[2 * i + 2]["value"]) + return MyDataset(system, user, assistant, tokenize_function) + + +Item = Dict[str, float] + + +def read_tensorboard_file(fpath: str) -> Dict[str, List[Item]]: + if not os.path.isfile(fpath): + raise FileNotFoundError(f"fpath: {fpath}") + ea = EventAccumulator(fpath) + ea.Reload() + res = {} + tags = ea.Tags()["scalars"] + for tag in tags: + values = ea.Scalars(tag) + r = [] + for v in values: + r.append({"step": v.step, "value": v.value}) + res[tag] = r + return res + + +def tensorboard_smoothing(values: List[float], smooth: float = 0.9) -> List[float]: + norm_factor = 1 + x = 0 + res = [] + for i in range(len(values)): + x = x * smooth + values[i] # Exponential decay + res.append(x / norm_factor) + # + norm_factor *= smooth + norm_factor += 1 + return res + + +def plot_image(data: Dict[str, List[Item]], key_name: str, smooth: float) -> Figure: + _data = data[key_name] + steps = [d["step"] for d in _data] + values = [d["value"] for d in _data] + fig, ax = plt.subplots(1, 1, squeeze=True, + figsize=(8, 5), dpi=100) + ax.set_title(key_name) + if smooth != 0: + ax.plot(steps, values, color=COLOR) + values_s = tensorboard_smoothing(values, smooth) + ax.plot(steps, values_s, color=COLOR_S) + else: + ax.plot(steps, values, color=COLOR_S) + return fig diff --git a/examples/pytorch/llm_agent/baichuan_infer.ipynb b/examples/pytorch/llm_agent/baichuan_infer.ipynb new file mode 100644 index 00000000..77719fc1 --- /dev/null +++ b/examples/pytorch/llm_agent/baichuan_infer.ipynb @@ -0,0 +1,492 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Baichuan 推理" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置实验环境" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install transformers" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2023-07-02 22:28:00,199] [INFO] [real_accelerator.py:110:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 22:28:00,675 - modelscope - INFO - PyTorch version 2.0.1 Found.\n", + "2023-07-02 22:28:00,676 - modelscope - INFO - Loading ast index from /home/hackathon/.cache/modelscope/ast_indexer\n", + "2023-07-02 22:28:00,700 - modelscope - INFO - Loading done! 
Current index file version is 1.6.2, with md5 ddf811ee982377c1357284a2bfda3dec and a total number of 861 components indexed\n", + "2023-07-02 22:28:01,367 - modelscope - INFO - [0, 1]\n", + "2023-07-02 22:28:01,512 - modelscope - INFO - Using device: cuda:0,1\n" + ] + }, + { + "data": { + "text/plain": [ + "device(type='cuda', index=0)" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from _common import *\n", + "from transformers import TextStreamer\n", + "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "logger.info(device_ids)\n", + "select_device(device_ids)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Model, Tokenizer\n", + "Note: 你需要设置CKPT_FPATH的内容, 指向`.bin`文件, 或`.pth`文件" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 22:28:03,375 - modelscope - INFO - Model revision not specified, use default: master in development mode\n", + "2023-07-02 22:28:03,375 - modelscope - INFO - Development mode use revision: master\n", + "2023-07-02 22:28:03,695 - modelscope - INFO - model_config: BaiChuanConfig {\n", + " \"architectures\": [\n", + " \"BaiChuanForCausalLM\"\n", + " ],\n", + " \"auto_map\": {\n", + " \"AutoConfig\": \"configuration_baichuan.BaiChuanConfig\",\n", + " \"AutoModelForCausalLM\": \"modeling_baichuan.BaiChuanForCausalLM\"\n", + " },\n", + " \"bos_token_id\": 1,\n", + " \"eos_token_id\": 2,\n", + " \"hidden_act\": \"silu\",\n", + " \"hidden_size\": 4096,\n", + " \"initializer_range\": 0.02,\n", + " \"intermediate_size\": 11008,\n", + " \"max_position_embeddings\": 4096,\n", + " \"model_type\": \"baichuan\",\n", + " \"num_attention_heads\": 32,\n", + " \"num_hidden_layers\": 32,\n", + " \"pad_token_id\": 0,\n", + " \"rms_norm_eps\": 1e-06,\n", + " \"tie_word_embeddings\": false,\n", + " \"torch_dtype\": \"float16\",\n", + " \"transformers_version\": \"4.30.2\",\n", + " \"use_cache\": true,\n", + " \"vocab_size\": 64000\n", + "}\n", + "\n", + "The model weights are not tied. 
Please use the `tie_weights` method before using the `infer_auto_device` function.\n" + ] + }, + { + "data": { + "text/plain": [ + "BaiChuanForCausalLM(\n", + " (model): Model(\n", + " (embed_tokens): Embedding(64000, 4096, padding_idx=0)\n", + " (layers): ModuleList(\n", + " (0-31): 32 x DecoderLayer(\n", + " (self_attn): Attention(\n", + " (W_pack): Linear(in_features=4096, out_features=12288, bias=False)\n", + " (o_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (rotary_emb): RotaryEmbedding()\n", + " )\n", + " (mlp): MLP(\n", + " (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (down_proj): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (up_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (act_fn): SiLUActivation()\n", + " )\n", + " (input_layernorm): RMSNorm()\n", + " (post_attention_layernorm): RMSNorm()\n", + " )\n", + " )\n", + " (norm): RMSNorm()\n", + " )\n", + " (lm_head): Linear(in_features=4096, out_features=64000, bias=False)\n", + ")" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "CKPT_FAPTH = \"/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/output_best/pytorch_model.bin\"\n", + "LORA_TARGET_MODULES = [\"W_pack\"]\n", + "\n", + "model, tokenizer = get_baichuan_model_tokenizer()\n", + "if tokenizer.pad_token_id is None:\n", + " tokenizer.pad_token_id = tokenizer.eos_token_id\n", + "model.bfloat16() # Consistent with training" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Lora" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 22:28:14,108 - modelscope - INFO - lora_config: LoRAConfig(rank=8, replace_modules=['W_pack'], lora_alpha=32, lora_dropout=0, merge_weights=True, use_merged_linear=False, enable_lora=None, fan_in_fan_out=False, bias='none', only_lora_trainable=True, pretrained_weights='/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/output_best/pytorch_model.bin')\n" + ] + }, + { + "data": { + "text/plain": [ + "BaiChuanForCausalLM(\n", + " (model): Model(\n", + " (embed_tokens): Embedding(64000, 4096, padding_idx=0)\n", + " (layers): ModuleList(\n", + " (0-31): 32 x DecoderLayer(\n", + " (self_attn): Attention(\n", + " (W_pack): Linear(in_features=4096, out_features=12288, bias=False)\n", + " (o_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (rotary_emb): RotaryEmbedding()\n", + " )\n", + " (mlp): MLP(\n", + " (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (down_proj): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (up_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (act_fn): SiLUActivation()\n", + " )\n", + " (input_layernorm): RMSNorm()\n", + " (post_attention_layernorm): RMSNorm()\n", + " )\n", + " )\n", + " (norm): RMSNorm()\n", + " )\n", + " (lm_head): Linear(in_features=4096, out_features=64000, bias=False)\n", + ")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "LORA_RANK = 8\n", + "LORA_ALPHA = 32\n", + "LORA_DROPOUT_P = 0 # Arbitrary value\n", + "lora_config = LoRAConfig(\n", + " replace_modules=LORA_TARGET_MODULES,\n", + " rank=LORA_RANK,\n", + " lora_alpha=LORA_ALPHA,\n", + " lora_dropout=LORA_DROPOUT_P,\n", + " 
pretrained_weights=CKPT_FAPTH)\n", + "logger.info(f\"lora_config: {lora_config}\")\n", + "Swift.prepare_model(model, lora_config)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 22:28:28,832 - modelscope - INFO - No subset_name specified, defaulting to the default\n", + "2023-07-02 22:28:29,317 - modelscope - WARNING - Reusing dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 22:28:29,318 - modelscope - INFO - Generating dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 22:28:29,318 - modelscope - INFO - Reusing cached meta-data file: /home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files/941b733ec0354c2172a3386d8788bb37\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "682dc9eedfce4092a25fcadc977c794a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8e53d79d8e4845618231f3afb5bc096f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Extracting data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 285/285 [00:00<00:00, 1566679.74it/s]\n" + ] + } + ], + "source": [ + "test_dataset = make_dataset(\"validation\", lambda system, user, assistant:\n", + " {\"system\": system, \"user\": user, \"assistant\": assistant})" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 推理" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[TEST] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_speech-generation\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_speech-generation\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"url\": \"http://90.49.118.175:2603/\", \"paths\": [{\"name\": \"modelscope_speech-generation\", \"model_id\": \"/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"method\": \"post\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"parameters\": [{\"name\": \"text\", \"description\": \"要转成语音的文本\", \"required\": \"True\"}, {\"name\": \"gender\", \"description\": \"用户身份\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. 
{\"plugin_name\": \"modelscope_speech-generation\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_speech-generation\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"url\": \"http://132.94.116.115:5983/\", \"paths\": [{\"name\": \"modelscope_speech-generation\", \"model_id\": \"/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"method\": \"post\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"parameters\": [{\"name\": \"text\", \"description\": \"要转成语音的文本\", \"required\": \"True\"}, {\"name\": \"gender\", \"description\": \"用户身份\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. {\"plugin_name\": \"modelscope_speech-generation\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_speech-generation\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"url\": \"http://94.43.176.75:1062/\", \"paths\": [{\"name\": \"modelscope_speech-generation\", \"model_id\": \"/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"method\": \"post\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"parameters\": [{\"name\": \"text\", \"description\": \"要转成语音的文本\", \"required\": \"True\"}, {\"name\": \"gender\", \"description\": \"用户身份\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "生成一首诗歌,主题为“秋天的美景”,读出来这段话 \n", + "\n", + "### 助手\n", + "秋天,是一个美丽的季节,是一个收获的季节,是一个充满诗意的季节。秋天的天空,湛蓝湛蓝的,像一块蓝宝石;秋天的田野,金黄色的稻谷,像一片金色的海洋;秋天的果园,硕果累累,像一幅美丽的画卷。秋天的山林,层林尽染,像一幅色彩斑斓的油画;秋天的河流,清澈见底,像一条银色的丝带。秋天的天空,湛蓝湛蓝的,像一块蓝宝石;秋天的田野,金黄色的稻谷,像一片金色的海洋;秋天的果园,硕果累累,像一幅美丽的画卷。秋天的山林,层林尽染,像一幅色彩斑斓的油画;秋天的河流,清澈见底,像一条银色的丝带。\n", + "\n", + "[LABELS]秋树红叶舞飘零,\n", + "山间小溪水潺潺。\n", + "微风拂面感清凉,\n", + "散步赏景心旷神怡。\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_speech-generation\", \"url\": \"http://90.49.118.175:2603/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"parameters\": {\"text\": \"秋树红叶舞飘零,\n", + "山间小溪水潺潺。\n", + "微风拂面感清凉,\n", + "散步赏景心旷神怡。\", \"gender\": \"woman\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"result\": \"\"}\n", + "```<|endofexec|>\n", + "\n", + "-----------------------------------------------------------------------------------\n", + "[TEST] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://159.1.4.174:3210/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. 
{\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://172.163.158.154:5325/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. {\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://133.94.12.37:3160/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "现在我给你另一条地址,请识别出里面的元素。输入地址:广东省深圳市南山区科技园北区 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-address\", \"url\": \"http://133.94.12.37:3160/damo/mgeo_geographic_elements_tagging_chinese_base\", \"parameters\": {\"text\": \"广东省深圳市南山区科技园北区\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"community\": \"科技园北区\"}\n", + "```<|endofexec|>\n", + "地址识别json表示:{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"community\": \"科技园北区\"}。我使用的模型是ModelScope的'damo/mgeo_geographic_elements_tagging_chinese_base'模型。这是基于达摩院联合高德发布的多任务多模态地址预训练底座MGeo模型微调得到的。\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-address\", \"url\": \"http://159.1.4.174:3210/damo/mgeo_geographic_elements_tagging_chinese_base\", \"parameters\": {\"text\": \"广东省深圳市南山区科技园北区\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"\", \"community\": \"科技园北区\", \"poi\": \"\"}\n", + "```<|endofexec|>\n", + "地址识别json表示:{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"\", \"community\": \"科技园北区\", \"poi\": \"\"}。我使用的模型是ModelScope的'damo/mgeo_geographic_elements_tagging_chinese_base'模型。这是基于达摩院联合高德发布的多任务多模态地址预训练底座MGeo模型微调得到的。\n", + "-----------------------------------------------------------------------------------\n", + "[TEST] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。目前支持的插件信息如下,请自行判断是否需要调用插件来解决当前用户问题。若需要调用插件,则需要将插件调用请求按照json格式给出,必须包含api_name、url、parameters字段,并在其前后使用<|startofthink|>和<|endofthink|>作为标志。然后你需要根据插件API调用结果生成合理的答复;若无需调用插件,则直接给出对应回复即可:\n", + "\n", + "1. {\"name\": \"modelscope_text-translation-zh2en\", \"description\": \"将输入的中文文本翻译成英文\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_text-translation-zh2en\", \"model_id\": \"/damo/nlp_csanmt_translation_zh2en\", \"method\": \"post\", \"description\": \"将输入的中文文本翻译成英文\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的中文文本\", \"required\": \"True\"}]}]}\n", + "\n", + "2. 
{\"name\": \"modelscope_speech-generation\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_speech-generation\", \"model_id\": \"/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"method\": \"post\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"parameters\": [{\"name\": \"text\", \"description\": \"要转成语音的文本\", \"required\": \"True\"}, {\"name\": \"gender\", \"description\": \"用户身份\", \"required\": \"True\"}]}]}\n", + "\n", + "3. {\"name\": \"modelscope_image-generation\", \"description\": \"针对文本输入,生成对应的图片\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_image-generation\", \"model_id\": \"/damo/image_generation\", \"method\": \"post\", \"description\": \"针对文本输入,生成对应的图片\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本信息\", \"required\": \"True\"}]}]} \n", + "\n", + "### 用户\n", + "歌手:古巨基\n", + "歌曲名:爱情马戏班\n", + "经典歌词:情是何等诡秘能令人使出看家把戏;恋爱就像走纲线般惊险;为你献技 像马戏班\n", + "请结合以上信息,编写一个智能音响的播放导语,需要有文采,字数30字以内,凸显一下即将播放该歌曲 \n", + "\n", + "### 助手\n", + "古巨基的《爱情马戏班》,是一首经典的情歌,歌词中充满了对爱情的向往和对爱情的渴望,让人不禁沉醉其中。这首歌的旋律优美动听,歌词朗朗上口,让人听了就忍不住跟着哼唱。\n", + "\n", + "[LABELS]亲爱的主人,今天我为您带来的是古巨基的经典之作——《爱情马戏班》。这首歌曲描绘了情与爱的神秘和惊险,让人们为之倾倒。让我们一起享受这场爱情的马戏表演吧!\n", + "-----------------------------------------------------------------------------------\n", + "[TEST] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://114.42.178.183:8005/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://93.82.87.89:6631/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://4.105.93.165:8143/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"动物\": null, \"食物\": null, \"颜色\": null}\n", + "这只棕色的狗狗很喜欢吃狗粮。 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://4.105.93.165:8143/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"动物\": [\"棕色的狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"动物\": [\"棕色的狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://114.42.178.183:8005/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"动物\": [\"狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"动物\": [\"狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "-----------------------------------------------------------------------------------\n", + "[TEST] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://28.179.171.5:6428/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://100.111.18.38:6408/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://144.67.18.142:6381/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"人物\": null, \"地理位置\": null, \"组织机构\": null}\n", + "谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://144.67.18.142:6381/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [\"谷歌公司\"], \"地理位置\": [\"美国\"], \"组织机构\": [\"科技公司\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [\"谷歌公司\"], \"地理位置\": [\"美国\"], \"组织机构\": [\"科技公司\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://100.111.18.38:6408/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"美国\", \"加利福尼亚州山景市\"], \"组织机构\": [\"谷歌公司\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"美国\", \"加利福尼亚州山景市\"], \"组织机构\": [\"谷歌公司\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "-----------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n", + "for d in test_dataset[:5]:\n", + " system = d[\"system\"]\n", + " user = d[\"user\"]\n", + " assistant = d[\"assistant\"]\n", + " input_ids = tokenize_function(system, user, None, tokenizer)[\"input_ids\"]\n", + " print(f\"[TEST]{tokenizer.decode(input_ids)}\", end=\"\")\n", + " input_ids = torch.tensor(input_ids)[None].cuda()\n", + " attention_mask = torch.ones_like(input_ids)\n", + " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", + " attention_mask=attention_mask,\n", + " 
streamer=streamer, pad_token_id=tokenizer.pad_token_id)\n", + " print()\n", + " print(f\"[LABELS]{assistant}\")\n", + " print(\"-----------------------------------------------------------------------------------\")\n", + " # input(\"next[ENTER]\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/pytorch/llm_agent/baichuan_sft.ipynb b/examples/pytorch/llm_agent/baichuan_sft.ipynb new file mode 100644 index 00000000..5e656a24 --- /dev/null +++ b/examples/pytorch/llm_agent/baichuan_sft.ipynb @@ -0,0 +1,1824 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Baichuan + Lora + Agent\n", + "baichuan-7B是由百川智能开发的一个开源的大规模预训练模型。基于Transformer结构,在大约1.2万亿tokens上训练的70亿参数模型,支持中英双语,上下文窗口长度为4096。在标准的中文和英文权威benchmark(C-EVAL/MMLU)上均取得同尺寸最好的效果。" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Ref: https://modelscope.cn/models/baichuan-inc/baichuan-7B/summary\n", + "2. 以下脚本可以在2*A10环境下正常运行, 大概占用40G显存\n", + "3. python>=3.8" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置实验环境" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install modelscope -U\n", + "# !pip install numpy pandas matplotlib scikit-learn\n", + "# !pip install transformers datasets\n", + "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install tqdm\n", + "# !pip install tensorboard\n", + "# !pip install torchmetrics\n", + "#\n", + "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2023-07-02 17:24:09,391] [INFO] [real_accelerator.py:110:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/hackathon/miniconda3/envs/hackathon/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "2023-07-02 17:24:09,870 - modelscope - INFO - PyTorch version 2.0.1 Found.\n", + "2023-07-02 17:24:09,871 - modelscope - INFO - Loading ast index from /home/hackathon/.cache/modelscope/ast_indexer\n", + "2023-07-02 17:24:09,895 - modelscope - INFO - Loading done! 
Current index file version is 1.6.2, with md5 ddf811ee982377c1357284a2bfda3dec and a total number of 861 components indexed\n", + "2023-07-02 17:24:10,570 - modelscope - INFO - [0, 1]\n", + "2023-07-02 17:24:10,719 - modelscope - INFO - Using device: cuda:0,1\n", + "2023-07-02 17:24:10,720 - modelscope - INFO - Global seed set to 42\n" + ] + } + ], + "source": [ + "from _common import *\n", + "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "logger.info(device_ids)\n", + "select_device(device_ids)\n", + "_ = seed_everything(42)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Model, Tokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 17:24:11,036 - modelscope - INFO - Model revision not specified, use default: master in development mode\n", + "2023-07-02 17:24:11,037 - modelscope - INFO - Development mode use revision: master\n", + "2023-07-02 17:24:11,364 - modelscope - INFO - model_config: BaiChuanConfig {\n", + " \"architectures\": [\n", + " \"BaiChuanForCausalLM\"\n", + " ],\n", + " \"auto_map\": {\n", + " \"AutoConfig\": \"configuration_baichuan.BaiChuanConfig\",\n", + " \"AutoModelForCausalLM\": \"modeling_baichuan.BaiChuanForCausalLM\"\n", + " },\n", + " \"bos_token_id\": 1,\n", + " \"eos_token_id\": 2,\n", + " \"hidden_act\": \"silu\",\n", + " \"hidden_size\": 4096,\n", + " \"initializer_range\": 0.02,\n", + " \"intermediate_size\": 11008,\n", + " \"max_position_embeddings\": 4096,\n", + " \"model_type\": \"baichuan\",\n", + " \"num_attention_heads\": 32,\n", + " \"num_hidden_layers\": 32,\n", + " \"pad_token_id\": 0,\n", + " \"rms_norm_eps\": 1e-06,\n", + " \"tie_word_embeddings\": false,\n", + " \"torch_dtype\": \"float16\",\n", + " \"transformers_version\": \"4.30.2\",\n", + " \"use_cache\": true,\n", + " \"vocab_size\": 64000\n", + "}\n", + "\n", + "The model weights are not tied. 
Please use the `tie_weights` method before using the `infer_auto_device` function.\n" + ] + } + ], + "source": [ + "model_id = \"baichuan-inc/baichuan-7B\"\n", + "WORK_DIR = \"runs/baichuan\"\n", + "LORA_TARGET_MODULES = [\"W_pack\"]\n", + "#\n", + "model_dir = get_model_dir(model_id, None)\n", + "model, tokenizer = get_baichuan_model_tokenizer(model_dir)\n", + "#\n", + "GRADIENT_CHECKPOINTING = True\n", + "if GRADIENT_CHECKPOINTING:\n", + " model.gradient_checkpointing_enable()\n", + " model.enable_input_require_grads()\n", + "if tokenizer.pad_token_id is None:\n", + " tokenizer.pad_token_id = tokenizer.eos_token_id\n", + "#\n", + "logger.info(f\"bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, \"\n", + " f\"pad_token_id: {tokenizer.pad_token_id}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 准备Lora" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 17:24:21,741 - modelscope - INFO - lora_config: LoRAConfig(rank=8, replace_modules=['W_pack'], lora_alpha=32, lora_dropout=0.1, merge_weights=True, use_merged_linear=False, enable_lora=None, fan_in_fan_out=False, bias='none', only_lora_trainable=True, pretrained_weights=None)\n", + "2023-07-02 17:24:36,360 - modelscope - INFO - model.embed_tokens.weight: requires_grad=False\n", + "2023-07-02 17:24:36,360 - modelscope - INFO - model.layers.0.self_attn.W_pack.weight: requires_grad=False\n", + "2023-07-02 17:24:36,361 - modelscope - INFO - model.layers.0.self_attn.W_pack.lora_A: requires_grad=True\n", + "2023-07-02 17:24:36,361 - modelscope - INFO - model.layers.0.self_attn.W_pack.lora_B: requires_grad=True\n", + "2023-07-02 17:24:36,361 - modelscope - INFO - model.layers.0.self_attn.o_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,362 - modelscope - INFO - model.layers.0.mlp.gate_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,362 - modelscope - INFO - model.layers.0.mlp.down_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,363 - modelscope - INFO - model.layers.0.mlp.up_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,363 - modelscope - INFO - model.layers.0.input_layernorm.weight: requires_grad=False\n", + "2023-07-02 17:24:36,363 - modelscope - INFO - model.layers.0.post_attention_layernorm.weight: requires_grad=False\n", + "2023-07-02 17:24:36,363 - modelscope - INFO - model.layers.1.self_attn.W_pack.weight: requires_grad=False\n", + "2023-07-02 17:24:36,364 - modelscope - INFO - model.layers.1.self_attn.W_pack.lora_A: requires_grad=True\n", + "2023-07-02 17:24:36,364 - modelscope - INFO - model.layers.1.self_attn.W_pack.lora_B: requires_grad=True\n", + "2023-07-02 17:24:36,364 - modelscope - INFO - model.layers.1.self_attn.o_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,364 - modelscope - INFO - model.layers.1.mlp.gate_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,365 - modelscope - INFO - model.layers.1.mlp.down_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,365 - modelscope - INFO - model.layers.1.mlp.up_proj.weight: requires_grad=False\n", + "2023-07-02 17:24:36,365 - modelscope - INFO - model.layers.1.input_layernorm.weight: requires_grad=False\n", + "2023-07-02 17:24:36,365 - modelscope - INFO - model.layers.1.post_attention_layernorm.weight: requires_grad=False\n", + "2023-07-02 17:24:36,365 - modelscope - INFO - 
model.layers.2.self_attn.W_pack.weight: requires_grad=False\n", + "2023-07-02 17:24:36,366 - modelscope - INFO - ...\n", + "2023-07-02 17:24:36,368 - modelscope - INFO - BaiChuanForCausalLM: 7004.7539M Params (4.1943M Trainable), 33.5565M Buffers.\n", + "2023-07-02 17:24:36,370 - modelscope - INFO - device: cuda:0, dtype: torch.float16\n" + ] + }, + { + "data": { + "text/plain": [ + "BaiChuanForCausalLM(\n", + " (model): Model(\n", + " (embed_tokens): Embedding(64000, 4096, padding_idx=0)\n", + " (layers): ModuleList(\n", + " (0-31): 32 x DecoderLayer(\n", + " (self_attn): Attention(\n", + " (W_pack): Linear(\n", + " in_features=4096, out_features=12288, bias=False\n", + " (lora_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (o_proj): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (rotary_emb): RotaryEmbedding()\n", + " )\n", + " (mlp): MLP(\n", + " (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (down_proj): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (up_proj): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (act_fn): SiLUActivation()\n", + " )\n", + " (input_layernorm): RMSNorm()\n", + " (post_attention_layernorm): RMSNorm()\n", + " )\n", + " )\n", + " (norm): RMSNorm()\n", + " )\n", + " (lm_head): Linear(in_features=4096, out_features=64000, bias=False)\n", + ")" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "LORA_RANK = 8\n", + "LORA_ALPHA = 32\n", + "LORA_DROPOUT_P = 0.1\n", + "lora_config = LoRAConfig(\n", + " replace_modules=LORA_TARGET_MODULES,\n", + " rank=LORA_RANK,\n", + " lora_alpha=LORA_ALPHA,\n", + " lora_dropout=LORA_DROPOUT_P)\n", + "logger.info(f\"lora_config: {lora_config}\")\n", + "Swift.prepare_model(model, lora_config)\n", + "#\n", + "show_freeze_layers(model)\n", + "print_model_info(model)\n", + "_p = list(model.parameters())[100]\n", + "logger.info(f\"device: {_p.device}, dtype: {_p.dtype}\")\n", + "model.bfloat16()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 5036/5036 [00:12<00:00, 398.82it/s]\n", + "100%|██████████| 285/285 [00:00<00:00, 383.15it/s]\n", + "2023-07-02 17:24:49,863 - modelscope - INFO - Dataset Token Length: 958.649707±371.357483, min=44.000000, max=2045.000000, size=4953\n", + "2023-07-02 17:24:49,864 - modelscope - INFO - Dataset Token Length: 993.447653±337.821458, min=75.000000, max=1946.000000, size=277\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[INPUT_IDS] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://109.199.101.10:1485/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://9.32.64.200:5873/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://54.149.78.185:3979/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"人物\": null, \"地理位置\": null, \"组织机构\": null}\n", + "近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。 \n", + "\n", + "### 助手\n", + " <|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://9.32.64.200:5873/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "\n", + "[LABLES] <|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://9.32.64.200:5873/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n" + ] + } + ], + "source": [ + "tokenize_function = partial(tokenize_function, 
tokenizer=tokenizer)\n", + "train_dataset = make_dataset(\"train\", tokenize_function)\n", + "val_dataset = make_dataset(\"validation\", tokenize_function)\n", + "# Data analysis\n", + "stat_dataset(train_dataset)\n", + "stat_dataset(val_dataset)\n", + "data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer)\n", + "print_examples(train_dataset[0], tokenizer)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置Config" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 17:24:49,892 - modelscope - INFO - work_dir: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449\n" + ] + } + ], + "source": [ + "cfg_file = os.path.join(model_dir, \"configuration.json\")\n", + "#\n", + "BATCH_SIZE = 1\n", + "MAX_EPOCHS = 1\n", + "T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True)\n", + "WORK_DIR = get_work_dir(WORK_DIR)\n", + "EVAL_INTERVAL = 200\n", + "CONFIG = Config({\n", + " \"train\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": BATCH_SIZE,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": True,\n", + " \"drop_last\": True,\n", + " \"pin_memory\": True\n", + " },\n", + " \"max_epochs\": MAX_EPOCHS,\n", + " \"work_dir\": WORK_DIR,\n", + " \"optimizer\": {\n", + " \"type\": \"AdamW\",\n", + " \"lr\": 1e-4,\n", + " \"weight_decay\": 0.01,\n", + " \"options\": {\n", + " \"cumulative_iters\": 16, \"grad_clip\": {\n", + " \"norm_type\": 2,\n", + " \"max_norm\": 2.0\n", + " }\n", + " }\n", + " },\n", + " \"lr_scheduler\": {\n", + " \"type\": \"CosineAnnealingLR\",\n", + " \"T_max\": T_max,\n", + " \"eta_min\": 1e-5,\n", + " \"options\": {\n", + " \"by_epoch\": False,\n", + " \"warmup\": {\n", + " 'type': 'LinearWarmup',\n", + " 'warmup_ratio': 0.1,\n", + " \"warmup_iters\": 200\n", + " }\n", + " }\n", + " },\n", + " \"hooks\": [\n", + " {\"type\": \"CheckpointHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL, \"max_checkpoint_num\": 1},\n", + " {\"type\": \"EvaluationHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL},\n", + " {\"type\": \"BestCkptSaverHook\",\n", + " \"metric_key\": \"acc\",\n", + " \"save_best\": True, \"rule\": \"max\", \"max_checkpoint_num\": 1},\n", + " {\"type\": \"TextLoggerHook\",\n", + " \"by_epoch\": True, # Whether EpochBasedTrainer is used\n", + " \"interval\": 5},\n", + " {\"type\": \"TensorboardHook\", \"by_epoch\": False, \"interval\": 5}\n", + " ]\n", + " },\n", + " \"evaluation\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": BATCH_SIZE,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": False,\n", + " \"drop_last\": False,\n", + " \"pin_memory\": True\n", + " },\n", + " \"metrics\": [\n", + " {\"type\": \"my_metric\", \"vocab_size\": tokenizer.vocab_size}\n", + " ]\n", + " }\n", + "})" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 微调" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 17:24:49,903 - modelscope - INFO - ==========================Training Config Start==========================\n", + "2023-07-02 17:24:49,904 - modelscope - INFO - {\n", + " \"framework\": \"pytorch\",\n", + " \"task\": \"text-generation\",\n", + " \"model\": {\n", + " \"type\": \"Baichuan-7B\"\n", + " },\n", + " \"pipeline\": {\n", + " \"type\": 
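# --- Sketch under assumptions: get_T_max is defined in the accompanying _common.py and is not
# shown in this cell. Judging from the trainer config printed below (T_max: 4953, with 4953
# training samples, BATCH_SIZE=1, MAX_EPOCHS=1, drop_last=True), it returns the total number of
# training iterations, which CosineAnnealingLR needs in order to anneal the lr towards eta_min.
import math

def get_T_max_sketch(dataset_len: int, batch_size: int, max_epochs: int, drop_last: bool) -> int:
    # iterations per epoch depend on whether the final incomplete batch is dropped
    iters_per_epoch = dataset_len // batch_size if drop_last else math.ceil(dataset_len / batch_size)
    return iters_per_epoch * max_epochs

assert get_T_max_sketch(4953, 1, 1, True) == 4953   # consistent with the T_max in the printed config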
\"Baichuan-7B-text-generation-pipe\"\n", + " },\n", + " \"allow_remote\": true,\n", + " \"train\": {\n", + " \"hooks\": [\n", + " {\n", + " \"type\": \"TensorboardHook\",\n", + " \"by_epoch\": false,\n", + " \"interval\": 5\n", + " }\n", + " ],\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": 1,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": true,\n", + " \"drop_last\": true,\n", + " \"pin_memory\": true\n", + " },\n", + " \"max_epochs\": 1,\n", + " \"work_dir\": \"/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449\",\n", + " \"optimizer\": {\n", + " \"type\": \"AdamW\",\n", + " \"lr\": 0.0001,\n", + " \"weight_decay\": 0.01,\n", + " \"options\": {\n", + " \"cumulative_iters\": 16,\n", + " \"grad_clip\": {\n", + " \"norm_type\": 2,\n", + " \"max_norm\": 2.0\n", + " }\n", + " }\n", + " },\n", + " \"lr_scheduler\": {\n", + " \"type\": \"CosineAnnealingLR\",\n", + " \"T_max\": 4953,\n", + " \"eta_min\": 1e-05,\n", + " \"options\": {\n", + " \"by_epoch\": false,\n", + " \"warmup\": {\n", + " \"type\": \"LinearWarmup\",\n", + " \"warmup_ratio\": 0.1,\n", + " \"warmup_iters\": 200\n", + " }\n", + " }\n", + " },\n", + " \"checkpoint\": {\n", + " \"period\": {\n", + " \"by_epoch\": false,\n", + " \"interval\": 200,\n", + " \"max_checkpoint_num\": 1\n", + " },\n", + " \"best\": {\n", + " \"metric_key\": \"acc\",\n", + " \"save_best\": true,\n", + " \"rule\": \"max\",\n", + " \"max_checkpoint_num\": 1\n", + " }\n", + " },\n", + " \"logging\": {\n", + " \"by_epoch\": true,\n", + " \"interval\": 5\n", + " }\n", + " },\n", + " \"evaluation\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": 1,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": false,\n", + " \"drop_last\": false,\n", + " \"pin_memory\": true\n", + " },\n", + " \"metrics\": [\n", + " {\n", + " \"type\": \"my_metric\",\n", + " \"vocab_size\": 64000\n", + " }\n", + " ],\n", + " \"period\": {\n", + " \"by_epoch\": false,\n", + " \"interval\": 200\n", + " }\n", + " }\n", + "}\n", + "2023-07-02 17:24:49,904 - modelscope - INFO - ===========================Training Config End===========================\n", + "2023-07-02 17:24:49,905 - modelscope - WARNING - ('OPTIMIZER', 'default', 'AdamW') not found in ast index file\n", + "2023-07-02 17:24:49,906 - modelscope - WARNING - ('LR_SCHEDULER', 'default', 'CosineAnnealingLR') not found in ast index file\n", + "2023-07-02 17:24:49,907 - modelscope - INFO - Stage: before_run:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: before_train_epoch:\n", + " (LOW ) LrSchedulerHook \n", + " -------------------- \n", + "Stage: before_train_iter:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " -------------------- \n", + "Stage: after_train_iter:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " (NORMAL ) EvaluationHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: after_train_epoch:\n", + " (NORMAL ) EvaluationHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: after_val_epoch:\n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) 
TensorboardHook \n", + " -------------------- \n", + "Stage: after_run:\n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "2023-07-02 17:24:49,913 - modelscope - INFO - Checkpoints will be saved to /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449\n", + "2023-07-02 17:24:49,916 - modelscope - INFO - Checkpoints will be saved to /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449\n", + "2023-07-02 17:24:49,917 - modelscope - INFO - Text logs will be saved to /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449\n", + "2023-07-02 17:24:49,917 - modelscope - INFO - tensorboard files will be saved to /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/tensorboard_output\n", + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\n", + "2023-07-02 17:24:55,315 - modelscope - INFO - epoch [1][5/4953]\tlr: 1.000e-05, memory: 7084, loss: 5.2094\n", + "2023-07-02 17:24:59,926 - modelscope - INFO - epoch [1][10/4953]\tlr: 1.000e-05, memory: 7084, loss: 1.9516\n", + "2023-07-02 17:25:05,112 - modelscope - INFO - epoch [1][15/4953]\tlr: 1.000e-05, memory: 7504, loss: 1.8344\n", + "2023-07-02 17:25:13,131 - modelscope - INFO - epoch [1][20/4953]\tlr: 1.225e-05, memory: 8075, loss: 3.3937\n", + "2023-07-02 17:25:19,098 - modelscope - INFO - epoch [1][25/4953]\tlr: 1.450e-05, memory: 8102, loss: 1.8047\n", + "2023-07-02 17:25:25,763 - modelscope - INFO - epoch [1][30/4953]\tlr: 1.675e-05, memory: 8102, loss: 1.5594\n", + "2023-07-02 17:25:33,888 - modelscope - INFO - epoch [1][35/4953]\tlr: 1.900e-05, memory: 8293, loss: 1.5852\n", + "2023-07-02 17:25:39,548 - modelscope - INFO - epoch [1][40/4953]\tlr: 2.125e-05, memory: 8293, loss: 1.7828\n", + "2023-07-02 17:25:44,599 - modelscope - INFO - epoch [1][45/4953]\tlr: 2.350e-05, memory: 8293, loss: 5.5922\n", + "2023-07-02 17:25:49,692 - modelscope - INFO - epoch [1][50/4953]\tlr: 2.575e-05, memory: 8293, loss: 2.6641\n", + "2023-07-02 17:25:56,104 - modelscope - INFO - epoch [1][55/4953]\tlr: 2.800e-05, memory: 8742, loss: 2.2344\n", + "2023-07-02 17:26:04,765 - modelscope - INFO - epoch [1][60/4953]\tlr: 3.025e-05, memory: 8742, loss: 1.7320\n", + "2023-07-02 17:26:10,288 - modelscope - INFO - epoch [1][65/4953]\tlr: 3.250e-05, memory: 8742, loss: 5.0578\n", + "2023-07-02 17:26:14,998 - modelscope - INFO - epoch [1][70/4953]\tlr: 3.475e-05, memory: 8742, loss: 4.0109\n", + "2023-07-02 17:26:21,600 - modelscope - INFO - epoch [1][75/4953]\tlr: 3.700e-05, memory: 8742, loss: 1.7266\n", + "2023-07-02 17:26:26,920 - modelscope - INFO - epoch [1][80/4953]\tlr: 3.925e-05, memory: 8742, loss: 2.9578\n", + "2023-07-02 17:26:32,447 - modelscope - INFO - epoch [1][85/4953]\tlr: 4.150e-05, memory: 8742, loss: 5.8422\n", + "2023-07-02 17:26:38,768 - modelscope - INFO - epoch [1][90/4953]\tlr: 4.375e-05, memory: 8742, loss: 1.8719\n", + "2023-07-02 17:26:45,955 - modelscope - INFO - epoch [1][95/4953]\tlr: 4.600e-05, memory: 8742, loss: 1.4359\n", + "2023-07-02 17:26:50,324 - modelscope - INFO - epoch [1][100/4953]\tlr: 4.825e-05, memory: 8742, loss: 5.6125\n", + "2023-07-02 17:26:58,123 - modelscope - INFO - epoch [1][105/4953]\tlr: 5.050e-05, memory: 8742, loss: 2.9656\n", + "2023-07-02 17:27:04,523 - modelscope - INFO - epoch [1][110/4953]\tlr: 5.275e-05, memory: 8742, loss: 1.7484\n", + "2023-07-02 17:27:09,550 - modelscope - INFO - epoch [1][115/4953]\tlr: 5.500e-05, memory: 8742, loss: 
2.7133\n", + "2023-07-02 17:27:17,037 - modelscope - INFO - epoch [1][120/4953]\tlr: 5.725e-05, memory: 8742, loss: 1.9953\n", + "2023-07-02 17:27:22,364 - modelscope - INFO - epoch [1][125/4953]\tlr: 5.950e-05, memory: 8742, loss: 4.4578\n", + "2023-07-02 17:27:26,915 - modelscope - INFO - epoch [1][130/4953]\tlr: 6.175e-05, memory: 8742, loss: 4.4344\n", + "2023-07-02 17:27:34,586 - modelscope - INFO - epoch [1][135/4953]\tlr: 6.400e-05, memory: 8742, loss: 1.6328\n", + "2023-07-02 17:27:41,580 - modelscope - INFO - epoch [1][140/4953]\tlr: 6.625e-05, memory: 8742, loss: 3.9422\n", + "2023-07-02 17:27:47,073 - modelscope - INFO - epoch [1][145/4953]\tlr: 6.850e-05, memory: 8742, loss: 2.0562\n", + "2023-07-02 17:27:53,069 - modelscope - INFO - epoch [1][150/4953]\tlr: 7.075e-05, memory: 8742, loss: 1.8477\n", + "2023-07-02 17:27:58,364 - modelscope - INFO - epoch [1][155/4953]\tlr: 7.300e-05, memory: 8742, loss: 4.5445\n", + "2023-07-02 17:28:05,747 - modelscope - INFO - epoch [1][160/4953]\tlr: 7.525e-05, memory: 8742, loss: 4.0109\n", + "2023-07-02 17:28:12,108 - modelscope - INFO - epoch [1][165/4953]\tlr: 7.750e-05, memory: 8742, loss: 2.0578\n", + "2023-07-02 17:28:17,145 - modelscope - INFO - epoch [1][170/4953]\tlr: 7.975e-05, memory: 8742, loss: 1.9109\n", + "2023-07-02 17:28:23,027 - modelscope - INFO - epoch [1][175/4953]\tlr: 8.200e-05, memory: 8742, loss: 3.2410\n", + "2023-07-02 17:28:27,778 - modelscope - INFO - epoch [1][180/4953]\tlr: 8.425e-05, memory: 8742, loss: 2.9000\n", + "2023-07-02 17:28:34,508 - modelscope - INFO - epoch [1][185/4953]\tlr: 8.650e-05, memory: 8742, loss: 1.6062\n", + "2023-07-02 17:28:40,560 - modelscope - INFO - epoch [1][190/4953]\tlr: 8.875e-05, memory: 8742, loss: 1.5594\n", + "2023-07-02 17:28:46,479 - modelscope - INFO - epoch [1][195/4953]\tlr: 9.100e-05, memory: 8742, loss: 1.9875\n", + "2023-07-02 17:28:53,324 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 17:31:08,796 - modelscope - INFO - Saving checkpoint at 200 iter\n", + "2023-07-02 17:31:08,837 - modelscope - INFO - Saving checkpoint at 200 iter\n", + "2023-07-02 17:31:08,875 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8742, evaluation/acc: 0.7108, evaluation/loss: 2.4241, loss: 1.8062\n", + "2023-07-02 17:31:15,472 - modelscope - INFO - epoch [1][205/4953]\tlr: 9.550e-05, memory: 8742, loss: 1.9172\n", + "2023-07-02 17:31:21,195 - modelscope - INFO - epoch [1][210/4953]\tlr: 9.775e-05, memory: 8742, loss: 2.5586\n", + "2023-07-02 17:31:26,642 - modelscope - INFO - epoch [1][215/4953]\tlr: 1.000e-04, memory: 8742, loss: 2.1422\n", + "2023-07-02 17:31:32,941 - modelscope - INFO - epoch [1][220/4953]\tlr: 9.998e-05, memory: 8742, loss: 2.8609\n", + "2023-07-02 17:31:37,465 - modelscope - INFO - epoch [1][225/4953]\tlr: 9.996e-05, memory: 8742, loss: 1.9953\n", + "2023-07-02 17:31:42,190 - modelscope - INFO - epoch [1][230/4953]\tlr: 9.994e-05, memory: 8742, loss: 1.8422\n", + "2023-07-02 17:31:49,617 - modelscope - INFO - epoch [1][235/4953]\tlr: 9.992e-05, memory: 8742, loss: 1.8328\n", + "2023-07-02 17:31:54,582 - modelscope - INFO - epoch [1][240/4953]\tlr: 9.990e-05, memory: 8742, loss: 2.5031\n", + "2023-07-02 17:32:03,094 - modelscope - INFO - epoch [1][245/4953]\tlr: 9.988e-05, memory: 8742, loss: 3.4578\n", + "2023-07-02 17:32:09,110 - modelscope - INFO - epoch [1][250/4953]\tlr: 9.986e-05, memory: 8742, loss: 3.1359\n", + "2023-07-02 
17:32:14,901 - modelscope - INFO - epoch [1][255/4953]\tlr: 9.984e-05, memory: 8742, loss: 3.4672\n", + "2023-07-02 17:32:21,012 - modelscope - INFO - epoch [1][260/4953]\tlr: 9.982e-05, memory: 8742, loss: 1.3734\n", + "2023-07-02 17:32:26,921 - modelscope - INFO - epoch [1][265/4953]\tlr: 9.979e-05, memory: 8742, loss: 1.7055\n", + "2023-07-02 17:32:33,958 - modelscope - INFO - epoch [1][270/4953]\tlr: 9.977e-05, memory: 8933, loss: 4.9609\n", + "2023-07-02 17:32:39,555 - modelscope - INFO - epoch [1][275/4953]\tlr: 9.975e-05, memory: 8933, loss: 3.0906\n", + "2023-07-02 17:32:45,339 - modelscope - INFO - epoch [1][280/4953]\tlr: 9.972e-05, memory: 8933, loss: 3.2016\n", + "2023-07-02 17:32:51,159 - modelscope - INFO - epoch [1][285/4953]\tlr: 9.970e-05, memory: 8933, loss: 3.4461\n", + "2023-07-02 17:32:57,166 - modelscope - INFO - epoch [1][290/4953]\tlr: 9.967e-05, memory: 8933, loss: 1.9609\n", + "2023-07-02 17:33:06,217 - modelscope - INFO - epoch [1][295/4953]\tlr: 9.965e-05, memory: 8933, loss: 1.9680\n", + "2023-07-02 17:33:12,393 - modelscope - INFO - epoch [1][300/4953]\tlr: 9.962e-05, memory: 8933, loss: 1.5422\n", + "2023-07-02 17:33:17,688 - modelscope - INFO - epoch [1][305/4953]\tlr: 9.960e-05, memory: 8933, loss: 2.6953\n", + "2023-07-02 17:33:21,863 - modelscope - INFO - epoch [1][310/4953]\tlr: 9.957e-05, memory: 8933, loss: 3.0094\n", + "2023-07-02 17:33:27,411 - modelscope - INFO - epoch [1][315/4953]\tlr: 9.954e-05, memory: 8933, loss: 1.9156\n", + "2023-07-02 17:33:33,136 - modelscope - INFO - epoch [1][320/4953]\tlr: 9.952e-05, memory: 8933, loss: 1.9672\n", + "2023-07-02 17:33:38,217 - modelscope - INFO - epoch [1][325/4953]\tlr: 9.949e-05, memory: 8933, loss: 4.3375\n", + "2023-07-02 17:33:44,012 - modelscope - INFO - epoch [1][330/4953]\tlr: 9.946e-05, memory: 8933, loss: 1.8797\n", + "2023-07-02 17:33:49,670 - modelscope - INFO - epoch [1][335/4953]\tlr: 9.943e-05, memory: 8933, loss: 3.0969\n", + "2023-07-02 17:33:55,428 - modelscope - INFO - epoch [1][340/4953]\tlr: 9.940e-05, memory: 8933, loss: 3.2477\n", + "2023-07-02 17:34:02,117 - modelscope - INFO - epoch [1][345/4953]\tlr: 9.937e-05, memory: 8933, loss: 2.7969\n", + "2023-07-02 17:34:08,037 - modelscope - INFO - epoch [1][350/4953]\tlr: 9.934e-05, memory: 8933, loss: 2.3578\n", + "2023-07-02 17:34:13,172 - modelscope - INFO - epoch [1][355/4953]\tlr: 9.931e-05, memory: 8933, loss: 2.0656\n", + "2023-07-02 17:34:19,283 - modelscope - INFO - epoch [1][360/4953]\tlr: 9.928e-05, memory: 8933, loss: 1.8438\n", + "2023-07-02 17:34:25,323 - modelscope - INFO - epoch [1][365/4953]\tlr: 9.925e-05, memory: 8933, loss: 2.1828\n", + "2023-07-02 17:34:31,845 - modelscope - INFO - epoch [1][370/4953]\tlr: 9.922e-05, memory: 8933, loss: 2.0234\n", + "2023-07-02 17:34:40,587 - modelscope - INFO - epoch [1][375/4953]\tlr: 9.919e-05, memory: 8933, loss: 2.3086\n", + "2023-07-02 17:34:45,650 - modelscope - INFO - epoch [1][380/4953]\tlr: 9.915e-05, memory: 8933, loss: 3.6734\n", + "2023-07-02 17:34:51,009 - modelscope - INFO - epoch [1][385/4953]\tlr: 9.912e-05, memory: 8933, loss: 1.3594\n", + "2023-07-02 17:34:57,229 - modelscope - INFO - epoch [1][390/4953]\tlr: 9.909e-05, memory: 8933, loss: 2.3117\n", + "2023-07-02 17:35:03,231 - modelscope - INFO - epoch [1][395/4953]\tlr: 9.905e-05, memory: 8933, loss: 1.4961\n", + "2023-07-02 17:35:08,373 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.05it/s]\n", + 
"2023-07-02 17:37:23,763 - modelscope - INFO - Saving checkpoint at 400 iter\n", + "2023-07-02 17:37:23,803 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_200\n", + "2023-07-02 17:37:23,807 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8933, evaluation/acc: 0.7079, evaluation/loss: 2.1381, loss: 1.9438\n", + "2023-07-02 17:37:28,880 - modelscope - INFO - epoch [1][405/4953]\tlr: 9.898e-05, memory: 8933, loss: 3.1016\n", + "2023-07-02 17:37:35,463 - modelscope - INFO - epoch [1][410/4953]\tlr: 9.895e-05, memory: 8933, loss: 2.5531\n", + "2023-07-02 17:37:41,349 - modelscope - INFO - epoch [1][415/4953]\tlr: 9.891e-05, memory: 8933, loss: 2.2984\n", + "2023-07-02 17:37:47,522 - modelscope - INFO - epoch [1][420/4953]\tlr: 9.888e-05, memory: 8933, loss: 1.5930\n", + "2023-07-02 17:37:54,150 - modelscope - INFO - epoch [1][425/4953]\tlr: 9.884e-05, memory: 8933, loss: 2.2938\n", + "2023-07-02 17:37:59,915 - modelscope - INFO - epoch [1][430/4953]\tlr: 9.880e-05, memory: 8933, loss: 2.5562\n", + "2023-07-02 17:38:07,433 - modelscope - INFO - epoch [1][435/4953]\tlr: 9.877e-05, memory: 8933, loss: 1.5555\n", + "2023-07-02 17:38:14,761 - modelscope - INFO - epoch [1][440/4953]\tlr: 9.873e-05, memory: 8933, loss: 2.9109\n", + "2023-07-02 17:38:19,100 - modelscope - INFO - epoch [1][445/4953]\tlr: 9.869e-05, memory: 8933, loss: 1.6234\n", + "2023-07-02 17:38:24,534 - modelscope - INFO - epoch [1][450/4953]\tlr: 9.865e-05, memory: 8933, loss: 2.2734\n", + "2023-07-02 17:38:31,059 - modelscope - INFO - epoch [1][455/4953]\tlr: 9.861e-05, memory: 8933, loss: 1.3438\n", + "2023-07-02 17:38:37,366 - modelscope - INFO - epoch [1][460/4953]\tlr: 9.857e-05, memory: 8933, loss: 1.8469\n", + "2023-07-02 17:38:43,640 - modelscope - INFO - epoch [1][465/4953]\tlr: 9.853e-05, memory: 8933, loss: 1.7102\n", + "2023-07-02 17:38:48,102 - modelscope - INFO - epoch [1][470/4953]\tlr: 9.849e-05, memory: 8933, loss: 2.1500\n", + "2023-07-02 17:38:52,751 - modelscope - INFO - epoch [1][475/4953]\tlr: 9.845e-05, memory: 8933, loss: 2.4086\n", + "2023-07-02 17:38:59,938 - modelscope - INFO - epoch [1][480/4953]\tlr: 9.841e-05, memory: 8933, loss: 1.1828\n", + "2023-07-02 17:39:06,061 - modelscope - INFO - epoch [1][485/4953]\tlr: 9.837e-05, memory: 8933, loss: 1.0625\n", + "2023-07-02 17:39:13,230 - modelscope - INFO - epoch [1][490/4953]\tlr: 9.832e-05, memory: 8933, loss: 1.5750\n", + "2023-07-02 17:39:19,107 - modelscope - INFO - epoch [1][495/4953]\tlr: 9.828e-05, memory: 8933, loss: 1.9844\n", + "2023-07-02 17:39:27,177 - modelscope - INFO - epoch [1][500/4953]\tlr: 9.824e-05, memory: 8933, loss: 1.7211\n", + "2023-07-02 17:39:31,312 - modelscope - INFO - epoch [1][505/4953]\tlr: 9.819e-05, memory: 8933, loss: 2.9953\n", + "2023-07-02 17:39:37,871 - modelscope - INFO - epoch [1][510/4953]\tlr: 9.815e-05, memory: 8933, loss: 1.7234\n", + "2023-07-02 17:39:42,983 - modelscope - INFO - epoch [1][515/4953]\tlr: 9.811e-05, memory: 8933, loss: 3.3328\n", + "2023-07-02 17:39:50,299 - modelscope - INFO - epoch [1][520/4953]\tlr: 9.806e-05, memory: 8933, loss: 1.1523\n", + "2023-07-02 17:39:57,449 - modelscope - INFO - epoch [1][525/4953]\tlr: 9.802e-05, memory: 8933, loss: 2.2969\n", + "2023-07-02 17:40:03,936 - modelscope - INFO - epoch [1][530/4953]\tlr: 9.797e-05, memory: 8933, loss: 2.0359\n", + "2023-07-02 17:40:10,017 - modelscope - INFO - epoch [1][535/4953]\tlr: 9.792e-05, memory: 8933, loss: 2.2484\n", + "2023-07-02 17:40:15,110 - modelscope - 
INFO - epoch [1][540/4953]\tlr: 9.788e-05, memory: 8933, loss: 2.5000\n", + "2023-07-02 17:40:22,837 - modelscope - INFO - epoch [1][545/4953]\tlr: 9.783e-05, memory: 8933, loss: 1.6344\n", + "2023-07-02 17:40:27,326 - modelscope - INFO - epoch [1][550/4953]\tlr: 9.778e-05, memory: 8933, loss: 1.9516\n", + "2023-07-02 17:40:32,836 - modelscope - INFO - epoch [1][555/4953]\tlr: 9.774e-05, memory: 8933, loss: 2.7078\n", + "2023-07-02 17:40:38,900 - modelscope - INFO - epoch [1][560/4953]\tlr: 9.769e-05, memory: 8933, loss: 2.9023\n", + "2023-07-02 17:40:44,092 - modelscope - INFO - epoch [1][565/4953]\tlr: 9.764e-05, memory: 8933, loss: 3.7687\n", + "2023-07-02 17:40:51,182 - modelscope - INFO - epoch [1][570/4953]\tlr: 9.759e-05, memory: 8933, loss: 2.8531\n", + "2023-07-02 17:40:56,580 - modelscope - INFO - epoch [1][575/4953]\tlr: 9.754e-05, memory: 8933, loss: 1.8938\n", + "2023-07-02 17:41:04,432 - modelscope - INFO - epoch [1][580/4953]\tlr: 9.749e-05, memory: 8933, loss: 1.4187\n", + "2023-07-02 17:41:11,299 - modelscope - INFO - epoch [1][585/4953]\tlr: 9.744e-05, memory: 8933, loss: 2.2406\n", + "2023-07-02 17:41:17,405 - modelscope - INFO - epoch [1][590/4953]\tlr: 9.739e-05, memory: 8933, loss: 3.2250\n", + "2023-07-02 17:41:23,093 - modelscope - INFO - epoch [1][595/4953]\tlr: 9.734e-05, memory: 8933, loss: 1.5625\n", + "2023-07-02 17:41:29,552 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.05it/s]\n", + "2023-07-02 17:43:44,919 - modelscope - INFO - Saving checkpoint at 600 iter\n", + "2023-07-02 17:43:44,959 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter200_acc0.7107985615730286\n", + "2023-07-02 17:43:44,963 - modelscope - INFO - Saving checkpoint at 600 iter\n", + "2023-07-02 17:43:45,002 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_400\n", + "2023-07-02 17:43:45,006 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8933, evaluation/acc: 0.7199, evaluation/loss: 1.9766, loss: 1.2516\n", + "2023-07-02 17:43:50,488 - modelscope - INFO - epoch [1][605/4953]\tlr: 9.723e-05, memory: 8933, loss: 1.8469\n", + "2023-07-02 17:43:56,664 - modelscope - INFO - epoch [1][610/4953]\tlr: 9.718e-05, memory: 8933, loss: 1.5445\n", + "2023-07-02 17:44:02,529 - modelscope - INFO - epoch [1][615/4953]\tlr: 9.713e-05, memory: 8933, loss: 1.8422\n", + "2023-07-02 17:44:07,376 - modelscope - INFO - epoch [1][620/4953]\tlr: 9.707e-05, memory: 8933, loss: 2.4242\n", + "2023-07-02 17:44:12,991 - modelscope - INFO - epoch [1][625/4953]\tlr: 9.702e-05, memory: 8933, loss: 1.8070\n", + "2023-07-02 17:44:17,716 - modelscope - INFO - epoch [1][630/4953]\tlr: 9.697e-05, memory: 8933, loss: 2.0000\n", + "2023-07-02 17:44:22,023 - modelscope - INFO - epoch [1][635/4953]\tlr: 9.691e-05, memory: 8933, loss: 1.3898\n", + "2023-07-02 17:44:27,160 - modelscope - INFO - epoch [1][640/4953]\tlr: 9.686e-05, memory: 8933, loss: 1.6227\n", + "2023-07-02 17:44:33,519 - modelscope - INFO - epoch [1][645/4953]\tlr: 9.680e-05, memory: 8933, loss: 1.6672\n", + "2023-07-02 17:44:40,193 - modelscope - INFO - epoch [1][650/4953]\tlr: 9.674e-05, memory: 8933, loss: 1.4438\n", + "2023-07-02 17:44:44,906 - modelscope - INFO - epoch [1][655/4953]\tlr: 9.669e-05, memory: 8933, loss: 1.6648\n", + "2023-07-02 17:44:49,519 - modelscope - INFO - epoch [1][660/4953]\tlr: 9.663e-05, memory: 
8933, loss: 1.2945\n", + "2023-07-02 17:44:55,845 - modelscope - INFO - epoch [1][665/4953]\tlr: 9.657e-05, memory: 8933, loss: 1.5773\n", + "2023-07-02 17:45:02,184 - modelscope - INFO - epoch [1][670/4953]\tlr: 9.652e-05, memory: 8933, loss: 1.8625\n", + "2023-07-02 17:45:05,554 - modelscope - INFO - epoch [1][675/4953]\tlr: 9.646e-05, memory: 8933, loss: 1.7039\n", + "2023-07-02 17:45:10,948 - modelscope - INFO - epoch [1][680/4953]\tlr: 9.640e-05, memory: 8933, loss: 2.0211\n", + "2023-07-02 17:45:15,605 - modelscope - INFO - epoch [1][685/4953]\tlr: 9.634e-05, memory: 8933, loss: 1.5969\n", + "2023-07-02 17:45:19,449 - modelscope - INFO - epoch [1][690/4953]\tlr: 9.628e-05, memory: 8933, loss: 1.7523\n", + "2023-07-02 17:45:26,684 - modelscope - INFO - epoch [1][695/4953]\tlr: 9.622e-05, memory: 8933, loss: 1.0891\n", + "2023-07-02 17:45:32,244 - modelscope - INFO - epoch [1][700/4953]\tlr: 9.616e-05, memory: 8933, loss: 1.9469\n", + "2023-07-02 17:45:37,894 - modelscope - INFO - epoch [1][705/4953]\tlr: 9.610e-05, memory: 8933, loss: 2.0938\n", + "2023-07-02 17:45:43,345 - modelscope - INFO - epoch [1][710/4953]\tlr: 9.604e-05, memory: 8933, loss: 2.7961\n", + "2023-07-02 17:45:49,260 - modelscope - INFO - epoch [1][715/4953]\tlr: 9.598e-05, memory: 8933, loss: 1.4719\n", + "2023-07-02 17:45:56,740 - modelscope - INFO - epoch [1][720/4953]\tlr: 9.592e-05, memory: 8992, loss: 2.2742\n", + "2023-07-02 17:46:00,368 - modelscope - INFO - epoch [1][725/4953]\tlr: 9.585e-05, memory: 8992, loss: 2.5391\n", + "2023-07-02 17:46:06,793 - modelscope - INFO - epoch [1][730/4953]\tlr: 9.579e-05, memory: 8992, loss: 1.0074\n", + "2023-07-02 17:46:13,010 - modelscope - INFO - epoch [1][735/4953]\tlr: 9.573e-05, memory: 8992, loss: 1.9289\n", + "2023-07-02 17:46:19,044 - modelscope - INFO - epoch [1][740/4953]\tlr: 9.567e-05, memory: 8992, loss: 1.7352\n", + "2023-07-02 17:46:26,858 - modelscope - INFO - epoch [1][745/4953]\tlr: 9.560e-05, memory: 8992, loss: 1.6711\n", + "2023-07-02 17:46:32,975 - modelscope - INFO - epoch [1][750/4953]\tlr: 9.554e-05, memory: 8992, loss: 2.0008\n", + "2023-07-02 17:46:41,458 - modelscope - INFO - epoch [1][755/4953]\tlr: 9.547e-05, memory: 8992, loss: 1.4602\n", + "2023-07-02 17:46:45,793 - modelscope - INFO - epoch [1][760/4953]\tlr: 9.541e-05, memory: 8992, loss: 3.6859\n", + "2023-07-02 17:46:50,447 - modelscope - INFO - epoch [1][765/4953]\tlr: 9.534e-05, memory: 8992, loss: 2.0977\n", + "2023-07-02 17:46:56,543 - modelscope - INFO - epoch [1][770/4953]\tlr: 9.528e-05, memory: 8992, loss: 1.6078\n", + "2023-07-02 17:47:02,551 - modelscope - INFO - epoch [1][775/4953]\tlr: 9.521e-05, memory: 8992, loss: 2.8766\n", + "2023-07-02 17:47:09,599 - modelscope - INFO - epoch [1][780/4953]\tlr: 9.514e-05, memory: 8992, loss: 2.9023\n", + "2023-07-02 17:47:15,456 - modelscope - INFO - epoch [1][785/4953]\tlr: 9.508e-05, memory: 8992, loss: 1.2570\n", + "2023-07-02 17:47:22,689 - modelscope - INFO - epoch [1][790/4953]\tlr: 9.501e-05, memory: 8992, loss: 1.7406\n", + "2023-07-02 17:47:28,263 - modelscope - INFO - epoch [1][795/4953]\tlr: 9.494e-05, memory: 8992, loss: 1.9820\n", + "2023-07-02 17:47:34,260 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:16<00:00, 2.04it/s]\n", + "2023-07-02 17:49:50,358 - modelscope - INFO - Saving checkpoint at 800 iter\n", + "2023-07-02 17:49:50,399 - modelscope - INFO - deleting checkpoint: 
/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter600_acc0.7198567390441895\n", + "2023-07-02 17:49:50,403 - modelscope - INFO - Saving checkpoint at 800 iter\n", + "2023-07-02 17:49:50,442 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_600\n", + "2023-07-02 17:49:50,447 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8992, evaluation/acc: 0.7412, evaluation/loss: 1.8238, loss: 1.3484\n", + "2023-07-02 17:49:56,027 - modelscope - INFO - epoch [1][805/4953]\tlr: 9.481e-05, memory: 8992, loss: 1.9234\n", + "2023-07-02 17:50:02,709 - modelscope - INFO - epoch [1][810/4953]\tlr: 9.474e-05, memory: 8992, loss: 1.3625\n", + "2023-07-02 17:50:05,927 - modelscope - INFO - epoch [1][815/4953]\tlr: 9.467e-05, memory: 8992, loss: 3.0219\n", + "2023-07-02 17:50:11,744 - modelscope - INFO - epoch [1][820/4953]\tlr: 9.460e-05, memory: 8992, loss: 1.4125\n", + "2023-07-02 17:50:17,173 - modelscope - INFO - epoch [1][825/4953]\tlr: 9.453e-05, memory: 8992, loss: 2.7422\n", + "2023-07-02 17:50:20,860 - modelscope - INFO - epoch [1][830/4953]\tlr: 9.446e-05, memory: 8992, loss: 2.2609\n", + "2023-07-02 17:50:26,716 - modelscope - INFO - epoch [1][835/4953]\tlr: 9.439e-05, memory: 8992, loss: 2.0391\n", + "2023-07-02 17:50:33,433 - modelscope - INFO - epoch [1][840/4953]\tlr: 9.431e-05, memory: 8992, loss: 1.2227\n", + "2023-07-02 17:50:38,310 - modelscope - INFO - epoch [1][845/4953]\tlr: 9.424e-05, memory: 8992, loss: 2.3312\n", + "2023-07-02 17:50:42,956 - modelscope - INFO - epoch [1][850/4953]\tlr: 9.417e-05, memory: 8992, loss: 1.8562\n", + "2023-07-02 17:50:48,973 - modelscope - INFO - epoch [1][855/4953]\tlr: 9.410e-05, memory: 8992, loss: 1.5039\n", + "2023-07-02 17:50:52,835 - modelscope - INFO - epoch [1][860/4953]\tlr: 9.402e-05, memory: 8992, loss: 2.6664\n", + "2023-07-02 17:50:59,665 - modelscope - INFO - epoch [1][865/4953]\tlr: 9.395e-05, memory: 8992, loss: 1.1352\n", + "2023-07-02 17:51:05,311 - modelscope - INFO - epoch [1][870/4953]\tlr: 9.388e-05, memory: 8992, loss: 0.9805\n", + "2023-07-02 17:51:10,329 - modelscope - INFO - epoch [1][875/4953]\tlr: 9.380e-05, memory: 8992, loss: 1.9438\n", + "2023-07-02 17:51:15,416 - modelscope - INFO - epoch [1][880/4953]\tlr: 9.373e-05, memory: 8992, loss: 1.5938\n", + "2023-07-02 17:51:18,285 - modelscope - INFO - epoch [1][885/4953]\tlr: 9.365e-05, memory: 8992, loss: 3.1656\n", + "2023-07-02 17:51:23,293 - modelscope - INFO - epoch [1][890/4953]\tlr: 9.358e-05, memory: 8992, loss: 1.3336\n", + "2023-07-02 17:51:29,054 - modelscope - INFO - epoch [1][895/4953]\tlr: 9.350e-05, memory: 8992, loss: 1.9094\n", + "2023-07-02 17:51:34,572 - modelscope - INFO - epoch [1][900/4953]\tlr: 9.343e-05, memory: 8992, loss: 2.2406\n", + "2023-07-02 17:51:40,191 - modelscope - INFO - epoch [1][905/4953]\tlr: 9.335e-05, memory: 8992, loss: 1.1078\n", + "2023-07-02 17:51:49,310 - modelscope - INFO - epoch [1][910/4953]\tlr: 9.327e-05, memory: 8992, loss: 1.4352\n", + "2023-07-02 17:51:53,688 - modelscope - INFO - epoch [1][915/4953]\tlr: 9.320e-05, memory: 8992, loss: 2.3406\n", + "2023-07-02 17:51:58,710 - modelscope - INFO - epoch [1][920/4953]\tlr: 9.312e-05, memory: 8992, loss: 1.6012\n", + "2023-07-02 17:52:04,686 - modelscope - INFO - epoch [1][925/4953]\tlr: 9.304e-05, memory: 8992, loss: 1.7086\n", + "2023-07-02 17:52:12,123 - modelscope - INFO - epoch [1][930/4953]\tlr: 9.296e-05, memory: 8992, loss: 1.3492\n", + "2023-07-02 17:52:15,935 - modelscope - INFO - epoch 
[1][935/4953]\tlr: 9.288e-05, memory: 8992, loss: 1.4781\n", + "2023-07-02 17:52:20,994 - modelscope - INFO - epoch [1][940/4953]\tlr: 9.280e-05, memory: 8992, loss: 2.1047\n", + "2023-07-02 17:52:28,615 - modelscope - INFO - epoch [1][945/4953]\tlr: 9.272e-05, memory: 8992, loss: 1.2547\n", + "2023-07-02 17:52:34,278 - modelscope - INFO - epoch [1][950/4953]\tlr: 9.264e-05, memory: 8992, loss: 1.7332\n", + "2023-07-02 17:52:40,908 - modelscope - INFO - epoch [1][955/4953]\tlr: 9.256e-05, memory: 8992, loss: 1.2336\n", + "2023-07-02 17:52:45,957 - modelscope - INFO - epoch [1][960/4953]\tlr: 9.248e-05, memory: 8992, loss: 1.3078\n", + "2023-07-02 17:52:51,185 - modelscope - INFO - epoch [1][965/4953]\tlr: 9.240e-05, memory: 8992, loss: 2.4461\n", + "2023-07-02 17:52:56,088 - modelscope - INFO - epoch [1][970/4953]\tlr: 9.232e-05, memory: 8992, loss: 2.0934\n", + "2023-07-02 17:53:00,822 - modelscope - INFO - epoch [1][975/4953]\tlr: 9.224e-05, memory: 8992, loss: 1.5676\n", + "2023-07-02 17:53:04,695 - modelscope - INFO - epoch [1][980/4953]\tlr: 9.216e-05, memory: 8992, loss: 2.7031\n", + "2023-07-02 17:53:09,760 - modelscope - INFO - epoch [1][985/4953]\tlr: 9.207e-05, memory: 8992, loss: 1.9406\n", + "2023-07-02 17:53:14,950 - modelscope - INFO - epoch [1][990/4953]\tlr: 9.199e-05, memory: 8992, loss: 1.9484\n", + "2023-07-02 17:53:20,534 - modelscope - INFO - epoch [1][995/4953]\tlr: 9.191e-05, memory: 8992, loss: 3.2953\n", + "2023-07-02 17:53:25,342 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:16<00:00, 2.04it/s]\n", + "2023-07-02 17:55:41,348 - modelscope - INFO - Saving checkpoint at 1000 iter\n", + "2023-07-02 17:55:41,389 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter800_acc0.7412243485450745\n", + "2023-07-02 17:55:41,393 - modelscope - INFO - Saving checkpoint at 1000 iter\n", + "2023-07-02 17:55:41,431 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_800\n", + "2023-07-02 17:55:41,435 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8992, evaluation/acc: 0.7551, evaluation/loss: 1.6418, loss: 2.1023\n", + "2023-07-02 17:55:48,321 - modelscope - INFO - epoch [1][1005/4953]\tlr: 9.174e-05, memory: 8992, loss: 0.9020\n", + "2023-07-02 17:55:52,978 - modelscope - INFO - epoch [1][1010/4953]\tlr: 9.166e-05, memory: 8992, loss: 2.8094\n", + "2023-07-02 17:55:59,951 - modelscope - INFO - epoch [1][1015/4953]\tlr: 9.157e-05, memory: 8992, loss: 1.5145\n", + "2023-07-02 17:56:06,752 - modelscope - INFO - epoch [1][1020/4953]\tlr: 9.149e-05, memory: 8992, loss: 1.2547\n", + "2023-07-02 17:56:13,123 - modelscope - INFO - epoch [1][1025/4953]\tlr: 9.140e-05, memory: 8992, loss: 1.5836\n", + "2023-07-02 17:56:18,535 - modelscope - INFO - epoch [1][1030/4953]\tlr: 9.132e-05, memory: 8992, loss: 1.5500\n", + "2023-07-02 17:56:23,898 - modelscope - INFO - epoch [1][1035/4953]\tlr: 9.123e-05, memory: 8992, loss: 1.1477\n", + "2023-07-02 17:56:29,262 - modelscope - INFO - epoch [1][1040/4953]\tlr: 9.114e-05, memory: 8992, loss: 1.8488\n", + "2023-07-02 17:56:36,281 - modelscope - INFO - epoch [1][1045/4953]\tlr: 9.106e-05, memory: 8992, loss: 1.7969\n", + "2023-07-02 17:56:42,786 - modelscope - INFO - epoch [1][1050/4953]\tlr: 9.097e-05, memory: 8992, loss: 1.0703\n", + "2023-07-02 17:56:48,367 - modelscope - INFO - epoch [1][1055/4953]\tlr: 9.088e-05, memory: 
8992, loss: 1.5227\n", + "2023-07-02 17:56:53,185 - modelscope - INFO - epoch [1][1060/4953]\tlr: 9.079e-05, memory: 8992, loss: 2.5859\n", + "2023-07-02 17:56:59,040 - modelscope - INFO - epoch [1][1065/4953]\tlr: 9.070e-05, memory: 8992, loss: 1.4641\n", + "2023-07-02 17:57:05,006 - modelscope - INFO - epoch [1][1070/4953]\tlr: 9.062e-05, memory: 8992, loss: 0.9602\n", + "2023-07-02 17:57:08,833 - modelscope - INFO - epoch [1][1075/4953]\tlr: 9.053e-05, memory: 8992, loss: 2.7281\n", + "2023-07-02 17:57:15,081 - modelscope - INFO - epoch [1][1080/4953]\tlr: 9.044e-05, memory: 8992, loss: 0.8438\n", + "2023-07-02 17:57:19,054 - modelscope - INFO - epoch [1][1085/4953]\tlr: 9.035e-05, memory: 8992, loss: 2.0336\n", + "2023-07-02 17:57:27,789 - modelscope - INFO - epoch [1][1090/4953]\tlr: 9.026e-05, memory: 8992, loss: 1.0059\n", + "2023-07-02 17:57:32,658 - modelscope - INFO - epoch [1][1095/4953]\tlr: 9.017e-05, memory: 8992, loss: 1.4187\n", + "2023-07-02 17:57:37,809 - modelscope - INFO - epoch [1][1100/4953]\tlr: 9.008e-05, memory: 8992, loss: 1.8813\n", + "2023-07-02 17:57:44,029 - modelscope - INFO - epoch [1][1105/4953]\tlr: 8.999e-05, memory: 8992, loss: 1.2219\n", + "2023-07-02 17:57:49,772 - modelscope - INFO - epoch [1][1110/4953]\tlr: 8.989e-05, memory: 8992, loss: 1.0527\n", + "2023-07-02 17:57:53,867 - modelscope - INFO - epoch [1][1115/4953]\tlr: 8.980e-05, memory: 8992, loss: 1.7289\n", + "2023-07-02 17:57:59,243 - modelscope - INFO - epoch [1][1120/4953]\tlr: 8.971e-05, memory: 8992, loss: 2.4305\n", + "2023-07-02 17:58:08,887 - modelscope - INFO - epoch [1][1125/4953]\tlr: 8.962e-05, memory: 8992, loss: 0.7469\n", + "2023-07-02 17:58:16,138 - modelscope - INFO - epoch [1][1130/4953]\tlr: 8.952e-05, memory: 8992, loss: 1.7727\n", + "2023-07-02 17:58:23,930 - modelscope - INFO - epoch [1][1135/4953]\tlr: 8.943e-05, memory: 8992, loss: 2.0129\n", + "2023-07-02 17:58:30,185 - modelscope - INFO - epoch [1][1140/4953]\tlr: 8.934e-05, memory: 8992, loss: 2.9025\n", + "2023-07-02 17:58:36,114 - modelscope - INFO - epoch [1][1145/4953]\tlr: 8.924e-05, memory: 8992, loss: 1.8898\n", + "2023-07-02 17:58:42,583 - modelscope - INFO - epoch [1][1150/4953]\tlr: 8.915e-05, memory: 8992, loss: 1.6789\n", + "2023-07-02 17:58:47,491 - modelscope - INFO - epoch [1][1155/4953]\tlr: 8.905e-05, memory: 8992, loss: 1.5578\n", + "2023-07-02 17:58:51,182 - modelscope - INFO - epoch [1][1160/4953]\tlr: 8.896e-05, memory: 8992, loss: 2.6266\n", + "2023-07-02 17:58:56,692 - modelscope - INFO - epoch [1][1165/4953]\tlr: 8.886e-05, memory: 8992, loss: 1.8508\n", + "2023-07-02 17:59:01,780 - modelscope - INFO - epoch [1][1170/4953]\tlr: 8.877e-05, memory: 8992, loss: 1.7000\n", + "2023-07-02 17:59:05,790 - modelscope - INFO - epoch [1][1175/4953]\tlr: 8.867e-05, memory: 8992, loss: 2.2281\n", + "2023-07-02 17:59:10,420 - modelscope - INFO - epoch [1][1180/4953]\tlr: 8.858e-05, memory: 8992, loss: 2.2180\n", + "2023-07-02 17:59:15,762 - modelscope - INFO - epoch [1][1185/4953]\tlr: 8.848e-05, memory: 8992, loss: 1.2668\n", + "2023-07-02 17:59:20,930 - modelscope - INFO - epoch [1][1190/4953]\tlr: 8.838e-05, memory: 8992, loss: 1.8664\n", + "2023-07-02 17:59:27,122 - modelscope - INFO - epoch [1][1195/4953]\tlr: 8.828e-05, memory: 8992, loss: 2.4109\n", + "2023-07-02 17:59:32,910 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:01:48,692 - modelscope - INFO - Saving 
checkpoint at 1200 iter\n", + "2023-07-02 18:01:48,732 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter1000_acc0.7551158666610718\n", + "2023-07-02 18:01:48,736 - modelscope - INFO - Saving checkpoint at 1200 iter\n", + "2023-07-02 18:01:48,775 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_1000\n", + "2023-07-02 18:01:48,780 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8992, evaluation/acc: 0.7694, evaluation/loss: 1.5234, loss: 1.7117\n", + "2023-07-02 18:01:56,354 - modelscope - INFO - epoch [1][1205/4953]\tlr: 8.809e-05, memory: 8992, loss: 1.2402\n", + "2023-07-02 18:02:00,660 - modelscope - INFO - epoch [1][1210/4953]\tlr: 8.799e-05, memory: 8992, loss: 1.9062\n", + "2023-07-02 18:02:04,421 - modelscope - INFO - epoch [1][1215/4953]\tlr: 8.789e-05, memory: 8992, loss: 1.4750\n", + "2023-07-02 18:02:10,614 - modelscope - INFO - epoch [1][1220/4953]\tlr: 8.779e-05, memory: 8992, loss: 1.0879\n", + "2023-07-02 18:02:16,579 - modelscope - INFO - epoch [1][1225/4953]\tlr: 8.769e-05, memory: 8992, loss: 1.9461\n", + "2023-07-02 18:02:23,602 - modelscope - INFO - epoch [1][1230/4953]\tlr: 8.759e-05, memory: 8992, loss: 2.3242\n", + "2023-07-02 18:02:31,155 - modelscope - INFO - epoch [1][1235/4953]\tlr: 8.749e-05, memory: 8992, loss: 1.9867\n", + "2023-07-02 18:02:36,373 - modelscope - INFO - epoch [1][1240/4953]\tlr: 8.739e-05, memory: 8992, loss: 2.1641\n", + "2023-07-02 18:02:41,792 - modelscope - INFO - epoch [1][1245/4953]\tlr: 8.729e-05, memory: 8992, loss: 1.9109\n", + "2023-07-02 18:02:49,746 - modelscope - INFO - epoch [1][1250/4953]\tlr: 8.719e-05, memory: 8992, loss: 0.7258\n", + "2023-07-02 18:02:54,809 - modelscope - INFO - epoch [1][1255/4953]\tlr: 8.709e-05, memory: 8992, loss: 1.7203\n", + "2023-07-02 18:03:02,266 - modelscope - INFO - epoch [1][1260/4953]\tlr: 8.699e-05, memory: 8992, loss: 1.3533\n", + "2023-07-02 18:03:10,570 - modelscope - INFO - epoch [1][1265/4953]\tlr: 8.689e-05, memory: 8992, loss: 1.6199\n", + "2023-07-02 18:03:17,332 - modelscope - INFO - epoch [1][1270/4953]\tlr: 8.679e-05, memory: 8992, loss: 1.4033\n", + "2023-07-02 18:03:24,075 - modelscope - INFO - epoch [1][1275/4953]\tlr: 8.668e-05, memory: 8992, loss: 1.3773\n", + "2023-07-02 18:03:31,046 - modelscope - INFO - epoch [1][1280/4953]\tlr: 8.658e-05, memory: 8992, loss: 1.3973\n", + "2023-07-02 18:03:37,326 - modelscope - INFO - epoch [1][1285/4953]\tlr: 8.648e-05, memory: 8992, loss: 1.6422\n", + "2023-07-02 18:03:42,789 - modelscope - INFO - epoch [1][1290/4953]\tlr: 8.637e-05, memory: 8992, loss: 1.8156\n", + "2023-07-02 18:03:49,191 - modelscope - INFO - epoch [1][1295/4953]\tlr: 8.627e-05, memory: 8992, loss: 0.8660\n", + "2023-07-02 18:03:57,916 - modelscope - INFO - epoch [1][1300/4953]\tlr: 8.617e-05, memory: 8992, loss: 1.4477\n", + "2023-07-02 18:04:04,809 - modelscope - INFO - epoch [1][1305/4953]\tlr: 8.606e-05, memory: 8992, loss: 0.7375\n", + "2023-07-02 18:04:12,169 - modelscope - INFO - epoch [1][1310/4953]\tlr: 8.596e-05, memory: 8992, loss: 0.4646\n", + "2023-07-02 18:04:17,928 - modelscope - INFO - epoch [1][1315/4953]\tlr: 8.585e-05, memory: 8992, loss: 1.6566\n", + "2023-07-02 18:04:26,868 - modelscope - INFO - epoch [1][1320/4953]\tlr: 8.575e-05, memory: 8992, loss: 1.0375\n", + "2023-07-02 18:04:32,785 - modelscope - INFO - epoch [1][1325/4953]\tlr: 8.564e-05, memory: 8992, loss: 1.1785\n", + "2023-07-02 18:04:36,876 - modelscope - INFO 
- epoch [1][1330/4953]\tlr: 8.553e-05, memory: 8992, loss: 2.0953\n", + "2023-07-02 18:04:43,149 - modelscope - INFO - epoch [1][1335/4953]\tlr: 8.543e-05, memory: 8992, loss: 1.4941\n", + "2023-07-02 18:04:48,128 - modelscope - INFO - epoch [1][1340/4953]\tlr: 8.532e-05, memory: 8992, loss: 2.3219\n", + "2023-07-02 18:04:54,519 - modelscope - INFO - epoch [1][1345/4953]\tlr: 8.521e-05, memory: 8992, loss: 1.7479\n", + "2023-07-02 18:05:00,734 - modelscope - INFO - epoch [1][1350/4953]\tlr: 8.511e-05, memory: 8992, loss: 2.5168\n", + "2023-07-02 18:05:07,571 - modelscope - INFO - epoch [1][1355/4953]\tlr: 8.500e-05, memory: 8992, loss: 1.5414\n", + "2023-07-02 18:05:13,130 - modelscope - INFO - epoch [1][1360/4953]\tlr: 8.489e-05, memory: 8992, loss: 1.8086\n", + "2023-07-02 18:05:22,837 - modelscope - INFO - epoch [1][1365/4953]\tlr: 8.478e-05, memory: 8992, loss: 1.1250\n", + "2023-07-02 18:05:28,381 - modelscope - INFO - epoch [1][1370/4953]\tlr: 8.468e-05, memory: 8992, loss: 1.2740\n", + "2023-07-02 18:05:34,762 - modelscope - INFO - epoch [1][1375/4953]\tlr: 8.457e-05, memory: 8992, loss: 1.6906\n", + "2023-07-02 18:05:40,998 - modelscope - INFO - epoch [1][1380/4953]\tlr: 8.446e-05, memory: 8992, loss: 2.1523\n", + "2023-07-02 18:05:48,330 - modelscope - INFO - epoch [1][1385/4953]\tlr: 8.435e-05, memory: 8992, loss: 0.6824\n", + "2023-07-02 18:05:52,136 - modelscope - INFO - epoch [1][1390/4953]\tlr: 8.424e-05, memory: 8992, loss: 1.8422\n", + "2023-07-02 18:05:58,132 - modelscope - INFO - epoch [1][1395/4953]\tlr: 8.413e-05, memory: 8992, loss: 0.8705\n", + "2023-07-02 18:06:04,317 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:08:20,133 - modelscope - INFO - Saving checkpoint at 1400 iter\n", + "2023-07-02 18:08:20,173 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter1200_acc0.7693551182746887\n", + "2023-07-02 18:08:20,177 - modelscope - INFO - Saving checkpoint at 1400 iter\n", + "2023-07-02 18:08:20,216 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_1200\n", + "2023-07-02 18:08:20,220 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 8992, evaluation/acc: 0.7789, evaluation/loss: 1.4656, loss: 1.8477\n", + "2023-07-02 18:08:25,847 - modelscope - INFO - epoch [1][1405/4953]\tlr: 8.391e-05, memory: 8992, loss: 1.5250\n", + "2023-07-02 18:08:32,815 - modelscope - INFO - epoch [1][1410/4953]\tlr: 8.380e-05, memory: 8992, loss: 1.2430\n", + "2023-07-02 18:08:38,362 - modelscope - INFO - epoch [1][1415/4953]\tlr: 8.369e-05, memory: 8992, loss: 1.4227\n", + "2023-07-02 18:08:43,312 - modelscope - INFO - epoch [1][1420/4953]\tlr: 8.358e-05, memory: 8992, loss: 1.3088\n", + "2023-07-02 18:08:50,596 - modelscope - INFO - epoch [1][1425/4953]\tlr: 8.346e-05, memory: 8992, loss: 1.0277\n", + "2023-07-02 18:08:55,317 - modelscope - INFO - epoch [1][1430/4953]\tlr: 8.335e-05, memory: 8992, loss: 2.0480\n", + "2023-07-02 18:08:58,994 - modelscope - INFO - epoch [1][1435/4953]\tlr: 8.324e-05, memory: 8992, loss: 3.0969\n", + "2023-07-02 18:09:04,894 - modelscope - INFO - epoch [1][1440/4953]\tlr: 8.313e-05, memory: 8992, loss: 0.7141\n", + "2023-07-02 18:09:10,621 - modelscope - INFO - epoch [1][1445/4953]\tlr: 8.301e-05, memory: 8992, loss: 1.7031\n", + "2023-07-02 18:09:15,960 - modelscope - INFO - epoch [1][1450/4953]\tlr: 
8.290e-05, memory: 8992, loss: 1.5277\n", + "2023-07-02 18:09:21,781 - modelscope - INFO - epoch [1][1455/4953]\tlr: 8.279e-05, memory: 8992, loss: 1.7842\n", + "2023-07-02 18:09:29,051 - modelscope - INFO - epoch [1][1460/4953]\tlr: 8.267e-05, memory: 8992, loss: 2.1768\n", + "2023-07-02 18:09:33,405 - modelscope - INFO - epoch [1][1465/4953]\tlr: 8.256e-05, memory: 8992, loss: 1.9969\n", + "2023-07-02 18:09:38,454 - modelscope - INFO - epoch [1][1470/4953]\tlr: 8.245e-05, memory: 8992, loss: 1.6043\n", + "2023-07-02 18:09:44,266 - modelscope - INFO - epoch [1][1475/4953]\tlr: 8.233e-05, memory: 8992, loss: 0.7842\n", + "2023-07-02 18:09:49,575 - modelscope - INFO - epoch [1][1480/4953]\tlr: 8.222e-05, memory: 8992, loss: 1.6766\n", + "2023-07-02 18:09:56,773 - modelscope - INFO - epoch [1][1485/4953]\tlr: 8.210e-05, memory: 8992, loss: 1.1123\n", + "2023-07-02 18:10:05,054 - modelscope - INFO - epoch [1][1490/4953]\tlr: 8.199e-05, memory: 9058, loss: 1.3289\n", + "2023-07-02 18:10:10,678 - modelscope - INFO - epoch [1][1495/4953]\tlr: 8.187e-05, memory: 9058, loss: 1.6414\n", + "2023-07-02 18:10:16,694 - modelscope - INFO - epoch [1][1500/4953]\tlr: 8.176e-05, memory: 9058, loss: 0.8203\n", + "2023-07-02 18:10:24,675 - modelscope - INFO - epoch [1][1505/4953]\tlr: 8.164e-05, memory: 9058, loss: 0.8189\n", + "2023-07-02 18:10:30,053 - modelscope - INFO - epoch [1][1510/4953]\tlr: 8.152e-05, memory: 9058, loss: 1.1646\n", + "2023-07-02 18:10:36,537 - modelscope - INFO - epoch [1][1515/4953]\tlr: 8.141e-05, memory: 9058, loss: 1.1387\n", + "2023-07-02 18:10:42,304 - modelscope - INFO - epoch [1][1520/4953]\tlr: 8.129e-05, memory: 9058, loss: 1.4477\n", + "2023-07-02 18:10:46,424 - modelscope - INFO - epoch [1][1525/4953]\tlr: 8.117e-05, memory: 9058, loss: 3.0531\n", + "2023-07-02 18:10:51,264 - modelscope - INFO - epoch [1][1530/4953]\tlr: 8.106e-05, memory: 9058, loss: 2.3023\n", + "2023-07-02 18:10:59,103 - modelscope - INFO - epoch [1][1535/4953]\tlr: 8.094e-05, memory: 9058, loss: 0.6086\n", + "2023-07-02 18:11:04,295 - modelscope - INFO - epoch [1][1540/4953]\tlr: 8.082e-05, memory: 9058, loss: 1.3912\n", + "2023-07-02 18:11:09,436 - modelscope - INFO - epoch [1][1545/4953]\tlr: 8.070e-05, memory: 9058, loss: 2.1668\n", + "2023-07-02 18:11:16,921 - modelscope - INFO - epoch [1][1550/4953]\tlr: 8.058e-05, memory: 9058, loss: 0.4180\n", + "2023-07-02 18:11:22,852 - modelscope - INFO - epoch [1][1555/4953]\tlr: 8.047e-05, memory: 9058, loss: 1.4855\n", + "2023-07-02 18:11:27,748 - modelscope - INFO - epoch [1][1560/4953]\tlr: 8.035e-05, memory: 9058, loss: 2.0650\n", + "2023-07-02 18:11:30,906 - modelscope - INFO - epoch [1][1565/4953]\tlr: 8.023e-05, memory: 9058, loss: 2.8250\n", + "2023-07-02 18:11:38,069 - modelscope - INFO - epoch [1][1570/4953]\tlr: 8.011e-05, memory: 9058, loss: 1.6609\n", + "2023-07-02 18:11:44,626 - modelscope - INFO - epoch [1][1575/4953]\tlr: 7.999e-05, memory: 9058, loss: 1.0016\n", + "2023-07-02 18:11:49,164 - modelscope - INFO - epoch [1][1580/4953]\tlr: 7.987e-05, memory: 9058, loss: 2.2371\n", + "2023-07-02 18:11:53,217 - modelscope - INFO - epoch [1][1585/4953]\tlr: 7.975e-05, memory: 9058, loss: 2.7695\n", + "2023-07-02 18:11:59,930 - modelscope - INFO - epoch [1][1590/4953]\tlr: 7.963e-05, memory: 9058, loss: 2.2398\n", + "2023-07-02 18:12:04,671 - modelscope - INFO - epoch [1][1595/4953]\tlr: 7.951e-05, memory: 9058, loss: 0.7875\n", + "2023-07-02 18:12:10,417 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", 
+ "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:14:26,308 - modelscope - INFO - Saving checkpoint at 1600 iter\n", + "2023-07-02 18:14:26,349 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter1400_acc0.7789175510406494\n", + "2023-07-02 18:14:26,353 - modelscope - INFO - Saving checkpoint at 1600 iter\n", + "2023-07-02 18:14:26,392 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_1400\n", + "2023-07-02 18:14:26,396 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9058, evaluation/acc: 0.7892, evaluation/loss: 1.4188, loss: 2.1477\n", + "2023-07-02 18:14:31,893 - modelscope - INFO - epoch [1][1605/4953]\tlr: 7.927e-05, memory: 9058, loss: 0.7930\n", + "2023-07-02 18:14:37,157 - modelscope - INFO - epoch [1][1610/4953]\tlr: 7.914e-05, memory: 9058, loss: 1.6867\n", + "2023-07-02 18:14:41,163 - modelscope - INFO - epoch [1][1615/4953]\tlr: 7.902e-05, memory: 9058, loss: 1.3123\n", + "2023-07-02 18:14:46,222 - modelscope - INFO - epoch [1][1620/4953]\tlr: 7.890e-05, memory: 9058, loss: 1.9320\n", + "2023-07-02 18:14:50,200 - modelscope - INFO - epoch [1][1625/4953]\tlr: 7.878e-05, memory: 9058, loss: 2.3531\n", + "2023-07-02 18:14:55,640 - modelscope - INFO - epoch [1][1630/4953]\tlr: 7.866e-05, memory: 9058, loss: 2.1230\n", + "2023-07-02 18:15:00,591 - modelscope - INFO - epoch [1][1635/4953]\tlr: 7.853e-05, memory: 9058, loss: 1.2672\n", + "2023-07-02 18:15:06,311 - modelscope - INFO - epoch [1][1640/4953]\tlr: 7.841e-05, memory: 9058, loss: 1.8948\n", + "2023-07-02 18:15:12,067 - modelscope - INFO - epoch [1][1645/4953]\tlr: 7.829e-05, memory: 9058, loss: 1.9506\n", + "2023-07-02 18:15:18,834 - modelscope - INFO - epoch [1][1650/4953]\tlr: 7.817e-05, memory: 9058, loss: 0.8719\n", + "2023-07-02 18:15:24,490 - modelscope - INFO - epoch [1][1655/4953]\tlr: 7.804e-05, memory: 9058, loss: 0.7850\n", + "2023-07-02 18:15:30,533 - modelscope - INFO - epoch [1][1660/4953]\tlr: 7.792e-05, memory: 9058, loss: 1.0324\n", + "2023-07-02 18:15:39,715 - modelscope - INFO - epoch [1][1665/4953]\tlr: 7.779e-05, memory: 9058, loss: 0.8568\n", + "2023-07-02 18:15:46,536 - modelscope - INFO - epoch [1][1670/4953]\tlr: 7.767e-05, memory: 9058, loss: 1.5828\n", + "2023-07-02 18:15:50,976 - modelscope - INFO - epoch [1][1675/4953]\tlr: 7.755e-05, memory: 9058, loss: 1.5391\n", + "2023-07-02 18:15:56,272 - modelscope - INFO - epoch [1][1680/4953]\tlr: 7.742e-05, memory: 9058, loss: 1.6117\n", + "2023-07-02 18:16:04,187 - modelscope - INFO - epoch [1][1685/4953]\tlr: 7.730e-05, memory: 9058, loss: 0.4076\n", + "2023-07-02 18:16:08,882 - modelscope - INFO - epoch [1][1690/4953]\tlr: 7.717e-05, memory: 9058, loss: 1.3816\n", + "2023-07-02 18:16:16,150 - modelscope - INFO - epoch [1][1695/4953]\tlr: 7.705e-05, memory: 9058, loss: 1.9426\n", + "2023-07-02 18:16:20,599 - modelscope - INFO - epoch [1][1700/4953]\tlr: 7.692e-05, memory: 9058, loss: 2.4797\n", + "2023-07-02 18:16:26,001 - modelscope - INFO - epoch [1][1705/4953]\tlr: 7.679e-05, memory: 9058, loss: 1.3273\n", + "2023-07-02 18:16:32,374 - modelscope - INFO - epoch [1][1710/4953]\tlr: 7.667e-05, memory: 9058, loss: 0.9286\n", + "2023-07-02 18:16:39,243 - modelscope - INFO - epoch [1][1715/4953]\tlr: 7.654e-05, memory: 9058, loss: 1.3732\n", + "2023-07-02 18:16:44,919 - modelscope - INFO - epoch [1][1720/4953]\tlr: 7.642e-05, memory: 9058, loss: 1.2824\n", + "2023-07-02 18:16:47,647 - 
modelscope - INFO - epoch [1][1725/4953]\tlr: 7.629e-05, memory: 9058, loss: 2.0891\n", + "2023-07-02 18:16:53,984 - modelscope - INFO - epoch [1][1730/4953]\tlr: 7.616e-05, memory: 9058, loss: 0.5539\n", + "2023-07-02 18:16:58,439 - modelscope - INFO - epoch [1][1735/4953]\tlr: 7.604e-05, memory: 9058, loss: 1.4975\n", + "2023-07-02 18:17:03,726 - modelscope - INFO - epoch [1][1740/4953]\tlr: 7.591e-05, memory: 9058, loss: 1.6102\n", + "2023-07-02 18:17:08,657 - modelscope - INFO - epoch [1][1745/4953]\tlr: 7.578e-05, memory: 9058, loss: 1.6957\n", + "2023-07-02 18:17:13,371 - modelscope - INFO - epoch [1][1750/4953]\tlr: 7.565e-05, memory: 9058, loss: 1.5684\n", + "2023-07-02 18:17:17,513 - modelscope - INFO - epoch [1][1755/4953]\tlr: 7.553e-05, memory: 9058, loss: 2.9000\n", + "2023-07-02 18:17:24,347 - modelscope - INFO - epoch [1][1760/4953]\tlr: 7.540e-05, memory: 9058, loss: 1.5227\n", + "2023-07-02 18:17:28,183 - modelscope - INFO - epoch [1][1765/4953]\tlr: 7.527e-05, memory: 9058, loss: 2.3375\n", + "2023-07-02 18:17:35,427 - modelscope - INFO - epoch [1][1770/4953]\tlr: 7.514e-05, memory: 9058, loss: 1.0623\n", + "2023-07-02 18:17:39,708 - modelscope - INFO - epoch [1][1775/4953]\tlr: 7.501e-05, memory: 9058, loss: 1.5977\n", + "2023-07-02 18:17:45,757 - modelscope - INFO - epoch [1][1780/4953]\tlr: 7.488e-05, memory: 9058, loss: 1.0781\n", + "2023-07-02 18:17:49,525 - modelscope - INFO - epoch [1][1785/4953]\tlr: 7.475e-05, memory: 9058, loss: 1.6547\n", + "2023-07-02 18:17:55,072 - modelscope - INFO - epoch [1][1790/4953]\tlr: 7.463e-05, memory: 9058, loss: 1.4458\n", + "2023-07-02 18:18:01,439 - modelscope - INFO - epoch [1][1795/4953]\tlr: 7.450e-05, memory: 9058, loss: 1.0096\n", + "2023-07-02 18:18:06,478 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:20:22,335 - modelscope - INFO - Saving checkpoint at 1800 iter\n", + "2023-07-02 18:20:22,375 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter1600_acc0.7891753911972046\n", + "2023-07-02 18:20:22,379 - modelscope - INFO - Saving checkpoint at 1800 iter\n", + "2023-07-02 18:20:22,417 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_1600\n", + "2023-07-02 18:20:22,422 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9058, evaluation/acc: 0.7967, evaluation/loss: 1.3701, loss: 0.9414\n", + "2023-07-02 18:20:28,163 - modelscope - INFO - epoch [1][1805/4953]\tlr: 7.424e-05, memory: 9058, loss: 1.7404\n", + "2023-07-02 18:20:32,265 - modelscope - INFO - epoch [1][1810/4953]\tlr: 7.411e-05, memory: 9058, loss: 1.5176\n", + "2023-07-02 18:20:38,772 - modelscope - INFO - epoch [1][1815/4953]\tlr: 7.398e-05, memory: 9058, loss: 0.9519\n", + "2023-07-02 18:20:44,819 - modelscope - INFO - epoch [1][1820/4953]\tlr: 7.385e-05, memory: 9058, loss: 1.2756\n", + "2023-07-02 18:20:50,296 - modelscope - INFO - epoch [1][1825/4953]\tlr: 7.372e-05, memory: 9058, loss: 1.4785\n", + "2023-07-02 18:20:56,799 - modelscope - INFO - epoch [1][1830/4953]\tlr: 7.358e-05, memory: 9058, loss: 1.5188\n", + "2023-07-02 18:21:03,334 - modelscope - INFO - epoch [1][1835/4953]\tlr: 7.345e-05, memory: 9058, loss: 0.6644\n", + "2023-07-02 18:21:10,067 - modelscope - INFO - epoch [1][1840/4953]\tlr: 7.332e-05, memory: 9058, loss: 0.9434\n", + "2023-07-02 18:21:16,554 - modelscope - INFO - epoch 
[1][1845/4953]\tlr: 7.319e-05, memory: 9058, loss: 0.7092\n", + "2023-07-02 18:21:23,374 - modelscope - INFO - epoch [1][1850/4953]\tlr: 7.306e-05, memory: 9058, loss: 1.1020\n", + "2023-07-02 18:21:32,187 - modelscope - INFO - epoch [1][1855/4953]\tlr: 7.293e-05, memory: 9058, loss: 1.1508\n", + "2023-07-02 18:21:37,254 - modelscope - INFO - epoch [1][1860/4953]\tlr: 7.280e-05, memory: 9058, loss: 1.6852\n", + "2023-07-02 18:21:42,410 - modelscope - INFO - epoch [1][1865/4953]\tlr: 7.266e-05, memory: 9058, loss: 0.9865\n", + "2023-07-02 18:21:47,494 - modelscope - INFO - epoch [1][1870/4953]\tlr: 7.253e-05, memory: 9058, loss: 1.4111\n", + "2023-07-02 18:21:51,877 - modelscope - INFO - epoch [1][1875/4953]\tlr: 7.240e-05, memory: 9058, loss: 1.9342\n", + "2023-07-02 18:21:57,909 - modelscope - INFO - epoch [1][1880/4953]\tlr: 7.227e-05, memory: 9058, loss: 1.5063\n", + "2023-07-02 18:22:03,018 - modelscope - INFO - epoch [1][1885/4953]\tlr: 7.213e-05, memory: 9058, loss: 1.5504\n", + "2023-07-02 18:22:07,481 - modelscope - INFO - epoch [1][1890/4953]\tlr: 7.200e-05, memory: 9058, loss: 1.2473\n", + "2023-07-02 18:22:12,667 - modelscope - INFO - epoch [1][1895/4953]\tlr: 7.187e-05, memory: 9058, loss: 2.0055\n", + "2023-07-02 18:22:17,967 - modelscope - INFO - epoch [1][1900/4953]\tlr: 7.174e-05, memory: 9058, loss: 0.7781\n", + "2023-07-02 18:22:24,563 - modelscope - INFO - epoch [1][1905/4953]\tlr: 7.160e-05, memory: 9058, loss: 1.1995\n", + "2023-07-02 18:22:28,670 - modelscope - INFO - epoch [1][1910/4953]\tlr: 7.147e-05, memory: 9058, loss: 2.4594\n", + "2023-07-02 18:22:35,136 - modelscope - INFO - epoch [1][1915/4953]\tlr: 7.133e-05, memory: 9058, loss: 0.7545\n", + "2023-07-02 18:22:41,042 - modelscope - INFO - epoch [1][1920/4953]\tlr: 7.120e-05, memory: 9058, loss: 1.8008\n", + "2023-07-02 18:22:45,686 - modelscope - INFO - epoch [1][1925/4953]\tlr: 7.107e-05, memory: 9058, loss: 1.4076\n", + "2023-07-02 18:22:50,652 - modelscope - INFO - epoch [1][1930/4953]\tlr: 7.093e-05, memory: 9058, loss: 1.6135\n", + "2023-07-02 18:22:55,346 - modelscope - INFO - epoch [1][1935/4953]\tlr: 7.080e-05, memory: 9058, loss: 1.3820\n", + "2023-07-02 18:23:00,407 - modelscope - INFO - epoch [1][1940/4953]\tlr: 7.066e-05, memory: 9058, loss: 1.3170\n", + "2023-07-02 18:23:07,089 - modelscope - INFO - epoch [1][1945/4953]\tlr: 7.053e-05, memory: 9058, loss: 1.5059\n", + "2023-07-02 18:23:14,519 - modelscope - INFO - epoch [1][1950/4953]\tlr: 7.039e-05, memory: 9058, loss: 1.1481\n", + "2023-07-02 18:23:20,167 - modelscope - INFO - epoch [1][1955/4953]\tlr: 7.026e-05, memory: 9058, loss: 1.5484\n", + "2023-07-02 18:23:26,522 - modelscope - INFO - epoch [1][1960/4953]\tlr: 7.012e-05, memory: 9058, loss: 1.5056\n", + "2023-07-02 18:23:31,990 - modelscope - INFO - epoch [1][1965/4953]\tlr: 6.999e-05, memory: 9058, loss: 0.8258\n", + "2023-07-02 18:23:36,765 - modelscope - INFO - epoch [1][1970/4953]\tlr: 6.985e-05, memory: 9058, loss: 2.1605\n", + "2023-07-02 18:23:44,015 - modelscope - INFO - epoch [1][1975/4953]\tlr: 6.972e-05, memory: 9058, loss: 0.5347\n", + "2023-07-02 18:23:50,763 - modelscope - INFO - epoch [1][1980/4953]\tlr: 6.958e-05, memory: 9058, loss: 0.5833\n", + "2023-07-02 18:23:56,081 - modelscope - INFO - epoch [1][1985/4953]\tlr: 6.945e-05, memory: 9058, loss: 1.3211\n", + "2023-07-02 18:24:02,890 - modelscope - INFO - epoch [1][1990/4953]\tlr: 6.931e-05, memory: 9058, loss: 0.6614\n", + "2023-07-02 18:24:11,102 - modelscope - INFO - epoch [1][1995/4953]\tlr: 6.917e-05, memory: 9058, 
loss: 1.0019\n", + "2023-07-02 18:24:15,188 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:26:31,178 - modelscope - INFO - Saving checkpoint at 2000 iter\n", + "2023-07-02 18:26:31,219 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter1800_acc0.79673832654953\n", + "2023-07-02 18:26:31,223 - modelscope - INFO - Saving checkpoint at 2000 iter\n", + "2023-07-02 18:26:31,262 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_1800\n", + "2023-07-02 18:26:31,267 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9058, evaluation/acc: 0.8048, evaluation/loss: 1.3532, loss: 2.3406\n", + "2023-07-02 18:26:36,725 - modelscope - INFO - epoch [1][2005/4953]\tlr: 6.890e-05, memory: 9058, loss: 1.7643\n", + "2023-07-02 18:26:43,719 - modelscope - INFO - epoch [1][2010/4953]\tlr: 6.876e-05, memory: 9058, loss: 1.3211\n", + "2023-07-02 18:26:50,532 - modelscope - INFO - epoch [1][2015/4953]\tlr: 6.863e-05, memory: 9058, loss: 1.0998\n", + "2023-07-02 18:26:55,084 - modelscope - INFO - epoch [1][2020/4953]\tlr: 6.849e-05, memory: 9058, loss: 1.0711\n", + "2023-07-02 18:27:01,229 - modelscope - INFO - epoch [1][2025/4953]\tlr: 6.835e-05, memory: 9058, loss: 0.9915\n", + "2023-07-02 18:27:05,887 - modelscope - INFO - epoch [1][2030/4953]\tlr: 6.822e-05, memory: 9058, loss: 1.4650\n", + "2023-07-02 18:27:10,177 - modelscope - INFO - epoch [1][2035/4953]\tlr: 6.808e-05, memory: 9058, loss: 1.7047\n", + "2023-07-02 18:27:16,232 - modelscope - INFO - epoch [1][2040/4953]\tlr: 6.794e-05, memory: 9058, loss: 1.1574\n", + "2023-07-02 18:27:20,822 - modelscope - INFO - epoch [1][2045/4953]\tlr: 6.780e-05, memory: 9058, loss: 2.8094\n", + "2023-07-02 18:27:26,542 - modelscope - INFO - epoch [1][2050/4953]\tlr: 6.767e-05, memory: 9058, loss: 1.8707\n", + "2023-07-02 18:27:33,544 - modelscope - INFO - epoch [1][2055/4953]\tlr: 6.753e-05, memory: 9058, loss: 0.4879\n", + "2023-07-02 18:27:38,872 - modelscope - INFO - epoch [1][2060/4953]\tlr: 6.739e-05, memory: 9058, loss: 1.4332\n", + "2023-07-02 18:27:45,755 - modelscope - INFO - epoch [1][2065/4953]\tlr: 6.725e-05, memory: 9058, loss: 1.3403\n", + "2023-07-02 18:27:52,231 - modelscope - INFO - epoch [1][2070/4953]\tlr: 6.712e-05, memory: 9058, loss: 1.4531\n", + "2023-07-02 18:27:55,367 - modelscope - INFO - epoch [1][2075/4953]\tlr: 6.698e-05, memory: 9058, loss: 2.8781\n", + "2023-07-02 18:28:03,691 - modelscope - INFO - epoch [1][2080/4953]\tlr: 6.684e-05, memory: 9058, loss: 1.1735\n", + "2023-07-02 18:28:12,186 - modelscope - INFO - epoch [1][2085/4953]\tlr: 6.670e-05, memory: 9058, loss: 0.9088\n", + "2023-07-02 18:28:18,486 - modelscope - INFO - epoch [1][2090/4953]\tlr: 6.656e-05, memory: 9058, loss: 0.4293\n", + "2023-07-02 18:28:24,461 - modelscope - INFO - epoch [1][2095/4953]\tlr: 6.642e-05, memory: 9058, loss: 2.8336\n", + "2023-07-02 18:28:31,009 - modelscope - INFO - epoch [1][2100/4953]\tlr: 6.628e-05, memory: 9058, loss: 0.6750\n", + "2023-07-02 18:28:35,682 - modelscope - INFO - epoch [1][2105/4953]\tlr: 6.614e-05, memory: 9058, loss: 1.2004\n", + "2023-07-02 18:28:42,815 - modelscope - INFO - epoch [1][2110/4953]\tlr: 6.601e-05, memory: 9058, loss: 0.7390\n", + "2023-07-02 18:28:48,536 - modelscope - INFO - epoch [1][2115/4953]\tlr: 6.587e-05, memory: 9058, loss: 1.2892\n", + "2023-07-02 
18:28:54,885 - modelscope - INFO - epoch [1][2120/4953]\tlr: 6.573e-05, memory: 9058, loss: 1.1596\n", + "2023-07-02 18:29:01,644 - modelscope - INFO - epoch [1][2125/4953]\tlr: 6.559e-05, memory: 9058, loss: 1.2383\n", + "2023-07-02 18:29:06,513 - modelscope - INFO - epoch [1][2130/4953]\tlr: 6.545e-05, memory: 9058, loss: 1.6500\n", + "2023-07-02 18:29:12,125 - modelscope - INFO - epoch [1][2135/4953]\tlr: 6.531e-05, memory: 9058, loss: 1.4234\n", + "2023-07-02 18:29:16,930 - modelscope - INFO - epoch [1][2140/4953]\tlr: 6.517e-05, memory: 9058, loss: 0.9209\n", + "2023-07-02 18:29:23,051 - modelscope - INFO - epoch [1][2145/4953]\tlr: 6.503e-05, memory: 9058, loss: 1.3340\n", + "2023-07-02 18:29:26,259 - modelscope - INFO - epoch [1][2150/4953]\tlr: 6.489e-05, memory: 9058, loss: 2.2531\n", + "2023-07-02 18:29:30,151 - modelscope - INFO - epoch [1][2155/4953]\tlr: 6.475e-05, memory: 9058, loss: 2.4398\n", + "2023-07-02 18:29:35,984 - modelscope - INFO - epoch [1][2160/4953]\tlr: 6.461e-05, memory: 9058, loss: 1.2609\n", + "2023-07-02 18:29:42,072 - modelscope - INFO - epoch [1][2165/4953]\tlr: 6.447e-05, memory: 9058, loss: 1.3589\n", + "2023-07-02 18:29:47,131 - modelscope - INFO - epoch [1][2170/4953]\tlr: 6.433e-05, memory: 9058, loss: 1.9894\n", + "2023-07-02 18:29:52,463 - modelscope - INFO - epoch [1][2175/4953]\tlr: 6.419e-05, memory: 9058, loss: 1.4546\n", + "2023-07-02 18:29:56,467 - modelscope - INFO - epoch [1][2180/4953]\tlr: 6.405e-05, memory: 9058, loss: 2.2633\n", + "2023-07-02 18:30:00,810 - modelscope - INFO - epoch [1][2185/4953]\tlr: 6.391e-05, memory: 9058, loss: 1.4179\n", + "2023-07-02 18:30:04,745 - modelscope - INFO - epoch [1][2190/4953]\tlr: 6.377e-05, memory: 9058, loss: 1.1947\n", + "2023-07-02 18:30:10,179 - modelscope - INFO - epoch [1][2195/4953]\tlr: 6.363e-05, memory: 9058, loss: 1.5030\n", + "2023-07-02 18:30:16,533 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:16<00:00, 2.04it/s]\n", + "2023-07-02 18:32:32,577 - modelscope - INFO - Saving checkpoint at 2200 iter\n", + "2023-07-02 18:32:32,617 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter2000_acc0.8048229217529297\n", + "2023-07-02 18:32:32,621 - modelscope - INFO - Saving checkpoint at 2200 iter\n", + "2023-07-02 18:32:32,661 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_2000\n", + "2023-07-02 18:32:32,665 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9058, evaluation/acc: 0.8064, evaluation/loss: 1.3193, loss: 0.8660\n", + "2023-07-02 18:32:38,756 - modelscope - INFO - epoch [1][2205/4953]\tlr: 6.334e-05, memory: 9058, loss: 1.2521\n", + "2023-07-02 18:32:45,468 - modelscope - INFO - epoch [1][2210/4953]\tlr: 6.320e-05, memory: 9058, loss: 1.0652\n", + "2023-07-02 18:32:51,626 - modelscope - INFO - epoch [1][2215/4953]\tlr: 6.306e-05, memory: 9058, loss: 0.8250\n", + "2023-07-02 18:32:56,742 - modelscope - INFO - epoch [1][2220/4953]\tlr: 6.292e-05, memory: 9058, loss: 1.2680\n", + "2023-07-02 18:33:02,927 - modelscope - INFO - epoch [1][2225/4953]\tlr: 6.278e-05, memory: 9058, loss: 1.5531\n", + "2023-07-02 18:33:08,196 - modelscope - INFO - epoch [1][2230/4953]\tlr: 6.264e-05, memory: 9058, loss: 1.5766\n", + "2023-07-02 18:33:14,926 - modelscope - INFO - epoch [1][2235/4953]\tlr: 6.250e-05, memory: 9058, loss: 1.6031\n", + "2023-07-02 18:33:19,152 - modelscope - 
INFO - epoch [1][2240/4953]\tlr: 6.236e-05, memory: 9058, loss: 1.8438\n", + "2023-07-02 18:33:26,986 - modelscope - INFO - epoch [1][2245/4953]\tlr: 6.221e-05, memory: 9058, loss: 1.0715\n", + "2023-07-02 18:33:34,062 - modelscope - INFO - epoch [1][2250/4953]\tlr: 6.207e-05, memory: 9058, loss: 1.3094\n", + "2023-07-02 18:33:40,767 - modelscope - INFO - epoch [1][2255/4953]\tlr: 6.193e-05, memory: 9058, loss: 0.5586\n", + "2023-07-02 18:33:45,996 - modelscope - INFO - epoch [1][2260/4953]\tlr: 6.179e-05, memory: 9058, loss: 1.0727\n", + "2023-07-02 18:33:50,926 - modelscope - INFO - epoch [1][2265/4953]\tlr: 6.165e-05, memory: 9058, loss: 0.5758\n", + "2023-07-02 18:33:54,762 - modelscope - INFO - epoch [1][2270/4953]\tlr: 6.151e-05, memory: 9058, loss: 1.1336\n", + "2023-07-02 18:34:00,210 - modelscope - INFO - epoch [1][2275/4953]\tlr: 6.136e-05, memory: 9058, loss: 1.0373\n", + "2023-07-02 18:34:08,272 - modelscope - INFO - epoch [1][2280/4953]\tlr: 6.122e-05, memory: 9058, loss: 0.7815\n", + "2023-07-02 18:34:14,309 - modelscope - INFO - epoch [1][2285/4953]\tlr: 6.108e-05, memory: 9058, loss: 1.4531\n", + "2023-07-02 18:34:21,626 - modelscope - INFO - epoch [1][2290/4953]\tlr: 6.094e-05, memory: 9058, loss: 1.6297\n", + "2023-07-02 18:34:28,588 - modelscope - INFO - epoch [1][2295/4953]\tlr: 6.080e-05, memory: 9082, loss: 1.6783\n", + "2023-07-02 18:34:33,419 - modelscope - INFO - epoch [1][2300/4953]\tlr: 6.065e-05, memory: 9082, loss: 2.0078\n", + "2023-07-02 18:34:38,966 - modelscope - INFO - epoch [1][2305/4953]\tlr: 6.051e-05, memory: 9082, loss: 1.6065\n", + "2023-07-02 18:34:44,320 - modelscope - INFO - epoch [1][2310/4953]\tlr: 6.037e-05, memory: 9082, loss: 1.6664\n", + "2023-07-02 18:34:49,557 - modelscope - INFO - epoch [1][2315/4953]\tlr: 6.023e-05, memory: 9082, loss: 2.1622\n", + "2023-07-02 18:34:54,691 - modelscope - INFO - epoch [1][2320/4953]\tlr: 6.008e-05, memory: 9082, loss: 2.2738\n", + "2023-07-02 18:35:02,067 - modelscope - INFO - epoch [1][2325/4953]\tlr: 5.994e-05, memory: 9082, loss: 0.6338\n", + "2023-07-02 18:35:07,658 - modelscope - INFO - epoch [1][2330/4953]\tlr: 5.980e-05, memory: 9082, loss: 0.9046\n", + "2023-07-02 18:35:13,966 - modelscope - INFO - epoch [1][2335/4953]\tlr: 5.966e-05, memory: 9082, loss: 1.2388\n", + "2023-07-02 18:35:19,741 - modelscope - INFO - epoch [1][2340/4953]\tlr: 5.951e-05, memory: 9082, loss: 0.7371\n", + "2023-07-02 18:35:25,904 - modelscope - INFO - epoch [1][2345/4953]\tlr: 5.937e-05, memory: 9082, loss: 1.4103\n", + "2023-07-02 18:35:31,382 - modelscope - INFO - epoch [1][2350/4953]\tlr: 5.923e-05, memory: 9082, loss: 1.4088\n", + "2023-07-02 18:35:36,193 - modelscope - INFO - epoch [1][2355/4953]\tlr: 5.909e-05, memory: 9082, loss: 2.0184\n", + "2023-07-02 18:35:40,781 - modelscope - INFO - epoch [1][2360/4953]\tlr: 5.894e-05, memory: 9082, loss: 1.1237\n", + "2023-07-02 18:35:45,133 - modelscope - INFO - epoch [1][2365/4953]\tlr: 5.880e-05, memory: 9082, loss: 2.1938\n", + "2023-07-02 18:35:51,029 - modelscope - INFO - epoch [1][2370/4953]\tlr: 5.866e-05, memory: 9082, loss: 0.9563\n", + "2023-07-02 18:35:57,943 - modelscope - INFO - epoch [1][2375/4953]\tlr: 5.852e-05, memory: 9082, loss: 1.3258\n", + "2023-07-02 18:36:05,016 - modelscope - INFO - epoch [1][2380/4953]\tlr: 5.837e-05, memory: 9082, loss: 1.2687\n", + "2023-07-02 18:36:09,977 - modelscope - INFO - epoch [1][2385/4953]\tlr: 5.823e-05, memory: 9082, loss: 1.2655\n", + "2023-07-02 18:36:16,229 - modelscope - INFO - epoch [1][2390/4953]\tlr: 5.809e-05, 
memory: 9082, loss: 0.9164\n", + "2023-07-02 18:36:21,471 - modelscope - INFO - epoch [1][2395/4953]\tlr: 5.794e-05, memory: 9082, loss: 1.6281\n", + "2023-07-02 18:36:27,959 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:38:43,433 - modelscope - INFO - Saving checkpoint at 2400 iter\n", + "2023-07-02 18:38:43,474 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter2200_acc0.8063529133796692\n", + "2023-07-02 18:38:43,478 - modelscope - INFO - Saving checkpoint at 2400 iter\n", + "2023-07-02 18:38:43,517 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_2200\n", + "2023-07-02 18:38:43,521 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8076, evaluation/loss: 1.3023, loss: 0.6604\n", + "2023-07-02 18:38:48,050 - modelscope - INFO - epoch [1][2405/4953]\tlr: 5.766e-05, memory: 9082, loss: 1.8258\n", + "2023-07-02 18:38:54,650 - modelscope - INFO - epoch [1][2410/4953]\tlr: 5.751e-05, memory: 9082, loss: 1.3132\n", + "2023-07-02 18:38:59,846 - modelscope - INFO - epoch [1][2415/4953]\tlr: 5.737e-05, memory: 9082, loss: 1.6910\n", + "2023-07-02 18:39:07,443 - modelscope - INFO - epoch [1][2420/4953]\tlr: 5.723e-05, memory: 9082, loss: 1.4445\n", + "2023-07-02 18:39:15,603 - modelscope - INFO - epoch [1][2425/4953]\tlr: 5.708e-05, memory: 9082, loss: 0.9867\n", + "2023-07-02 18:39:21,112 - modelscope - INFO - epoch [1][2430/4953]\tlr: 5.694e-05, memory: 9082, loss: 1.5023\n", + "2023-07-02 18:39:26,278 - modelscope - INFO - epoch [1][2435/4953]\tlr: 5.680e-05, memory: 9082, loss: 1.5297\n", + "2023-07-02 18:39:32,189 - modelscope - INFO - epoch [1][2440/4953]\tlr: 5.666e-05, memory: 9082, loss: 1.2663\n", + "2023-07-02 18:39:39,288 - modelscope - INFO - epoch [1][2445/4953]\tlr: 5.651e-05, memory: 9082, loss: 1.1214\n", + "2023-07-02 18:39:45,604 - modelscope - INFO - epoch [1][2450/4953]\tlr: 5.637e-05, memory: 9082, loss: 0.7744\n", + "2023-07-02 18:39:50,026 - modelscope - INFO - epoch [1][2455/4953]\tlr: 5.623e-05, memory: 9082, loss: 1.3865\n", + "2023-07-02 18:39:57,039 - modelscope - INFO - epoch [1][2460/4953]\tlr: 5.608e-05, memory: 9082, loss: 0.5821\n", + "2023-07-02 18:40:04,905 - modelscope - INFO - epoch [1][2465/4953]\tlr: 5.594e-05, memory: 9082, loss: 1.6459\n", + "2023-07-02 18:40:12,277 - modelscope - INFO - epoch [1][2470/4953]\tlr: 5.580e-05, memory: 9082, loss: 1.5098\n", + "2023-07-02 18:40:21,189 - modelscope - INFO - epoch [1][2475/4953]\tlr: 5.565e-05, memory: 9082, loss: 0.7347\n", + "2023-07-02 18:40:25,832 - modelscope - INFO - epoch [1][2480/4953]\tlr: 5.551e-05, memory: 9082, loss: 1.9617\n", + "2023-07-02 18:40:31,034 - modelscope - INFO - epoch [1][2485/4953]\tlr: 5.537e-05, memory: 9082, loss: 1.3300\n", + "2023-07-02 18:40:35,486 - modelscope - INFO - epoch [1][2490/4953]\tlr: 5.522e-05, memory: 9082, loss: 1.7078\n", + "2023-07-02 18:40:43,211 - modelscope - INFO - epoch [1][2495/4953]\tlr: 5.508e-05, memory: 9082, loss: 1.5921\n", + "2023-07-02 18:40:48,454 - modelscope - INFO - epoch [1][2500/4953]\tlr: 5.494e-05, memory: 9082, loss: 1.9926\n", + "2023-07-02 18:40:53,713 - modelscope - INFO - epoch [1][2505/4953]\tlr: 5.479e-05, memory: 9082, loss: 1.1594\n", + "2023-07-02 18:40:58,439 - modelscope - INFO - epoch [1][2510/4953]\tlr: 5.465e-05, memory: 9082, loss: 
1.1770\n", + "2023-07-02 18:41:04,372 - modelscope - INFO - epoch [1][2515/4953]\tlr: 5.451e-05, memory: 9082, loss: 1.6250\n", + "2023-07-02 18:41:09,182 - modelscope - INFO - epoch [1][2520/4953]\tlr: 5.436e-05, memory: 9082, loss: 1.7578\n", + "2023-07-02 18:41:14,114 - modelscope - INFO - epoch [1][2525/4953]\tlr: 5.422e-05, memory: 9082, loss: 2.3328\n", + "2023-07-02 18:41:20,090 - modelscope - INFO - epoch [1][2530/4953]\tlr: 5.408e-05, memory: 9082, loss: 2.0059\n", + "2023-07-02 18:41:24,643 - modelscope - INFO - epoch [1][2535/4953]\tlr: 5.393e-05, memory: 9082, loss: 1.9216\n", + "2023-07-02 18:41:30,805 - modelscope - INFO - epoch [1][2540/4953]\tlr: 5.379e-05, memory: 9082, loss: 0.7870\n", + "2023-07-02 18:41:35,276 - modelscope - INFO - epoch [1][2545/4953]\tlr: 5.365e-05, memory: 9082, loss: 1.8344\n", + "2023-07-02 18:41:40,107 - modelscope - INFO - epoch [1][2550/4953]\tlr: 5.350e-05, memory: 9082, loss: 1.0918\n", + "2023-07-02 18:41:45,127 - modelscope - INFO - epoch [1][2555/4953]\tlr: 5.336e-05, memory: 9082, loss: 0.8277\n", + "2023-07-02 18:41:49,439 - modelscope - INFO - epoch [1][2560/4953]\tlr: 5.322e-05, memory: 9082, loss: 1.3539\n", + "2023-07-02 18:41:54,796 - modelscope - INFO - epoch [1][2565/4953]\tlr: 5.307e-05, memory: 9082, loss: 1.4898\n", + "2023-07-02 18:41:59,982 - modelscope - INFO - epoch [1][2570/4953]\tlr: 5.293e-05, memory: 9082, loss: 1.4383\n", + "2023-07-02 18:42:06,280 - modelscope - INFO - epoch [1][2575/4953]\tlr: 5.279e-05, memory: 9082, loss: 1.3823\n", + "2023-07-02 18:42:11,765 - modelscope - INFO - epoch [1][2580/4953]\tlr: 5.264e-05, memory: 9082, loss: 1.6961\n", + "2023-07-02 18:42:18,475 - modelscope - INFO - epoch [1][2585/4953]\tlr: 5.250e-05, memory: 9082, loss: 1.7096\n", + "2023-07-02 18:42:25,377 - modelscope - INFO - epoch [1][2590/4953]\tlr: 5.236e-05, memory: 9082, loss: 0.2711\n", + "2023-07-02 18:42:31,462 - modelscope - INFO - epoch [1][2595/4953]\tlr: 5.222e-05, memory: 9082, loss: 1.8032\n", + "2023-07-02 18:42:37,270 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:44:53,170 - modelscope - INFO - Saving checkpoint at 2600 iter\n", + "2023-07-02 18:44:53,210 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter2400_acc0.8075699210166931\n", + "2023-07-02 18:44:53,214 - modelscope - INFO - Saving checkpoint at 2600 iter\n", + "2023-07-02 18:44:53,253 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_2400\n", + "2023-07-02 18:44:53,258 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8082, evaluation/loss: 1.3051, loss: 1.3200\n", + "2023-07-02 18:44:56,746 - modelscope - INFO - epoch [1][2605/4953]\tlr: 5.193e-05, memory: 9082, loss: 2.4016\n", + "2023-07-02 18:45:02,237 - modelscope - INFO - epoch [1][2610/4953]\tlr: 5.179e-05, memory: 9082, loss: 1.4620\n", + "2023-07-02 18:45:08,746 - modelscope - INFO - epoch [1][2615/4953]\tlr: 5.164e-05, memory: 9082, loss: 1.0342\n", + "2023-07-02 18:45:15,827 - modelscope - INFO - epoch [1][2620/4953]\tlr: 5.150e-05, memory: 9082, loss: 1.2133\n", + "2023-07-02 18:45:20,967 - modelscope - INFO - epoch [1][2625/4953]\tlr: 5.136e-05, memory: 9082, loss: 1.1039\n", + "2023-07-02 18:45:28,010 - modelscope - INFO - epoch [1][2630/4953]\tlr: 5.122e-05, memory: 9082, loss: 2.2398\n", + "2023-07-02 
18:45:33,346 - modelscope - INFO - epoch [1][2635/4953]\tlr: 5.107e-05, memory: 9082, loss: 1.0719\n", + "2023-07-02 18:45:38,505 - modelscope - INFO - epoch [1][2640/4953]\tlr: 5.093e-05, memory: 9082, loss: 2.1718\n", + "2023-07-02 18:45:46,286 - modelscope - INFO - epoch [1][2645/4953]\tlr: 5.079e-05, memory: 9082, loss: 1.4109\n", + "2023-07-02 18:45:50,359 - modelscope - INFO - epoch [1][2650/4953]\tlr: 5.065e-05, memory: 9082, loss: 2.7281\n", + "2023-07-02 18:45:54,451 - modelscope - INFO - epoch [1][2655/4953]\tlr: 5.050e-05, memory: 9082, loss: 1.4117\n", + "2023-07-02 18:46:01,191 - modelscope - INFO - epoch [1][2660/4953]\tlr: 5.036e-05, memory: 9082, loss: 1.0565\n", + "2023-07-02 18:46:06,247 - modelscope - INFO - epoch [1][2665/4953]\tlr: 5.022e-05, memory: 9082, loss: 0.9540\n", + "2023-07-02 18:46:13,076 - modelscope - INFO - epoch [1][2670/4953]\tlr: 5.008e-05, memory: 9082, loss: 1.5935\n", + "2023-07-02 18:46:18,638 - modelscope - INFO - epoch [1][2675/4953]\tlr: 4.993e-05, memory: 9082, loss: 2.1958\n", + "2023-07-02 18:46:23,885 - modelscope - INFO - epoch [1][2680/4953]\tlr: 4.979e-05, memory: 9082, loss: 1.6164\n", + "2023-07-02 18:46:31,178 - modelscope - INFO - epoch [1][2685/4953]\tlr: 4.965e-05, memory: 9082, loss: 0.9352\n", + "2023-07-02 18:46:38,014 - modelscope - INFO - epoch [1][2690/4953]\tlr: 4.951e-05, memory: 9082, loss: 1.4887\n", + "2023-07-02 18:46:41,545 - modelscope - INFO - epoch [1][2695/4953]\tlr: 4.936e-05, memory: 9082, loss: 1.2578\n", + "2023-07-02 18:46:46,458 - modelscope - INFO - epoch [1][2700/4953]\tlr: 4.922e-05, memory: 9082, loss: 1.1711\n", + "2023-07-02 18:46:53,227 - modelscope - INFO - epoch [1][2705/4953]\tlr: 4.908e-05, memory: 9082, loss: 1.3223\n", + "2023-07-02 18:46:59,578 - modelscope - INFO - epoch [1][2710/4953]\tlr: 4.894e-05, memory: 9082, loss: 1.4570\n", + "2023-07-02 18:47:04,896 - modelscope - INFO - epoch [1][2715/4953]\tlr: 4.880e-05, memory: 9082, loss: 1.0868\n", + "2023-07-02 18:47:10,404 - modelscope - INFO - epoch [1][2720/4953]\tlr: 4.865e-05, memory: 9082, loss: 1.5884\n", + "2023-07-02 18:47:16,038 - modelscope - INFO - epoch [1][2725/4953]\tlr: 4.851e-05, memory: 9082, loss: 1.0243\n", + "2023-07-02 18:47:22,354 - modelscope - INFO - epoch [1][2730/4953]\tlr: 4.837e-05, memory: 9082, loss: 1.4346\n", + "2023-07-02 18:47:29,290 - modelscope - INFO - epoch [1][2735/4953]\tlr: 4.823e-05, memory: 9082, loss: 0.9521\n", + "2023-07-02 18:47:37,813 - modelscope - INFO - epoch [1][2740/4953]\tlr: 4.809e-05, memory: 9082, loss: 0.7296\n", + "2023-07-02 18:47:40,908 - modelscope - INFO - epoch [1][2745/4953]\tlr: 4.795e-05, memory: 9082, loss: 1.5844\n", + "2023-07-02 18:47:46,334 - modelscope - INFO - epoch [1][2750/4953]\tlr: 4.781e-05, memory: 9082, loss: 1.5023\n", + "2023-07-02 18:47:51,224 - modelscope - INFO - epoch [1][2755/4953]\tlr: 4.766e-05, memory: 9082, loss: 0.9710\n", + "2023-07-02 18:47:58,431 - modelscope - INFO - epoch [1][2760/4953]\tlr: 4.752e-05, memory: 9082, loss: 1.1539\n", + "2023-07-02 18:48:04,898 - modelscope - INFO - epoch [1][2765/4953]\tlr: 4.738e-05, memory: 9082, loss: 1.6984\n", + "2023-07-02 18:48:10,316 - modelscope - INFO - epoch [1][2770/4953]\tlr: 4.724e-05, memory: 9082, loss: 1.5420\n", + "2023-07-02 18:48:16,843 - modelscope - INFO - epoch [1][2775/4953]\tlr: 4.710e-05, memory: 9082, loss: 1.2396\n", + "2023-07-02 18:48:22,406 - modelscope - INFO - epoch [1][2780/4953]\tlr: 4.696e-05, memory: 9082, loss: 1.8611\n", + "2023-07-02 18:48:28,234 - modelscope - INFO - epoch 
[1][2785/4953]\tlr: 4.682e-05, memory: 9082, loss: 1.2051\n", + "2023-07-02 18:48:35,175 - modelscope - INFO - epoch [1][2790/4953]\tlr: 4.668e-05, memory: 9082, loss: 0.9440\n", + "2023-07-02 18:48:40,689 - modelscope - INFO - epoch [1][2795/4953]\tlr: 4.654e-05, memory: 9082, loss: 1.5422\n", + "2023-07-02 18:48:46,340 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:51:02,313 - modelscope - INFO - Saving checkpoint at 2800 iter\n", + "2023-07-02 18:51:02,352 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_2600\n", + "2023-07-02 18:51:02,357 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8080, evaluation/loss: 1.2874, loss: 0.3999\n", + "2023-07-02 18:51:09,389 - modelscope - INFO - epoch [1][2805/4953]\tlr: 4.625e-05, memory: 9082, loss: 0.9511\n", + "2023-07-02 18:51:14,406 - modelscope - INFO - epoch [1][2810/4953]\tlr: 4.611e-05, memory: 9082, loss: 0.9344\n", + "2023-07-02 18:51:19,383 - modelscope - INFO - epoch [1][2815/4953]\tlr: 4.597e-05, memory: 9082, loss: 1.5798\n", + "2023-07-02 18:51:26,100 - modelscope - INFO - epoch [1][2820/4953]\tlr: 4.583e-05, memory: 9082, loss: 1.1518\n", + "2023-07-02 18:51:31,560 - modelscope - INFO - epoch [1][2825/4953]\tlr: 4.569e-05, memory: 9082, loss: 1.9438\n", + "2023-07-02 18:51:37,772 - modelscope - INFO - epoch [1][2830/4953]\tlr: 4.555e-05, memory: 9082, loss: 1.2336\n", + "2023-07-02 18:51:45,037 - modelscope - INFO - epoch [1][2835/4953]\tlr: 4.541e-05, memory: 9082, loss: 0.4342\n", + "2023-07-02 18:51:50,379 - modelscope - INFO - epoch [1][2840/4953]\tlr: 4.527e-05, memory: 9082, loss: 1.5258\n", + "2023-07-02 18:51:55,219 - modelscope - INFO - epoch [1][2845/4953]\tlr: 4.513e-05, memory: 9082, loss: 1.3063\n", + "2023-07-02 18:52:00,648 - modelscope - INFO - epoch [1][2850/4953]\tlr: 4.499e-05, memory: 9082, loss: 1.0977\n", + "2023-07-02 18:52:05,123 - modelscope - INFO - epoch [1][2855/4953]\tlr: 4.486e-05, memory: 9082, loss: 1.2469\n", + "2023-07-02 18:52:10,542 - modelscope - INFO - epoch [1][2860/4953]\tlr: 4.472e-05, memory: 9082, loss: 1.0984\n", + "2023-07-02 18:52:17,747 - modelscope - INFO - epoch [1][2865/4953]\tlr: 4.458e-05, memory: 9082, loss: 0.7611\n", + "2023-07-02 18:52:23,635 - modelscope - INFO - epoch [1][2870/4953]\tlr: 4.444e-05, memory: 9082, loss: 1.9703\n", + "2023-07-02 18:52:29,494 - modelscope - INFO - epoch [1][2875/4953]\tlr: 4.430e-05, memory: 9082, loss: 1.2950\n", + "2023-07-02 18:52:35,837 - modelscope - INFO - epoch [1][2880/4953]\tlr: 4.416e-05, memory: 9082, loss: 0.8969\n", + "2023-07-02 18:52:40,187 - modelscope - INFO - epoch [1][2885/4953]\tlr: 4.402e-05, memory: 9082, loss: 2.0484\n", + "2023-07-02 18:52:46,608 - modelscope - INFO - epoch [1][2890/4953]\tlr: 4.388e-05, memory: 9082, loss: 1.3309\n", + "2023-07-02 18:52:52,971 - modelscope - INFO - epoch [1][2895/4953]\tlr: 4.374e-05, memory: 9082, loss: 2.1859\n", + "2023-07-02 18:52:57,418 - modelscope - INFO - epoch [1][2900/4953]\tlr: 4.360e-05, memory: 9082, loss: 1.4730\n", + "2023-07-02 18:53:02,915 - modelscope - INFO - epoch [1][2905/4953]\tlr: 4.347e-05, memory: 9082, loss: 1.1398\n", + "2023-07-02 18:53:08,380 - modelscope - INFO - epoch [1][2910/4953]\tlr: 4.333e-05, memory: 9082, loss: 1.1520\n", + "2023-07-02 18:53:14,293 - modelscope - INFO - epoch [1][2915/4953]\tlr: 4.319e-05, memory: 9082, loss: 
1.4763\n", + "2023-07-02 18:53:19,782 - modelscope - INFO - epoch [1][2920/4953]\tlr: 4.305e-05, memory: 9082, loss: 1.3924\n", + "2023-07-02 18:53:24,564 - modelscope - INFO - epoch [1][2925/4953]\tlr: 4.291e-05, memory: 9082, loss: 1.1281\n", + "2023-07-02 18:53:28,764 - modelscope - INFO - epoch [1][2930/4953]\tlr: 4.278e-05, memory: 9082, loss: 1.3961\n", + "2023-07-02 18:53:34,633 - modelscope - INFO - epoch [1][2935/4953]\tlr: 4.264e-05, memory: 9082, loss: 1.1989\n", + "2023-07-02 18:53:40,740 - modelscope - INFO - epoch [1][2940/4953]\tlr: 4.250e-05, memory: 9082, loss: 1.4141\n", + "2023-07-02 18:53:45,991 - modelscope - INFO - epoch [1][2945/4953]\tlr: 4.236e-05, memory: 9082, loss: 1.8516\n", + "2023-07-02 18:53:53,446 - modelscope - INFO - epoch [1][2950/4953]\tlr: 4.223e-05, memory: 9082, loss: 1.0945\n", + "2023-07-02 18:53:57,916 - modelscope - INFO - epoch [1][2955/4953]\tlr: 4.209e-05, memory: 9082, loss: 2.4191\n", + "2023-07-02 18:54:03,814 - modelscope - INFO - epoch [1][2960/4953]\tlr: 4.195e-05, memory: 9082, loss: 1.0555\n", + "2023-07-02 18:54:11,481 - modelscope - INFO - epoch [1][2965/4953]\tlr: 4.181e-05, memory: 9082, loss: 1.0359\n", + "2023-07-02 18:54:18,062 - modelscope - INFO - epoch [1][2970/4953]\tlr: 4.168e-05, memory: 9082, loss: 0.5380\n", + "2023-07-02 18:54:23,157 - modelscope - INFO - epoch [1][2975/4953]\tlr: 4.154e-05, memory: 9082, loss: 1.7539\n", + "2023-07-02 18:54:27,560 - modelscope - INFO - epoch [1][2980/4953]\tlr: 4.140e-05, memory: 9082, loss: 1.5100\n", + "2023-07-02 18:54:32,977 - modelscope - INFO - epoch [1][2985/4953]\tlr: 4.127e-05, memory: 9082, loss: 1.5968\n", + "2023-07-02 18:54:38,633 - modelscope - INFO - epoch [1][2990/4953]\tlr: 4.113e-05, memory: 9082, loss: 1.0911\n", + "2023-07-02 18:54:46,186 - modelscope - INFO - epoch [1][2995/4953]\tlr: 4.100e-05, memory: 9082, loss: 0.9789\n", + "2023-07-02 18:54:52,074 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 18:57:08,067 - modelscope - INFO - Saving checkpoint at 3000 iter\n", + "2023-07-02 18:57:08,107 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter2600_acc0.8082306385040283\n", + "2023-07-02 18:57:08,111 - modelscope - INFO - Saving checkpoint at 3000 iter\n", + "2023-07-02 18:57:08,150 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_2800\n", + "2023-07-02 18:57:08,155 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8084, evaluation/loss: 1.2728, loss: 0.7777\n", + "2023-07-02 18:57:14,568 - modelscope - INFO - epoch [1][3005/4953]\tlr: 4.072e-05, memory: 9082, loss: 1.7105\n", + "2023-07-02 18:57:20,305 - modelscope - INFO - epoch [1][3010/4953]\tlr: 4.059e-05, memory: 9082, loss: 0.9040\n", + "2023-07-02 18:57:25,518 - modelscope - INFO - epoch [1][3015/4953]\tlr: 4.045e-05, memory: 9082, loss: 1.3430\n", + "2023-07-02 18:57:30,679 - modelscope - INFO - epoch [1][3020/4953]\tlr: 4.032e-05, memory: 9082, loss: 1.9619\n", + "2023-07-02 18:57:36,997 - modelscope - INFO - epoch [1][3025/4953]\tlr: 4.018e-05, memory: 9082, loss: 0.9646\n", + "2023-07-02 18:57:42,949 - modelscope - INFO - epoch [1][3030/4953]\tlr: 4.005e-05, memory: 9082, loss: 0.8223\n", + "2023-07-02 18:57:47,568 - modelscope - INFO - epoch [1][3035/4953]\tlr: 3.991e-05, memory: 9082, loss: 1.9203\n", + "2023-07-02 
18:57:53,111 - modelscope - INFO - epoch [1][3040/4953]\tlr: 3.978e-05, memory: 9082, loss: 1.0070\n", + "2023-07-02 18:57:59,474 - modelscope - INFO - epoch [1][3045/4953]\tlr: 3.964e-05, memory: 9082, loss: 1.2164\n", + "2023-07-02 18:58:04,237 - modelscope - INFO - epoch [1][3050/4953]\tlr: 3.951e-05, memory: 9082, loss: 1.6008\n", + "2023-07-02 18:58:09,687 - modelscope - INFO - epoch [1][3055/4953]\tlr: 3.937e-05, memory: 9082, loss: 2.0203\n", + "2023-07-02 18:58:14,949 - modelscope - INFO - epoch [1][3060/4953]\tlr: 3.924e-05, memory: 9082, loss: 1.4613\n", + "2023-07-02 18:58:21,818 - modelscope - INFO - epoch [1][3065/4953]\tlr: 3.911e-05, memory: 9082, loss: 1.2766\n", + "2023-07-02 18:58:28,251 - modelscope - INFO - epoch [1][3070/4953]\tlr: 3.897e-05, memory: 9082, loss: 1.2920\n", + "2023-07-02 18:58:34,440 - modelscope - INFO - epoch [1][3075/4953]\tlr: 3.884e-05, memory: 9082, loss: 1.1436\n", + "2023-07-02 18:58:41,344 - modelscope - INFO - epoch [1][3080/4953]\tlr: 3.870e-05, memory: 9082, loss: 1.6750\n", + "2023-07-02 18:58:47,507 - modelscope - INFO - epoch [1][3085/4953]\tlr: 3.857e-05, memory: 9082, loss: 1.4508\n", + "2023-07-02 18:58:53,152 - modelscope - INFO - epoch [1][3090/4953]\tlr: 3.844e-05, memory: 9082, loss: 1.1961\n", + "2023-07-02 18:58:57,615 - modelscope - INFO - epoch [1][3095/4953]\tlr: 3.830e-05, memory: 9082, loss: 2.0420\n", + "2023-07-02 18:59:04,675 - modelscope - INFO - epoch [1][3100/4953]\tlr: 3.817e-05, memory: 9082, loss: 0.3189\n", + "2023-07-02 18:59:09,594 - modelscope - INFO - epoch [1][3105/4953]\tlr: 3.804e-05, memory: 9082, loss: 1.5581\n", + "2023-07-02 18:59:16,591 - modelscope - INFO - epoch [1][3110/4953]\tlr: 3.791e-05, memory: 9082, loss: 0.9396\n", + "2023-07-02 18:59:23,334 - modelscope - INFO - epoch [1][3115/4953]\tlr: 3.777e-05, memory: 9082, loss: 0.6580\n", + "2023-07-02 18:59:28,047 - modelscope - INFO - epoch [1][3120/4953]\tlr: 3.764e-05, memory: 9082, loss: 1.4602\n", + "2023-07-02 18:59:31,315 - modelscope - INFO - epoch [1][3125/4953]\tlr: 3.751e-05, memory: 9082, loss: 1.3484\n", + "2023-07-02 18:59:36,121 - modelscope - INFO - epoch [1][3130/4953]\tlr: 3.738e-05, memory: 9082, loss: 2.1273\n", + "2023-07-02 18:59:44,336 - modelscope - INFO - epoch [1][3135/4953]\tlr: 3.725e-05, memory: 9082, loss: 0.8621\n", + "2023-07-02 18:59:49,884 - modelscope - INFO - epoch [1][3140/4953]\tlr: 3.712e-05, memory: 9082, loss: 1.0844\n", + "2023-07-02 18:59:52,597 - modelscope - INFO - epoch [1][3145/4953]\tlr: 3.698e-05, memory: 9082, loss: 1.5453\n", + "2023-07-02 18:59:59,243 - modelscope - INFO - epoch [1][3150/4953]\tlr: 3.685e-05, memory: 9082, loss: 1.1129\n", + "2023-07-02 19:00:04,220 - modelscope - INFO - epoch [1][3155/4953]\tlr: 3.672e-05, memory: 9082, loss: 1.1824\n", + "2023-07-02 19:00:11,762 - modelscope - INFO - epoch [1][3160/4953]\tlr: 3.659e-05, memory: 9082, loss: 0.5676\n", + "2023-07-02 19:00:18,630 - modelscope - INFO - epoch [1][3165/4953]\tlr: 3.646e-05, memory: 9082, loss: 0.9189\n", + "2023-07-02 19:00:23,483 - modelscope - INFO - epoch [1][3170/4953]\tlr: 3.633e-05, memory: 9082, loss: 1.0324\n", + "2023-07-02 19:00:27,164 - modelscope - INFO - epoch [1][3175/4953]\tlr: 3.620e-05, memory: 9082, loss: 1.2984\n", + "2023-07-02 19:00:32,041 - modelscope - INFO - epoch [1][3180/4953]\tlr: 3.607e-05, memory: 9082, loss: 1.6036\n", + "2023-07-02 19:00:37,245 - modelscope - INFO - epoch [1][3185/4953]\tlr: 3.594e-05, memory: 9082, loss: 1.3896\n", + "2023-07-02 19:00:44,493 - modelscope - INFO - epoch 
[1][3190/4953]\tlr: 3.581e-05, memory: 9082, loss: 1.1153\n", + "2023-07-02 19:00:49,874 - modelscope - INFO - epoch [1][3195/4953]\tlr: 3.568e-05, memory: 9082, loss: 1.2354\n", + "2023-07-02 19:00:55,061 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:03:10,730 - modelscope - INFO - Saving checkpoint at 3200 iter\n", + "2023-07-02 19:03:10,770 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter3000_acc0.8084218502044678\n", + "2023-07-02 19:03:10,774 - modelscope - INFO - Saving checkpoint at 3200 iter\n", + "2023-07-02 19:03:10,813 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_3000\n", + "2023-07-02 19:03:10,818 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8086, evaluation/loss: 1.2627, loss: 1.5492\n", + "2023-07-02 19:03:18,070 - modelscope - INFO - epoch [1][3205/4953]\tlr: 3.542e-05, memory: 9082, loss: 0.1662\n", + "2023-07-02 19:03:26,317 - modelscope - INFO - epoch [1][3210/4953]\tlr: 3.530e-05, memory: 9082, loss: 1.6430\n", + "2023-07-02 19:03:32,449 - modelscope - INFO - epoch [1][3215/4953]\tlr: 3.517e-05, memory: 9082, loss: 0.4798\n", + "2023-07-02 19:03:38,508 - modelscope - INFO - epoch [1][3220/4953]\tlr: 3.504e-05, memory: 9082, loss: 1.0096\n", + "2023-07-02 19:03:45,266 - modelscope - INFO - epoch [1][3225/4953]\tlr: 3.491e-05, memory: 9082, loss: 1.1305\n", + "2023-07-02 19:03:48,361 - modelscope - INFO - epoch [1][3230/4953]\tlr: 3.478e-05, memory: 9082, loss: 1.6721\n", + "2023-07-02 19:03:54,630 - modelscope - INFO - epoch [1][3235/4953]\tlr: 3.465e-05, memory: 9082, loss: 1.1138\n", + "2023-07-02 19:03:59,780 - modelscope - INFO - epoch [1][3240/4953]\tlr: 3.453e-05, memory: 9082, loss: 1.2146\n", + "2023-07-02 19:04:04,310 - modelscope - INFO - epoch [1][3245/4953]\tlr: 3.440e-05, memory: 9082, loss: 0.9602\n", + "2023-07-02 19:04:09,085 - modelscope - INFO - epoch [1][3250/4953]\tlr: 3.427e-05, memory: 9082, loss: 2.0369\n", + "2023-07-02 19:04:13,329 - modelscope - INFO - epoch [1][3255/4953]\tlr: 3.415e-05, memory: 9082, loss: 1.3604\n", + "2023-07-02 19:04:19,728 - modelscope - INFO - epoch [1][3260/4953]\tlr: 3.402e-05, memory: 9082, loss: 1.0500\n", + "2023-07-02 19:04:25,537 - modelscope - INFO - epoch [1][3265/4953]\tlr: 3.389e-05, memory: 9082, loss: 1.0730\n", + "2023-07-02 19:04:33,616 - modelscope - INFO - epoch [1][3270/4953]\tlr: 3.377e-05, memory: 9082, loss: 1.3219\n", + "2023-07-02 19:04:36,942 - modelscope - INFO - epoch [1][3275/4953]\tlr: 3.364e-05, memory: 9082, loss: 0.7494\n", + "2023-07-02 19:04:43,190 - modelscope - INFO - epoch [1][3280/4953]\tlr: 3.351e-05, memory: 9082, loss: 0.8293\n", + "2023-07-02 19:04:51,311 - modelscope - INFO - epoch [1][3285/4953]\tlr: 3.339e-05, memory: 9082, loss: 0.7475\n", + "2023-07-02 19:04:54,815 - modelscope - INFO - epoch [1][3290/4953]\tlr: 3.326e-05, memory: 9082, loss: 1.8000\n", + "2023-07-02 19:05:00,342 - modelscope - INFO - epoch [1][3295/4953]\tlr: 3.314e-05, memory: 9082, loss: 1.9621\n", + "2023-07-02 19:05:06,094 - modelscope - INFO - epoch [1][3300/4953]\tlr: 3.301e-05, memory: 9082, loss: 1.3162\n", + "2023-07-02 19:05:10,639 - modelscope - INFO - epoch [1][3305/4953]\tlr: 3.289e-05, memory: 9082, loss: 1.4781\n", + "2023-07-02 19:05:12,888 - modelscope - INFO - epoch [1][3310/4953]\tlr: 
3.276e-05, memory: 9082, loss: 1.9320\n", + "2023-07-02 19:05:18,374 - modelscope - INFO - epoch [1][3315/4953]\tlr: 3.264e-05, memory: 9082, loss: 0.4891\n", + "2023-07-02 19:05:25,255 - modelscope - INFO - epoch [1][3320/4953]\tlr: 3.252e-05, memory: 9082, loss: 0.9572\n", + "2023-07-02 19:05:31,095 - modelscope - INFO - epoch [1][3325/4953]\tlr: 3.239e-05, memory: 9082, loss: 1.0703\n", + "2023-07-02 19:05:37,787 - modelscope - INFO - epoch [1][3330/4953]\tlr: 3.227e-05, memory: 9082, loss: 0.4883\n", + "2023-07-02 19:05:42,067 - modelscope - INFO - epoch [1][3335/4953]\tlr: 3.214e-05, memory: 9082, loss: 2.1445\n", + "2023-07-02 19:05:47,958 - modelscope - INFO - epoch [1][3340/4953]\tlr: 3.202e-05, memory: 9082, loss: 1.5414\n", + "2023-07-02 19:05:52,434 - modelscope - INFO - epoch [1][3345/4953]\tlr: 3.190e-05, memory: 9082, loss: 1.9531\n", + "2023-07-02 19:05:57,227 - modelscope - INFO - epoch [1][3350/4953]\tlr: 3.178e-05, memory: 9082, loss: 1.2508\n", + "2023-07-02 19:06:03,488 - modelscope - INFO - epoch [1][3355/4953]\tlr: 3.165e-05, memory: 9082, loss: 1.1402\n", + "2023-07-02 19:06:08,978 - modelscope - INFO - epoch [1][3360/4953]\tlr: 3.153e-05, memory: 9082, loss: 1.1211\n", + "2023-07-02 19:06:16,191 - modelscope - INFO - epoch [1][3365/4953]\tlr: 3.141e-05, memory: 9082, loss: 0.7613\n", + "2023-07-02 19:06:23,420 - modelscope - INFO - epoch [1][3370/4953]\tlr: 3.129e-05, memory: 9082, loss: 1.3293\n", + "2023-07-02 19:06:30,067 - modelscope - INFO - epoch [1][3375/4953]\tlr: 3.117e-05, memory: 9082, loss: 1.9758\n", + "2023-07-02 19:06:36,844 - modelscope - INFO - epoch [1][3380/4953]\tlr: 3.104e-05, memory: 9082, loss: 0.3589\n", + "2023-07-02 19:06:43,906 - modelscope - INFO - epoch [1][3385/4953]\tlr: 3.092e-05, memory: 9082, loss: 0.9208\n", + "2023-07-02 19:06:49,972 - modelscope - INFO - epoch [1][3390/4953]\tlr: 3.080e-05, memory: 9082, loss: 1.2713\n", + "2023-07-02 19:06:56,815 - modelscope - INFO - epoch [1][3395/4953]\tlr: 3.068e-05, memory: 9082, loss: 1.3320\n", + "2023-07-02 19:07:00,998 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:09:16,634 - modelscope - INFO - Saving checkpoint at 3400 iter\n", + "2023-07-02 19:09:16,674 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter3200_acc0.8085957169532776\n", + "2023-07-02 19:09:16,679 - modelscope - INFO - Saving checkpoint at 3400 iter\n", + "2023-07-02 19:09:16,718 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_3200\n", + "2023-07-02 19:09:16,723 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8090, evaluation/loss: 1.2532, loss: 1.3594\n", + "2023-07-02 19:09:23,967 - modelscope - INFO - epoch [1][3405/4953]\tlr: 3.044e-05, memory: 9082, loss: 1.4662\n", + "2023-07-02 19:09:27,883 - modelscope - INFO - epoch [1][3410/4953]\tlr: 3.032e-05, memory: 9082, loss: 1.6219\n", + "2023-07-02 19:09:36,612 - modelscope - INFO - epoch [1][3415/4953]\tlr: 3.020e-05, memory: 9082, loss: 0.8362\n", + "2023-07-02 19:09:43,660 - modelscope - INFO - epoch [1][3420/4953]\tlr: 3.008e-05, memory: 9082, loss: 0.5874\n", + "2023-07-02 19:09:50,318 - modelscope - INFO - epoch [1][3425/4953]\tlr: 2.996e-05, memory: 9082, loss: 0.5588\n", + "2023-07-02 19:09:55,763 - modelscope - INFO - epoch [1][3430/4953]\tlr: 2.985e-05, memory: 9082, 
loss: 1.5086\n", + "2023-07-02 19:10:00,017 - modelscope - INFO - epoch [1][3435/4953]\tlr: 2.973e-05, memory: 9082, loss: 1.7063\n", + "2023-07-02 19:10:04,359 - modelscope - INFO - epoch [1][3440/4953]\tlr: 2.961e-05, memory: 9082, loss: 1.0250\n", + "2023-07-02 19:10:11,212 - modelscope - INFO - epoch [1][3445/4953]\tlr: 2.949e-05, memory: 9082, loss: 1.7650\n", + "2023-07-02 19:10:18,583 - modelscope - INFO - epoch [1][3450/4953]\tlr: 2.937e-05, memory: 9082, loss: 1.0846\n", + "2023-07-02 19:10:24,668 - modelscope - INFO - epoch [1][3455/4953]\tlr: 2.926e-05, memory: 9082, loss: 0.6735\n", + "2023-07-02 19:10:29,335 - modelscope - INFO - epoch [1][3460/4953]\tlr: 2.914e-05, memory: 9082, loss: 1.6277\n", + "2023-07-02 19:10:36,188 - modelscope - INFO - epoch [1][3465/4953]\tlr: 2.902e-05, memory: 9082, loss: 0.5597\n", + "2023-07-02 19:10:40,421 - modelscope - INFO - epoch [1][3470/4953]\tlr: 2.891e-05, memory: 9082, loss: 1.6338\n", + "2023-07-02 19:10:45,436 - modelscope - INFO - epoch [1][3475/4953]\tlr: 2.879e-05, memory: 9082, loss: 1.2394\n", + "2023-07-02 19:10:51,181 - modelscope - INFO - epoch [1][3480/4953]\tlr: 2.867e-05, memory: 9082, loss: 1.4753\n", + "2023-07-02 19:10:57,524 - modelscope - INFO - epoch [1][3485/4953]\tlr: 2.856e-05, memory: 9082, loss: 0.2870\n", + "2023-07-02 19:11:04,534 - modelscope - INFO - epoch [1][3490/4953]\tlr: 2.844e-05, memory: 9082, loss: 1.1145\n", + "2023-07-02 19:11:09,939 - modelscope - INFO - epoch [1][3495/4953]\tlr: 2.833e-05, memory: 9082, loss: 1.5525\n", + "2023-07-02 19:11:16,051 - modelscope - INFO - epoch [1][3500/4953]\tlr: 2.821e-05, memory: 9082, loss: 0.9821\n", + "2023-07-02 19:11:21,112 - modelscope - INFO - epoch [1][3505/4953]\tlr: 2.810e-05, memory: 9082, loss: 0.5899\n", + "2023-07-02 19:11:26,462 - modelscope - INFO - epoch [1][3510/4953]\tlr: 2.798e-05, memory: 9082, loss: 1.0081\n", + "2023-07-02 19:11:31,458 - modelscope - INFO - epoch [1][3515/4953]\tlr: 2.787e-05, memory: 9082, loss: 1.9700\n", + "2023-07-02 19:11:36,854 - modelscope - INFO - epoch [1][3520/4953]\tlr: 2.775e-05, memory: 9082, loss: 1.4628\n", + "2023-07-02 19:11:42,492 - modelscope - INFO - epoch [1][3525/4953]\tlr: 2.764e-05, memory: 9082, loss: 2.0672\n", + "2023-07-02 19:11:46,917 - modelscope - INFO - epoch [1][3530/4953]\tlr: 2.753e-05, memory: 9082, loss: 1.2469\n", + "2023-07-02 19:11:51,730 - modelscope - INFO - epoch [1][3535/4953]\tlr: 2.741e-05, memory: 9082, loss: 1.8609\n", + "2023-07-02 19:11:58,366 - modelscope - INFO - epoch [1][3540/4953]\tlr: 2.730e-05, memory: 9082, loss: 1.0629\n", + "2023-07-02 19:12:03,036 - modelscope - INFO - epoch [1][3545/4953]\tlr: 2.719e-05, memory: 9082, loss: 1.9508\n", + "2023-07-02 19:12:07,669 - modelscope - INFO - epoch [1][3550/4953]\tlr: 2.707e-05, memory: 9082, loss: 1.1436\n", + "2023-07-02 19:12:12,567 - modelscope - INFO - epoch [1][3555/4953]\tlr: 2.696e-05, memory: 9082, loss: 1.7292\n", + "2023-07-02 19:12:18,906 - modelscope - INFO - epoch [1][3560/4953]\tlr: 2.685e-05, memory: 9082, loss: 1.4152\n", + "2023-07-02 19:12:27,058 - modelscope - INFO - epoch [1][3565/4953]\tlr: 2.674e-05, memory: 9082, loss: 1.5086\n", + "2023-07-02 19:12:34,096 - modelscope - INFO - epoch [1][3570/4953]\tlr: 2.663e-05, memory: 9082, loss: 0.4786\n", + "2023-07-02 19:12:40,666 - modelscope - INFO - epoch [1][3575/4953]\tlr: 2.652e-05, memory: 9082, loss: 1.7496\n", + "2023-07-02 19:12:47,997 - modelscope - INFO - epoch [1][3580/4953]\tlr: 2.641e-05, memory: 9082, loss: 1.0977\n", + "2023-07-02 19:12:51,897 
- modelscope - INFO - epoch [1][3585/4953]\tlr: 2.630e-05, memory: 9082, loss: 1.6832\n", + "2023-07-02 19:12:59,020 - modelscope - INFO - epoch [1][3590/4953]\tlr: 2.619e-05, memory: 9082, loss: 0.4163\n", + "2023-07-02 19:13:07,038 - modelscope - INFO - epoch [1][3595/4953]\tlr: 2.608e-05, memory: 9082, loss: 0.7688\n", + "2023-07-02 19:13:13,293 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.05it/s]\n", + "2023-07-02 19:15:28,735 - modelscope - INFO - Saving checkpoint at 3600 iter\n", + "2023-07-02 19:15:28,776 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter3400_acc0.8089956045150757\n", + "2023-07-02 19:15:28,780 - modelscope - INFO - Saving checkpoint at 3600 iter\n", + "2023-07-02 19:15:28,819 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_3400\n", + "2023-07-02 19:15:28,824 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8097, evaluation/loss: 1.2494, loss: 0.8758\n", + "2023-07-02 19:15:35,336 - modelscope - INFO - epoch [1][3605/4953]\tlr: 2.586e-05, memory: 9082, loss: 0.5239\n", + "2023-07-02 19:15:41,849 - modelscope - INFO - epoch [1][3610/4953]\tlr: 2.575e-05, memory: 9082, loss: 1.5448\n", + "2023-07-02 19:15:46,600 - modelscope - INFO - epoch [1][3615/4953]\tlr: 2.564e-05, memory: 9082, loss: 1.2828\n", + "2023-07-02 19:15:53,236 - modelscope - INFO - epoch [1][3620/4953]\tlr: 2.553e-05, memory: 9082, loss: 1.3886\n", + "2023-07-02 19:15:59,060 - modelscope - INFO - epoch [1][3625/4953]\tlr: 2.542e-05, memory: 9082, loss: 1.2750\n", + "2023-07-02 19:16:04,370 - modelscope - INFO - epoch [1][3630/4953]\tlr: 2.532e-05, memory: 9082, loss: 1.0339\n", + "2023-07-02 19:16:09,908 - modelscope - INFO - epoch [1][3635/4953]\tlr: 2.521e-05, memory: 9082, loss: 1.6308\n", + "2023-07-02 19:16:16,808 - modelscope - INFO - epoch [1][3640/4953]\tlr: 2.510e-05, memory: 9082, loss: 1.2590\n", + "2023-07-02 19:16:22,072 - modelscope - INFO - epoch [1][3645/4953]\tlr: 2.500e-05, memory: 9082, loss: 2.3364\n", + "2023-07-02 19:16:29,035 - modelscope - INFO - epoch [1][3650/4953]\tlr: 2.489e-05, memory: 9082, loss: 1.1231\n", + "2023-07-02 19:16:35,184 - modelscope - INFO - epoch [1][3655/4953]\tlr: 2.478e-05, memory: 9082, loss: 0.8313\n", + "2023-07-02 19:16:41,731 - modelscope - INFO - epoch [1][3660/4953]\tlr: 2.468e-05, memory: 9082, loss: 1.2649\n", + "2023-07-02 19:16:47,773 - modelscope - INFO - epoch [1][3665/4953]\tlr: 2.457e-05, memory: 9082, loss: 0.1984\n", + "2023-07-02 19:16:53,645 - modelscope - INFO - epoch [1][3670/4953]\tlr: 2.447e-05, memory: 9082, loss: 1.2534\n", + "2023-07-02 19:16:58,300 - modelscope - INFO - epoch [1][3675/4953]\tlr: 2.436e-05, memory: 9082, loss: 1.1865\n", + "2023-07-02 19:17:02,935 - modelscope - INFO - epoch [1][3680/4953]\tlr: 2.426e-05, memory: 9082, loss: 1.0458\n", + "2023-07-02 19:17:10,508 - modelscope - INFO - epoch [1][3685/4953]\tlr: 2.415e-05, memory: 9082, loss: 1.4961\n", + "2023-07-02 19:17:15,416 - modelscope - INFO - epoch [1][3690/4953]\tlr: 2.405e-05, memory: 9082, loss: 1.9992\n", + "2023-07-02 19:17:21,634 - modelscope - INFO - epoch [1][3695/4953]\tlr: 2.394e-05, memory: 9082, loss: 1.0555\n", + "2023-07-02 19:17:25,173 - modelscope - INFO - epoch [1][3700/4953]\tlr: 2.384e-05, memory: 9082, loss: 1.3477\n", + "2023-07-02 19:17:31,506 - modelscope - INFO - epoch 
[1][3705/4953]\tlr: 2.374e-05, memory: 9082, loss: 1.4563\n", + "2023-07-02 19:17:37,274 - modelscope - INFO - epoch [1][3710/4953]\tlr: 2.364e-05, memory: 9082, loss: 1.0638\n", + "2023-07-02 19:17:42,368 - modelscope - INFO - epoch [1][3715/4953]\tlr: 2.353e-05, memory: 9082, loss: 1.0961\n", + "2023-07-02 19:17:48,384 - modelscope - INFO - epoch [1][3720/4953]\tlr: 2.343e-05, memory: 9082, loss: 0.6570\n", + "2023-07-02 19:17:54,584 - modelscope - INFO - epoch [1][3725/4953]\tlr: 2.333e-05, memory: 9082, loss: 1.4391\n", + "2023-07-02 19:18:00,199 - modelscope - INFO - epoch [1][3730/4953]\tlr: 2.323e-05, memory: 9082, loss: 1.0986\n", + "2023-07-02 19:18:06,613 - modelscope - INFO - epoch [1][3735/4953]\tlr: 2.313e-05, memory: 9082, loss: 1.2259\n", + "2023-07-02 19:18:11,954 - modelscope - INFO - epoch [1][3740/4953]\tlr: 2.303e-05, memory: 9082, loss: 1.2266\n", + "2023-07-02 19:18:19,245 - modelscope - INFO - epoch [1][3745/4953]\tlr: 2.293e-05, memory: 9082, loss: 0.8633\n", + "2023-07-02 19:18:24,296 - modelscope - INFO - epoch [1][3750/4953]\tlr: 2.283e-05, memory: 9082, loss: 1.2285\n", + "2023-07-02 19:18:31,793 - modelscope - INFO - epoch [1][3755/4953]\tlr: 2.273e-05, memory: 9082, loss: 1.7500\n", + "2023-07-02 19:18:37,572 - modelscope - INFO - epoch [1][3760/4953]\tlr: 2.263e-05, memory: 9082, loss: 0.6735\n", + "2023-07-02 19:18:44,200 - modelscope - INFO - epoch [1][3765/4953]\tlr: 2.253e-05, memory: 9082, loss: 1.8328\n", + "2023-07-02 19:18:49,475 - modelscope - INFO - epoch [1][3770/4953]\tlr: 2.243e-05, memory: 9082, loss: 1.3798\n", + "2023-07-02 19:18:53,690 - modelscope - INFO - epoch [1][3775/4953]\tlr: 2.233e-05, memory: 9082, loss: 2.3062\n", + "2023-07-02 19:18:58,638 - modelscope - INFO - epoch [1][3780/4953]\tlr: 2.223e-05, memory: 9082, loss: 1.1617\n", + "2023-07-02 19:19:05,096 - modelscope - INFO - epoch [1][3785/4953]\tlr: 2.213e-05, memory: 9082, loss: 1.7489\n", + "2023-07-02 19:19:12,468 - modelscope - INFO - epoch [1][3790/4953]\tlr: 2.204e-05, memory: 9082, loss: 1.1701\n", + "2023-07-02 19:19:22,097 - modelscope - INFO - epoch [1][3795/4953]\tlr: 2.194e-05, memory: 9082, loss: 0.3038\n", + "2023-07-02 19:19:29,069 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:21:44,819 - modelscope - INFO - Saving checkpoint at 3800 iter\n", + "2023-07-02 19:21:44,859 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter3600_acc0.8096736669540405\n", + "2023-07-02 19:21:44,863 - modelscope - INFO - Saving checkpoint at 3800 iter\n", + "2023-07-02 19:21:44,902 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_3600\n", + "2023-07-02 19:21:44,907 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8099, evaluation/loss: 1.2569, loss: 1.0828\n", + "2023-07-02 19:21:50,359 - modelscope - INFO - epoch [1][3805/4953]\tlr: 2.174e-05, memory: 9082, loss: 1.3383\n", + "2023-07-02 19:21:56,101 - modelscope - INFO - epoch [1][3810/4953]\tlr: 2.165e-05, memory: 9082, loss: 1.3833\n", + "2023-07-02 19:22:02,037 - modelscope - INFO - epoch [1][3815/4953]\tlr: 2.155e-05, memory: 9082, loss: 1.1005\n", + "2023-07-02 19:22:07,031 - modelscope - INFO - epoch [1][3820/4953]\tlr: 2.146e-05, memory: 9082, loss: 1.6941\n", + "2023-07-02 19:22:11,810 - modelscope - INFO - epoch [1][3825/4953]\tlr: 
2.136e-05, memory: 9082, loss: 1.8938\n", + "2023-07-02 19:22:16,752 - modelscope - INFO - epoch [1][3830/4953]\tlr: 2.127e-05, memory: 9082, loss: 1.6121\n", + "2023-07-02 19:22:25,240 - modelscope - INFO - epoch [1][3835/4953]\tlr: 2.117e-05, memory: 9082, loss: 0.7009\n", + "2023-07-02 19:22:31,231 - modelscope - INFO - epoch [1][3840/4953]\tlr: 2.108e-05, memory: 9082, loss: 1.8273\n", + "2023-07-02 19:22:37,939 - modelscope - INFO - epoch [1][3845/4953]\tlr: 2.098e-05, memory: 9082, loss: 0.8680\n", + "2023-07-02 19:22:43,021 - modelscope - INFO - epoch [1][3850/4953]\tlr: 2.089e-05, memory: 9082, loss: 1.5473\n", + "2023-07-02 19:22:49,156 - modelscope - INFO - epoch [1][3855/4953]\tlr: 2.080e-05, memory: 9082, loss: 1.1435\n", + "2023-07-02 19:22:53,445 - modelscope - INFO - epoch [1][3860/4953]\tlr: 2.071e-05, memory: 9082, loss: 1.1194\n", + "2023-07-02 19:22:59,485 - modelscope - INFO - epoch [1][3865/4953]\tlr: 2.061e-05, memory: 9082, loss: 1.0640\n", + "2023-07-02 19:23:03,673 - modelscope - INFO - epoch [1][3870/4953]\tlr: 2.052e-05, memory: 9082, loss: 1.0879\n", + "2023-07-02 19:23:08,721 - modelscope - INFO - epoch [1][3875/4953]\tlr: 2.043e-05, memory: 9082, loss: 0.9207\n", + "2023-07-02 19:23:14,908 - modelscope - INFO - epoch [1][3880/4953]\tlr: 2.034e-05, memory: 9082, loss: 0.5737\n", + "2023-07-02 19:23:21,843 - modelscope - INFO - epoch [1][3885/4953]\tlr: 2.025e-05, memory: 9082, loss: 1.3052\n", + "2023-07-02 19:23:30,760 - modelscope - INFO - epoch [1][3890/4953]\tlr: 2.016e-05, memory: 9082, loss: 1.1666\n", + "2023-07-02 19:23:36,181 - modelscope - INFO - epoch [1][3895/4953]\tlr: 2.007e-05, memory: 9082, loss: 1.7224\n", + "2023-07-02 19:23:40,094 - modelscope - INFO - epoch [1][3900/4953]\tlr: 1.998e-05, memory: 9082, loss: 1.0042\n", + "2023-07-02 19:23:47,764 - modelscope - INFO - epoch [1][3905/4953]\tlr: 1.989e-05, memory: 9082, loss: 1.2044\n", + "2023-07-02 19:23:54,075 - modelscope - INFO - epoch [1][3910/4953]\tlr: 1.980e-05, memory: 9082, loss: 1.3367\n", + "2023-07-02 19:24:00,699 - modelscope - INFO - epoch [1][3915/4953]\tlr: 1.971e-05, memory: 9082, loss: 1.1395\n", + "2023-07-02 19:24:06,413 - modelscope - INFO - epoch [1][3920/4953]\tlr: 1.962e-05, memory: 9082, loss: 1.1899\n", + "2023-07-02 19:24:12,663 - modelscope - INFO - epoch [1][3925/4953]\tlr: 1.953e-05, memory: 9082, loss: 1.0320\n", + "2023-07-02 19:24:18,897 - modelscope - INFO - epoch [1][3930/4953]\tlr: 1.944e-05, memory: 9082, loss: 2.0555\n", + "2023-07-02 19:24:25,760 - modelscope - INFO - epoch [1][3935/4953]\tlr: 1.936e-05, memory: 9082, loss: 1.3466\n", + "2023-07-02 19:24:29,617 - modelscope - INFO - epoch [1][3940/4953]\tlr: 1.927e-05, memory: 9082, loss: 1.7797\n", + "2023-07-02 19:24:34,498 - modelscope - INFO - epoch [1][3945/4953]\tlr: 1.918e-05, memory: 9082, loss: 0.6168\n", + "2023-07-02 19:24:39,457 - modelscope - INFO - epoch [1][3950/4953]\tlr: 1.910e-05, memory: 9082, loss: 1.1122\n", + "2023-07-02 19:24:48,913 - modelscope - INFO - epoch [1][3955/4953]\tlr: 1.901e-05, memory: 9082, loss: 0.9353\n", + "2023-07-02 19:24:55,564 - modelscope - INFO - epoch [1][3960/4953]\tlr: 1.892e-05, memory: 9082, loss: 0.9599\n", + "2023-07-02 19:25:00,536 - modelscope - INFO - epoch [1][3965/4953]\tlr: 1.884e-05, memory: 9082, loss: 1.4582\n", + "2023-07-02 19:25:07,894 - modelscope - INFO - epoch [1][3970/4953]\tlr: 1.875e-05, memory: 9082, loss: 1.0347\n", + "2023-07-02 19:25:11,877 - modelscope - INFO - epoch [1][3975/4953]\tlr: 1.867e-05, memory: 9082, loss: 1.9000\n", + 
"2023-07-02 19:25:18,225 - modelscope - INFO - epoch [1][3980/4953]\tlr: 1.858e-05, memory: 9082, loss: 1.4125\n", + "2023-07-02 19:25:22,417 - modelscope - INFO - epoch [1][3985/4953]\tlr: 1.850e-05, memory: 9082, loss: 1.8959\n", + "2023-07-02 19:25:27,100 - modelscope - INFO - epoch [1][3990/4953]\tlr: 1.842e-05, memory: 9082, loss: 1.4008\n", + "2023-07-02 19:25:31,958 - modelscope - INFO - epoch [1][3995/4953]\tlr: 1.833e-05, memory: 9082, loss: 0.8114\n", + "2023-07-02 19:25:37,042 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:27:53,013 - modelscope - INFO - Saving checkpoint at 4000 iter\n", + "2023-07-02 19:27:53,054 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_3800\n", + "2023-07-02 19:27:53,059 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8099, evaluation/loss: 1.2522, loss: 1.1221\n", + "2023-07-02 19:27:58,830 - modelscope - INFO - epoch [1][4005/4953]\tlr: 1.817e-05, memory: 9082, loss: 1.9461\n", + "2023-07-02 19:28:04,138 - modelscope - INFO - epoch [1][4010/4953]\tlr: 1.809e-05, memory: 9082, loss: 1.5629\n", + "2023-07-02 19:28:09,984 - modelscope - INFO - epoch [1][4015/4953]\tlr: 1.801e-05, memory: 9082, loss: 0.7642\n", + "2023-07-02 19:28:13,463 - modelscope - INFO - epoch [1][4020/4953]\tlr: 1.792e-05, memory: 9082, loss: 2.2344\n", + "2023-07-02 19:28:20,355 - modelscope - INFO - epoch [1][4025/4953]\tlr: 1.784e-05, memory: 9082, loss: 0.9662\n", + "2023-07-02 19:28:26,276 - modelscope - INFO - epoch [1][4030/4953]\tlr: 1.776e-05, memory: 9082, loss: 1.0925\n", + "2023-07-02 19:28:32,273 - modelscope - INFO - epoch [1][4035/4953]\tlr: 1.768e-05, memory: 9082, loss: 1.4812\n", + "2023-07-02 19:28:38,431 - modelscope - INFO - epoch [1][4040/4953]\tlr: 1.760e-05, memory: 9082, loss: 2.1295\n", + "2023-07-02 19:28:43,468 - modelscope - INFO - epoch [1][4045/4953]\tlr: 1.752e-05, memory: 9082, loss: 1.6391\n", + "2023-07-02 19:28:51,453 - modelscope - INFO - epoch [1][4050/4953]\tlr: 1.744e-05, memory: 9082, loss: 1.4901\n", + "2023-07-02 19:28:57,688 - modelscope - INFO - epoch [1][4055/4953]\tlr: 1.737e-05, memory: 9082, loss: 1.2383\n", + "2023-07-02 19:29:01,776 - modelscope - INFO - epoch [1][4060/4953]\tlr: 1.729e-05, memory: 9082, loss: 1.4404\n", + "2023-07-02 19:29:07,738 - modelscope - INFO - epoch [1][4065/4953]\tlr: 1.721e-05, memory: 9082, loss: 0.5664\n", + "2023-07-02 19:29:12,827 - modelscope - INFO - epoch [1][4070/4953]\tlr: 1.713e-05, memory: 9082, loss: 1.4554\n", + "2023-07-02 19:29:19,309 - modelscope - INFO - epoch [1][4075/4953]\tlr: 1.706e-05, memory: 9082, loss: 0.8976\n", + "2023-07-02 19:29:23,218 - modelscope - INFO - epoch [1][4080/4953]\tlr: 1.698e-05, memory: 9082, loss: 1.0562\n", + "2023-07-02 19:29:32,543 - modelscope - INFO - epoch [1][4085/4953]\tlr: 1.690e-05, memory: 9082, loss: 0.9514\n", + "2023-07-02 19:29:39,285 - modelscope - INFO - epoch [1][4090/4953]\tlr: 1.683e-05, memory: 9082, loss: 0.4714\n", + "2023-07-02 19:29:44,617 - modelscope - INFO - epoch [1][4095/4953]\tlr: 1.675e-05, memory: 9082, loss: 1.2211\n", + "2023-07-02 19:29:49,645 - modelscope - INFO - epoch [1][4100/4953]\tlr: 1.668e-05, memory: 9082, loss: 2.0924\n", + "2023-07-02 19:29:55,362 - modelscope - INFO - epoch [1][4105/4953]\tlr: 1.660e-05, memory: 9082, loss: 2.2705\n", + "2023-07-02 19:30:01,166 - modelscope - INFO - 
epoch [1][4110/4953]\tlr: 1.653e-05, memory: 9082, loss: 1.6148\n", + "2023-07-02 19:30:08,386 - modelscope - INFO - epoch [1][4115/4953]\tlr: 1.645e-05, memory: 9082, loss: 0.4558\n", + "2023-07-02 19:30:15,808 - modelscope - INFO - epoch [1][4120/4953]\tlr: 1.638e-05, memory: 9082, loss: 1.3715\n", + "2023-07-02 19:30:21,186 - modelscope - INFO - epoch [1][4125/4953]\tlr: 1.631e-05, memory: 9082, loss: 1.4497\n", + "2023-07-02 19:30:26,639 - modelscope - INFO - epoch [1][4130/4953]\tlr: 1.623e-05, memory: 9082, loss: 1.0819\n", + "2023-07-02 19:30:32,756 - modelscope - INFO - epoch [1][4135/4953]\tlr: 1.616e-05, memory: 9082, loss: 0.5440\n", + "2023-07-02 19:30:39,286 - modelscope - INFO - epoch [1][4140/4953]\tlr: 1.609e-05, memory: 9082, loss: 1.7625\n", + "2023-07-02 19:30:45,148 - modelscope - INFO - epoch [1][4145/4953]\tlr: 1.602e-05, memory: 9082, loss: 1.4341\n", + "2023-07-02 19:30:49,574 - modelscope - INFO - epoch [1][4150/4953]\tlr: 1.595e-05, memory: 9082, loss: 1.2615\n", + "2023-07-02 19:30:56,310 - modelscope - INFO - epoch [1][4155/4953]\tlr: 1.588e-05, memory: 9082, loss: 1.1409\n", + "2023-07-02 19:31:00,158 - modelscope - INFO - epoch [1][4160/4953]\tlr: 1.580e-05, memory: 9082, loss: 1.3609\n", + "2023-07-02 19:31:06,731 - modelscope - INFO - epoch [1][4165/4953]\tlr: 1.573e-05, memory: 9082, loss: 1.5992\n", + "2023-07-02 19:31:10,582 - modelscope - INFO - epoch [1][4170/4953]\tlr: 1.566e-05, memory: 9082, loss: 1.2750\n", + "2023-07-02 19:31:17,613 - modelscope - INFO - epoch [1][4175/4953]\tlr: 1.560e-05, memory: 9082, loss: 1.5521\n", + "2023-07-02 19:31:21,814 - modelscope - INFO - epoch [1][4180/4953]\tlr: 1.553e-05, memory: 9082, loss: 2.2871\n", + "2023-07-02 19:31:28,108 - modelscope - INFO - epoch [1][4185/4953]\tlr: 1.546e-05, memory: 9082, loss: 1.4199\n", + "2023-07-02 19:31:31,428 - modelscope - INFO - epoch [1][4190/4953]\tlr: 1.539e-05, memory: 9082, loss: 1.6801\n", + "2023-07-02 19:31:36,958 - modelscope - INFO - epoch [1][4195/4953]\tlr: 1.532e-05, memory: 9082, loss: 1.2423\n", + "2023-07-02 19:31:43,408 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:16<00:00, 2.04it/s]\n", + "2023-07-02 19:33:59,477 - modelscope - INFO - Saving checkpoint at 4200 iter\n", + "2023-07-02 19:33:59,518 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_4000\n", + "2023-07-02 19:33:59,522 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8095, evaluation/loss: 1.2465, loss: 1.5236\n", + "2023-07-02 19:34:03,568 - modelscope - INFO - epoch [1][4205/4953]\tlr: 1.519e-05, memory: 9082, loss: 1.0014\n", + "2023-07-02 19:34:10,609 - modelscope - INFO - epoch [1][4210/4953]\tlr: 1.512e-05, memory: 9082, loss: 0.5158\n", + "2023-07-02 19:34:17,669 - modelscope - INFO - epoch [1][4215/4953]\tlr: 1.506e-05, memory: 9082, loss: 1.1637\n", + "2023-07-02 19:34:24,176 - modelscope - INFO - epoch [1][4220/4953]\tlr: 1.499e-05, memory: 9082, loss: 0.9216\n", + "2023-07-02 19:34:30,303 - modelscope - INFO - epoch [1][4225/4953]\tlr: 1.492e-05, memory: 9082, loss: 0.5468\n", + "2023-07-02 19:34:36,913 - modelscope - INFO - epoch [1][4230/4953]\tlr: 1.486e-05, memory: 9082, loss: 1.0229\n", + "2023-07-02 19:34:42,449 - modelscope - INFO - epoch [1][4235/4953]\tlr: 1.480e-05, memory: 9082, loss: 0.8887\n", + "2023-07-02 19:34:51,187 - modelscope - INFO - epoch [1][4240/4953]\tlr: 1.473e-05, memory: 9082, 
loss: 1.1398\n", + "2023-07-02 19:34:55,850 - modelscope - INFO - epoch [1][4245/4953]\tlr: 1.467e-05, memory: 9082, loss: 1.8500\n", + "2023-07-02 19:35:01,653 - modelscope - INFO - epoch [1][4250/4953]\tlr: 1.460e-05, memory: 9082, loss: 1.2860\n", + "2023-07-02 19:35:07,538 - modelscope - INFO - epoch [1][4255/4953]\tlr: 1.454e-05, memory: 9082, loss: 0.9241\n", + "2023-07-02 19:35:10,832 - modelscope - INFO - epoch [1][4260/4953]\tlr: 1.448e-05, memory: 9082, loss: 1.5016\n", + "2023-07-02 19:35:15,940 - modelscope - INFO - epoch [1][4265/4953]\tlr: 1.442e-05, memory: 9082, loss: 1.1250\n", + "2023-07-02 19:35:21,080 - modelscope - INFO - epoch [1][4270/4953]\tlr: 1.436e-05, memory: 9082, loss: 1.0505\n", + "2023-07-02 19:35:26,817 - modelscope - INFO - epoch [1][4275/4953]\tlr: 1.429e-05, memory: 9082, loss: 1.0356\n", + "2023-07-02 19:35:36,012 - modelscope - INFO - epoch [1][4280/4953]\tlr: 1.423e-05, memory: 9082, loss: 0.9335\n", + "2023-07-02 19:35:42,237 - modelscope - INFO - epoch [1][4285/4953]\tlr: 1.417e-05, memory: 9082, loss: 0.5855\n", + "2023-07-02 19:35:46,223 - modelscope - INFO - epoch [1][4290/4953]\tlr: 1.411e-05, memory: 9082, loss: 1.2945\n", + "2023-07-02 19:35:52,610 - modelscope - INFO - epoch [1][4295/4953]\tlr: 1.405e-05, memory: 9082, loss: 0.9766\n", + "2023-07-02 19:35:59,125 - modelscope - INFO - epoch [1][4300/4953]\tlr: 1.400e-05, memory: 9082, loss: 1.6789\n", + "2023-07-02 19:36:03,214 - modelscope - INFO - epoch [1][4305/4953]\tlr: 1.394e-05, memory: 9082, loss: 1.5262\n", + "2023-07-02 19:36:08,897 - modelscope - INFO - epoch [1][4310/4953]\tlr: 1.388e-05, memory: 9082, loss: 1.0785\n", + "2023-07-02 19:36:15,128 - modelscope - INFO - epoch [1][4315/4953]\tlr: 1.382e-05, memory: 9082, loss: 0.6479\n", + "2023-07-02 19:36:21,607 - modelscope - INFO - epoch [1][4320/4953]\tlr: 1.376e-05, memory: 9082, loss: 1.8496\n", + "2023-07-02 19:36:29,617 - modelscope - INFO - epoch [1][4325/4953]\tlr: 1.371e-05, memory: 9082, loss: 0.5391\n", + "2023-07-02 19:36:35,101 - modelscope - INFO - epoch [1][4330/4953]\tlr: 1.365e-05, memory: 9082, loss: 1.8141\n", + "2023-07-02 19:36:41,579 - modelscope - INFO - epoch [1][4335/4953]\tlr: 1.359e-05, memory: 9082, loss: 0.6881\n", + "2023-07-02 19:36:48,569 - modelscope - INFO - epoch [1][4340/4953]\tlr: 1.354e-05, memory: 9082, loss: 0.6677\n", + "2023-07-02 19:36:55,362 - modelscope - INFO - epoch [1][4345/4953]\tlr: 1.348e-05, memory: 9082, loss: 0.7067\n", + "2023-07-02 19:37:01,199 - modelscope - INFO - epoch [1][4350/4953]\tlr: 1.343e-05, memory: 9082, loss: 1.3036\n", + "2023-07-02 19:37:06,752 - modelscope - INFO - epoch [1][4355/4953]\tlr: 1.337e-05, memory: 9082, loss: 0.5832\n", + "2023-07-02 19:37:11,013 - modelscope - INFO - epoch [1][4360/4953]\tlr: 1.332e-05, memory: 9082, loss: 0.9969\n", + "2023-07-02 19:37:15,110 - modelscope - INFO - epoch [1][4365/4953]\tlr: 1.326e-05, memory: 9082, loss: 1.6590\n", + "2023-07-02 19:37:22,411 - modelscope - INFO - epoch [1][4370/4953]\tlr: 1.321e-05, memory: 9082, loss: 0.8229\n", + "2023-07-02 19:37:29,106 - modelscope - INFO - epoch [1][4375/4953]\tlr: 1.316e-05, memory: 9082, loss: 1.3289\n", + "2023-07-02 19:37:33,326 - modelscope - INFO - epoch [1][4380/4953]\tlr: 1.311e-05, memory: 9082, loss: 1.0410\n", + "2023-07-02 19:37:38,513 - modelscope - INFO - epoch [1][4385/4953]\tlr: 1.305e-05, memory: 9082, loss: 0.6374\n", + "2023-07-02 19:37:42,903 - modelscope - INFO - epoch [1][4390/4953]\tlr: 1.300e-05, memory: 9082, loss: 2.6094\n", + "2023-07-02 19:37:46,474 
- modelscope - INFO - epoch [1][4395/4953]\tlr: 1.295e-05, memory: 9082, loss: 1.7327\n", + "2023-07-02 19:37:53,357 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:16<00:00, 2.03it/s]\n", + "2023-07-02 19:40:09,626 - modelscope - INFO - Saving checkpoint at 4400 iter\n", + "2023-07-02 19:40:09,667 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter3800_acc0.8098996877670288\n", + "2023-07-02 19:40:09,672 - modelscope - INFO - Saving checkpoint at 4400 iter\n", + "2023-07-02 19:40:09,712 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_4200\n", + "2023-07-02 19:40:09,717 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8100, evaluation/loss: 1.2437, loss: 1.0930\n", + "2023-07-02 19:40:15,785 - modelscope - INFO - epoch [1][4405/4953]\tlr: 1.285e-05, memory: 9082, loss: 0.5974\n", + "2023-07-02 19:40:23,144 - modelscope - INFO - epoch [1][4410/4953]\tlr: 1.280e-05, memory: 9082, loss: 1.0870\n", + "2023-07-02 19:40:28,966 - modelscope - INFO - epoch [1][4415/4953]\tlr: 1.275e-05, memory: 9082, loss: 1.0536\n", + "2023-07-02 19:40:35,092 - modelscope - INFO - epoch [1][4420/4953]\tlr: 1.270e-05, memory: 9082, loss: 1.4613\n", + "2023-07-02 19:40:41,826 - modelscope - INFO - epoch [1][4425/4953]\tlr: 1.265e-05, memory: 9082, loss: 0.8297\n", + "2023-07-02 19:40:46,568 - modelscope - INFO - epoch [1][4430/4953]\tlr: 1.261e-05, memory: 9082, loss: 2.0414\n", + "2023-07-02 19:40:53,278 - modelscope - INFO - epoch [1][4435/4953]\tlr: 1.256e-05, memory: 9082, loss: 1.1800\n", + "2023-07-02 19:40:58,208 - modelscope - INFO - epoch [1][4440/4953]\tlr: 1.251e-05, memory: 9082, loss: 0.8595\n", + "2023-07-02 19:41:04,905 - modelscope - INFO - epoch [1][4445/4953]\tlr: 1.246e-05, memory: 9082, loss: 0.0801\n", + "2023-07-02 19:41:08,125 - modelscope - INFO - epoch [1][4450/4953]\tlr: 1.242e-05, memory: 9082, loss: 1.7031\n", + "2023-07-02 19:41:13,374 - modelscope - INFO - epoch [1][4455/4953]\tlr: 1.237e-05, memory: 9082, loss: 1.8381\n", + "2023-07-02 19:41:17,994 - modelscope - INFO - epoch [1][4460/4953]\tlr: 1.233e-05, memory: 9082, loss: 1.1123\n", + "2023-07-02 19:41:21,181 - modelscope - INFO - epoch [1][4465/4953]\tlr: 1.228e-05, memory: 9082, loss: 2.0922\n", + "2023-07-02 19:41:27,252 - modelscope - INFO - epoch [1][4470/4953]\tlr: 1.224e-05, memory: 9082, loss: 0.8977\n", + "2023-07-02 19:41:31,600 - modelscope - INFO - epoch [1][4475/4953]\tlr: 1.219e-05, memory: 9082, loss: 0.9191\n", + "2023-07-02 19:41:36,554 - modelscope - INFO - epoch [1][4480/4953]\tlr: 1.215e-05, memory: 9082, loss: 1.9734\n", + "2023-07-02 19:41:42,916 - modelscope - INFO - epoch [1][4485/4953]\tlr: 1.210e-05, memory: 9082, loss: 0.7236\n", + "2023-07-02 19:41:49,532 - modelscope - INFO - epoch [1][4490/4953]\tlr: 1.206e-05, memory: 9082, loss: 1.5750\n", + "2023-07-02 19:41:55,282 - modelscope - INFO - epoch [1][4495/4953]\tlr: 1.202e-05, memory: 9082, loss: 0.9306\n", + "2023-07-02 19:42:01,377 - modelscope - INFO - epoch [1][4500/4953]\tlr: 1.198e-05, memory: 9082, loss: 1.9801\n", + "2023-07-02 19:42:05,379 - modelscope - INFO - epoch [1][4505/4953]\tlr: 1.193e-05, memory: 9082, loss: 2.3320\n", + "2023-07-02 19:42:11,849 - modelscope - INFO - epoch [1][4510/4953]\tlr: 1.189e-05, memory: 9082, loss: 1.3637\n", + "2023-07-02 19:42:18,695 - modelscope - INFO - epoch 
[1][4515/4953]\tlr: 1.185e-05, memory: 9082, loss: 1.5328\n", + "2023-07-02 19:42:26,045 - modelscope - INFO - epoch [1][4520/4953]\tlr: 1.181e-05, memory: 9082, loss: 1.0721\n", + "2023-07-02 19:42:32,060 - modelscope - INFO - epoch [1][4525/4953]\tlr: 1.177e-05, memory: 9082, loss: 1.1867\n", + "2023-07-02 19:42:38,307 - modelscope - INFO - epoch [1][4530/4953]\tlr: 1.173e-05, memory: 9082, loss: 1.3500\n", + "2023-07-02 19:42:46,137 - modelscope - INFO - epoch [1][4535/4953]\tlr: 1.169e-05, memory: 9082, loss: 0.7637\n", + "2023-07-02 19:42:52,814 - modelscope - INFO - epoch [1][4540/4953]\tlr: 1.165e-05, memory: 9082, loss: 0.8551\n", + "2023-07-02 19:43:00,111 - modelscope - INFO - epoch [1][4545/4953]\tlr: 1.162e-05, memory: 9082, loss: 1.3265\n", + "2023-07-02 19:43:06,301 - modelscope - INFO - epoch [1][4550/4953]\tlr: 1.158e-05, memory: 9082, loss: 0.6115\n", + "2023-07-02 19:43:10,926 - modelscope - INFO - epoch [1][4555/4953]\tlr: 1.154e-05, memory: 9082, loss: 1.8475\n", + "2023-07-02 19:43:17,954 - modelscope - INFO - epoch [1][4560/4953]\tlr: 1.150e-05, memory: 9082, loss: 1.3332\n", + "2023-07-02 19:43:22,493 - modelscope - INFO - epoch [1][4565/4953]\tlr: 1.147e-05, memory: 9082, loss: 1.9062\n", + "2023-07-02 19:43:28,213 - modelscope - INFO - epoch [1][4570/4953]\tlr: 1.143e-05, memory: 9082, loss: 0.6227\n", + "2023-07-02 19:43:34,862 - modelscope - INFO - epoch [1][4575/4953]\tlr: 1.140e-05, memory: 9082, loss: 0.7937\n", + "2023-07-02 19:43:40,905 - modelscope - INFO - epoch [1][4580/4953]\tlr: 1.136e-05, memory: 9082, loss: 1.4903\n", + "2023-07-02 19:43:47,007 - modelscope - INFO - epoch [1][4585/4953]\tlr: 1.133e-05, memory: 9082, loss: 1.0449\n", + "2023-07-02 19:43:52,730 - modelscope - INFO - epoch [1][4590/4953]\tlr: 1.129e-05, memory: 9082, loss: 1.0068\n", + "2023-07-02 19:43:56,715 - modelscope - INFO - epoch [1][4595/4953]\tlr: 1.126e-05, memory: 9082, loss: 1.5157\n", + "2023-07-02 19:44:04,629 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:46:20,481 - modelscope - INFO - Saving checkpoint at 4600 iter\n", + "2023-07-02 19:46:20,521 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_4400\n", + "2023-07-02 19:46:20,526 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8098, evaluation/loss: 1.2390, loss: 1.1334\n", + "2023-07-02 19:46:25,140 - modelscope - INFO - epoch [1][4605/4953]\tlr: 1.119e-05, memory: 9082, loss: 1.6938\n", + "2023-07-02 19:46:30,413 - modelscope - INFO - epoch [1][4610/4953]\tlr: 1.116e-05, memory: 9082, loss: 2.1351\n", + "2023-07-02 19:46:37,216 - modelscope - INFO - epoch [1][4615/4953]\tlr: 1.113e-05, memory: 9082, loss: 0.9270\n", + "2023-07-02 19:46:43,728 - modelscope - INFO - epoch [1][4620/4953]\tlr: 1.110e-05, memory: 9082, loss: 1.1201\n", + "2023-07-02 19:46:50,227 - modelscope - INFO - epoch [1][4625/4953]\tlr: 1.107e-05, memory: 9082, loss: 1.2715\n", + "2023-07-02 19:46:53,772 - modelscope - INFO - epoch [1][4630/4953]\tlr: 1.103e-05, memory: 9082, loss: 1.4461\n", + "2023-07-02 19:46:59,663 - modelscope - INFO - epoch [1][4635/4953]\tlr: 1.100e-05, memory: 9082, loss: 1.2715\n", + "2023-07-02 19:47:06,614 - modelscope - INFO - epoch [1][4640/4953]\tlr: 1.097e-05, memory: 9082, loss: 0.6478\n", + "2023-07-02 19:47:14,999 - modelscope - INFO - epoch [1][4645/4953]\tlr: 1.094e-05, memory: 9082, loss: 
1.0031\n", + "2023-07-02 19:47:19,690 - modelscope - INFO - epoch [1][4650/4953]\tlr: 1.092e-05, memory: 9082, loss: 1.0572\n", + "2023-07-02 19:47:27,827 - modelscope - INFO - epoch [1][4655/4953]\tlr: 1.089e-05, memory: 9082, loss: 0.9459\n", + "2023-07-02 19:47:33,520 - modelscope - INFO - epoch [1][4660/4953]\tlr: 1.086e-05, memory: 9082, loss: 0.9813\n", + "2023-07-02 19:47:39,880 - modelscope - INFO - epoch [1][4665/4953]\tlr: 1.083e-05, memory: 9082, loss: 1.3258\n", + "2023-07-02 19:47:46,513 - modelscope - INFO - epoch [1][4670/4953]\tlr: 1.080e-05, memory: 9082, loss: 1.2884\n", + "2023-07-02 19:47:51,769 - modelscope - INFO - epoch [1][4675/4953]\tlr: 1.078e-05, memory: 9082, loss: 1.6375\n", + "2023-07-02 19:47:57,474 - modelscope - INFO - epoch [1][4680/4953]\tlr: 1.075e-05, memory: 9082, loss: 0.9726\n", + "2023-07-02 19:48:02,354 - modelscope - INFO - epoch [1][4685/4953]\tlr: 1.073e-05, memory: 9082, loss: 1.1402\n", + "2023-07-02 19:48:09,946 - modelscope - INFO - epoch [1][4690/4953]\tlr: 1.070e-05, memory: 9082, loss: 0.9941\n", + "2023-07-02 19:48:16,660 - modelscope - INFO - epoch [1][4695/4953]\tlr: 1.068e-05, memory: 9082, loss: 1.5975\n", + "2023-07-02 19:48:22,892 - modelscope - INFO - epoch [1][4700/4953]\tlr: 1.065e-05, memory: 9082, loss: 0.9816\n", + "2023-07-02 19:48:28,221 - modelscope - INFO - epoch [1][4705/4953]\tlr: 1.063e-05, memory: 9082, loss: 0.9115\n", + "2023-07-02 19:48:35,152 - modelscope - INFO - epoch [1][4710/4953]\tlr: 1.060e-05, memory: 9082, loss: 1.4184\n", + "2023-07-02 19:48:40,666 - modelscope - INFO - epoch [1][4715/4953]\tlr: 1.058e-05, memory: 9082, loss: 1.6391\n", + "2023-07-02 19:48:46,682 - modelscope - INFO - epoch [1][4720/4953]\tlr: 1.056e-05, memory: 9082, loss: 2.1836\n", + "2023-07-02 19:48:53,274 - modelscope - INFO - epoch [1][4725/4953]\tlr: 1.054e-05, memory: 9082, loss: 1.1783\n", + "2023-07-02 19:48:56,851 - modelscope - INFO - epoch [1][4730/4953]\tlr: 1.051e-05, memory: 9082, loss: 1.0398\n", + "2023-07-02 19:49:03,951 - modelscope - INFO - epoch [1][4735/4953]\tlr: 1.049e-05, memory: 9082, loss: 0.4896\n", + "2023-07-02 19:49:09,418 - modelscope - INFO - epoch [1][4740/4953]\tlr: 1.047e-05, memory: 9082, loss: 0.8757\n", + "2023-07-02 19:49:15,768 - modelscope - INFO - epoch [1][4745/4953]\tlr: 1.045e-05, memory: 9082, loss: 1.5896\n", + "2023-07-02 19:49:21,308 - modelscope - INFO - epoch [1][4750/4953]\tlr: 1.043e-05, memory: 9082, loss: 1.3535\n", + "2023-07-02 19:49:27,455 - modelscope - INFO - epoch [1][4755/4953]\tlr: 1.041e-05, memory: 9082, loss: 1.3389\n", + "2023-07-02 19:49:34,436 - modelscope - INFO - epoch [1][4760/4953]\tlr: 1.039e-05, memory: 9082, loss: 0.6073\n", + "2023-07-02 19:49:42,538 - modelscope - INFO - epoch [1][4765/4953]\tlr: 1.037e-05, memory: 9082, loss: 0.6708\n", + "2023-07-02 19:49:49,238 - modelscope - INFO - epoch [1][4770/4953]\tlr: 1.036e-05, memory: 9082, loss: 0.8630\n", + "2023-07-02 19:49:55,165 - modelscope - INFO - epoch [1][4775/4953]\tlr: 1.034e-05, memory: 9082, loss: 0.7835\n", + "2023-07-02 19:50:01,434 - modelscope - INFO - epoch [1][4780/4953]\tlr: 1.032e-05, memory: 9082, loss: 1.7195\n", + "2023-07-02 19:50:08,788 - modelscope - INFO - epoch [1][4785/4953]\tlr: 1.030e-05, memory: 9082, loss: 1.1434\n", + "2023-07-02 19:50:14,523 - modelscope - INFO - epoch [1][4790/4953]\tlr: 1.029e-05, memory: 9082, loss: 0.6416\n", + "2023-07-02 19:50:21,717 - modelscope - INFO - epoch [1][4795/4953]\tlr: 1.027e-05, memory: 9082, loss: 1.0909\n", + "2023-07-02 19:50:25,524 - 
modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 277/277 [02:15<00:00, 2.04it/s]\n", + "2023-07-02 19:52:41,308 - modelscope - INFO - Saving checkpoint at 4800 iter\n", + "2023-07-02 19:52:41,348 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/best_iter4400_acc0.8100214004516602\n", + "2023-07-02 19:52:41,353 - modelscope - INFO - Saving checkpoint at 4800 iter\n", + "2023-07-02 19:52:41,392 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_4600\n", + "2023-07-02 19:52:41,397 - modelscope - INFO - epoch(eval) [1][277]\tmemory: 9082, evaluation/acc: 0.8101, evaluation/loss: 1.2370, loss: 1.1855\n", + "2023-07-02 19:52:47,709 - modelscope - INFO - epoch [1][4805/4953]\tlr: 1.024e-05, memory: 9082, loss: 0.8004\n", + "2023-07-02 19:52:53,162 - modelscope - INFO - epoch [1][4810/4953]\tlr: 1.023e-05, memory: 9082, loss: 1.1193\n", + "2023-07-02 19:53:00,428 - modelscope - INFO - epoch [1][4815/4953]\tlr: 1.021e-05, memory: 9082, loss: 0.8555\n", + "2023-07-02 19:53:03,760 - modelscope - INFO - epoch [1][4820/4953]\tlr: 1.020e-05, memory: 9082, loss: 1.4422\n", + "2023-07-02 19:53:09,302 - modelscope - INFO - epoch [1][4825/4953]\tlr: 1.019e-05, memory: 9082, loss: 1.5247\n", + "2023-07-02 19:53:17,785 - modelscope - INFO - epoch [1][4830/4953]\tlr: 1.017e-05, memory: 9082, loss: 0.5462\n", + "2023-07-02 19:53:24,406 - modelscope - INFO - epoch [1][4835/4953]\tlr: 1.016e-05, memory: 9082, loss: 1.0023\n", + "2023-07-02 19:53:29,386 - modelscope - INFO - epoch [1][4840/4953]\tlr: 1.015e-05, memory: 9082, loss: 1.3645\n", + "2023-07-02 19:53:34,231 - modelscope - INFO - epoch [1][4845/4953]\tlr: 1.014e-05, memory: 9082, loss: 0.9927\n", + "2023-07-02 19:53:40,558 - modelscope - INFO - epoch [1][4850/4953]\tlr: 1.013e-05, memory: 9082, loss: 2.0516\n", + "2023-07-02 19:53:47,846 - modelscope - INFO - epoch [1][4855/4953]\tlr: 1.012e-05, memory: 9082, loss: 0.7750\n", + "2023-07-02 19:53:52,341 - modelscope - INFO - epoch [1][4860/4953]\tlr: 1.011e-05, memory: 9082, loss: 1.4390\n", + "2023-07-02 19:53:57,172 - modelscope - INFO - epoch [1][4865/4953]\tlr: 1.010e-05, memory: 9082, loss: 1.0197\n", + "2023-07-02 19:54:02,776 - modelscope - INFO - epoch [1][4870/4953]\tlr: 1.009e-05, memory: 9082, loss: 0.7660\n", + "2023-07-02 19:54:08,311 - modelscope - INFO - epoch [1][4875/4953]\tlr: 1.008e-05, memory: 9082, loss: 0.8775\n", + "2023-07-02 19:54:14,394 - modelscope - INFO - epoch [1][4880/4953]\tlr: 1.007e-05, memory: 9082, loss: 1.3374\n", + "2023-07-02 19:54:20,602 - modelscope - INFO - epoch [1][4885/4953]\tlr: 1.006e-05, memory: 9082, loss: 1.0018\n", + "2023-07-02 19:54:28,123 - modelscope - INFO - epoch [1][4890/4953]\tlr: 1.006e-05, memory: 9082, loss: 1.4156\n", + "2023-07-02 19:54:34,101 - modelscope - INFO - epoch [1][4895/4953]\tlr: 1.005e-05, memory: 9082, loss: 1.4742\n", + "2023-07-02 19:54:39,802 - modelscope - INFO - epoch [1][4900/4953]\tlr: 1.004e-05, memory: 9082, loss: 1.2737\n", + "2023-07-02 19:54:45,785 - modelscope - INFO - epoch [1][4905/4953]\tlr: 1.004e-05, memory: 9082, loss: 1.2928\n", + "2023-07-02 19:54:52,274 - modelscope - INFO - epoch [1][4910/4953]\tlr: 1.003e-05, memory: 9082, loss: 0.9859\n", + "2023-07-02 19:54:57,409 - modelscope - INFO - epoch [1][4915/4953]\tlr: 1.003e-05, memory: 9082, loss: 1.8160\n", + "2023-07-02 19:55:04,217 - modelscope - INFO - epoch 
[1][4920/4953]\tlr: 1.002e-05, memory: 9082, loss: 0.9310\n", + "2023-07-02 19:55:09,704 - modelscope - INFO - epoch [1][4925/4953]\tlr: 1.002e-05, memory: 9082, loss: 1.1717\n", + "2023-07-02 19:55:15,079 - modelscope - INFO - epoch [1][4930/4953]\tlr: 1.001e-05, memory: 9082, loss: 1.8821\n", + "2023-07-02 19:55:19,843 - modelscope - INFO - epoch [1][4935/4953]\tlr: 1.001e-05, memory: 9082, loss: 0.7700\n", + "2023-07-02 19:55:24,826 - modelscope - INFO - epoch [1][4940/4953]\tlr: 1.001e-05, memory: 9082, loss: 1.1562\n", + "2023-07-02 19:55:29,831 - modelscope - INFO - epoch [1][4945/4953]\tlr: 1.000e-05, memory: 9082, loss: 1.2777\n", + "2023-07-02 19:55:34,919 - modelscope - INFO - epoch [1][4950/4953]\tlr: 1.000e-05, memory: 9082, loss: 0.9414\n", + "2023-07-02 19:55:38,429 - modelscope - INFO - Saving checkpoint at 4953 iter\n", + "2023-07-02 19:55:38,697 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/iter_4800\n", + "2023-07-02 19:55:38,741 - modelscope - INFO - Train finished. Uploading models, waiting...\n", + "2023-07-02 19:55:38,823 - modelscope - INFO - {'done': True}\n" + ] + } + ], + "source": [ + "def cfg_modify_fn(cfg: Config) -> Config:\n", + " cfg.update(CONFIG)\n", + " return cfg\n", + "\n", + "\n", + "trainer = EpochBasedTrainer(\n", + " model=model,\n", + " cfg_file=cfg_file,\n", + " data_collator=data_collate_fn,\n", + " train_dataset=train_dataset,\n", + " eval_dataset=val_dataset,\n", + " remove_unused_data=True,\n", + " seed=42,\n", + " device='cpu', # No placement for model, leave the model to `device_map`\n", + " cfg_modify_fn=cfg_modify_fn,\n", + ")\n", + "\n", + "trainer.train()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 可视化\n", + "tensorboard 命令: (e.g.) 
\n", + "`tensorboard --logdir /home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449 --port 6006`" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['lr', 'loss', 'evaluation/acc', 'evaluation/loss'])\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAApIAAAHDCAYAAACXsvqpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACbJ0lEQVR4nO2dd7wkVZ32n+6b79w8d+7kRBokzJBhBEyMjogCLrqIqCy6uiiYcFfFzPrquPqur+ICsgZwdQUxoIgEiYNkGEDCwJAm53Rzvl3vH78+XaeqK3d1fr6fz0zfrq46dSo/9UsnYRiGAUIIIYQQQkKSLHYHCCGEEEJIeUIhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghhBBCIkEhSQghaa677jokEgls2LCh2F0hhJCygEKSEEIIIYREgkKSEEIIIYREgkKSEEIIIYREgkKSEEI8uOqqq3D44YejoaEBc+bMwcUXX4ze3l7LPC+//DLOOecczJo1C42NjZg3bx7e9773oa+vLzPPnXfeiVNOOQUdHR1oaWnBkiVL8KUvfanAW0MIIfFSW+wOEEJIqfKNb3wDl19+OVasWIGPf/zjWLduHa6++mo8/vjjePDBB1FXV4fx8XGsXLkSY2Nj+OQnP4lZs2Zh69atuOWWW9Db24v29nY8//zzeOc734mlS5fi3//939HQ0IBXXnkFDz74YLE3kRBCcoJCkhBCHNi9ezdWrVqFt73tbbjtttuQTIoD59BDD8Ull1yCX/3qV7jwwguxdu1arF+/Hr/97W/xnve8J7P81772tczfd955J8bHx3Hbbbehu7u74NtCCCH5gq5tQghx4K677sL4+Dg+85nPZEQkAHz0ox9FW1sb/vKXvwAA2tvbAQB33HEHhoeHHdvq6OgAAPzpT39CKpXKb8cJIaSAUEgSQogDGzduBAAsWbLEMr2+vh4HHHBA5vfFixfj0ksvxU9/+lN0d3dj5cqVuPLKKy3xkeeeey5OPvlk/PM//zNmzpyJ973vfbjxxhspKgkhZQ+FJCGE5Mh//ud/4plnnsGXvvQljIyM4FOf+hQOP/xwbNmyBQDQ1NSE+++/H3fddRc++MEP4plnnsG5556Lt771rZiamipy7wkhJDoUkoQQ4sDChQsBAOvWrbNMHx8fx/r16zO/K4488kh85Stfwf3334+//e1v2Lp1K3784x9nfk8mkzjttNPw/e9/H2vXrsW3vvUt3HPPPbj33nvzvzGEEJInKCQJIcSBFStWoL6+HldccQUMw8hM/9nPfoa+vj6cccYZAID+/n5MTk5alj3yyCORTCYxNjYGANi3b19W+0cddRQAZOYhhJByhFnbhBDiwIwZM3DZZZfh8ssvx9vf/naceeaZWLduHa666iocf/zx+MAHPgAAuOeee3DJJZfgve99Lw455BBMTk7il7/8JWpqanDOOecAAP793/8d999/P8444wwsXLgQu3btwlVXXYV58+bhlFNOKeZmEkJITlBIEkKIC9/4xjcwY8YM/Nd//Rc++9nPoqurCx/72Mfw7W9/G3V1dQCAZcuWYeXKlfjzn/+MrVu3orm5GcuWLcNtt92Gk046CQBw5plnYsOGDfj5z3+OPXv2oLu7G2984xtx+eWXZ7K+CSGkHEkYus+GEEIIIYSQgDBGkhBCCCGERIJCkhBCCCGERIJCkhBCCCGERIJCkhBCCCGERIJCkhBCCCGERIJCkhBCCCGERKLgdSRTqRS2bduG1tZWJBKJQq+eEEIIIYT4YBgGBgYGMGfOHCST7nbHggvJbdu2Yf78+YVeLSGEEEIICcnmzZsxb948198LLiRbW1sBSMfa2toKvXpCCCGEEOJDf38/5s+fn9FtbhRcSCp3dltbG4UkIYQQQkgJ4xeGyGQbQgghhBASidBCcuvWrfjABz6A6dOno6mpCUceeSSeeOKJfPSNEEIIIYSUMKFc2/v378fJJ5+MN7/5zbjtttswY8YMvPzyy+js7MxX/wghhBBCSIkSSkj+x3/8B+bPn49rr702M23x4sWxd4oQQgghhJQ+oVzbN998M4477ji8973vRU9PD44++mj85Cc/8VxmbGwM/f39ln+EEEIIIaT8CSUkX3vtNVx99dU4+OCDcccdd+DjH/84PvWpT+EXv/iF6zKrVq1Ce3t75h9rSBJCCCGEVAYJwzCMoDPX19fjuOOOw0MPPZSZ9qlPfQqPP/44Hn74YcdlxsbGMDY2lvmu6hL19fWx/A8hhBBCSAnS39+P9vZ2X70WyiI5e/ZsHHbYYZZpr3vd67Bp0ybXZRoaGjI1I1k7khBCCCGkcgglJE8++WSsW7fOMu2ll17CwoULY+0UIYQQQggpfUIJyc9+9rN45JFH8O1vfxuvvPIKfv3rX+O///u/cfHFF+erf4QQQgghpEQJJSSPP/543HTTTbj++utxxBFH4Jvf/CZ+8IMf4Pzzz89X/wghhBBCSIkSKtkmDoIGbxJCCCGEkOKQl2QbQgghhBBCFBSSADA5AaSmit0LQgghhJCygkJyahLYvh7Y+mqxe0IIIYQQUlZQSI6PFrsHhBBCCCFlCYUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIUkIYQQQgiJBIWkztgoYKSK3QtCCCGEkLKgOoWkYThP37UJ2LOtsH0hhBBCCClTqk9I9u0FtrwMjI86/z46XNj+EEIIIYSUKdUnJPv3ymfv7uL2gxBCCCGkzKk+IUkIIYQQQmKBQpIQQgghhESCQtIl74YQQgghhHhDIUkIIYQQQiJBIUkIIYQQQiJBIUkIIYQQQiJBIUkIIYQQQiJRvULSyPqDEEIIIYSEoHqFJCGEEEIIyYkqFpK0RBJCCCGE5EIVC0lCCCGEEJILVSwkE8XuACGEEEJIWVPFQpKubUIIIYSQXKhiIUkIIYQQQnKBQpIQQgghhESCQpIQQgghhESCQpIQQgghhESCQpIQQgghhESCQpIQQgghhESCQpIQQgghhESCQpIQQgghhESisoXkS08B3/oQ8OMvFLsnhB
BCCCEVR22xO5BXxoeBPdvA4RAJIYQQQuKnsi2Smc3jcIiEEEIIIXFT2UIykbZEUkcSQgghhMROlQjJVHH7QQghhBBSgVSHkCSEEEIIIbFT2UJSJdkY9G0TQgghhMRNZQvJBIUkIYQQQki+qA4hyWwbQgghhJDYCSUkv/GNbyCRSFj+HXroofnqWwx4WCRTKaBvDzAxXtguEUIIIYRUCKELkh9++OG46667zAZqS7imedLDIjk5DvTvK2h3CCGEEEIqidAqsLa2FrNmzcpHX+KHMZKEEEIIIXkjdIzkyy+/jDlz5uCAAw7A+eefj02bNuWjX/GQSG8edSQhhBBCSOyEskieeOKJuO6667BkyRJs374dl19+OU499VQ899xzaG1tdVxmbGwMY2Njme/9/f259TgUTLYhhBBCCMkXoYTk6aefnvl76dKlOPHEE7Fw4ULceOON+MhHPuK4zKpVq3D55Zfn1suoZHQkhSQhhBBCSNzkVP6no6MDhxxyCF555RXXeS677DL09fVl/m3evDmXVYYj49ougpDs38tkHkIIIYRUNDkJycHBQbz66quYPXu26zwNDQ1oa2uz/CscRUq2mZoE+vZKeaEUx/kmhBBCSGUSSkj+67/+K1avXo0NGzbgoYcewrvf/W7U1NTgvPPOy1f/csOr/E8+sQhXutUJIYQQUpmEipHcsmULzjvvPOzduxczZszAKaecgkceeQQzZszIV/9yJOE/CyGEEEIIiUQoIXnDDTfkqx/5IWOQpHuZEEIIISRuKnysbdaRJIQQQgjJF5UtJIuVbEMIIYQQUgVUtpBMqs2jkCSEEEIIiZvKFpIsSE4IIYQQkjcqW0iWgmubGpYQQgghFUplC8lEkTYvwbJDhBBCCKl8KlxIpj9Z/ocQQgghJHYqXEiWwubRt00IIYSQyqQUlFb+YbINIYQQQkjsVLaQzBQkp5AkhBBCCImbCheSmSDJonaDEEIIIaQSqQ4hWWiLJC2ghBBCCKkCqkRIFrEPFJWEEEIIqVAqW0iCrm1CCCGEkHxR2UJSLwxOyyAhhBBCSKxUtpBMUkgSQgghhOSLyhaSoJAkhBBCCMkXlS0kLWNeF0tIUsASQgghpDKpHiFJiyQhhBBCSKxUuJCsMf8ulpCkfiWEEEJIhVLZQlInFyE5OgRMjIVZWfR1EUIIIYSUCZUtJC2u7VS0NibGgN1bgR0b4+kTIYQQQkiFUD1CMioT47m3QQghhBBSgVSPkIzq2o4iRg3XL4QQQgghFUOFC0lt86K6tgkhhBBCiCMVLiRzWDY1FUMjoEGSEEIIIRVLbbE7kFd0t3QqhEVysBfYvwvo7AFq683phhFP3CUhhBBCSAVQ4RbJiCPb9O+Tz/27Yu0OIYQQQkglUdlCUt+8MC7m+kZtOc2SGSlhh75tQgghhFQmlS0ko9aRTLrtFopCQgghhBBF9QjJ6I2YfwbWkRSchBBCCKl8KltIAsgIwTDJNq5QIBJCCCGEKCpfSGaskjGIwCgxktSehBBCCKlQqkBIpj8jC7pclaBt+cE+YNdmrU4lIYQQQkh5UgVCMr2JcYxsE9Qi6TXb/p3A2IhZYogQQgghpEypfCGZIYRl0XXWGP3UHLKREEIIIWVOFQjJtG87Ug1IG4x3JIQQQgjJUPlCMpGjkLQsRyVJCCGEEKKobCGZSEQUki7zcmQbQgghhJAMlS0kgdwtkpEwHP8khBBCCKkkKl9IZihSHUlCCCGEkAql8oVkzhZJxkgSQgghhDhR+UIy16xt6khCCCGEEEcqX0jGOUQilSQhhBBCSIbqEZKpGAqSM2ubEEIIISRD5QtJRaiRZOjPJoQQQgjxo8KFZEIbazuOGMkIY21ThxJCCCGkQqlwIYlMrk08pXuCtkH1SAghhJDKp/KFJGJMtqE+JIQQQgjJUPlCsuh1JKk+CSGEEFKZ5CQkv/Od7yCRSOAzn/lMTN2JmYT2dxyubY5sQwghhBCSIbKQfPzxx3HNNddg6dKlcfYnflSyTSjLIMfKJoQQQgjxI5KQHBwcxPnnn4+f/OQn6OzsjLtP8aKskmHqSFrQRSVVJSGEEEKIIpKQvPjii3HGGWdgxYoVcfcnD2SUZPBFXPViBCFJ7UkIIYSQCqU27AI33HADnnzySTz++OOB5h8bG8PY2Fjme39/f9hV5kbOyTYakZqgkiSEEEJIZRLKIrl582Z8+tOfxv/+7/+isbEx0DKrVq1Ce3t75t/8+fMjdTQaCWQskpFd24QQQgghxIlQQnLNmjXYtWsXjjnmGNTW1qK2tharV6/GFVdcgdraWkxNTWUtc9lll6Gvry/zb/PmzbF1PhCJhP88gaEYJYQQQghRhHJtn3baaXj22Wct0y688EIceuih+MIXvoCampqsZRoaGtDQ0JBbL3MhU4884ljbvbsjrJSCkxBCCCGVTygh2draiiOOOMIybdq0aZg+fXrW9NIhxhjJoFBHEkIIIaQK4Mg2eYeqkhBCCCGVSeisbTv33XdfDN3II4kIY227zcoREgkhhBBCMlS+RTKSa5vqjxBCCCHEj8oWklr1n3DJNm5QYBJCCCGEKCpbSAIoSrINIYQQQkgVUPlCMkqyDTUnIYQQQogvFS4kE9GSbWJVklSlhBBCCKlMKlxIQrNIFnKlFI+EEEIIqXwqX0gq4ki2iRJnWUxNaRjA2EhMiUaEEEIIIVaqQEhGiZGsEIviwH5g12Zgz7Zi94QQQgghFUjlC8mij2xTRAZ75XN0uKjdIIQQQkhlQiGZDwzXL4QQQgghFUP1CElCCCGEEBIrlS8kMzGSUyGWiXOwbTfKXOBOjAOpMPuUEEIIIZVGZQvJREIbIjHEcsXwRk9NSXLM1GQRVh6SiXFgxwZg62vF7gkhhBBCikhlC0kApkUyjvI/EWYMusy+7UDv7vLIsM4k7zD+kxBCCKlmKl9IFqUguU7AFStxNj6av64QQgghhMRI9QjJkvdtE0IIIYSUF5UvJON0bRNCCCGEkAyVLyR113bQWpKus9FSCaDsE84JIYQQEg9VICTTn6EskhSMhBBCCCF+VL6QjDLWNiGEEEII8aXyhWScQyRGaYIClhBCCCEVSuULyWJYJKkdCSGEEFIFVL6QjFL+h1ZEQgghhBBfqkdIxiIOg7ZBIUoIIYSQyqeKhCTrSBJCCCGExEltsTuQd5JprZwqQPmfiTFg/y5gbCT3tgghhBBCSpzKF5KJGvlMTSHvom7HxuxpfXuB+iagsZmxl4QQQgipKCrftZ2xSE4FXyZuvbd7S8wNEkIIIYQUn+oRklMhhKQbtCim4RiJhBBCCKkKIZl2bY8MhRCTFIyEEEIIIX5UvpBMpDdxdAjYu724fSGEEEIIqSAqX0jqMZLjo8XtCyGEEEJIBVEFQjLt2g4T35g3z3ahXeZ00RNCCCEkf1SBkIyQte0qwEpUmBmGuO379ha7J4QQQgipIipfSCaiFCTPE/nSoeOjwPAA0F8EIclMdkIIIaRqqXwhmXFtxyAkS1UzVfLwj6kU0L8PmBwvdk8IIYQQYqMKhGQU13YEaJnLD
727gb49zqMGEUIIIaSoVIGQVEMkVrDVzo1K0LZjw/JJoU4IIYSUHFUgJGOMkTRSwOSE24/h2hrsBQb7cu1RpFXnjGVgGwo8QgghpFqpHiFpuLi2DQN44E/A8w/7tzU2Amxf7xyvF0hP2Wbav7NwljbDkKQcWvYIIYQQEhO1xe5A3vFzbe/YCDx2h/x9+PJgbY4OAy31ufcNgIjLAoxdvX8nMNQPtHUB7d3xtVug7hNCCCGk9KgCi6SPkNRHu9n4IjCRtjaOjYhgdMLRqhfR0lcoA+FQv3z27yvQCgkhhBBS6VS+kEz4ZG3XaEbZ318B/PYHEgt51b/KP9eYyFCdcP+pLF3NhTRB0txJCCGElCqVLyT9km3sOmXHBmCg1xR4g73A1CSwfYPZhpP489KDNTUe85SjkNQJ2f+hvuIUTieEEEJI7FSPkHRLtplymL71Ve33SeCe3wDXfxd44s50WyFd24l8WyTLSIzu2ylDOY6PFbsnhBBCCMmRyheSCZ8YyanJ7GkDmsVsbBh49kH5++G/pCfGKdzKSATGSb4LxBNCCCEk71S+kMxYJN2EpEMMZP9+8+8b/tP8W8VbOrq2gwjCkC7xKBQ65rJKdTAhhBBCqklIulokHSxjAy6ZzRlRmnu3MjgJv9FhYM/WmBJ97DB5pSgYhsSHTtClTwghpHKoAiEZwbWtWySd2gqrJD1nd/hx9xZgZEhqP8ZN7DqSJslAjAxKfCjHDCeEEFJBVIGQ9Cn/4xgj6WKRVEkzoetIevzm5YqedOgbKU/GRv3nIYQQQsqMUELy6quvxtKlS9HW1oa2tjYsX74ct912W776Fg9RLJLjLg/9jFsyYKxjxoLpRUQBWlSiFl8v1e0pBCG2ff8uYN+O/HWFEEIIiYlQQnLevHn4zne+gzVr1uCJJ57AW97yFpx11ll4/vnn89W/3FFibufG7JFqhvqtpX78mJwQMRlUEHmV/VFUs7Yi2RiG1C4d6s9TjCwhhBASH6GE5Lve9S684x3vwMEHH4xDDjkE3/rWt9DS0oJHHnkkX/3LnaS2iXf80vrbT78CrHsiXHvDgy4/BFCETtZPL1EaSWQaLn/HiGUVtnXs3wns3Fzl1seY4D4khBBS4tT6z+LM1NQUfvvb32JoaAjLly93nW9sbAxjY2aman9/f9RVRkMXkq/+3fqbk7DzY2Qg/Mg2hiGj5fTucvrRa8FgfXJzxRdDhwz2yefYMNA4zX0+Jo+7QPFICCGkfAidbPPss8+ipaUFDQ0NuOiii3DTTTfhsMMOc51/1apVaG9vz/ybP39+Th0OTcIlTjGItWfJsdnTRoaiubYdRWTAfngxOgz0a8lBvs0VSMFRD0XDcP1CCCGElByhheSSJUvw9NNP49FHH8XHP/5xXHDBBVi7dq3r/Jdddhn6+voy/zZv3pxTh0OjWyT15Jcg8Wft3dnTtq+H8wM+6kM/R7EwMpS/tmOn1PpTSKp52wkhhFQqoV3b9fX1OOiggwAAxx57LB5//HH88Ic/xDXXXOM4f0NDAxoaGnLrZS7oQrJG29yx4ex5AWBauxSOBoCOnuzfH7kV6JoJvPX8ACvPMdmmZGPkIsZhlurmFIIo217N+4sQQkhZkHMdyVQqZYmBLDl013Ztnfn32Ijz/G1d5t8dDhZJAHjsjuxpnqIvYh3JXHFqu2RiE0umI4QQQgiJSCiL5GWXXYbTTz8dCxYswMDAAH7961/jvvvuwx13OAirUkF3Z+sWSXspIEVrZ9p9DWBam/M8QcuyBNJKAYSkYUhikC6Ec2kvTmg1i5kCZN0TQgghMRFKSO7atQsf+tCHsH37drS3t2Pp0qW444478Na3vjVf/csdPeGlRrdIeghJRX2T+fe8g8WKqYYvDLZy+Yjsvk7/tm8HMDwATJ8NNLc6zxOq3RgZHpB9rFtyXSmSMBruB8bHJOY1SG3PamF8VM7plg7uF0IIIZEIJSR/9rOf5asf+WNKsx7WBrBI1tabfzdoQrKxGXjnPwM//oKI0NSU1doZVbgFqSM5PCCf/fschGQR0Lvcv1c+m1qAunrH2XMiDn2zNz1KTEOT9LMoBDw/Cqm1d26Sz0QSaGkv4Io9mJwQzwGFLSGElAWVP9Z2vZboo4s2t9qLTVrtw9o64OCj5e9jTgMams3fRgbNNqdcxvE2Vxy4u+FxeeAaLkNC5i020b6NJeiWdRtvvVQp1C6cKJEY59FhCSvZvaXYPSGEEBKQyAXJy4auWcDrTgBeeMwa2zg5bp3v5DPFrT3/EOC+35nxlGd8RFyjLR3yvb5RROjwgGR4790mru726dnrDjREYsxjbatF3MYWj4Wo1td4exGecrByVXGMpKqW4JYIRwghpOSofCEJAMetyBaS+qg2Z10EHLjU/H7hN8SVDUj5ICUiARmtRQlJwIyXHNjvvO59O0Vk1rjt6nwNY5hPIem0vsKuruzg/gmHuq6aPEZHIoQQUnQq37UNmNnOerzkZFpILnujVUQCQGePeyydEphKSHrx2nPAdZcDt3jElsY+1naaVBGUi2+caI59Ktm6mvkivb1jo8C214ChAg8vWiicjuuerfIvr5Z1QgghuVJdQtLJIlkb0iirxo+2C0knjfPIrfJpH+PbslzMrm21TFwWyVQq2pjk+WDbq6UTzxeaHMIU9m6TY7BvR6w9Kgn696aP67jz74W2rBNCCAlFdQhJVfZnatK0cChx5OpydkFZJINYh+xxmIGIKY7P7QEctvltr8k/39qZYYVSBGGVSgH7XcYsrxScXiwq2RLbt1eOa9/uYveEEEJIBKogRjJhLeS9dzuw/TUzazu0kFQWSbuQdHjY2y2gTuvKl0hwdQmGVJJKkI6PBCyIHrTd+JqqXAzbJ6QEVF19EcsY5QkD5ZELRQghxEIVCElYa0PeeyOw5WXze1SLZJAYST37dKjfpWh3uSTbBHnK+2xLJVvW8oHT7urbI5/zDyloVwghhBAnqsO1nUyaxcN1EQnkYJH0iZEcHTYf+gAw2Ofcnl1cxWWVyWeSgqP7FdZ9sGdbgPqaMWKkKlCoVtr2kEAYBmNDCSFlQ3UIScB0yzbbxs+OS0jaH/q7Nlu/D/VmtzU1Gb/4MWJOtgm+4uxJ/fsKs+qJcWDLK8D+ndm/lYq4zCFvqmr0ZLVspx/7d8r57JaARAghJUT1CckpW9JIZNe2T7LNjo3W73aL5GN/BX70WeDp1eHWH5RSEFC5itnB3mAP04G0YC3p8jhBj0cVFyQngjqP3WrTEkJICVF9QnJ8zHl6UFwtkjZ22oTkkCYkhweAB/4oQ/Y9cVe49QelKDGSYfARSeNjMWVol5kYK7PuxkfVbjghhJQ1lS8klf5RJYDsAiusRVKNtDGw39vqp4TkwtfJp7JI7t4C3Plrc77dW/IzJFwcz2V9+wLpSJfxtqcm09sYolOpmGpXlrM+MTx826VgcSaEEFL1VEfWNuBueQwrJNvSY2qPDrtbJcdHzfjAxUcAG18wYyR/+W3rvKkpYPv68P3wxU1oFLDGiurCtvXypbMn+zdXWAum+qA4JoSQcqPy
LZKKuIRkXT0wLZ2w4zbSSMZ9ngCmz5I/3bK2AWDXFu1LXAXJXaaHat6tkaAWMptFTY2fnAuhtUapiJNcsm2cfiqV7SKEEFLNVJGQrHeeHsUS2N4tn3u3Z/822Ae88rT8XVcPtHSY0+00NMnn7s3Zv0UmxlTfvA5WE7V/IZfTZx8ZArZvMIvRlySG45+e81UCFbY5hBBSLVSBkEyb4NzG1K6JMFpLRkg6WCR/tQq45zfyd109MK1d/h4bBq7+vHXeg46ST3upoJJEM2UGTkAuMXUwMiDDVu7ZVvh1R9oVXhbJqB0hhJAyZGJMqqGMDBa7J8RGFQjJNG4WSTeB6UVHWkjuc7BI6mWBauvF6qjc6vYLYPER8ukkSKMSa+3BAoyfnStjIxKv6opDn/JZrD1XvCIE/CeWMZW2PYSQWNmzXcRkMQwBxJMqEpIxxUgCQJuLa9tugaurBxIJYFqH83pV8okqpD0VU6aydCbeJnxjKwMroPgwDLHmxrrfSghmbVfPdhJCvEkVcKQ0EorqEZJuLuxcXNv2NyN77J2ygk44xOR95Jvm2NvDA8DaR4H/uhR44VHrfKMxJKhEJseHeNbiIdqLVG7IaZYiCFxHohQkJ4QQQkqb6hGSbhbJKPUKlZDs3W11kw7ZEmrq0kJy9uLsNlraxe3dkB4p5/ZfiGXt1mut8+3eGr5/gIclJ0Tatt5G/z4t69plrO1gE7PbdoTlfzz3ES11hBBCSoDqFZINzUBHD9A+I3xbrR1AskaEnz6Gtj0zWwnJU84Cjl3h3Fb79PDrLwZjI8AeH1FbaHETaHUF6NPUFDA54T9frFBIEkKqiASNC6VK9QrJD3wRuOCrQE1N+LaSSTO+sW+vOX3QNjaucm1Pnw288R+AuobstuYcGH79nhiSmRzLEIlhxEpQK2WBKUQftr0qReWnPGJ44u5HKexbQgghVU/1CslkbTQRCchDfPps+fulJ4En7xHh1rvHOl+dLVNcFTLXWXiowwpyePMaG5VaiW7DLoZpOmexYnh+zZ0SU1MTY/7z+KFbdT0tvCG23TBK3xVe4t0jhBDiTOUPkaiEU5aQzFFDK4vk06vT7dVIzKSOveTQzIXZ88xalFs/7Ojlh3LG5eleiLyR2IRPOSuUGGIkVWZ7Mgl0zy1h91CIUZQIIYSUDJUvJJWStLuVkxGtkQAAA5h9gHXS1lfM8bUzs9kegm9+LzA+Ahx5ijltWpsk7/Rp1szUVHShG2eNxLgtkrEQdOSXPHbBDU+NViRBNDVhVhMwjPiFpGEAvbuk+oGqQkAIIbFTqi/BpHpc22o4QkWuFsmTTrd+HxkC+vfapg3AcvI3twLvvhg4aJl1vtmLHJaLiF285iQcwlqJ/MSS4fK3jd49wO4t7r8HXZ+b1S5OTWdZR9wiLeh6A7aRj/vw+KgkmfXt8Z83CvbNNAxgYL9PEXpCCCGFonqEZH2j9XsuFkkj3d6F3zCn7dos9SB1hvqDPbxPOdv6/ZkHcuib/clr70BC5unfl93fXDAM54e+6/webQ3sc/8tqAhMpYAdG5xHHyob4oiRDGnBDUssSV0hGB2S8JBALxokZybHgYnxYveCEBokSxgKyUikn8idPcAl35e/R4eyhVPQ5IslxwL/cAmw5Dj5vmmd9/xjI8C+nS5ZwrYH+9RkduLN+KhYkOwj89gp9QQNL0YGpSSP40MwvV2jw/ICkNODMo/p2DHl2jgvX6bHttJETaoYpaMCYhiSuLdjQ2kPK0oIKSpVLCRz3XTDbFePDWtoAk57nyT3nP5PwdtadBiw7FT5OrAPuPka4LbrnB8yuzZL8fO+3dm/2QXCLT8BfvJlcQcqCjakYAGGSMyF3VtEZPsJas8+5HO+GJRk4PaKTdDtqTBBszVdOqoUxaR+fXF4OkKIC1WQbJOmQROSiWTuSQf6c69rlplo09QKLHsDcOTJIjCHBuD7kFQ37Gnt8tm/z2yvew5w/Nucl5ucAOpsllX95j85Abzyd/n7hceBE1zace9YyPnjXbxg5CSsizT0YWARXSYHwbWb9hJSZbI9YRkfcR99ixAC+rZLlyqySGrJNnEX61Y1JQFJqAHSrvOAJ35GSDrUmdzlEQuWSMDzQbtvh/n3yIBY3sI8iEM/s7OCJONsPCQVIDiCHCsjJVbVQPPm3qWiQxcrIYSUFNUjJJ1GlckF/cHdNcv8u7klelv1jdkueN0lnYWPUNXH6V5zN/CLbwJP3BWmYyEmu83rEfM3NSmW18gWwYjJPHFSLHGm9uveHRLqYC895d+A+adTfG8+GBsGBnpzW1c5WSTLoRA8yQ+GIbHofPEpPrwG804VCMm02Mo5JtKD6ZqQbLXV0gtilNRP9JYO62/bXgV+/V3nLFU/97w+DrjinhsCdEj1K8QPhsPkyQlg22vOyxkA9myTpJ8924L3KVD/IjI5LiWcwsSDjcVdhiZgiST128igfLpluhsu7emTJye8M+W9CHMMdm2RmpOupXtczivL9zJ5MBsGsGOjJKqU7YOsXPtdAgwPADs3yUseiYconu2JcXmG2kvzkVipAiGZJywWSc21vfRU24weZ38ivfv1h2NrZ/Z8OzYAv/w28Orfbcs7uLZ1xkbdf1PENQyfG16iTBXKHg/Qz9CE6Ls6RDs3ydjp+3cFW25q0paoE3ccZIjfcl31UJwjIvkwmUPmddkIyZRs5+REMIs7NVtlMdQnn1GGTU1NSW1WJjjlTu8usQr3UUjmk8pPtslXfK4uwBqbpXwPAPTMdSnL40AikbbkaW21TXeff/UfgJPPtC7vhdt423mj0E/DuNaX3o/KDRXUymgXCHEXEI9DSbquNw/HKh8j5zitgxQG7urisGe73INGBoEZc4vdm+Aod35dQ548gBHuLTyHC0J1WSTto9vkhO0MXXSY/AtzsiuLpP7meeJK6eeS44B3/nP2Mvq8Soi64SYkLXGLXnGG+bwKY2g7X8bUvOcBxbHtEbK2PZcpYEakYz8Cbk+sQ4CmgMHe/JTeKZuyS6TkUC+yo0PF7YedqUnvOq5DfeLKL6nBAnjtFYLKt0jqNLXGZ6VzeyiHscgkE8AUrA/HtunAx1YBNbXyVrf8DODhv8hv47aah4mkt6vPbVstMWpxXWillt1SiP7Yj3Uc6yyge7ws8KhKMDUp1puWdueKB3707ZVktmQNMPfA3LqZhUeSWdlQth0n+UDFu885QJ5PdpQ7Py+hShHhKVwQqssiqUrzxIGrdSfh+dX6m4NFEgDq6k3XQEOzOb1vL/DUfdryPjGS4y5CclDLBI/kjg2QFOFHLDF5JXaXiLs7cbjKQ2XeR6HAx0B/cerdLee4XuYqDGqI0HzEogW1+uuMDRchHCUoJXatkXjxs/Tr53BZjS7F87YQVJmQjFCaJywJ+xevZBtbbJ4TRywH5h5kft/8ktvKslEPpVPONodfBKQES4YYLzTfprQZiv3WGjShpqjE4Lt3y9q2L1/QWr+5vIhoM+YqAAuVzBAkQSiVkqz2Us3y5fO4cunfB2x9xawA4YR+H8l3HLQbUdbLmOqCUF1CMor
7y40gFkm/897NIqlT3wiceynw1vPl+2Cftf0gMZILlgBnfNgUpLpF0hOnB74RrixQiJ9Dk4vFbrA3zp6olUb8LeZVFagLkVdQKvf2fD5kXAW8yzylmKHLh3B10LdHPvftdJ9HfxlyE3Q8XaqW6hKSJ71D6jSesDL3tuKIkQxikVSo+pJhBJASkirJSAlpvci5Z7JN8FUV5y5SgXeuuB/ehRQD+VhV3GWOfNdnAL17PGpdRmwzKgP75QGfb9FbikKWlA7lWli9UPe/Kn/pqq5km2ntwEe/FZNpPuiJE6COZJByM05C0vDox+SEWcOsMR1n2dyW3UZRhzGMQEl1yZ4IEnDW1JQcg+ZW/xGXYi3fUyJZ2479cLN0F/iAD/VJcfaBfcD8Q6K3EyVG0on+vfIQb+2U2Ol8sHOT3CtmL/YY77ukLjxSaHSLZFmJpgL0df8uCQuYuRCoqcn/+kqQKrBI2pNfYnpgxmmRDEJLu3yODgUrV6JGKqlrABqnyd/KIqkXZy0zHZkfbMchNRUx6SG9w/ze3nt3S1zSjo3e840M2kYGitKlAG7VfGEY4jaLffSfmElqN//YSgEFEZIBjkFm2TweL/XCqZKPCLFTEhbJEq0jOdgrFSScRpKrEqpASOaLgELSM2s7xIXROM20XmWyVD2uEiUWO7rN9fTMl8/1zwd8QLlYjnT2bAM2vehtHc0X+RyVJ0jSg9P6B3slcN0rBCFMopGby7F/X0D3ayHL0NhWMNgr/dxlqysXl4iNa3vyUTzZEiKZQ+Z8AXSk/8qLtf6QjI8C+3fTTZ8PSsEiGckGFKGvkbevSElIJUAVCMk8nfSuzUYoSB5o3oQkzQDAcw8Bax+Vt0S3fqgA6s6Z5rR5B4vrqn+vKUbD7h67YPyf/wP87goRlGGytvNNsR68Khs8Kys8D1mP9sK/TjfAYgqAvJcJiWnj8pKFGpNru5hKMh+rHOzL33CcOzdJImHvnvy0X+mkUlKn2KkQeklYJCMQ9tobHwW2vMwhFUNSBUIyT7i6tkO0EfYBtvgI+Xx6NXD7L9I1JdP92PKKWax8YL/5d0ePuXxdvWmV3LM1PdEr2cbnItSFwq5NATbAA8OQsYlje9sN0U5kHREiRjLwCuMQHW7TysGsVCAs51mxhGSI41H0uLQY1j81CezfKS+x+dyeKONbEwCGhDfs3urwU8o6X6WiDAD9FJJhoJCMjNvFZH8oBagjGRT76BsbnpfP/n3Ajd8HfvFNuQn87KsiNgGgs8e6zPTZ8rnmHnHf5nJP0N23udaF3LsN2L4hZJxWEW9oKrs3Cl7HPXYdaZs4MZ52/eVh3wVtMkDERP7J8woN1y9RGyl/ytWqRazHrpRPy/FRYFivh1nAzharvmYJQCEZlUDJNj4nVtgTr2smUKNlVaqyPr27zWmP3ma96O1CsmuWfO7YAPxqlVgB3QhTi7F/n/e8gPc1PZJ2p+iliXJpL98MDziI54AdytsNx8+CPCbHfdtr2bMW9CYY5sDlYDEP2myxXNt+YjMWMZoL+VxnKauRGKi0zSsJi2SA63TnJjFKqHtz6NCtSjtwhYFCMipBT7ggQyQGJZEElp9ufh/sBWBYL3L7cHFdM63fZ8yxfn/qHvf1OY7IYZgXp14cfWAf/K/aOC7SgK7afN9Aphyye6O4tuO8cflZJEeG41+nbwdyXDZv9/V8WyR1IelmiQtxvRT7+RbL+gu0EbFeU4Z4efZsLT+RkUrFV4XAKKFzMQgZA0mJd9Ywyu+8ciCUklm1ahWOP/54tLa2oqenB2effTbWrVuXr76VJ/kq/6M44e3AmR+Tv/v3y3WiZ+/u2Wadv8MmJOcfav3+l2vd3dJ+2Y/6CDl9mpt3cgL466+AW68tYkmRkBdn6Is55LFztYDFeRMJEyMZ03ojNZNeqJg30LzHSLqtK8Q8QUbHyRWv8Iw4LKJjI873l7DNFcstnpqS5JORIY8XghJl22vA9vUxlrRSFOm6dbpMJ8e9y4uFvscUcNvUS8ruLWUvJkMJydWrV+Piiy/GI488gjvvvBMTExN429vehqEhhyyvSkcd+CwxaB8iMcYYScW0dvlUrmW3C6ljBtDUbJ1WUwuc80lzuMTRoWzxmZqS7XO6eRuZ/6yu7Y0vAlNp4fnCY5JZ/uLj8ndm2QJcLLqbPxRxW0s9ftMPe9Y+jnkf5d09GrDNvFg0cnFt51mkhS1IXghjflabhllvNi708zk1JXHYOzfBe2hVH0aHpaRW4Gs7ZotkuaKEbxyjNAXaDUXYV9s3SHmx8ZgSrHLahLDGBUOe3WMjkohWxoQa2eb222+3fL/uuuvQ09ODNWvW4A1veEOsHYuNvHnxNCFpuIgEP6LWr9OFpGFkl2s455MyT0Ojc4cOXw4sfB3w6+9KzNyercCcA+S38VG58Te1elsk1duUYmQQuOkq4A3vtorH/n3yMNn2GnDMmwNsnMMB27EB2LMdOPwk31kxsF8EdFhif2AETKixWzliP19dBFwsutkowg0wBkGaSjm/fI2PhYvRDUqgovBFskh6EnKd/fvEM9E9B2hqsd4/svZBiLaVgIx6bReFUhKgMb8kF01c20KCdEPMxChQ3+A8byhy8GSFXmUpnSO5kdMQiX19EiPX1dXlOs/Y2BjGxsy3hf7+PNUQcyVPB8tQN8kQWdp2wsZIKtQINakpcS8/cLP524HLRCRm1uHRn44ZItJ2axZJlTQzMuA8fN/UhNSBu/0XUm8LkDjMfTuBtY9I2QQ1HQCeulf+AcC0VuDgYwJvJgCxlv76u/L35Dhw7Gnajx7HNtTNLhHPzTGot1RfV5wuu0K7R3ds9E7WciWmfkR12e3e4uxutccXR2FiXPoVdr87nn9lFpemwlv27QTmtsTX57Cem7wZD3JdPiUvxE3TzCFvy5ISPBnj6lIJblo5EDnZJpVK4TOf+QxOPvlkHHHEEa7zrVq1Cu3t7Zl/8+fPj7rK0kIlmtjFYNQYyebW4MvV1JrDHj58qzm9vRs448M+C2vBvertfqfLUH1OIqd3t4g73er4lvfJ58gg8PJT7qveEaDWpP1C3rDW/Pue31jXGytxu7a9LJKl8HYfw3rtIjKneKSgLnL974gi3LVUVQz7RFn49XVMBRlppQgWyTDHK+7zNK+nvdb42EiOhfFjPAaDfeI9yhqsoAwouWQbn05MTkQc4SgHiyRHtgnPxRdfjOeeew433HCD53yXXXYZ+vr6Mv82bw4w9Fw5keWeto/b7PGw00Vo2Ldu5d7Wk1wOWiYj1/iSPvlnLZTP9c+5zOZwIaZSYq3U6Zxp9seLKAPa212QD90Svo0g+FrzbPgdL0+LpNdwY7ncpX22wVMM5Psm6PMgcu1anPvHZx25ND3YZ41FswhJN8tpmH0S83b37ZW4Q/cV+ovHyQnxYAR+YMccu9i/T4SiG5PjElazY0O0daRSEoMXFxUzdGMJKEm/LvTtkXCq2BvOdf7KJJKQvOSSS3DLLbfg3nvvxbx58zznbWhoQFtbm+VfWVAT0OtvF0f257GX1UQXI2GFZEtauO3R4h
SdYhDt7Rowz/25B4mY3bNNRpywo4vgzS9Jhqdh2Cw6CXFZB4ldGh4E7vsd8Merg99U7YHi+vBqcY61nYl5TWZPC9uGNOTxW54skjs3A0N9tokB2y+gjoxFuMVJrv0YH5Xrxz5kpcItjtRXKMZkBZoYl+oJ+rkWx8gduzbLA3uf7d4R6FzKcacP98u6dzkYJiYnJMs61yE67S/Mg70SPx7IwlwGGKkQQwGWgEUy7D0qyr01Fx0Zun8h5y9hQglJwzBwySWX4KabbsI999yDxYsX56tf8ZHvQstJu5XNtr6Obo9l9d0f0SKp3CTHvRVodY9VtZI+gxuagNnpY/jsQ+mfHM7uXZuB3/4A+PnX5C1fZci1dQEf/obsg9mLspf74JetsUCDvcCT9wCvPQs89ldgoNe/q3aLw9hwsBtE6BtCeoFkDkJSJ2iMZJx3k9RU9kPdsqoYHgaGIS8ekbPjw67P53sp4BermUr5x8LmM751x4b0GMo+2bthV6EE8qj9mlQnv63/cR47/WV27/Zs0bhnawzrs13E/ftkvVFFeKlFsfTvD74tQazjed++fJVN0ymkRbIUb2bRCCUkL774YvzqV7/Cr3/9a7S2tmLHjh3YsWMHRkY83AvFJkvoxU3Cw0WdAOqbPBbVd3/Ik6rdJlCbW8ItrzjkaPl87iH3eTZptUJ1i2TPAtMSecrZ1mXO/yIwYy7wnk+Z0/Rkhof+DPz+CoeV2faD08Mv7tqUhv6gTyBzw8qpblzEGMnY7y1uDUZc0fioxMLmkt2c0zbm8+YbsW3Hl1VbW45WSb/1xWwFymkYU48OGCmpV+i7SIz7N6W1NTzgYg3O07kSOUGuxIRDOY9J7nRuRXnxt1+XUQ0QUZatIEIJyauvvhp9fX1405vehNmzZ2f+/eY3v8lX/3In30IykfAu4+M5so1eziDkeucfbP3ulqyTdRO2XXDzD5HPzevcb5D6DWd0yPyul1uw74OZC+Sza5aISiDbihUkS9bpZrdXZZnn4Nre+KK4dVTBXhUikIC5z3JKHnHY75k/vVzbMd+N8hln570yj98Mh2l+y5cZWZvmVpNV/Z1Hi2RkQjwkw5aByjWkw74/y7wOXyAmxs2hZENhE907NqbDlMII4jyF40Qmhj4M7Jc4Sot7nxbGKIQq/2OUxAkUkqi1Gu00t0oii31M6ZoaWUemGpBHOaDGZom9HIqhBNJsW1hBQ7PzfI5ox7FrtmzX6HBa2NktZClrluFgr+narmu0zltTJ4kF9n3ulYgzNmKOGe74u4NFcs92YMGh7tft/l3eb9trH5XyRYrl7wSWv8P8nkC6ba/z3eENIaCOLOxNuYBvzHlpvxD7qhTuaz5Cssg6Mpa2Yh26MIgIynF9cUdF5br5KmmoZ773PdOLvdvl088aaa/TWIkow0b/XqB9erQ2cjmngy471C/Hq727ZI9J5Y+1HZdFMpGQA1ljy4pO1nhnX+vfa2qBZrdkI9tJ5XfC1NQCF3xVBOX02eZINdkd915VTQ0wa5H8/dKT2a7km66WEWoU+3eZLrJ6W53J93xKLJBnf8I6vbnVvWamW+khhXoDf8eFwEFHyd+ZAHuXC1EXu06oupaKh2/Rkn8SwPi4v/vc93oO6NqOWyFkJVe5rcu+3nzcoNyEkNM2l4KYyyOR6kRGOE/GR+MZycSzL34U4GGXjzjpLEpNSabJKUQhKHaDQolZJP2up8DLRJjHbZ35CtnZt0Osp15DQRaZKhWSEW4QKZcDXVPrY/W0rauxWcSWcv0qspoP0Mfps4Hz/k0EZaOLRdJJWNhHwjnyFPn82x+z593wvHXaLT81hVi9zSI590Dgn74GLDrMOj2ZNIuoAyJ6u+fK37+7Anh6tXPf+/ZKPB4AzDkQWJQutP7CoyJo7dsRFCeLsLI0794MXPNF4GdfA4bCxmJqB3FyXNxHmYd6UNd2rgQdUSfm1YYhH57trFjTlJw7QeLZ3PoTmDgEh5NFUv87YOd2bpJ4wVzHV1ZJVVHI7A77CRfjSVessberBc9DFfE49u8D9nsk6U1OiMXUq6RTXN3KNaPfcZ25WCcDzFPC5aMqX0g61S6Mct9vbHJe1ssimbB9Vw+DaW3ZIqzWFmUQlwk7SDPHvEk+d260PoD83nydRr5xQ79w6xuBtk7z+z2/MV1V6oJKTQE3XSmfMxcArR3AgUsBJCT5Z/tr4eOFJsaAx263jhGuUO77J+6SN7+JMWtppSgM7HMvCaPI99t90ePs4lx3iDb27xIhFGq0Gpf2Uym5FtyOleM1Zpt399bssjGhYiRDEilmUFvf6FDIGLpwzeds4fTtWyY+pYQI0B83EaWfC+Oj/kIo5/tKHuKX+/bISGxubvX9O8UT5FTSyQ8D8hIVlKi1RR1XXKhlS9OtDVSDkLQLNiC8SJs+2z3OL1lj1nRMJsOPdDNjrpTIsQ+ZFYeQTGgZyF60dIr72TCsD95hzXI3fQ7wpvdal7OLXy8WHmr+vWMDMMM2wlHGKpm+uPbukL7UNQDv/azs12ntYvUEgFf+HnzdiodusQ4nqXPTlXKD1kfS0WsyqhEpvG7QEx5WoKD3DL8HgJGyFqH3bzBYH2K5RwXeSJf5/bY9RFeU1VlZtHNh9xZ5SIWpFuBkIe23Hzc/kR/yJSDWGMQY2vJKJMtZ5/iEAsWhI+OORwvSHzcRpW/v8ECMQsgFr2oSkeozasu4efd0cexoxPC4HqYmAgzXmocXi5xiJH1+Hx+LZ9jWAlD5QrKmNjsxJexTs7lVu6nYlk0mZbjCWYukHI4XTidd4zSgs8c7SScyiWA3w0QCmLlQ/t78EvC7H0o5IN0F/N5PS8Hzt19gThsN4YI4+V3m38eeZo6qo3jxCet3JeI6ZgCtmvXyoGVmP8OyV7soDzoKePuHgKWnmtNW/95a1mZQE5K7t4olU1kznW4C9gLGFlzuGmGtPg/+WdzuLzzu/Lvn4Y7BOul14wxaWzJCeJN/mwEbiBJbpR5qWcXeFU4WM4c2dYukYZiJD25dKHSyjZfFMOzY9fkmbmtpuJUXYZU5bG8sLxgh28h1nU7WxaxICV2chghhyRc5bbPDsjs3xpOYWwBCZW2XLbV1sLyixvWmmUyaI+DU1cunxWwfUMg5EZOODNSQYYilb/1zIqYAcR8fe5r8PfdAs7zQYSeKe+L5R4HDTgjel86ZwGd+JO3OPUhujG1dwNiouJK3r5eknsNOlPnVBWS31HbPkc8oBbGVCDz2NOD17xRr5/xDgGf+JtOffdB5fp2MGIjprhS2mcfukM/Vvwded7zDDF7JNnHg0d7osFgV1LXg2o8QfYrbNV+ILMsg86ambO5nv7aDrDtGd2bsp40R/Vg6lum0L283QZapa9t10ZBCUt8/Q/3yMh7mWeR1/rr+5rV9eQ6xKVb8YFzisdRO1ZBUh5AEgGTCNKnnIiT1RWfMC2lJDHPzjMsiGXDW498GPPAn67Q1d8unPdP8hLfLv7Aka6yJOBd+Qz6v/rxYfW69VoTIkSebLh67kOzskc8924B1a8RKNDIoboADj5SyQHZGh
8WtrWIeDzvRjO9s7QLe+VHglp+Y83fNEpeCkwUqWcRMTv1hosIpANMdb09yAmxWLa8bV8DtCuJ6j9pGTloqF4ukx7yRr8OYLTgF1pE5kRnYpoBPRz/d6HYsB/YDtfVA07R89Swe3NzBQZgcl5di3bPjR5CBEsZHRcA1Bth3sZ8KDqEjhe9Ebm2WuXjUqR4hqd9E4qot6fTwjUlHxmKSDBojaRjAgiXAG/4BePQ2yaje+or5e1vEGlt+KGvurIXm6DkvPAbMOcDMDG/tsG6C3pe//Mza3lP3Apdelb2ee2+UdhVZ4tQ2Tvj8Q0RIbn0tuy0VAxuXdSpMO71ajN2uzfKvtRP4w3/JtPZueSFQcaRRMVKIbE2PvF8MxGOV812px6QY1++7H+yWY98Gg6w0wDxBl7e1Ndgr9fa656Y9PDGuypcg52GEGMmxEdOzoQZm0PFaPpWOVW5uDV7T0a8/TvG842NSZi1XV/7YKOAyZoUz9s46vJAq9/PsRSLGLbPbhXvMqsmwtVk0i6Trl6qi8mMkFXoSTE61JfWs7LAPWq8TzaP+ZC54tZP5Ld2vk98FXPyfwD9+1pqkZB9FJ27mae1vedkq+uz7paY2u3SSH+ttJYzsb9D24SbnHCCfrz0j/dFv4m71MD0JIyQ95rVbSP94NbD1VfN73x7grl97tB/AvWQYwLb1MvpFoP5GIB+WKtcmg7j6DZ82/NbhNG/YbfSzSAZoL+fd6mGxHhsRb0GgkBLbfSXTZpzH3UE4Zk3wWZ9vZrvL8iOD6Qzk3pAZxj79cSq3pOrsBi13lNnHtnXF+aiy45VkGKW9QI0Y1q9TxYqRzMU9HdLdX6LFyIGqEpK6APTY7PZus0C3b5sh+xDmRIsra9tTSLrsh0TCGuvpWuw8Jo55i+myBsw4QEDc3HbO/Zz7MbK/1adS1gLNBy7N3ie6aG5oAg5cZn5//hHrTTx2K10AF5Ji2LZtg73AtlcdZ3VsMMj5NzEub/eT496iy3V1QeKnCuhi8izQHrHNOObLGuM3T9bWuBdPpaSvXgX/A7UX9zkQ5X4ZsQ+G4Rw/HbaNUPMHEEr7dgLbN6SPkf3HHB9WnvGtQe4TebbWBbJI5uMF1qfN3t3i3SqFIu55pIqEpG6RTMowU9PnAE0t1uEFm1udkwWcGw3ZiVBKMmTbEdpI2ty0+sne2iWfTS3Rh+MKSn2jFDK3u0fe/3kzuSaDISWTjn2Lc1u/WmX9PjaCzH6/5PvAmf/ivJzK7H/je+QcePM/yve/3y+lJRSOxZZ98LImhLnBOGWF77WXh0hYRYqb68WtDp1T3VOXJpxxs3Tqf6e/6GWM7K4qxzYi3IyzhGQIF2HWs9Bl2bDdGh32H9Ep7Eomxq3Dtzpd03GsTw1oEKn/+XyY+lkobeuenHC4dnyWiUrmWOTShtNY7bYGh/rkXjXskOkb+lHl5UUJMW+gBiLgVF6r6DiI74H9kuzkNEBAvt+tC0gVCUmbRbKhCWhuEaEyrc19uaBtRvndc9kQ806f7b7+MBZJ/eI8/QJg4euA930uREdyIJEEzrnEOq1tOhwtdgaAdltco2Jgv/WiVaPf1DWIYHXbH2d8GDjrIhGoCUjiSiIp2eSP3G5dv/4ZBKcb7UtPAffcKCWGgrqt7BbJ+kZTjC1YYnasf6++csc/M8uNj0p8qnqjt4Q12fo1POBf1yzIflHzWOoyGjk+aN0skgHnC8Kky0M6KGrVetmfzG9+Itqn7Z2bAljJ4vDdp4LV0zQMwPAowB4Gp0vWN8cxYd2n9nUHqcUatL+BB0bI4dwLnWxjnz+PFkmnc9f+oup1LKamIo5SFtaqWwSLpOd8laMkq0hI2iySlt8c60sEaNPvDhfCrZZ1Y/S48O3B7p4xnwFiJPfvEreI3Z19zielbE+hUMMmKppa0ochSw2IVdINXUipm3xTiznNSXi3TU+7vdPnRmcPcNLp8veT9zisJMSFbxdkRkpiGZ++D/jjVdbEJq92lUVy2Rvkc3zUFHYrzpdySoAMLenYnkPbt/1C6oaqDH3L7LZ+OwmgLIK40qO6l6PcbD1elqK062jF8rquHW6xjm64HEVelkUmwhtPkF0d9MG5b4d1X9ktzkHaGeqXF7lAw9mFFEpxiopII2AVwYoX1+pczw3tb8/YUVsDuzanR35yiVlNTaVfTPQmSlB4hYl3rzCqSEh6xEhGthzm0SIZRAD6rcfPIqkEdWrKo9hyAbG70L36XlMLvOfTzvGbG16Qt9zVvweeSotAvbyHLiodSa/3iJPl740vaG5DQ4LidTciIKJu/07n5gxDfr/pKqlVuWuL+Qa+O/0Q8nLNpFLAK0+bRcg7e6wvQ4mkZG+3pMt79Npdxh68+nf5fPgvkmCjW9xyKTmSRUgRYV9ssM+nOG9Ai6TT8GxG1h/ebepMTniPDew0RKsTcRgLneYPtVwAJRno2BnOVsugfUml5J60b4fsX93DkFl/jsk2Wa7RHF8wAi0b5CXLrbmQ/fMb+cd/hR7f3fZdQPVp75vXiDTjo5JQ6JSIVAo6LUwIQAVTReV/XMbDlgkhGtLOjrCubc+bcJis7RBC0pMSzAJr77a5nQy4WnYXLJF/j9wq7tnaOqmpeN9vgXVPiDVD0aTVvgiUyQ4pPbRgCbDpReDWnwNds8VKa0/oefJeWScA/OOlwN5twCHHmuLVSAHPPCAF39c/B7zxHHPZkQH5fcsr2dnjihceA+74H/N7c6u49pVw7egWwaJqS2btvwBMTYrI0oVWlLijoEIjzIMxNeUu0n0WzTrWbuP8uuEXxK+fY04kawAEyGp1ejjnVD4lwlMs0KELMFOuluhtrwU493xct751JW3tT4yJ+NVjwqMKgVTKloyUvofFLSwcbo3xtW0/H21fwhaXj7rtKlxjdMhmaDAiNOpn1Q/QXu9uuVd2zUpfn0H74DBf3IMtFJHqtEhmubYd5m9MW61qbFrb72SzeLYLZJF0KpEDyE3RUzRF6lh+OfNjUufxrefL9yDX10nvkJJFev1E+wO+2c8K6cLBR8nntteA5x4E1j5i/d1ImSISAG78PnD3DcDd12vzGNaxY3WXXyplxj727TG3V5+/d5d1ne3dwAcuM793zZJPVR+zb7fZhlf8pf6WPzKYLZCjuI/0DHlLW7Z2HZMHXNoMneHq8bLnlW3sZITZ5iMUvUgkJGRE1UgNyoN/Bv77S7KsW9/8CGQlylrIYXm3dgO24znNbXEvERn1gZsWP7u2iBiwb8fOTRJfHaqcjwv2lx7HLudwTvvicGMP+yyKkmwTtDpFLCXECrSMnYH98sKhXkjdBnwIu8Ly1pFVJCTtrkALDrUhO2aIC9GrZmGsFkn3Lvm2a/8+c4EkEdkLb4daSZGYMQ/42Le1sj9+b3Iah7/evV0ltoKg78+Dj7aeL/pY3IB7oP1LT5p/p1LmSDpAdsarXfRufAG48nPA43fKd3sgescMqSxw3App9+QzZXpGSO6Rm53TeLU6qpC54tHbrd+jWCSH+rxdvUDamJBHV6Kl37ZrPWhik2ozp2zQBPDfl0kM
6t7tCLx9j94m+/GPenH9iPsmsmvbBRWv5tlMABGq/z01KSEeTgW53bqYZZB0erm2zTQyKMOxDuzPQRAHwDUZKQfXdmhxnqMyybLE+7i2hwe8rfc56P94G4y0svTPtnN2eMCnIkfwpgNRwmKzeoRk2BjJZFIezHaLZJiC1PkqIJolJG2/19T6WyOd2ikHUlPusXIt7cD7v+D8m9PIFU7Y90lzK3Da+8zvvbZsT7e+6C8udgvc7i3WeW++xmpBuu06+f63m2SS3cqnYjzf8A9S0kiVSGrtMPsYJDHGnuVrt8aEEl0ajkLSdlcNVYbH5Q462Af8+rsSd6rPolsds/K0nIrL5+kOrVt8AyUq2bAU/9b6ONArxyo1JQ8z+8uNPr9X/JnLIvK3xz4JIvjC0LtbXpac4uCyiGJpTc+un88FTdYwLB8OXwKIc59pvkMahrzX26/9rPa0CZMTAc7vuC2Sdvd6oIVy+tmyvj3bZJstFTIg03ZtSc/rpyRj7n8RqR4hqWMPgA8jqHxrTHpkbYc6ERLW8ZRd1+H0PUemB7TetXW5x/XlkymPmLNZC4HFR2RPn5fD6DxLTwHecq783WdzM6skJb2oOSA34v3peY1UtrhqaAIOOcb8PqiJAT254LVns8v+6OjnrrJI+pXoUagRflZ+SD63vCxJPYp8PmydRKqbC9CtH2sfBXZsAO78X9OdD3gn5aj1zl4EzJwfpAPR2fSi+bdfLGWmC7Y+PHQL8OMvWktQ9e6S82lgvzy4nEabUc3oLwdxWyddF3XyInjMPxVieDu3dpxerr1qDeYyJnxkYrK4R2kv7CMi6/q0WVP1VXvdjx0pkiDyPXd8laTPz2nL7NhwsEoDXi8WZUb1CEn9ZqW7GQGEusrsgsGLXF3bnTPNki5e7cZtWWwMGE9YiHO/cVr4m6CTG9tpPzqSgOMK1fJbX7MKPZXlPHtR9rnxp6vl0zCAcZuQXPYGU8ABwE7tga+Xwfjj1cDmdIzd/EOssZF2OtKjA/Xt8a7jBqTFbdrSOU/LfL/5v81knVzcumMjZvtA9rniJCTHR4G7f2MVYI4LpxnVBPbax8x29cLtFndqymwrWSOjgFxzGfD0/c7t5yokdmww/15zt5R58mtTj40FJJFsuN8ce17HK6s+UuhAXFYjh2V3brSeTxMuIyf5WjsD9svJghbUIjk5kV0OLQiTE94i2jPuMIK1TLe8W+KCnRYIGyPpJe5DWlPt80R9ocm38LJsh8P+8t1O+73Gb9nyFo861SMkWzslqzeTbaURRohNaxch6ht/6NBu0GxhmSAfzeli6XqtyJyyzjVcizgHbS+PF0JNjRyr1s7wy+rDLc6YB3zwy+KirqmVTy9cdCRmL5bj3r9XMsIVg2khOa0deOv7rSJ2305xu06OA2OaQDjlLOD17xLr9uHLZdrN10jmNuBeT+3ks2REJjeaW0V4G4YZ8J9KAdd/D/jNf1pfpsbHzOM/rR048e3mb1tels9cxMSuzbbt8LAOKZ59EPj7auB3V1gXc+uHXoLpwT/J9k2Muc+vC69EUmIQh/qyxyf3Y2Is2JBs9ljYNffC95pxi7nt35u9XV6X6f6d1mQdwN8dGNS17YfborpI7N1lhnjo2xHIve2wjqx7lpOgCygk9+2Q88Kz3JSNkSGxOgetKRk6McPh9z1btfPQx7Udp0Uyq2suv7nu46jnVlQxmsb+khZ0XZlJIcS+77Xmc7yC9qlEqB4hWVcvYsBpFJswF1kyKe5TXay4oW5uXbNECHa5jEDjRV09MOcAa9KPX4xkEDpn5m5x8XrI50pNnRyrKNbW150gyTpnfwL44JekeHmyRo5/mKQbnaYW4LAT5G/9Ab3lJflsnwEsOQ644KvW5e78XykPpFzbbz0fOGGlGUOpssIB4KE/u4tIwFoL04lEwoyXVA+0wV55wG17DXjlKXNeFXdZUycvWCefCRz1pvSy6Xin1JRYJwOP3BECR4ukZrUddrEqKvbvsiY07d4KPH6HtyVA/abqq4YRCjo7NmaLNDsT49nzjA75Xy96iIPO5ET48Z3ttWEHe7Pjcy3EdS27CQvb90y4R4hr3Csb3c8DFNS1HUpwpMmUqRmGa0hTTjUHXWZQ9wvLz077MxHuXh0mRtJtX+ohNp7ZzVGw+9cD4FpCLL2//EbXCWOR9LLa7t8p3grLPg5i1fWfpVhUj5D0JM9JMdPaRAw2hHCL69TU2m6SMVgkW9rhemaGskgW4uwOuX119SLYDrDFSvoVaPdblxqWUVnCRgal+DkAHPMWcx0f/nfrck/dZz40VS20jnRbiw4359u3wzvjubHZ/TeFim9VIkYvMP7ac8DTq6XAr3I76212p1909qatQgP7ZVv3bA1383cafs6+vLKkTE0Bt14rGep6Itsjt2oubm3ZVAr466+AX347ex1bX/Xup7pxq/XYrYphTmU/F6wSFjW1Znzuzk3Z/duxUSyxSrzc+P/c2/zVd0J00IWxEQ8Lkv53HiySeb1X2K7trGQHxFxg3756PZnTbSav9UdwbVum20VMjtsaJgPbLSnPLXu9f59/ZQc/4nz0JCCC3O/lMEy1Ai+L82CfvBjGnbRWRCgkgXhjDBMuX8Kuw54t7tVW1P7n/GZYwq9IUUlk/stGudmV1ah/HwBDXMN6glKHLQFp26vyDzDjKNVnMgl8+HL5e2zUvQ5jfaOZHOOFipNc/7y4s5+4y/zthceAe34j09V6dFe/Gjpy+wbpryUJIuZjrR4+654AXnxcMtT1mMqn0y7usWHrqre8DDz3kJmNXNcArHh/ut/rvW/gKu5LWYN1669vpnlIlJBsnw6866Py98A+YPNLsu0P3QK8/DTwl5+K1fqWn1otjlnXP4CXnwo3apErHqokUv3JgO3nHEoDb5ehl7fYrXZpbATYBi+hEcra5de2f1d88XRtG9avnqLT4XxKTcVQrzPG+1EqJR6brFWEDD+wH1+nY6LvV8+hjR1XEHL+wkEhCSBWi6R+rMOUCrJjj8HMh9G0UK7tIJY0O0UrTZRw39et6YSb/rSQVA/+jhnIWuisi4BjT5MSPTrquFpG0OmSc2VqQosRS1iTwjp7gu0TZencvUUsdLr7V0eJYcsY5Gm3+NgwcMN/WscYz/ke5uJm1B8oTsN09u+znmN299N7P21u874d2ryJ7NXqFkmVYamvR83cvy+dDOKzSV4MpreltUteANSLQ+9u4Jm/icX1z/9tjou+Ya0UIVd84DJZrqFJwiXmHCj7zJJ0E7GDbpa58VFJCBoZNPfj84+IxTQOCl5yx8O17blohH56eYwyBbvDN2u2EeYHw2GyhwB3bDaMaztXt2yEe72BPL8YOK0zhNh3E6F6hrvb/Xz/LucawKWrIykkATi4RHJBWz6qGOqekz36TilaJIOutr7Jf55yIGOR7BVRogtJ+744cKkMhXjMW2D5UYkenZoaMytcZfp2dANn/Ys5T+fMYH10at+JB26WTxVTCWQL/s0vaV88zpW/3w+88nfn3/r2prNZbdOVFUOPW3JyLfXtsa5bF5s1dSJ
+O2eY68q48lMizvQbt3rwJJPp4tSaFVKV0Nm7HfjpV2zFwCOgzo3WTrk+1XHp3Q08/7D/8tNni4D88OXy95J0qainV+fWL8D7AWwY8jJjGCKm7/gfsZja66dGIUiCki++Pl7za1AXbBwEugdrHdq12Xr+xWmRdJrfQPYIWU5MTgAvPSXnQP8+7eVMa08V43Zb/t7fShuWfnn0P/Djy2YV9RtwIVdSU9ZzNrRr20FYTrokIGZeNgy5d0SJ0y0iFJKA7USOwUqXabcAVrWc1hFgW2vqJLlIp71bprdNDyZGy6rwuVvaNtJu4IQ8iEcGTatT+3T3ZZJJq0CrrdPWo6GEhhKS9Y1Ai5axPmNesO53zAiW6T7YKxbPlR+0Ttddqnu2ygNjymM0k307ZEjIm68B7vsd8Itvitt2/05xkf/sq1LCyC34XH8gOdW/7NtjXbdKkGmfAbz7ExIP29IJdM+Vm/6atBX10Ttk5J57teErlSUukcwOvN+Vfii9+Lh8Khd0WLa8LMJaWXzVsVDH908/Dv4AnNZmWowPWCqfm14Ua/HUZPjkG0WgF0ibxXbH+mjreuRW4PG/RlvWCSPzn9uP2t+2+WIRsi4EiZHU93tqSrwGe7al7yNhhaLHdLd5gySX3f4L4JafANd+Q9y9Q/3ZYR9Tky61Sw1g9R/Ean7LT4Df/wixmtH0prySEuNi22vi1cms3+8Y6X+nnDfd8mKrTd+bFuyeNTlL1yRJIQnAau0LG7dgI1/uG4vnxCvxJgRB+2qvu9nWBcxZ7BzHFRtFEp9eq62pMTOnh/u1jO1u7+UWLPFfj4qr1IVk10wphH7qu4Gj3ujfd0COyUX/Afzz/7FOP/Xs7HmXniIvAzrnfBJYeJj8PdgL/PgL4oJ1Yv3zwJ1a6Zwn7xGL3iO3AtdeDlz/XZmeVRcSpkgb1R5STnGKfXudheQRJ5n7NZEwh9R8OW0JefBms08ZDHP+/TbrzI50qR79nFZJR244XT83/j8R1k/dJ9+758rnYSch8Dl9wsrsae3TpfYoIPGrV3w6esJCoGLcsArJrQ4xZH7075OXir/90bsmYygXpZPb1nk2Vyt4PvC8J7tY5QxDXkj378xdI+Rq0QTEI6CHwvz862lBlAr2rNi0Tsp3KTa+YIYBeS7vcV3EsV+2vAL85efA2keiPZ8Dxw17uLbVd9dapobUvw1SyLwEoZAE5CbQM1+sPvZRb0ITg2vbrXxD0FmDEuiaiuEGVVYWSR9U+aiXnzbfVg84Ap4HYsX7gSNeD5z3eW2ibX6VEa4EjsruPuqNwPFvDTCikkZDk1jCdFHUs8BM6lGc+u7sZecdDJxzCTBXK1L+2rPON+CbrpSYuiDYLQiqvTGX5CLF9tdgOceU61rVV1UWXiUqN62zFowHzEB6fRPsbvTNL0kJoYduMafZxx63Y98nTsJucTor/4AjgPP+FTj748A7LoTl+OtivqkFOOkdzut78z+a54VhmNbTsNgtrRPjIrh1S+nUpDXjf9urCMTUlHms9eHjvEZnChW7F3S6g2sxDte2YUjS2i++KVUQMgQQQ56bmatF0ratYfbp6LC4ou0vfBNj8jIRtC173VTA+SUyDoK+fGx+Cbjx+5LUd/v/RLNkqvMmlGtbG/xAJjgtkL0e+73LY/ZSgkJS0dAULSnETiECysNaJGcvcomxi+PG6jI9SMF2L9yy3/NNwiPZBjAFzCO3yufcg4CDjvJeprEZeNsH5Di4Ya9LGmYEJTuGIduhtzF9trhXj3urfD/i9cDCQ937veI8a/ykXfC53fA++QNgybHZ01UYQKaPaSuHn1Vt3ZPWwuPKIjmtXT5VQlvXLBFhk+PZcYR3/FKtNL1MIi1QARy4TL5vfEHc8zr3/0HiLBU7NlqzO+3JQXZ3edt064hKsxcDBxwJHHq89YV14evMv3vmaeEPNpJJ62hId9+QXeNz7aPAY3d434fsD+E7/kfCEm75ifnbcL+44RV7tkpW/F//N/tYKnZvAX7yZbFGb1pnLWXkFlMHhLxnOpkaveZNE4c1cnQYuPXnwG3XieX96fvM3/R7suu6vI6JgwVrw1qJlUtNudc4DJrI4/X7X38lx/6e38j3ZW8w798DveZ8uzYDV3/eWglCR50Xx78NOPwk+VuNyhVZBYVw6Tth9yqocm2huhBFSNqPp9M89uXh87JTukqSQjJu4hCSvropZFmh2nrncbtb05YQv9FePHHZXkubOQrBghs0PVZoFw+ZQvEhO2mfff4h1hCCXBKU1Dmot6EsqSefCXzoK1Jn07EjaabPlvka0i9X+2yuYH1MbkXXLLOGZ6b9NPbakkYqPQpN+sb5+nfJKD/vvjh7Pn2c6oyQVCM+pW9hiYTpRr7/D/KpHoaqOHvGvTRlZiIf9UZJjLL3UZ2/d/xPOgHhSeDX/wHc8H+BH35KHr524Wi3ch57Glz3r24ZOe40sVK2dDhbiXUOWgaceLr5/e/a8I6TExLj9sCfrKMv2bHfo157Vj779gK3/UJCFja/nL3M9d8DnntQBKwTT9wlArR/L/C7H1p/G4lLSLo2YjMA2b7HISTv/F9g3Rrz+8S47Ku924Pdh8Ns5/MPS4zvn39qDhDQtxf4yVfk+JqN2j5dV+48eaA3+1pu6zKvHb1A/u9/JK54dX3ZUffGae2m4UJ5WKJYYz1LeQXcl/YXmNuv87b6ORHUImnpnz0cwOE4Tdrd2GFekkoLCsm4UVbNXEr/OJEIKR6D0NIuiTRRR3sBgp33kbpbxPI/WZOSppvYvq86erLnj7Ke+kbg0OPM71GL1wPIHJQ3vltc1W893zxnamrE0hj0HFIWtf1aIszwgBQRt3POJ+WzvtGMWVT07RWLzu9/JBbC8THTGpmskSEaV37QdAXr7E2ve2zEfDAoIalfZ6qguuJN75FPe2LA+hdEkE5rB+YfbApQRV2DhAFMa5P5rvi01c09NSm1LF9cY11OWS8XLJGM/eVnZG8LYK0J2jNfHrqHHg987NveQ2AqXv9O80GvJ8Hs0katefBm95qkukVycsL6YH3xcQlZeOjP7ut/9e/mg1Jf1h53qrP5JVnmTz+WcyfLDRgQu0AENKu2h/DINTljYtyMv1VsfUX21a9WOS+TERMBXM367y8+Li8qALBxLbA7XSLr7hukFuljd5jWYj3b19KWz/q2vSahDNddnv1bqyYk77rBFFJ+BbTVS15Lu7m8Km/l1J9cyiIFPWdUnxYeKp+vPmPuWycG9gG/+b71hSGKRdJeYstpW53ihgtaIis+KCTjprZOXFhzDgi/bF1D2iUZwhqVi6hMJMx1RsbhxJ8xD7bsoBzaLwHmHWS6tN/0XquYbO2QzzjE/ZGnmG2pLN0oqENy0FHAP342W9SFoT1ttd6tjSHs5No66R3Z2eJnXWT+3bdHHsQbXxBry5q7TQteQ5N1/9nDIvZsk9jFKz+XvqkngKa0xVAv6jtdE4QHHGm6jIcH0mIjvWPWPSafS0+R5e1hBe/9jIg9PRzEaWjB57T6ihPjEjcLyJjox57m7qLuniOxkgcdZR
YrD0MiAbwzvdz2DWKxHdgH3PA9c56+veKGdUJ3n7llfuv1PZ0qBuzdIfUwr/iMWOUA06KrkrUSCdNivOZuSXp49RkRSrplPxfX9nMPyXnx3ENmv/90TXqUI20xN1EdlN/8p/l3rS1eeWoy2yW5Zxvwo0uBh/8SUChpM9ljc5VFUoVjAOZ+dNp3g71myMPwgFxzev+ee1gs6//7HWcx09ZlxuIO90t8rL0cjdN6MxbJNvMa7t+TtXnZbQQ8/hYLc1CLZFpIHnyM6T169Rn5HBmS81I/Fx+5XV4Q/vIzbV0u45krxkezkwLHhl3Oa49+T47LaGJlCIVkPqitc6gDGYCZC4C5B7ov29gsNzFL/FwYARNk3hwEUVOLiGh7rKmfyJo+OzszvGgGSduK1UNUTW7tkPp+Cw+T4+RkQfNfifM+mb0I+IdPAuf+q2TFRybGt9qZC+VzYzpofnzUdKce/SbTgjbvIPuS4jJWQ0fu2mxmpAOSQfn7K+Rvu6Xj3EvFmnfKWfL9sdvNmFRAMudVjGFS24+vOy4dO5kQq11Dk3mt9O0xd4uy3CmXtm5Vbm41R/hRBeh1kjXiggfkgaMsXTs3SumOae1muSy38z6RkBfNMz8mGf9RmDEXQEL23fXfE5enHT2+U0d/wOm1UM+6KLuA/vs/b1pzAHN/bnk57eI2xCr32x+ax/H0C+T4nfd5YLb2Qr32UfNvNQpR2MSQvr3WMjbKuvTnn8jnU/eJxfQ3/ylu28duFzd/LsPRTYxZhfVH/j17nlFb+y88JsLgkVuDJazov6v4X8XwgFh+7WLu1mu1sA9bPKg6rr/9gXgBXtRCHdwGKVB0zwEOPsr8vuXl7NJBd/yPVZz37jZfJFo6zBfsXg+LpK+l1vab7goOa5Gc1m56KNSgD3f9Glj9e6t3Rd/HKoNanaNOJY8AGZXq518DNjxvTpuatLYVxPrqFntsb6MEyWf9FhKWRAKeCkq54PS3yLA60u9cDCvg9JNbT85wa6epRdzEuiWkuVWmWYbNKhErZn1D9rREAjj7IrlRZNyiMfQ3kQAOXpZ7CYgwNxw/ka8s6xvTomTvDjn/mtvEOptKSazg9NnOyysr5cA+q+tzzzbn+evqRVwtP0OSbJ68JzvOSY+/1V3brV3Av6ySthcfIets7ZQYtp2bzMxu9cBTfe6eI8vW1IiYUhnyp54FvKCJnw99RazRiYQ8jCfGZJu6Zkp5EUDWkdmnbvs2hnOlrl62bWCfdXpzq0xXGdjKmjI+Zr7gWaxTaUteS4cprJe9QSw1sxYCsxZZRcjRbwYevc1MzFCopIqmFumDcusft8LM7Nddw3/+ibxkfOCycOfr+Kh3sea9282/b/ovc91LjnOePwgqtAIwt89O/36zNBhgtZS//BRw7Fu812G3ZgFyT1TZ8727nffT6t9L3KyeFKO3qfbH2kckjAPIju1VdM8FTjlTXhYOOFKO3RN3yT7UR8AC5KWgcZqIs5EhKRUEyPXRNt0UfeMjYmVzus8YhvelMDzo/gLgd85MTsj+00NhlHdFjVylzkd94AW9nz/9itz/zvoXMZC4oV7Y/ng18CktNtgvRrKCoEWynEgkHMRm3BZJOzGc+PrFmUxmuxLLAtu+q6m13lxDHwanBRLxvHXG+eaq3EH9++SBod7K1ZCN9phLe4yfEpIb1prxUnbR+c5/1r5o+6WtS4SGHd2KZy8E3dCcHhkqPV2JxzV3y+fkhOk+6kq7ruvqgU98D/inr1v71tJhWlRVv5NJWadKXvuf/yMxcso9dsTrHTfFQlzvSE6C6txLxYqoVjI0IBaTqz8vYgIwrTlTU8AL6RJC+ohIdfXASacDi9Iuan1AAq/KA4B4VHQOXAq8/UPO/X3yHsmEjzrUnX6eqxcK/QVVL001PgrAkILZzzwQYh0pSbJSvOdTcvzfeI5kJqvwBz0pBbAmF22zudmNlMQo6oJe/1uJHyVehgecR30CJExk5ybnrG692oEKs5ic0OIWNVq7gA99WcItFPPT1866J51drk/ek7aGa0N7nvh2M2RK7ZvVv3e+JxmGd1yt0wAF+rJOrP69lGa68l9F2GWGKu2Q+3VTCwDD+sKho58/I4PiBn/wFvfHoB4jPDlh2x7D4U8KSVKKhInN85q1a5bcjO2JB37nfWjRErC/eS12HoaE5SP7Zz/rk3ezWYTdn21dDoldMd6s6htNMfjobcBtaTeQ21CMKrZKoUbnGRsxLS1LTzF/P+pNwCHHmN/t+6WlQ8TkiacDZ39CainqJXAsaCZ3tU9UKaLXnpN9qyx49Y1WF2JtrXNIyWEnyuecA6zXml5nU1lXO3us2xamHmwUZi3IntbRI9uuhO5QnwxfaaSkjh5gipahXlPEvfE97us55FiphXrBV61eByecYntneCQQ/fYH3u15oT/0a2rlQa6Eg92CNtQHbFwHrLlLXJpBM3d1d+NxK8xQl2NPk/NQuXDtQku3ot/3O/l9fFS29/99UmIUH9ZqluoJHWpZdS8eHjArDxx2EvCJ/wtcepVYDQ1DxJMTD//F/FtZLDe9iKz7w1vOBT7wRflbP8cXHCrW16E+cdUDsp8PPd6cZ/t6c1+esBJ43Qnmb8oq7TpWu5FdBSMoTi8fhiEvjHu3S5jJ+udkvppaM2ZTveg+cadze04xw2rIUCf0WquA1brpZJGMcmve/JLsw7497i8URYZCshxJuH4Js6CVaW1iTYijlmaY9WawXWHFEpKuQs5nG0LrA5cFoghze7HyUG0E6Liy3OmjxAQd09ueSY0EcOgJZsLC0W/y70/PfODkd0lR76PfZLoQ7db5kUEzZlEJSWUVGdgnrmh1I+6eYxXgbrusZz7wT18D/uES6/Tj35o976zF1v44veS5WqMj8NbzxSr2/s+LGF/xfnOdKqvdbtUxDNlHA/vMuMKOGd7XfTIpAnn6bOe4UcXydzocT2SfK0tPkVqDgMTf6WWZbr3WWRj17zPjdBW6UJocTxdNN+RlRllTFYN91hhdJf737fSucam7jJe/M/t39aK03xY/Zy/A/rebxCq/+SVkTrZHbzettGofjA6Z57AS7S8+bsYIz1poHqtjT5PPx+903ga9rM/ebSKW1BCiy94g7v7uuXIOKeGtn7M1NVJnFTDdt8veIKWq7C+M09qBpada66YuOkza27XZah1VRPGcbH5JYrSdyjm5JVQla8xQg1mL5NMeJzrQKxZ69UJw4deBMz4sf2fGG3dg0CYk9SL8jnUlI2zzb38gpae2vhJdeOcZCsmyJOH4py9+Ra4DWTdt86h4Ia+RVyyeeJd11NgyXC0Zr3mIl+yeaz5svQi86jgEfcjEA8X02dZtidt74lQeyu7CdKO+MT2SS5rGZhGC514KnPdvuYU5dM/J3pXKOqLOs8Zp5nm/f5c5FOK8g63LuRV8BmT77ddO50yrVRKQh5vvuW6b5pWQ40fbdLGKzVok4QG6NVQJHBUDqbj685KAcvcNZhxl47Tgno1EQgTs3IMkGWrFeTL99e8Clr/DtNDp6PeG+kaxgL3+nQAS5ugpm
[base64-encoded image/png display_data outputs (matplotlib figures) from the notebook cells omitted]
mBb35WUSFvXWe2eA20tBQAA4EIQbOu7H7aUL0No39PuagAAAGqMYFvfba64G0KgvbUAAABcgBoF2xkzZig+Pl6hoaFKTEzUhg0bzjh24MCBcjgclY7BgwdLkoqLi/XYY4+pW7duioiIUIsWLTRq1Cjt37+/Zp8I589jGQK7IQAAAN9W7WA7b948TZw4UVOnTtWmTZvUo0cPJScnKzs7u8rxixYt0oEDB9xHenq6AgMDNXToUEnS8ePHtWnTJj355JPatGmTFi1apIyMDN1yyy0X9slwbt9vlgpypYgoliEAAACf5zDGmOq8IDExUX369NELL7wgSXK5XIqLi9PDDz+sxx9//Jyvnz59uqZMmaIDBw4oIiKiyjGff/65EhIStHv3brVu3fqc58zLy1NUVJRyc3MVGRlZnY9Tv82bJn26TOp7k3TnI3ZXAwAA/Fhd5LVqzdgWFRVp48aNSkpKKj9BQICSkpK0fv368zpHamqqhg8ffsZQK0m5ublyOByKjo6u8vnCwkLl5eV5HKimklPSV2XLEPrbWwsAAMBFUK1gm5OTo5KSEsXExHj0x8TEKDMz85yv37Bhg9LT03X//fefcczJkyf12GOP6a677jpjmk9JSVFUVJT7iIuLq87HgCR984VUkCc1iJYu7WF3NQAAABesTndFSE1NVbdu3ZSQkFDl88XFxbrzzjtljNHMmTPPeJ7JkycrNzfXfezdu7e2SvZPp4qlJS9Z7d5J7IYAAAD8QrW+Zqpp06YKDAxUVlaWR39WVpZiY2PP+tqCggLNnTtXTz/9dJXPl4Xa3bt364MPPjjr2gun0ymn01md0lHRh4uk7D3WbO31I+2uBgAA4KKo1oxtSEiIevXqpbS0NHefy+VSWlqa+vbte9bXzp8/X4WFhRo5snKQKgu13333nVatWqUmTZpUpyxUx9GD0vv/s9o3j5PCGthbDwAAwEVSrRlbSZo4caJGjx6t3r17KyEhQdOnT1dBQYHGjBkjSRo1apRatmyplJQUj9elpqZqyJAhlUJrcXGxfv7zn2vTpk169913VVJS4l6v27hxY4WEhNT0s6Eqb8+Sik5KbbtYyxAAAAD8RLWD7bBhw3Tw4EFNmTJFmZmZ6tmzp5YvX+6+oWzPnj0KCPCcCM7IyNC6deu0YsWKSufbt2+flixZIknq2bOnx3OrV6/WwIEDq1siziRjo/VNY44A6Y5fSgF88RwAAPAf1d7H1huxj+15OFUs/XWclL1XumaIdPsEuysCAAD1iNftYwsftnahFWobREs33Gt3NQAAABcdwbY+OJItrXjVat/CDWMAAMA/EWzrA/cNY12l3j+zuxoAAIBaQbD1dxkbpS0flt4w9rDkcNhdEQAAQK0g2PqzU0XSwuetdr9bpZaX2lsPAABALSLY+rM1C6WDP0oNG0k3jLa7GgAAgFpFsPVXR7Klla9Zbb5hDAAA1AMEW3/lccMY3zAGAAD8H8HWH33zhXXDWECA9PNfcsMYAACoFwi2/uZUkbToBavdb4jUop2t5QAAANQVgq2/WbOg/IaxQaPsrgYAAKDOEGz9yZEsaUXpDWO3/IIbxgAAQL1CsPUni2dJxYVSu25Sr+vsrgYAAKBOEWz9xTefS1s/sm4Y4xvGAABAPUSw9QfcMAYAAECw9QurF0gH90kNG3PDGAAAqLcItr7ucFb5N4zdyg1jAACg/iLY+rq3Z1o3jF3aXbryp3ZXAwAAYBuCrS/bvkHauo4bxgAAAESw9V2niqRFM6z2NbdJzdvaWw8AAIDNCLa+avV8KYcbxgAAAMoQbH3R4Uxp5etW+9ZfSKER9tYDAADgBQi2vmgxN4wBAACcjmBbEy6XlLnbnvfevkH66uPSG8Z+yQ1jAAAApYLsLsAnffelNOsxKb6z1Hew1HOAFBJa++9bXOEbxvrfLjWPr/33BAAA8BEE25o4sFMKCJR2bbOOt16UeidZIbc2v8529ZtSzn4psomUfE/tvQ8AAIAPItjWxMCfW2tbN7wvfbpMOnRAWve2dbTpVDqLO1Byhl289zycKa3ihjEAAIAzcRhjjN1FXKi8vDxFRUUpNzdXkZGRdfvmLpe1NGH9Umvtq6vE6g8Nl3pdZ4Xclu0v/H1Sp0jpn0jte0j/9zfW1gIAAJ9SF3mNGdsLFRAgdexlHflHpA0rpE+XWksGPn7HOlp3tALuFdfWbBZ322dWqA0IlG7nG8YAAACqwoxtbXC5pB+2WLO4W9dJJaesfmeYdGXpLG5ch/M7V3GR9Nz9VlAeONRahgAAAOBjmLH1VQEBUocrrOPYUenzFVbIPbhPWv+udbS6TOp7o7VWNzT8zOcqu2EsihvGAAAAzoYZ27pijPR9xVncYqs/JNQKt30HS3GXeS4zOHRA+stYa9b2nt9JV15rT+0AAAAXiBlbf+JwSB16Wsexo9LnK0tncX+0dlb4dJl1k1nfwVKvn1q7Hix+0Qq17XtKVwy0s3oAAACvx4ytnYyRfthqBdwtH3nO4l52hZS+3rphbNJLUmwbe2sFAAC4AMzY+juHw9q+q30P6fbc0lncZVL2HivUStKA2wm1AAAA54Fg6y0ioqwvfhhwh7Qj3VqacKpIup4bxgAAAM4HwdbbOBzSpd2sAwAAAOctwO4CAAAAgIuBYAsAAAC/QLAFAACAXyDYAgAAwC8QbAEAAOAXCLYAAADwCwRbAAAA+AWCLQAAAPwCwRYAAAB+gWALAAAAv0CwBQAAgF8g2AIAAMAvEGwBAADgFwi2AAAA8AtBdhdwMRhjJEl5eXk2VwIAAICqlOW0stxWG/wi2Obn50uS4uLibK4EAAAAZ5Ofn6+oqKhaObfD1GZsriMul0v79+9Xw4YN5XA4lJeXp7i4OO3du1eRkZF2l4daxLWuH7jO9QfXun7gOtcfFa91w4YNlZ+frxYtWiggoHZWw/rFjG1AQIBatWpVqT8yMpL/YOoJrnX9wHWuP7jW9QPXuf4ou9a1NVNbhpvHAAAA4BcItgAAAPALfhlsnU6npk6dKqfTaXcpqGVc6/qB61x/cK3rB65z/VHX19ovbh4DAAAA/HLGFgAAAPUPwRYAAAB+gWALAAAAv0CwBQAAgF/wy2A7Y8YMxcfHKzQ0VImJidqwYYPdJeEsPvzwQ918881q0aKFHA6HFi9e7PG8MUZTpkxR8+bNFRYWpqSkJH333XceYw4fPqwRI0YoMjJS0dHRGjt2rI4dO+YxZuvWrbrmmmsUGhqquLg4Pffcc7X90VBBSkqK+vTpo4YNG+qSSy7RkCFDlJGR4THm5MmTGj9+vJo0aaIGDRrojjvuUFZWlseYPXv2aPDgwQoPD9cll1yiSZMm6dSpUx5j1qxZoyuvvFJOp1Pt27fXnDlzavvjodTMmTPVvXt392bsffv21Xvvved+nmvsn5599lk5HA498sgj7j6utX/4wx/+IIfD4XF06tTJ/bzXXWfjZ+bOnWtCQkLMyy+/bL7++mvzwAMPmOjoaJOVlWV3aTiDZcuWmd/97ndm0aJFRpJ56623PJ5/9tlnTVRUlFm8eLHZsmWL
ueWWW0zbtm3NiRMn3GMGDRpkevToYT799FPz0Ucfmfbt25u77rrL/Xxubq6JiYkxI0aMMOnp6eaNN94wYWFh5t///nddfcx6Lzk52cyePdukp6ebzZs3mxtvvNG0bt3aHDt2zD3mwQcfNHFxcSYtLc188cUX5qqrrjJXX321+/lTp06Zrl27mqSkJPPll1+aZcuWmaZNm5rJkye7x+zYscOEh4ebiRMnmm3btpnnn3/eBAYGmuXLl9fp562vlixZYpYuXWq+/fZbk5GRYZ544gkTHBxs0tPTjTFcY3+0YcMGEx8fb7p3725+9atfufu51v5h6tSppkuXLubAgQPu4+DBg+7nve06+12wTUhIMOPHj3f/XFJSYlq0aGFSUlJsrArn6/Rg63K5TGxsrPnrX//q7jt69KhxOp3mjTfeMMYYs23bNiPJfP755+4x7733nnE4HGbfvn3GGGNefPFF06hRI1NYWOge89hjj5mOHTvW8ifCmWRnZxtJZu3atcYY67oGBweb+fPnu8ds377dSDLr1683xlj/JyggIMBkZma6x8ycOdNERka6r+1vf/tb06VLF4/3GjZsmElOTq7tj4QzaNSokfl//+//cY39UH5+vunQoYNZuXKlGTBggDvYcq39x9SpU02PHj2qfM4br7NfLUUoKirSxo0blZSU5O4LCAhQUlKS1q9fb2NlqKmdO3cqMzPT45pGRUUpMTHRfU3Xr1+v6Oho9e7d2z0mKSlJAQEB+uyzz9xj+vfvr5CQEPeY5ORkZWRk6MiRI3X0aVBRbm6uJKlx48aSpI0bN6q4uNjjWnfq1EmtW7f2uNbdunVTTEyMe0xycrLy8vL09ddfu8dUPEfZGP4OqHslJSWaO3euCgoK1LdvX66xHxo/frwGDx5c6Xpwrf3Ld999pxYtWqhdu3YaMWKE9uzZI8k7r7NfBducnByVlJR4/OFJUkxMjDIzM22qChei7Lqd7ZpmZmbqkksu8Xg+KChIjRs39hhT1Tkqvgfqjsvl0iOPPKKf/OQn6tq1qyTrOoSEhCg6Otpj7OnX+lzX8Uxj8vLydOLEidr4ODjNV199pQYNGsjpdOrBBx/UW2+9pc6dO3ON/czcuXO1adMmpaSkVHqOa+0/EhMTNWfOHC1fvlwzZ87Uzp07dc011yg/P98rr3NQtUYDwEUwfvx4paena926dXaXglrQsWNHbd68Wbm5uVqwYIFGjx6ttWvX2l0WLqK9e/fqV7/6lVauXKnQ0FC7y0EtuuGGG9zt7t27KzExUW3atNGbb76psLAwGyurml/N2DZt2lSBgYGV7sbLyspSbGysTVXhQpRdt7Nd09jYWGVnZ3s8f+rUKR0+fNhjTFXnqPgeqBsTJkzQu+++q9WrV6tVq1bu/tjYWBUVFeno0aMe40+/1ue6jmcaExkZ6ZV/CfujkJAQtW/fXr169VJKSop69Oihf/7zn1xjP7Jx40ZlZ2fryiuvVFBQkIKCgrR27Vr961//UlBQkGJiYrjWfio6OlqXXXaZvv/+e6/8b9qvgm1ISIh69eqltLQ0d5/L5VJaWpr69u1rY2WoqbZt2yo2Ntbjmubl5emzzz5zX9O+ffvq6NGj2rhxo3vMBx98IJfLpcTERPeYDz/8UMXFxe4xK1euVMeOHdWoUaM6+jT1mzFGEyZM0FtvvaUPPvhAbdu29Xi+V69eCg4O9rjWGRkZ2rNnj8e1/uqrrzz+j8zKlSsVGRmpzp07u8dUPEfZGP4OsI/L5VJhYSHX2I9cd911+uqrr7R582b30bt3b40YMcLd5lr7p2PHjumHH35Q8+bNvfO/6Wrfbubl5s6da5xOp5kzZ47Ztm2bGTdunImOjva4Gw/eJT8/33z55Zfmyy+/NJLMtGnTzJdffml2795tjLG2+4qOjjZvv/222bp1q7n11lur3O7riiuuMJ999plZt26d6dChg8d2X0ePHjUxMTHmnnvuMenp6Wbu3LkmPDyc7b7q0EMPPWSioqLMmjVrPLaNOX78uHvMgw8+aFq3bm0++OAD88UXX5i+ffuavn37up8v2zbm+uuvN5s3bzbLly83zZo1q3LbmEmTJpnt27ebGTNmsD1QHXr88cfN2rVrzc6dO83WrVvN448/bhwOh1mxYoUxhmvszyruimAM19pfPProo2bNmjVm586d5uOPPzZJSUmmadOmJjs72xjjfdfZ74KtMcY8//zzpnXr1iYkJMQkJCSYTz/91O6ScBarV682kiodo0ePNsZYW349+eSTJiYmxjidTnPdddeZjIwMj3McOnTI3HXXXaZBgwYmMjLSjBkzxuTn53uM2bJli+nXr59xOp2mZcuW5tlnn62rjwhjqrzGkszs2bPdY06cOGH+7//+zzRq1MiEh4eb2267zRw4cMDjPLt27TI33HCDCQsLM02bNjWPPvqoKS4u9hizevVq07NnTxMSEmLatWvn8R6oXffdd59p06aNCQkJMc2aNTPXXXedO9QawzX2Z6cHW661fxg2bJhp3ry5CQkJMS1btjTDhg0z33//vft5b7vODmOMqf48LwAAAOBd/GqNLQAAAOovgi0AAAD8AsEWAAAAfoFgCwAAAL9AsAUAAIBfINgCAADALxBsAQAA4BcItgAAAPALBFsAAAD4BYItAAAA/ALBFgAAAH6BYAsAAAC/8P8BOdzS8nmkiaIAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAq0AAAHDCAYAAAAOSiMcAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHfElEQVR4nO3deXyU1d3///dkmwTIwpKEBMKOrAKRXQREEEREcUVEUUGrNbR6u1Rpv3Xp/WtjraW3VqVqFbUuYVFQEdHILiIIEgXBoGUJQhLWrEDW8/vjIkOGJJAJSa5J5vV8PK7HnLnmXJnP5HJ5czhzjsMYYwQAAAB4MT+7CwAAAADOhdAKAAAAr0doBQAAgNcjtAIAAMDrEVoBAADg9QitAAAA8HqEVgAAAHg9QisAAAC8HqEVAAAAXo/QCgAAAK9HaAXgs/bs2SOHw6E33njDlvd/44035HA4tGfPnnp/7yeffFIOh6Pe3xcAaorQCgB17C9/+YsWL15sdxm6/vrrdeWVV9pdBgDUCKEVAOpYVaH1tttu04kTJ9S+ffs6r6GoqEjJycmaMGFCnb8XANSFALsLAABf5e/vL39//3p5r7Vr1yo3N5fQCqDBYqQVgNfYv3+/pk+frujoaDmdTvXq1Uuvv/66JCkzM1MBAQF66qmnKlyXmpoqh8OhF154QZJ09OhRPfzww7rwwgvVrFkzhYWFafz48fruu+/OWcOll16qSy+9tML5O+64Qx06dHA79+yzz+riiy9Wy5YtFRISov79+2vhwoVufRwOh/Lz8/Xmm2/K4XDI4XDojjvukFT1nNaXXnpJvXr1ktPpVGxsrBISEpSVlVWhzt69e2v79u0aNWqUmjRpojZt2uiZZ56p9HN98skn6tmzZ4XPUF5xcbH+93//V507d5bT6VSHDh30+9//XgUFBW79Nm3apHHjxqlVq1YKCQlRx44dNX36dLc+SUlJ6t+/v0JDQxUWFqYLL7xQzz33XJXvDQDnwkgrAK+QmZmpIUOGyOFwaObMmYqMjNSnn36qGTNmKCcnRw888IBGjhyp+fPn64knnnC7dt68efL399eNN94oSdq1a5cWL16sG2+8UR07dlRmZqZefvlljRw5Utu3b1dsbGyt1Pzcc8/p6quv1tSpU1VYWKikpCTdeOONWrJkiWtE8z//+Y/uuusuDRo0SL/61a8kSZ07d67yZz755JN66qmnNGbMGP36179Wamqq5syZo2+++Ubr1q1TYGCgq++xY8d0xRVX6LrrrtNNN92khQsX6tFHH9WFF16o8ePHu/3cpUuX6qqrrjrr57nrrrv05ptv6oYbbtBDDz2kDRs2KDExUTt27NCiRYskSQcPHtTYsWMVGRmpxx57TBEREdqzZ48++OAD189JTk7WlClTNHr0aP31r3+VJO3YsUPr1q3T/fff78FvGADKMQDgBWbMmGFiYmLM4cOH3c7ffPPNJjw83Bw/fty8/PLLRpLZunWrW5+ePXuayy67zPX85MmTpqSkxK3P7t27jdPpNH/605/czkkyc+fOdZ0bOXKkGTlyZIX6br/9dtO+fXu3c8ePH3d7XlhYaHr37u1WizHGNG3a1Nx+++0VfubcuXONJLN7925jjDEHDx40QUFBZuzYsW71v/DCC0aSef31193qlGTeeust17mCggLTunVrc/3117u9z65du4wks3LlSte5J554wpT/X0BKSoqRZO666y63ax9++GEjyaxYscIYY8yiRYuMJPPNN99U+Dxl7r//fhMWFmaKi4ur7AMAnmJ6AADbGWP0/vvva+LEiTLG6PDhw65j3Lhxys7O1rfffqvrrrtOAQEBmjdvnuvabdu2afv27Zo8ebLrnNPplJ+f9Z+3kpISHTlyRM2aNVO3bt307bff1lrdISEhrvaxY8eUnZ2t4cOH1/g9vvjiCxUWFuqBBx5w1S9Jd999t8LCwvTJJ5+49W/WrJluvfVW1/OgoCANGjRIu3btcuv3ySefKDw8XJdcckmV77106VJJ0oMPPuh2/qGHHnL9DEmKiIiQJC1ZskRFRUWV/qyIiAjl5+crOTn5bB8XADxCaAVgu0OHDikrK0uvvPKKIiMj3Y4777xTkvXX0q1atdLo0aM1f/5817Xz5s1TQECArrvuOte50tJS/eMf/1DXrl3ldDrVqlUrRUZG6vvvv1d2dnat1b1kyRINGTJEwcHBatGihSIjIzVnzpwav8fevXslSd26dXM7HxQUpE6dOrleL9O2bdsKa602b95cx44dczv3ySefaOzYsQoIqHpG2N69e+Xn56cuXbq4nW/durUiIiJc7z1y5Ehdf/31euqpp9SqVStdc801mjt3rtu81/vuu08XXHCBxo8fr7Zt22r69OlatmxZNX8LAFA5QisA25WWlkqSbr31ViUnJ1d6DBs2TJJ08803a+fOnUpJSZEkzZ8/X6NHj1arVq1cP+8vf/mLHnzwQY0YMUJvv/22PvvsMyUnJ6tXr16u96pKVQvul5SUuD1fu3atrr76agUHB+ull17S0qVLlZycrFtuuUXGmJr+KjxS1coD5d//+PHjWrVqVbXXZz3XhgMOh0MLFy7U+vXrNXPmTNeX5/r376+8vDxJUlRUlFJSUvTRRx/p6quv1sqVKzV+/Hjdfvvt1fxkAFARX8QCYLvIyEiFhoaqpKREY8aMOWvfSZMm6Z577nFNEdi5c6dmzZrl1mfhwoUaNWqUXnvtNbfzWVlZbuG2Ms2bN6/w1+uSKoxyvv/++woODtZnn30mp9PpOj937twK11Z356my9VpTU1PVqVMn1/nCwkLt3r37nL+byqxYsUIFBQUVvphV2XuXlpbqp59+Uo8ePVznMzMzlZWVVWEt2SFDhmjIkCH685//rHfffVdTp05VUlKS7rrrLknW6PDEiRM1ceJElZaW6r777tPLL7+sP/7xjxVGcwGgOhhpBWA7f39/XX/99Xr//fe1bdu2Cq8fOnTI1Y6IiNC4ceM0f/58JSUlKSgoSJMmTarw884c7VywYIH2799/zlo6d+6sH3/80e09v/vuO61bt67CezgcDrcR2D179lS6iUDTpk0rLFlVmTFjxigoKEjPP/+8W/2vvfaasrOza7TG6tKlSzVgwABFR0eftV/ZSOz//d//uZ2fPXu2JLne+9ixYxV+t/369ZMk1xSBI0eOuL3u5+enPn36uPUBAE8x0grAKzz99NNauXKlBg8erLvvvls9e/bU0aNH9e233+qLL77Q0aNHXX0nT56sW2+9VS+99JLGjRvn+nJQmauuukp/+tOfdOedd+riiy/W1q1b9c4777iNXlZl+vTpmj17tsaNG6cZM2bo4MGD+te//qVevXopJyfH1W/ChAmaPXu2rrjiCt1yyy06ePCgXnzxRXXp0kXff/+928/s37+/vvjiC82ePVuxsbHq2LGjBg8eXOG9IyMjNWvWL
D311FO64oordPXVVys1NVUvvfSSBg4c6Palq+paunSpa17w2fTt21e33367XnnlFWVlZWnkyJHauHGj3nzzTU2aNEmjRo2SJL355pt66aWXdO2116pz587Kzc3Vq6++qrCwMFfwveuuu3T06FFddtllatu2rfbu3at//vOf6tevn9soLgB4xMaVCwDATWZmpklISDBxcXEmMDDQtG7d2owePdq88sorbv1ycnJMSEiIkWTefvvtCj/n5MmT5qGHHjIxMTEmJCTEDBs2zKxfv77CclaVLXlljDFvv/226dSpkwkKCjL9+vUzn332WaVLXr322muma9euxul0mu7du5u5c+dWWErKGGN+/PFHM2LECFfNZctfnbnkVZkXXnjBdO/e3QQGBpro6Gjz61//2hw7dsytz8iRI02vXr0qfPbydW7bts1IMhs3bqzQr7I6i4qKzFNPPWU6duxoAgMDTVxcnJk1a5Y5efKkq8+3335rpkyZYtq1a2ecTqeJiooyV111ldm0aZOrz8KFC83YsWNNVFSUCQoKMu3atTP33HOPSU9Pr1AHAFSXw5h6+sYAAKBePfPMM5o9e7bS09OrPa8WALwVc1oBoJHq0KGD/vGPfxBYATQKjLQCAADA6zHSCgAAAK9HaAUAAIDXI7QCAADA6zWIdVpLS0t14MABhYaG8oUCAAAAL2SMUW5urmJjY+XnV/vjog0itB44cEBxcXF2lwEAAIBz2Ldvn9q2bVvrP7dBhNbQ0FBJ1i8hLCzM5moAAABwppycHMXFxblyW21rEKG1bEpAWFgYoRUAAMCL1dVUTr6IBQAAAK9HaAUAAIDXI7QCAADA6xFaAQAA4PUIrQAAAPB6hFYAAAB4PUIrAAAAvB6hFQAAAF6P0AoAAACvR2gFAACA1yO0AgAAwOsRWgEAAOD1CK0AAADweoTWMxkjZaZJW1baXQkAAABOCbC7AK9zNEN6errk5y/1GCQFN7W7IgAAAJ/HSOuZWsZIkW2l0hJp57d2VwMAAAARWivXY5D1uH2DvXUAAABAkoehNTExUQMHDlRoaKiioqI0adIkpaamVvv6pKQkORwOTZo0ydM661fPwdbj9o3WHFcAAADYyqPQunr1aiUkJOjrr79WcnKyioqKNHbsWOXn55/z2j179ujhhx/W8OHDa1xsvel8oRQULOUelX752e5qAAAAfJ5HX8RatmyZ2/M33nhDUVFR2rx5s0aMGFHldSUlJZo6daqeeuoprV27VllZWTUqtt4EBEnd+ktb10k7NkhxXe2uCAAAwKed15zW7OxsSVKLFi3O2u9Pf/qToqKiNGPGjPN5u/pVNq91x0Z76wAAAEDNl7wqLS3VAw88oGHDhql3795V9vvyyy/12muvKSUlpdo/u6CgQAUFBa7nOTk5NS2z5spC694dUl621Cy8/msAAACApPMYaU1ISNC2bduUlJRUZZ/c3FzddtttevXVV9WqVatq/+zExESFh4e7jri4uJqWWXMRkVJsZ+uLWKmb6v/9AQAA4OIwxvOvx8+cOVMffvih1qxZo44dO1bZLyUlRfHx8fL393edKy0tlST5+fkpNTVVnTt3rnBdZSOtcXFxys7OVlhYmKfl1twnr0lfvCddNEq67Q/1974AAAANTE5OjsLDw+ssr3k0PcAYo9/85jdatGiRVq1addbAKkndu3fX1q1b3c79v//3/5Sbm6vnnnuuyhFUp9Mpp9PpSWl1o8cgK7T+uMnabMDP/9zXAAAAoNZ5FFoTEhL07rvv6sMPP1RoaKgyMjIkSeHh4QoJCZEkTZs2TW3atFFiYqKCg4MrzHeNiIiQpLPOg/Ua7XtKTUKl47nS3h+ljr3srggAAMAneTSndc6cOcrOztall16qmJgY1zFv3jxXn7S0NKWnp9d6obbw95e6DbDa7I4FAABgG4+nB5zLqlWrzvr6G2+84clb2q/nYGnLSmu91gnT7a4GAADAJ53XOq0+ofsAyeGQ9v9XyjpsdzUAAAA+idB6Ls0ipHbdrPaPbDQAAABgB0JrdfQcYj1uJ7QCAADYgdBaHWW7Y+3cLBUX2lsLAACADyK0VkebLlJoC6nghLRrm93VAAAA+BxCa3X4+Uk9BlrtHUwRAAAAqG+E1urqMdh6ZL1WAACAekdora5uF1nbuB7cJx0+YHc1AAAAPoXQWl0hzaROp7aeZbQVAACgXhFaPVG2igDzWgEAAOoVodUTZeu1/pwiFZ60tRQAAABfQmj1RHQ7qXm0VFwk/ZRidzUAAAA+g9DqCYdD6nlqFYEdzGsFAACoL4RWT5XNa92+QTLG3loAAAB8BKHVU137SYFB0rGDUsZeu6sBAADwCYRWTwUFS136WW2mCAAAANQLQmtNlJ8iAAAAgDpHaK2JstC6e5t0Is/eWgAAAHwAobUmWsVKUe2k0lIpdbPd1QAAADR6hNaa6snuWAAAAPWF0FpTrvVaN1ojrgAAAKgzhNaa6thbcjaRco9Jv/xkdzUAAACNGqG1pgICpW4XWW2mCAAAANQpQuv56HFqigBLXwEAANQpQuv56DHQetyXak0TAAAAQJ0gtJ6P8FZSmy6SMdKPm+yuBgAAoNEitJ4v19JXTBEAAACoK4TW81U2r/XHTVJJib21AAAANFKE1vPVvrvUJNTaznXPdrurAQAAaJQIrefLz1/qfuoLWUwRAAAAqBOE1tpQfncsAAAA1DpCa23oPkByOKQDu6RjB+2uBgAAoNEhtNaGpuFS+x5Wm9FWAACAWkdorS2uKQLMawUAAKhthNba0uPUeq07t0jFhfbWAgAA0MgQWmtLmy5SWEup8KT03612VwMAANCoEFpri8NxerR1O1MEAAAAahOhtTaxpSsAAECdILTWpgsukvwDpEP7pUO/2F0NAABAo0ForU3BTaVOF1rt7Sx9BQAAUFsIrbWNpa8AAABqHaG1tpV9Gevn76WCE/bWAgAA0EgQWmtbVJzUorVUUiT9tMXuagAAABoFQmttczhOTxFgXisAAECtILTWhfLzWo2xtxYAAIBGgNBaFzr3lQKdUtYhKX233dUAAAA0eITWuhDklLr2s9rsjgUAAHDeCK11pWwVgR3MawUAADhfHoXWxMREDRw4UKGhoYqKitKkSZOUmpp61mteffVVDR8+XM2bN1fz5s01ZswYbdzoA0GubF7rnh+k47n21gIAANDAeRRaV69erYSEBH399ddKTk5WUVGRxo4dq/z8/CqvWbVqlaZMmaKVK1dq/fr1iouL09ixY7V///7zLt6rtWgtRbeXSkul1E12VwMAANCgOYyp+dfbDx06pKioKK1evVojRoyo1jUlJSVq3ry5XnjhBU2bNq1a1+Tk5Cg8PFzZ2dkKCwurabn176OXpZULpAGXS1MftbsaAACAOlPXee285rRmZ2dLklq0aFHta44fP66ioiKPrmmwyqYI/PiNNeIKAACAGgmo6YWlpaV64IEHNGzYMPXu3bva1z366KOKjY3VmDFjquxTUFCggoIC1/OcnJyalmmvjr2l4CZSXpa0b6fUvrvdFQEAADRI
NR5pTUhI0LZt25SUlFTta55++mklJSVp0aJFCg4OrrJfYmKiwsPDXUdcXFxNy7SXf4DUbYDV3sHSVwAAADVVo9A6c+ZMLVmyRCtXrlTbtm2rdc2zzz6rp59+Wp9//rn69Olz1r6zZs1Sdna269i3b19NyvQOZUtfsV4rAABAjXk0PcAYo9/85jdatGiRVq1apY4dO1brumeeeUZ//vOf9dlnn2nAgAHn7O90OuV0Oj0pzXuVhdZ9O6XcY1Joc3vrAQAAaIA8GmlNSEjQ22+/rXfffVehoaHKyMhQRkaGTpw44eozbdo0zZo1y/X8r3/9q/74xz/q9ddfV4cOHVzX5OXl1d6n8GZhLaS2F1htNhoAAACoEY9C65w5c5Sdna1LL71UMTExrmPevHmuPmlpaUpPT3e7prCwUDfccIPbNc8++2ztfQpv15MpAgAAAOfD4+kB57Jq1Sq353v27PHkLRqnnoOlz9+WUjdLJcXWF7QAAABQbee1TiuqKe4CqWm4dDJf2v2D3dUAAAA0OITW+uDnL3UfaLWZ1woAAOAxQmt9Kdsdi3mtAAAAHiO01pdu/SWHn5SxRzqaaXc1AAAADQqhtb40DZM69LDaTBEAAADwCKG1PjFFAAAAoEYIrfWpx6nQ+tMWqajQ3loAAAAaEEJrfYrtJIW3kooKpJ9T7K4GAACgwSC01ieHQ+pxancs5rUCAABUG6G1vpWf11qNHcYAAABAaK1/F1xkbeN6JF069Ivd1QAAADQIhNb65gyROvex2j+st7cWAACABoLQaoc+l1iPaxZJxawiAAAAcC6EVjsMukIKbyllHZLWL7W7GgAAAK9HaLVDYJB0+VSrnfyuVHjS3noAAAC8HKHVLoPHS82jpdyj0rqP7a4GAADAqxFa7RIQKI27zWovT5JOHre3HgAAAC9GaLXTgMulyDZSfra0dpHd1QAAAHgtQqud/P2lcdOs9soF0ok8e+sBAADwUoRWu8VfKrXuYAXWVQttLgYAAMA7EVrt5ucvjb/daq9+X8rLtrceAAAAL0Ro9QYXXiK16SIVnJBWzLO7GgAAAK9DaPUGDod05Z1W+8sPpewj9tYDAADgZQit3qLHIKl9D6moQFr+nt3VAAAAeBVCq7coP9r61SfSsYP21gMAAOBFCK3epGu81KWvVFIkJb9jdzUAAABeg9DqTRwOafwdVnvDMunwAVvLAQAA8BaEVm/T6UKp+0CptET67D92VwMAAOAVCK3eqGzd1s3Lpcw0e2sBAADwAoRWb9Suu9T7YsmUSp+9ZXc1AAAAtiO0equyua1bVkkHdtlZCQAAgO0Ird4qtpMUf6nV/vQNGwsBAACwH6HVm42bJjn8pG1fSWk/2l0NAACAbQit3iy6ndR/tNX+9E17awEAALARodXbjbtN8vOXfvxG2rXV7moAAABsQWj1dq1ipcFXWO2lcyVj7K0HAADABoTWhuDyqZJ/oPTf76WftthdDQAAQL0jtDYEzaOki6+y2oy2AgAAH0RobSjGTJECndLeHdKOjXZXAwAAUK8IrQ1FWAvpkmusNqOtAADAxxBaG5LLJkvOEGn/z9LWL+2uBgAAoN4QWhuSZuHSyOut9qdvSqUl9tYDAABQTwitDc2lN0ghzaSMPVLKarurAQAAqBeE1oYmpJk06karvexNqYTRVgAA0PgRWhui4ddKTcOlQ/ulTcl2VwMAAFDnCK0NUXATafTNVvuz/0jFRfbWAwAAUMcIrQ3VsIlSWEvpWKa04VO7qwEAAKhThNaGKihYuvwWq538jlRYYG89AAAAdcij0JqYmKiBAwcqNDRUUVFRmjRpklJTU8953YIFC9S9e3cFBwfrwgsv1NKlS2tcMMoZMl6KiJSyj0jrl9hdDQAAQJ3xKLSuXr1aCQkJ+vrrr5WcnKyioiKNHTtW+fn5VV7z1VdfacqUKZoxY4a2bNmiSZMmadKkSdq2bdt5F+/zAoKkcbdZ7S/ekwpO2FsPAABAHXEYU/P9QA8dOqSoqCitXr1aI0aMqLTP5MmTlZ+fryVLTo8EDhkyRP369dO//vWvar1PTk6OwsPDlZ2drbCwsJqW2ziVFEtPT5cOH5AmzJDGTLG7IgAA4IPqOq+d15zW7OxsSVKLFi2q7LN+/XqNGTPG7dy4ceO0fv3683lrlPEPkMZNs9or50sn8uytBwAAoA7UOLSWlpbqgQce0LBhw9S7d+8q+2VkZCg6OtrtXHR0tDIyMqq8pqCgQDk5OW4HzuKiUVJ0O+l4rrT6A7urAQAAqHU1Dq0JCQnatm2bkpKSarMeSdYXvsLDw11HXFxcrb9Ho+LnL11xu9VevVDKz7a3HgAAgFpWo9A6c+ZMLVmyRCtXrlTbtm3P2rd169bKzMx0O5eZmanWrVtXec2sWbOUnZ3tOvbt21eTMn1Ln+FSbGfp5HFp5QK7qwEAAKhVHoVWY4xmzpypRYsWacWKFerYseM5rxk6dKiWL1/udi45OVlDhw6t8hqn06mwsDC3A+fg5yeNv8Nqr10s5R6zsxoAAIBa5VFoTUhI0Ntvv613331XoaGhysjIUEZGhk6cOL3U0rRp0zRr1izX8/vvv1/Lli3T3//+d/3444968skntWnTJs2cObP2PgUsvYZI7bpLhSel5bU/bQMAAMAuHoXWOXPmKDs7W5deeqliYmJcx7x581x90tLSlJ6e7np+8cUX691339Urr7yivn37auHChVq8ePFZv7yFGnI4pCvvtNrrPpKyDttbDwAAQC05r3Va6wvrtHrAGOmFB6VdW6VhE6Ub7re7IgAA4AO8ep1WeKHyo61ff2ptOgAAANDAEVobo859pO4DrN2yls61uxoAAIDzRmhtrK662xp13bJSSku1uxoAAIDzQmhtrNp0lvqPttofv2rNdQUAAGigCK2N2fg7pYBA6ecU6cdv7K4GAACgxgitjVmLaGn4JKv98atSaYmt5QAAANQUobWxGz1FCmkmpe+WNi0/d38AAAAvRGht7JqGSWOmWO1P50qFBfbWAwAAUAOEVl8w/FopIlLKOiStXWx3NQAAAB4jtPqCwKDTGw4sf0/Kz7G3HgAAAA8RWn1F/9FSbCfpRJ70xXt2VwMAAOARQquv8POXrrrLaq9dLB3NsLUcAAAATxBafUn3gVLXeKmkSFr6ht3VAAAAVBuh1Zc4HNLEu632t8ul/T/bWw8AAEA1EVp9TdwF0kWjrG1dP37V7moAAACqhdDqi66cLvkHSKmbrQMAAMDLEVp9UcsYadjVVvvjV6XSUnvrAQAAOAdCq6+6fKoU3MSa17plpd3VAAAAnBWh1Vc1C5dG32y1P3ldKi60tx4AAICzILT6shHXSeGtpGOZ0pcf2V0NAABAlQitviwoWLridqud/I61WxYAAIAXIrT6uoFjpej20vFctncFAABei9Dq6/zLb++6SDp20N56AAAAKkFohdRriNS5j1RUKC170+5qAAAAKiC0wn17128+lw7ssrceAACAMxBaYWnfQ+o7wtredcm/7a4GAADADaEVp02YLvn5Szs2Sj+l2F0NAAC
AC6EVp0W2lS6+ymp//ArbuwIAAK9BaIW7sbdKzhBp307pu9V2VwMAACCJ0IozhTaXRt1ktT95XSousrceAAAAEVpRmUtvsMLrkXTpqyV2VwMAAEBoRSWcIdK4aVb787elk/n21gMAAHweoRWVGzJeioqT8rOlFfPsrgYAAPg4Qisq5x8gTZhhtVe9L2UftrceAADg0witqNqFw6QOvaSiAmnZW3ZXAwAAfBihFVUrv73rhmVSxl576wEAAD6L0Iqz69Rb6n2xZEqlT9jeFQAA2IPQinO76i7Jz0/atl7atdXuagAAgA8itOLcottJg8db7Y9ekYyxtx4AAOBzCK2onnHTpKBgae8O6fu1dlcDAAB8DKEV1RPeUhp5vdX+5DWppNjeegAAgE8htKL6LrtJahouHdovfb3U7moAAIAPIbSi+oKbSuNus9rL3pJOHre3HgAA4DMIrfDM0AlSq1gpL0tatcDuagAAgI8gtMIzAYHSldOt9soFUs5Re+sBAAA+gdAKz/UbKbXrJhWelL54z+5qAACADyC0wnMOhzT+Tqu94VMpP9veegAAQKNHaEXNdOsvxXa2RlvXfWx3NQAAoJHzOLSuWbNGEydOVGxsrBwOhxYvXnzOa9555x317dtXTZo0UUxMjKZPn64jR47UpF54C4dDGnWj1V67WCoqtLUcAADQuHkcWvPz89W3b1+9+OKL1eq/bt06TZs2TTNmzNAPP/ygBQsWaOPGjbr77rs9LhZeJv5SKSLSWklgU7LNxQAAgMYswNMLxo8fr/Hjx1e7//r169WhQwf99re/lSR17NhR99xzj/761796+tbwNv4B1i5ZH/5LWrVQGjxe8mPGCQAAqH11njCGDh2qffv2aenSpTLGKDMzUwsXLtSVV15Z12+N+jDkSmvTgYP7pB/W210NAABopOo8tA4bNkzvvPOOJk+erKCgILVu3Vrh4eFnnV5QUFCgnJwctwNeKriJdPFVVnvlfHtrAQAAjVadh9bt27fr/vvv1+OPP67Nmzdr2bJl2rNnj+69994qr0lMTFR4eLjriIuLq+sycT5GXGtNFdj9g7Rnu93VAACARshhjDE1vtjh0KJFizRp0qQq+9x22206efKkFiw4veXnl19+qeHDh+vAgQOKiYmpcE1BQYEKCgpcz3NychQXF6fs7GyFhYXVtFzUpff+Jm38TLrwEmn6k3ZXAwAA6llOTo7Cw8PrLK/V+Ujr8ePH5XfGl3P8/f0lSVXlZafTqbCwMLcDXm7UTdbjtnXSwV/srQUAADQ6HofWvLw8paSkKCUlRZK0e/dupaSkKC0tTZI0a9YsTZs2zdV/4sSJ+uCDDzRnzhzt2rVL69at029/+1sNGjRIsbGxtfMpYL/W7aWegyVjpNUL7a4GAAA0Mh6H1k2bNik+Pl7x8fGSpAcffFDx8fF6/PHHJUnp6emuACtJd9xxh2bPnq0XXnhBvXv31o033qhu3brpgw8+qKWPAK9RNtr6zedS7jF7awEAAI3Kec1prS91PUcCtcQY6R8J0r6d0tjbpPG3210RAACoJw1+Tit8iMNxerR13YdS4Ul76wEAAI0GoRW1q89wqUVrKT/HWk0AAACgFhBaUbv8/aVLb7DaqxZKpSX21gMAABoFQitq36BxUpNQ6Ui69P2XdlcDAAAaAUIrap8zRLrkGqu9cr71BS0AAIDzQGhF3bjkGikwSEpLlf77vd3VAACABo7QiroR2lwaMNZqr1xw9r4AAADnQGhF3bn0BmsZrO1fSxl77a4GAAA0YIRW1J2otlLvi632KkZbAQBAzRFaUbfKNhvYtFzKPmJvLQAAoMEitKJudewldegllRRJaxfZXQ0AAGigCK2oe5eVbe36sXTyuL21AACABonQirrXa6gU2VY6mS99vdTuagAAQANEaEXd8/M7vbXr6velkmJ76wEAAA0OoRX1Y+BYqVmElHVISlltdzUAAKCBIbSifgQGScMnWW22dgUAAB4itKL+DJsoBQVL+/8r7fzW7moAAEADQmhF/WkaLg2+wmqvnG9vLQAAoEEhtKJ+jbxecvhJqZutEVcAAIBqILSifrWMkfoOt9or2doVAABUD6EV9a9sa9ctK6VjB+2tBQAANAiEVtS/dt2kLn2l0hJpzQd2VwMAABoAQivsUTbauv4T6USevbUAAACvR2iFPXoMklp3kApOSF99Ync1AADAyxFaYQ+HQxp1o9Ve+4FUXGRvPQAAwKsRWmGfiy6TwltK2Uekb1fYXQ0AAPBihFbYJyBQGn6d1V65gK1dAQBAlQitsNfFEyRnEyljj7Rjo93VAAAAL0Vohb1CmklDr7TabO0KAACqQGiF/UZcJ/n5Sz9/J6Wl2l0NAADwQoRW2K95lBQ/ymoz2goAACpBaIV3KFv+6ru10uED9tYCAAC8DqEV3qFNZ6lbf8mUsrUrAACogNAK71G2teuGZVJ+tr21AAAAr0Johfe44CJrxLXwpLTuY7urAQAAXoTQCu/hcJwebV27WCoqtLUcAADgPQit8C79RlqrCeRlSd98bnc1AADASxBa4V38A6SR11vtVQul0lJ76wEAAF6B0ArvM3i8FNxUOvSL9MN6u6sBAABegNAK7xPcRBo20WqvmC8ZY289AADAdoRWeKfh10r+gdKeH6RNyXZXAwAAbEZohXcKbymNu9Vqv/+CdDTD3noAAICtCK3wXqNvljr2kgqOS28/LZWW2F0RAACwCaEV3svPX5r6mORsIu3eJq2YZ3dFAADAJoRWeLeWMdJ1M632p29K+3baWw8AALAFoRXeb+DlUp/h1vSAt5+2tnkFAAA+hdAK7+dwSDc9IIW1lA6mSR+/andFAACgnhFa0TA0DZemPGK1v/xQ2r7B3noAAEC98ji0rlmzRhMnTlRsbKwcDocWL158zmsKCgr0hz/8Qe3bt5fT6VSHDh30+uuv16Re+LLuA6QR11ntpGelvCxbywEAAPXH49Can5+vvn376sUXX6z2NTfddJOWL1+u1157TampqXrvvffUrVs3T98akCbMkFp3kHKPSfNms1sWAAA+IsDTC8aPH6/x48dXu/+yZcu0evVq7dq1Sy1atJAkdejQwdO3BSxBTunWWdI/EqRtX0kblklDqv/PIwAAaJjqfE7rRx99pAEDBuiZZ55RmzZtdMEFF+jhhx/WiRMn6vqt0Vi16SxdeafVXvSidGi/vfUAAIA65/FIq6d27dqlL7/8UsHBwVq0aJEOHz6s++67T0eOHNHcuXMrvaagoEAFBQWu5zk5OXVdJhqaS2+QdmyUfv5OeidR+s1zkr+/3VUBAIA6UucjraWlpXI4HHrnnXc0aNAgXXnllZo9e7befPPNKkdbExMTFR4e7jri4uLqukw0NH7+0i2/k4KbSnt/lJLfsbsiAABQh+o8tMbExKhNmzYKDw93nevRo4eMMfrll18qvWbWrFnKzs52Hfv27avrMtEQNY+Wbrzfaie/Le3dYW89AACgztR5aB02bJgOHDigvLw817mdO3fKz89Pbdu2rfQap9OpsLAwtwOo1E
WXSReNkkpLrd2yCpgrDQBAY+RxaM3Ly1NKSopSUlIkSbt371ZKSorS0tIkWaOk06ZNc/W/5ZZb1LJlS915553avn271qxZo0ceeUTTp09XSEhI7XwK+LbrfytFREqH90uL59hdDQAAqAMeh9ZNmzYpPj5e8fHxkqQHH3xQ8fHxevzxxyVJ6enprgArSc2aNVNycrKysrI0YMAATZ06VRMnTtTzzz9fSx8BPq9JqHTLo9Z2r18vlbaus7siAABQyxzGeP/q7Dk5OQoPD1d2djZTBVC1j16RVs63tnz93atSWAu7KwIAwGfUdV6r8zmtQL258g4ptrOUn21t8+r9fx4DAADVRGhF4xEQZO2WFRBoreH61cd2VwQAAGoJoRWNS0wH6aq7rfaHL0uZaWftDgAAGgZCKxqf4ZOkC/pLRQXS24lScZHdFQEAgPNEaEXj4+cn3fKItarALz9Jn/3H7ooAAMB5IrSicQpvJd34P1Z7eZK0a6u99QAAgPNCaEXj1W+ENHCsZEqld/4qncy3uyIAAFBDhFY0btclSC1aS0czpA9etLsaAABQQ4RWNG7BTaWpj0oOP+mbz6WUNXZXBAAAaoDQisav04XSmJut9oJ/SFmH7a0HAAB4jNAK3zBumhR3gXQ8V3rvb1Jpqd0VAQAADxBa4Rv8A6zdsgKd0s7N0trFdlcEAAA8QGiF74iKk665x2oveVVK321vPQAAoNoIrfAtF0+Uegyydsl6O1EqLrS7IgAAUA2EVvgWh0Oa8ojULEI6sEta+obdFQEAgGogtML3hDaXJj9otVctkL7/0t56AADAORFa4Zt6XywNnSAZI819Ulr4vFRwwu6qAABAFQit8F3XJUgjrrPa6z6S/n6vtHeHvTUBAIBKEVrhuwKCpGvvk+79qxTeSjq0X3r+fmnZm1JJsd3VAQCAcgitQLf+0u9elS4aZW068Nl/rPB6cJ/dlQEAgFMIrYAkNQmVbvuDdYQ0k9JSpWfvlb780Jr3CgAAbEVoBcq7aJQ16nrBRVJRgfT+P6VXZknZh+2uDAAAn0ZoBc4UESnd87R0bYIUGCT9uEl65m4pZbXdlQEA4LMIrUBl/PykEddKD82R2naVjudKb/6v9PbT0ok8u6sDAMDnEFqBs4luL93/vHT5VMnhJ23+whp1/SnF7soAAPAphFbgXAICpSvvlH77f1KrWCnrkPTSw9KH/5KKCu2uDgAAn0BoBaqrQ0/p4ZetnbQkadVCafZ90v6f7a0LAAAfQGgFPOEMkW76H+mu/09qFiFl7JH+MVNaniSVlthdHQAAjRahFaiJXkOkR/8tXTjM2j1ryb+lFx6SjqTbXRkAAI0SoRWoqWYR0p1PSlMekZxNpN3bpL/9StrwKRsSAABQywitwPlwOKRB46RHXpY6XSgVnJCS/i69/oSUe8zu6gAAaDQIrUBtaBkjJTwrXXW35B8gbfvKWhpr23q7KwMAoFEgtAK1xc9fGj1Z+p8XpZgOUl6W9NofpaRnrTYAAKgxQitQ29p0lv7nJWnUjdb0gQ3LpD9Pk1bMk4pZ1xUAgJogtAJ1ITBIuvoeaeZsqe0F0snj0sevSonTpZTVfFELAAAPOYzx/v975uTkKDw8XNnZ2QoLC7O7HMAzpaXW9q+fvCZlH7HOdewlXfNrqX13e2sDAKCW1HVeI7QC9aXghLRygbRyvlR40jrXf7Q0YYbUPMre2gAAOE+EVhFa0chkHZY+nSt987k1TSAwSLr0Rmn0zdaOWwAANECEVhFa0Ujt+0n6cI703++t56EtpAl3SgPHWisRAADQgBBaRWhFI2aMtHWd9PEr0uED1rnYztKke6Wu8fbWBgCABwitIrTCBxQXSV9+KH32H+lkvnWu91Bp4j1SVFt7awMAoBoIrSK0wofkZUuf/0da95G16oCfv3TJNdLYW6Wm/LMPAPBehFYRWuGDMtOkj16Rtn9tPW8SagXXYVdLAYH21gYAQCUIrSK0woelbpY+elk6sMt6HtlGmvgrqffF1m5bAAB4CUKrCK3wcaUl0sbPpKVzpdxj1rku/aRr7pHadrW1NAAAyhBaRWgFJFlbwS5PklYtsL645XBYy2NdeacU3sru6gAAPo7QKkIr4OZoprUl7LcrrOdBwdKYKdYGBYFB9tYGAPBZhFYRWoFK7dkuffgv61GSWrSWJv2a+a4AAFvUdV7z8/SCNWvWaOLEiYqNjZXD4dDixYurfe26desUEBCgfv36efq2AM7Uoaf02+ekW2dZ0wOOZkivPyH96zEpc6/d1QEAUKs8Dq35+fnq27evXnzxRY+uy8rK0rRp0zR69GhP3xJAVRwOqf9oadZcacwtkn+gtHOz9MyvpMVzpBN5dlcIAECtOK/pAQ6HQ4sWLdKkSZPO2ffmm29W165d5e/vr8WLFyslJaXa78P0AKCaDh+wpgxs+8p63ixCumqGNHCc5Ofxn1EBAKg2r5seUBNz587Vrl279MQTT9TH2wG+q1WsNONP0j2JUlSclJclJf1d+r+Zp+e+AgDQAAXU9Rv89NNPeuyxx7R27VoFBFTv7QoKClRQUOB6npOTU1flAY1T94HSI69IX34oLXtL2rdTeu630oDLpavuksJb2l0hAAAeqdOR1pKSEt1yyy166qmndMEFF1T7usTERIWHh7uOuLi4OqwSaKQCAqVLb5D+8KY0+Arr3KZkKfEOafk8qbjQ1vIAAPBEnc5pzcrKUvPmzeXv7+86V1paKmOM/P399fnnn+uyyy6rcF1lI61xcXHMaQXOx94fpUUvWI+StSXspPuknoPtrQsA0CjU9ZzWOp0eEBYWpq1bt7qde+mll7RixQotXLhQHTt2rPQ6p9Mpp9NZl6UBvqd9d+m3z0ubvpCWvCod2i+9+gcrtE76tRTZ1u4KAQCoksehNS8vTz///LPr+e7du5WSkqIWLVqoXbt2mjVrlvbv36+33npLfn5+6t27t9v1UVFRCg4OrnAeQD3w85MGjZX6DJM+f0da84G0fYOUulkaeb10+VQpuIndVQIAUIHHc1o3bdqk+Ph4xcfHS5IefPBBxcfH6/HHH5ckpaenKy0trXarBFC7gptKV/9K+t2r1pe2SoqlFfOkv9whfZMslZbaXSEAAG7YxhXwdcZYo62LX7LWeZWs3bauTZDadbO3NgBAg1HXeY3QCsBSXCitel9KfkcqPGnttjXoCmnCdCm0ud3VAQC8HKFVhFagXmUflj7+t7T5C+t5cFNrvmvnPlLbLlJIM3vrAwB4JUKrCK2ALXZtkxa9KP3yk/v5yLZS3AXW1IG4C6Q2XSRniD01AgC8BqFVhFbANqUl0qbl0g/rrV21jmVW7OPwk6LbWQG2LMzGdpYCg+q/XgCAbQitIrQCXiMvywqv5Y/swxX7+flLMR1OBdlTI7IxHa1dugAAjRKhVYRWwKtlH5b2/XQqxKZaj3lZFfv5B0ptOp0ekY3rJkW3l8rtmAcAaLga9I5YAHxAeCvr6D3Uem6MlHXw9Ehs2k7pl53S8VwpLdU6ygQ6pTadrQDbd4TUqbe1agEAAGdgp
BVA3TNGOpJeblpBqjU6W3DcvV/rDtLFV0kDxrBKAQA0MEwPEKEVaJRKS6XD+60Qu/NbKWW1tT6sJAUFSxeNki6eaE0lAAB4PUKrCK2ATziRJ236Qlr3sZS59/T5dt2s0df4UVaYBQB4JUKrCK2ATzHGWiP2q4+l79ZKJUXW+eCm0sCxVoBt3d7eGgEAFRBaRWgFfFZelrThM2n9EmtObJnOfaypA32GSQGsBwsA3oDQKkIr4PNKS6Wdm6Wvlkjb1kum1DrfLEIafIU0dILUMsbWEgHA1xFaRWgFUE7WIenrpdaRfcQ653BI3QdaUwd6DrY2NwAA1CtCqwitACpRUmJtL/vVx1Lq5tPnIyKtkdfBV1jrxwIA6gWhVYRWAOdwaL+0/hNp4zIpP8c65+cn9R5mjb52jbeeAwDqDKFVhFYA1VRUKH2/1lo2a/e20+cj20gDLpfa97DWfW0Sal+NANBIEVpFaAVQA+m7rS9ufZNcceetVrFWeC072na1ltQCANQYoVWEVgDnoeCEtGWVtevWvp3WLlxncjikyLgzgmwXNjMAAA8QWkVoBVCLjuda4dV1pErHDlbs5/CzNjFwBdluUptOrAsLAFUgtIrQCqCO5R6TfvnpdIhN2ynlHKnYzz9AiunoPiIb09E6X1PGWHNxiwulooJT7aLT7aKC08+dIdZ7Nouo+fsBQB0htIrQCsAG2YdPj8ampVqP+dkV+wUESm06S226WOG1qLBiCD3b8+Iiz2tr0dr6Ulm7btZjmy5SkPP8PzMAnAdCqwitALyAMdY0gn2p7kH2ZH7tvYfDTwoMkgKd1mNA0Knnp87lHpMO7qt4nZ+/FNtJat9datfdeoyMY5kvAPWK0CpCKwAvZYx0+IAVXtN3Sw6dCprO02HTk+fVmWZwIs8KzGk/Snt/lPbukPKyKvYLbmLNwy0Lsu26S+Eta/s3AAAuhFYRWgGgSmUjwGUhNu1Ha35u4cmKfSMiT4/EtutuzY91htR/zQAaJUKrCK0A4JGSEiljjzUKm5Yqpe2QMvZaAbe8shUSXNMKekjR7c7vi2UAfBahVYRWADhvJ49Lv+yU9p4KsWmpUtahiv38A6SoOGtVhLIjtqMUEWWtZwsAVajrvMYfpwHAFwQ3kbr0s44y2Yet8OoakU21dg9L320dbtc3lWI6lAuznawwG9KsHj8EAF/GSCsAwGKMdCxTOrD7dHBN322tWFBaUvk1EZHuo7IxHaXoODZhAHwQ0wNEaAUAWxUXWcE1fbd0YJf1mLGn8p3EJGuprcg4ayS2fJhtHs0yXEAjRmgVoRUAvNKJPCl9j/uobPpu63xlnCHWfNkWra0A2zxKahF96nkUUw2ABo45rQAA7xTSTOrU2zrKGGPNlU3f7T7NIDNNKjhxepexygQ3tUJs8+jTj82jpRZRVrBtGs6XwQAfRmgFANQeh8Oa5xoRKfUYdPp8SbF0aL906BdrWsGxTOnoqeNYprVF7sl8a/rBgV2V/+xA5+kQWyHYRkthLazdwQA0SoRWAEDd8w+w1oRt3b7y1wtOSFkH3YPssUzp6EHpWIaUc1QqKpAOpllHVe9RFpgjoqTm5dpl55uEMloLNFCEVgCA/ZwhUnR766hMcZG1ruyxTOloxqkwm3l6xDbrkDWaeyTdOqoSFFwu2FYRboOb1M1n9AYlxda2v0WF1nSMkKZsJoEGg39SAQDeLyBQahVrHZUpLZGyj1ghNuuQdOyQNXKbdej0kZdlbW97cJ91VCW46akgG1V5uA1tYYVfb1kJobjI+my5x8odR6XcM87lHZPycypeH+i0gnpZiHU2sR6Dyx/lz5Xve+oxKJgRbNQ5QisAoOHz87dWIGgeVXWfwgIp+1AVofagde5kvnWk51srI5xNULB1OEPKPYaUe36218q1yz8PdFrhr7iwXODMqiSQnjqfd0w6nuvh78rPWke38KT1vKjAOnKPefZzynP4VQy+TUKtI6SZ1DTsdLvsfPnXmYuMaiC0AgB8Q5BTimxrHVU5efx0iK1qxLYs7BWetI68rNqr0eGwAmVRgWfX+flLoRFSs+ZS6DmOJmFWcC0psXZAO5kvnci3PntZaD+R5/785PFTfc7sly+ZUus4kWcdNcm+wU0rBtkmYVKTspBbrh1S1qep9bvyD2CU10cQWgEAKBPc5OxfGDPmdFgtPGl9gezMx4ITUuEJqeDkqcey9slyr51xXVkQNuZ0YPUPkJpFnD2AloXUJqGeT1fw9z8dEmuq7PdRWbg9nmuF2OO50vEc6fip9oncU+fyrNAsnb7+aIbnNTj8pMCgU4fTegwo1670vFMKDDzdDggs17/cozPEfYpEQGDNf1c4b4RWAACqy+E4/Vf5tam01AqrZSG27K/RvX0EsfzvI7yV59eXFJcLtrmng25+TrnQW1ngzbWulaxR3vLBvy4FBp2e8+ssN9fXNQ+4ScW5wOXPhTS1fldMh6gRQisAAHbz86ubMOztykaTm0V4dp0x1hfQigutucrFhdaKCGXzc4vKPXd77czzZ5wr/1rhqT9EnMwvN//3VN/znRLiDLHCbFCIFBAg+QedegyQ/ANPtSt59A+wRntdj4Gnrys77x/o3qdZhNSm8/nV6yUIrQAAoGFxOE7/1X99bP/rmv97xnzeguMV5wO72mc+5ltBWzo9jaQ+9Bgk/eov9fNedYzQCgAAcDa1Mf9XskZwTx4/Pfe38IQ1zaGk2Aq0rsciqfjUeVe76Iw+Z7uu7Fyx1DKmdn4HXoDQCgAAUB8CgqRmQZ5Ph4AkyUtWRgYAAACqRmgFAACA1yO0AgAAwOsRWgEAAOD1PA6ta9as0cSJExUbGyuHw6HFixeftf8HH3ygyy+/XJGRkQoLC9PQoUP12Wef1bReAAAA+CCPQ2t+fr769u2rF198sVr916xZo8svv1xLly7V5s2bNWrUKE2cOFFbtmzxuFgAAAD4JocxxtT4YodDixYt0qRJkzy6rlevXpo8ebIef/zxavXPyclReHi4srOzFRYWVoNKAQAAUJfqOq/V+5zW0tJS5ebmqkWLFvX91gAAAGig6n1zgWeffVZ5eXm66aabquxTUFCggoIC1/OcnJz6KA0AAABeql5HWt9991099dRTmj9/vqKioqrsl5iYqPDwcNcRFxdXj1UCAADA29RbaE1KStJdd92l+fPna8yYMWftO2vWLGVnZ7uOffv21VOVAAAA8Eb1Mj3gvffe0/Tp05WUlKQJEyacs7/T6ZTT6ayHygAAANAQeBxa8/Ly9PPPP7ue7969WykpKWrRooXatWunWbNmaf/+/XrrrbckWVMCbr/9dj333HMaPHiwMjIyJEkhISEKDw+vpY8BAACAxszj6QGbNm1SfHy84uPjJUkPPvig4uPjXctXpaenKy0tzdX/lVdeUXFxsRISEhQTE+M67r///lr6CAAAAGjszmud1vqSnZ2tiIgI7du3j3VaAQAAvFBOTo7i4uKUlZVVJ3+bXu9LXtVEbm6uJLGKAAAAgJfLzc2tk9DaIEZaS0tLdeDAAYWGhsrhcLiSPCOvjRv3
2Xdwr30D99l3cK99w5n32Rij3NxcxcbGys+v9heoahAjrX5+fmrbtm2F82FhYfzL4AO4z76De+0buM++g3vtG8rf57r8kn29b+MKAAAAeIrQCgAAAK/XIEOr0+nUE088wQYEjRz32Xdwr30D99l3cK99Q33f5wbxRSwAAAD4tgY50goAAADfQmgFAACA1yO0AgAAwOsRWgEAAOD1GlxoffHFF9WhQwcFBwdr8ODB2rhxo90l4SzWrFmjiRMnKjY2Vg6HQ4sXL3Z73Rijxx9/XDExMQoJCdGYMWP0008/ufU5evSopk6dqrCwMEVERGjGjBnKy8tz6/P9999r+PDhCg4OVlxcnJ555pm6/mgoJzExUQMHDlRoaKiioqI0adIkpaamuvU5efKkEhIS1LJlSzVr1kzXX3+9MjMz3fqkpaVpwoQJatKkiaKiovTII4+ouLjYrc+qVat00UUXyel0qkuXLnrjjTfq+uOhnDlz5qhPnz6uxcSHDh2qTz/91PU697lxevrpp+VwOPTAAw+4znGvG4cnn3xSDofD7ejevbvrda+6z6YBSUpKMkFBQeb11183P/zwg7n77rtNRESEyczMtLs0VGHp0qXmD3/4g/nggw+MJLNo0SK3159++mkTHh5uFi9ebL777jtz9dVXm44dO5oTJ064+lxxxRWmb9++5uuvvzZr1641Xbp0MVOmTHG9np2dbaKjo83UqVPNtm3bzHvvvWdCQkLMyy+/XF8f0+eNGzfOzJ0712zbts2kpKSYK6+80rRr187k5eW5+tx7770mLi7OLF++3GzatMkMGTLEXHzxxa7Xi4uLTe/evc2YMWPMli1bzNKlS02rVq3MrFmzXH127dplmjRpYh588EGzfft2889//tP4+/ubZcuW1evn9WUfffSR+eSTT8zOnTtNamqq+f3vf28CAwPNtm3bjDHc58Zo48aNpkOHDqZPnz7m/vvvd53nXjcOTzzxhOnVq5dJT093HYcOHXK97k33uUGF1kGDBpmEhATX85KSEhMbG2sSExNtrArVdWZoLS0tNa1btzZ/+9vfXOeysrKM0+k07733njHGmO3btxtJ5ptvvnH1+fTTT43D4TD79+83xhjz0ksvmebNm5uCggJXn0cffdR069atjj8RqnLw4EEjyaxevdoYY93XwMBAs2DBAlefHTt2GElm/fr1xhjrDzh+fn4mIyPD1WfOnDkmLCzMdW9/97vfmV69erm91+TJk824cePq+iPhLJo3b27+/e9/c58bodzcXNO1a1eTnJxsRo4c6Qqt3OvG44knnjB9+/at9DVvu88NZnpAYWGhNm/erDFjxrjO+fn5acyYMVq/fr2NlaGmdu/erYyMDLd7Gh4ersGDB7vu6fr16xUREaEBAwa4+owZM0Z+fn7asGGDq8+IESMUFBTk6jNu3Dilpqbq2LFj9fRpUF52drYkqUWLFpKkzZs3q6ioyO1ed+/eXe3atXO71xdeeKGio6NdfcaNG6ecnBz98MMPrj7lf0ZZH/4bYI+SkhIlJSUpPz9fQ4cO5T43QgkJCZowYUKF+8G9blx++uknxcbGqlOnTpo6darS0tIked99bjCh9fDhwyopKXH7pUhSdHS0MjIybKoK56Psvp3tnmZkZCgqKsrt9YCAALVo0cKtT2U/o/x7oP6UlpbqgQce0LBhw9S7d29J1n0ICgpSRESEW98z7/W57mNVfXJycnTixIm6+DioxNatW9WsWTM5nU7de++9WrRokXr27Ml9bmSSkpL07bffKjExscJr3OvGY/DgwXrjjTe0bNkyzZkzR7t379bw4cOVm5vrdfc5wNMPBwBnk5CQoG3btunLL7+0uxTUkW7duiklJUXZ2dlauHChbr/9dq1evdruslCL9u3bp/vvv1/JyckKDg62uxzUofHjx7vaffr00eDBg9W+fXvNnz9fISEhNlZWUYMZaW3VqpX8/f0rfGMtMzNTrVu3tqkqnI+y+3a2e9q6dWsdPHjQ7fXi4mIdPXrUrU9lP6P8e6B+zJw5U0uWLNHKlSvVtm1b1/nWrVursLBQWVlZbv3PvNfnuo9V9QkLC/O6/7g2ZkFBQerSpYv69++vxMRE9e3bV8899xz3uRHZvHmzDh48qIsuukgBAQEKCAjQ6tWr9fzzzysgIEDR0dHc60YqIiJCF1xwgX7++Wev+3e6wYTWoKAg9e/fX8uXL3edKy0t1fLlyzV06FAbK0NNdezYUa1bt3a7pzk5OdqwYYPrng4dOlRZWVnavHmzq8+KFStUWlqqwYMHu/qsWbNGRUVFrj7Jycnq1q2bmjdvXk+fxrcZYzRz5kwtWrRIK1asUMeOHd1e79+/vwIDA93udWpqqtLS0tzu9datW93+kJKcnKywsDD17NnT1af8zyjrw38D7FVaWqqCggLucyMyevRobd26VSkpKa5jwIABmjp1qqvNvW6c8vLy9N///lcxMTHe9++0R1/bsllSUpJxOp3mjTfeMNu3bze/+tWvTEREhNs31uBdcnNzzZYtW8yWLVuMJDN79myzZcsWs3fvXmOMteRVRESE+fDDD833339vrrnmmkqXvIqPjzcbNmwwX375penatavbkldZWVkmOjra3HbbbWbbtm0mKSnJNGnShCWv6tGvf/1rEx4eblatWuW2bMrx48ddfe69917Trl07s2LFCrNp0yYzdOhQM3ToUNfrZcumjB071qSkpJhly5aZyMjISpdNeeSRR8yOHTvMiy++yPI49eyxxx4zq1evNrt37zbff/+9eeyxx4zD4TCff/65MYb73JiVXz3AGO51Y/HQQw+ZVatWmd27d5t169aZMWPGmFatWpmDBw8aY7zrPjeo0GqMMf/85z9Nu3btTFBQkBk0aJD5+uuv7S4JZ7Fy5UojqcJx++23G2OsZa/++Mc/mujoaON0Os3o0aNNamqq2884cuSImTJlimnWrJkJCwszd955p8nNzXXr891335lLLrnEOJ1O06ZNG/P000/X10eEMZXeY0lm7ty5rj4nTpww9913n2nevLlp0qSJufbaa016errbz9mzZ48ZP368CQkJMa1atTIPPfSQKSoqcuuzcuVK069fPxMUFGQ6derk9h6oe9OnTzft27c3QUFBJjIy0owePdoVWI3hPjdmZ4ZW7nXjMHnyZBMTE2OCgoJMmzZtzOTJk83PP//set2b7rPDGGM8G5sFAAAA6leDmdMKAAAA30VoBQAAgNcjtAIAAMDrEVoBAADg9QitAAAA8HqEVgAAAHg9QisAAAC8HqEVAAAAXo/QCgAAAK9HaAUAAIDXI7QCAADA6xFaAQAA4PX+f0iKF8S5F/KMAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tb_dir = os.path.join(WORK_DIR, \"tensorboard_output\")\n", + "fname = os.listdir(tb_dir)[0]\n", + "tb_path = os.path.join(tb_dir, fname)\n", + "#\n", + "data = read_tensorboard_file(tb_path)\n", + "print(data.keys())\n", + "_ = plot_image(data, \"loss\", 0.9)\n", + "_ = plot_image(data, \"lr\", 0)\n", + "_ = plot_image(data, \"evaluation/acc\", 0)\n", + "_ = plot_image(data, \"evaluation/loss\", 0)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 推理\n", + "推理部分见baichuan_infer.ipynb" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/pytorch/llm_agent/chatglm2_infer.ipynb b/examples/pytorch/llm_agent/chatglm2_infer.ipynb new file mode 100644 index 00000000..29388858 --- /dev/null +++ b/examples/pytorch/llm_agent/chatglm2_infer.ipynb @@ -0,0 +1,526 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ChatGLM2 推理" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置实验环境\n", + "The following code is copied from baichuan_infer.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install transformers" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2023-07-02 21:48:47,527] [INFO] [real_accelerator.py:110:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 21:48:48,006 - modelscope - INFO - PyTorch version 2.0.1 Found.\n", + "2023-07-02 21:48:48,007 - modelscope - INFO - Loading ast index from /home/hackathon/.cache/modelscope/ast_indexer\n", + "2023-07-02 21:48:48,032 - modelscope - INFO - Loading done! Current index file version is 1.6.2, with md5 ddf811ee982377c1357284a2bfda3dec and a total number of 861 components indexed\n", + "2023-07-02 21:48:48,708 - modelscope - INFO - [0, 1]\n", + "2023-07-02 21:48:48,848 - modelscope - INFO - Using device: cuda:0,1\n" + ] + }, + { + "data": { + "text/plain": [ + "device(type='cuda', index=0)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from _common import *\n", + "from transformers import TextStreamer\n", + "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "logger.info(device_ids)\n", + "select_device(device_ids)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Model, Tokenizer\n", + "Note: 你需要设置CKPT_FPATH的内容, 指向`.bin`文件, 或`.pth`文件" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 21:48:49,227 - modelscope - INFO - Development mode use revision: v1.0.3\n", + "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. 
It may result in unexpected tokenization. \n", + "The tokenizer class you load from this checkpoint is 'ChatGLMTokenizer'. \n", + "The class this function is called from is 'ChatGLM2Tokenizer'.\n", + "2023-07-02 21:48:49,572 - modelscope - INFO - initialize model from /home/hackathon/.cache/modelscope/hub/ZhipuAI/chatglm2-6b\n", + "Failed to load cpm_kernels:No module named 'cpm_kernels'\n", + "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "b72b43e11bec49c78c8097deaffea8a7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/7 [00:00```JSON\n", + "{\"api_name\": \"modelscope_speech-generation\", \"url\": \"http://90.49.118.175:2603/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"parameters\": {\"text\": \"秋树红叶舞飘零,\n", + "山间小溪水潺潺。\n", + "微风拂面感清凉,\n", + "散步赏景心旷神怡。\", \"gender\": \"woman\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"result\": \"\"}\n", + "```<|endofexec|>\n", + "\n", + "-----------------------------------------------------------------------------------\n", + "[TEST]你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://159.1.4.174:3210/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. {\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://172.163.158.154:5325/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. 
{\"plugin_name\": \"modelscope_text-address\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-address\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"url\": \"http://133.94.12.37:3160/\", \"paths\": [{\"name\": \"modelscope_text-address\", \"model_id\": \"/damo/mgeo_geographic_elements_tagging_chinese_base\", \"method\": \"post\", \"description\": \"针对中文的地址信息,识别出里面的元素,包括省、市、区、镇、社区、道路、路号、POI、楼栋号、户室号等\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的地址信息\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "现在我给你另一条地址,请识别出里面的元素。输入地址:广东省深圳市南山区科技园北区 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-address\", \"url\": \"http://133.94.12.37:3160/damo/mgeo_geographic_elements_tagging_chinese_base\", \"parameters\": {\"text\": \"广东省深圳市南山区科技园北区\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"科技园北区\"}\n", + "```<|endofexec|>\n", + "地址识别结果为:{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"科技园北区\"}。我识别出的元素包括:prov、city、district、town。\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-address\", \"url\": \"http://159.1.4.174:3210/damo/mgeo_geographic_elements_tagging_chinese_base\", \"parameters\": {\"text\": \"广东省深圳市南山区科技园北区\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"\", \"community\": \"科技园北区\", \"poi\": \"\"}\n", + "```<|endofexec|>\n", + "地址识别json表示:{\"prov\": \"广东省\", \"city\": \"深圳市\", \"district\": \"南山区\", \"town\": \"\", \"community\": \"科技园北区\", \"poi\": \"\"}。我使用的模型是ModelScope的'damo/mgeo_geographic_elements_tagging_chinese_base'模型。这是基于达摩院联合高德发布的多任务多模态地址预训练底座MGeo模型微调得到的。\n", + "-----------------------------------------------------------------------------------\n", + "[TEST]你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。目前支持的插件信息如下,请自行判断是否需要调用插件来解决当前用户问题。若需要调用插件,则需要将插件调用请求按照json格式给出,必须包含api_name、url、parameters字段,并在其前后使用<|startofthink|>和<|endofthink|>作为标志。然后你需要根据插件API调用结果生成合理的答复;若无需调用插件,则直接给出对应回复即可:\n", + "\n", + "1. {\"name\": \"modelscope_text-translation-zh2en\", \"description\": \"将输入的中文文本翻译成英文\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_text-translation-zh2en\", \"model_id\": \"/damo/nlp_csanmt_translation_zh2en\", \"method\": \"post\", \"description\": \"将输入的中文文本翻译成英文\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的中文文本\", \"required\": \"True\"}]}]}\n", + "\n", + "2. {\"name\": \"modelscope_speech-generation\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_speech-generation\", \"model_id\": \"/damo/speech_sambert-hifigan_tts_zh-cn_16k\", \"method\": \"post\", \"description\": \"针对回复的内容,用语音表示,同时可以选择是男声或者女声\", \"parameters\": [{\"name\": \"text\", \"description\": \"要转成语音的文本\", \"required\": \"True\"}, {\"name\": \"gender\", \"description\": \"用户身份\", \"required\": \"True\"}]}]}\n", + "\n", + "3. 
{\"name\": \"modelscope_image-generation\", \"description\": \"针对文本输入,生成对应的图片\", \"url\": \"http://api-inference.modelscope.cn/api-inference/v1/models\", \"paths\": [{\"name\": \"modelscope_image-generation\", \"model_id\": \"/damo/image_generation\", \"method\": \"post\", \"description\": \"针对文本输入,生成对应的图片\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本信息\", \"required\": \"True\"}]}]} \n", + "\n", + "### 用户\n", + "歌手:古巨基\n", + "歌曲名:爱情马戏班\n", + "经典歌词:情是何等诡秘能令人使出看家把戏;恋爱就像走纲线般惊险;为你献技 像马戏班\n", + "请结合以上信息,编写一个智能音响的播放导语,需要有文采,字数30字以内,凸显一下即将播放该歌曲 \n", + "\n", + "### 助手\n", + "爱情马戏班,由古巨基演唱,是一首充满马戏班元素的浪漫歌曲,歌词中描述了爱情的神秘和危险,是一首值得听一听的浪漫歌曲。\n", + "\n", + "[LABELS]亲爱的主人,今天我为您带来的是古巨基的经典之作——《爱情马戏班》。这首歌曲描绘了情与爱的神秘和惊险,让人们为之倾倒。让我们一起享受这场爱情的马戏表演吧!\n", + "-----------------------------------------------------------------------------------\n", + "[TEST]你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://114.42.178.183:8005/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://93.82.87.89:6631/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://4.105.93.165:8143/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"动物\": null, \"食物\": null, \"颜色\": null}\n", + "这只棕色的狗狗很喜欢吃狗粮。 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://4.105.93.165:8143/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"http://4.105.93.165:8143/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofexec|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"http://4.105.93.165:8143/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofexec|>\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"http://4.105.93.165:8143/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofexec|>\n", + "<|startofexec|>```JSON\n", + "{\"prov\": \"http://4.105.93.165:8143/damo/nlp_structbert_siames\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://114.42.178.183:8005/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"这只棕色的狗狗很喜欢吃狗粮。\", \"schema\": \"{\\\"动物\\\": null, \\\"食物\\\": null, \\\"颜色\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"动物\": [\"狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"动物\": [\"狗狗\"], \"食物\": [\"狗粮\"], \"颜色\": [\"棕色\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "-----------------------------------------------------------------------------------\n", + "[TEST]你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://28.179.171.5:6428/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://100.111.18.38:6408/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://144.67.18.142:6381/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"人物\": null, \"地理位置\": null, \"组织机构\": null}\n", + "谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。 \n", + "\n", + "### 助手\n", + "<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://100.111.18.38:6408/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": null, \"地理位置\": null, \"组织机构\": null}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": null, \"地理位置\": null, \"组织机构\": null}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调的通用信息抽取模型。\n", + "\n", + "[LABELS]<|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://100.111.18.38:6408/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"谷歌公司是一家全球知名的科技公司,总部位于美国的加利福尼亚州山景市。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"美国\", \"加利福尼亚州山景市\"], \"组织机构\": [\"谷歌公司\"]}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"美国\", \"加利福尼亚州山景市\"], \"组织机构\": [\"谷歌公司\"]}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "-----------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n", + "for d in test_dataset[:5]:\n", + " system = d[\"system\"]\n", + " user = d[\"user\"]\n", + " assistant = d[\"assistant\"]\n", + " input_ids = tokenize_function(system, user, None, tokenizer)[\"input_ids\"]\n", + " print(f\"[TEST]{tokenizer.decode(input_ids)}\", end=\"\")\n", + " input_ids = torch.tensor(input_ids)[None].cuda()\n", + " attention_mask = torch.ones_like(input_ids)\n", + " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", + " attention_mask=attention_mask,\n", + " streamer=streamer, 
pad_token_id=tokenizer.pad_token_id)\n", + " print()\n", + " print(f\"[LABELS]{assistant}\")\n", + " print(\"-----------------------------------------------------------------------------------\")\n", + " # input(\"next[ENTER]\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "hackathon", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/pytorch/llm_agent/chatglm2_sft.ipynb b/examples/pytorch/llm_agent/chatglm2_sft.ipynb new file mode 100644 index 00000000..1f9306f1 --- /dev/null +++ b/examples/pytorch/llm_agent/chatglm2_sft.ipynb @@ -0,0 +1,1931 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## ChatGLM2 + Lora + Agent\n", + "ChatGLM2-6B 是开源中英双语对话模型 ChatGLM-6B 的第二代版本,在保留了初代模型对话流畅、部署门槛较低等众多优秀特性的基础之上,ChatGLM2-6B 引入了如下新特性:\n", + "\n", + "1. 更强大的性能:基于 ChatGLM 初代模型的开发经验,我们全面升级了 ChatGLM2-6B 的基座模型。ChatGLM2-6B 使用了 GLM 的混合目标函数,经过了 1.4T 中英标识符的预训练与人类偏好对齐训练,评测结果显示,相比于初代模型,ChatGLM2-6B 在 MMLU(+23%)、CEval(+33%)、GSM8K(+571%) 、BBH(+60%)等数据集上的性能取得了大幅度的提升,在同尺寸开源模型中具有较强的竞争力。\n", + "\n", + "2. 更长的上下文:基于 FlashAttention 技术,我们将基座模型的上下文长度(Context Length)由 ChatGLM-6B 的 2K 扩展到了 32K,并在对话阶段使用 8K 的上下文长度训练,允许更多轮次的对话。但当前版本的 ChatGLM2-6B 对单轮超长文档的理解能力有限,我们会在后续迭代升级中着重进行优化。\n", + "\n", + "3. 更高效的推理:基于 Multi-Query Attention 技术,ChatGLM2-6B 有更高效的推理速度和更低的显存占用:在官方的模型实现下,推理速度相比初代提升了 42%,INT4 量化下,6G 显存支持的对话长度由 1K 提升到了 8K。" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1. Ref: https://modelscope.cn/models/ZhipuAI/chatglm2-6b/summary\n", + "2. 以下脚本可以在2*A10环境下正常运行, 大概占用40G显存\n", + "3. python>=3.8" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置实验环境\n", + "The following code is copied from baichuan_sft.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# !pip install modelscope -U\n", + "# !pip install numpy pandas matplotlib scikit-learn\n", + "# !pip install transformers datasets\n", + "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install tqdm\n", + "# !pip install tensorboard\n", + "# !pip install torchmetrics\n", + "#\n", + "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2023-07-02 20:34:35,987] [INFO] [real_accelerator.py:110:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 20:34:36,464 - modelscope - INFO - PyTorch version 2.0.1 Found.\n", + "2023-07-02 20:34:36,465 - modelscope - INFO - Loading ast index from /home/hackathon/.cache/modelscope/ast_indexer\n", + "2023-07-02 20:34:36,489 - modelscope - INFO - Loading done! 
Current index file version is 1.6.2, with md5 ddf811ee982377c1357284a2bfda3dec and a total number of 861 components indexed\n", + "2023-07-02 20:34:37,158 - modelscope - INFO - [0, 1]\n", + "2023-07-02 20:34:37,324 - modelscope - INFO - Using device: cuda:0,1\n", + "2023-07-02 20:34:37,326 - modelscope - INFO - Global seed set to 42\n" + ] + } + ], + "source": [ + "from _common import *\n", + "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "logger.info(device_ids)\n", + "select_device(device_ids)\n", + "_ = seed_everything(42)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Model, Tokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 20:34:37,660 - modelscope - INFO - Development mode use revision: v1.0.3\n", + "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n", + "The tokenizer class you load from this checkpoint is 'ChatGLMTokenizer'. \n", + "The class this function is called from is 'ChatGLM2Tokenizer'.\n", + "2023-07-02 20:34:38,020 - modelscope - INFO - initialize model from /home/hackathon/.cache/modelscope/hub/ZhipuAI/chatglm2-6b\n", + "Failed to load cpm_kernels:No module named 'cpm_kernels'\n", + "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "51826d090fb740e0a7d514e543af843b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/7 [00:00': 1, '': 2, '': 2}\n", + "2023-07-02 20:34:45,152 - modelscope - INFO - bos_token_id: 1, eos_token_id: 2, pad_token_id: 2\n" + ] + } + ], + "source": [ + "model_id = \"ZhipuAI/chatglm2-6b\"\n", + "model_revision = \"v1.0.3\"\n", + "WORK_DIR = \"runs/chatglm2\"\n", + "LORA_TARGET_MODULES = [\"query_key_value\"]\n", + "#\n", + "model_dir = get_model_dir(model_id, model_revision)\n", + "model, tokenizer = get_chatglm2_model_tokenizer(model_dir)\n", + "# chatglm2 does not support gradient_checkpointing\n", + "GRADIENT_CHECKPOINTING = False\n", + "if GRADIENT_CHECKPOINTING:\n", + " model.gradient_checkpointing_enable()\n", + " model.enable_input_require_grads()\n", + "logger.info(tokenizer.special_tokens)\n", + "if tokenizer.eos_token_id is None:\n", + " tokenizer.eos_token_id = tokenizer.pad_token_id\n", + "if tokenizer.bos_token_id is None:\n", + " tokenizer.bos_token_id = 1\n", + "#\n", + "logger.info(f\"bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, \"\n", + " f\"pad_token_id: {tokenizer.pad_token_id}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 准备Lora\n", + "The following code is copied from baichun.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 20:34:45,215 - modelscope - INFO - lora_config: LoRAConfig(rank=8, replace_modules=['query_key_value'], lora_alpha=32, lora_dropout=0.1, merge_weights=True, use_merged_linear=False, enable_lora=None, fan_in_fan_out=False, bias='none', only_lora_trainable=True, pretrained_weights=None)\n", + "2023-07-02 20:34:49,932 - modelscope - INFO - 
transformer.embedding.word_embeddings.weight: requires_grad=False\n", + "2023-07-02 20:34:49,933 - modelscope - INFO - transformer.encoder.layers.0.input_layernorm.weight: requires_grad=False\n", + "2023-07-02 20:34:49,933 - modelscope - INFO - transformer.encoder.layers.0.self_attention.query_key_value.weight: requires_grad=False\n", + "2023-07-02 20:34:49,933 - modelscope - INFO - transformer.encoder.layers.0.self_attention.query_key_value.bias: requires_grad=False\n", + "2023-07-02 20:34:49,934 - modelscope - INFO - transformer.encoder.layers.0.self_attention.query_key_value.lora_A: requires_grad=True\n", + "2023-07-02 20:34:49,934 - modelscope - INFO - transformer.encoder.layers.0.self_attention.query_key_value.lora_B: requires_grad=True\n", + "2023-07-02 20:34:49,934 - modelscope - INFO - transformer.encoder.layers.0.self_attention.dense.weight: requires_grad=False\n", + "2023-07-02 20:34:49,934 - modelscope - INFO - transformer.encoder.layers.0.post_attention_layernorm.weight: requires_grad=False\n", + "2023-07-02 20:34:49,935 - modelscope - INFO - transformer.encoder.layers.0.mlp.dense_h_to_4h.weight: requires_grad=False\n", + "2023-07-02 20:34:49,935 - modelscope - INFO - transformer.encoder.layers.0.mlp.dense_4h_to_h.weight: requires_grad=False\n", + "2023-07-02 20:34:49,936 - modelscope - INFO - transformer.encoder.layers.1.input_layernorm.weight: requires_grad=False\n", + "2023-07-02 20:34:49,936 - modelscope - INFO - transformer.encoder.layers.1.self_attention.query_key_value.weight: requires_grad=False\n", + "2023-07-02 20:34:49,936 - modelscope - INFO - transformer.encoder.layers.1.self_attention.query_key_value.bias: requires_grad=False\n", + "2023-07-02 20:34:49,937 - modelscope - INFO - transformer.encoder.layers.1.self_attention.query_key_value.lora_A: requires_grad=True\n", + "2023-07-02 20:34:49,937 - modelscope - INFO - transformer.encoder.layers.1.self_attention.query_key_value.lora_B: requires_grad=True\n", + "2023-07-02 20:34:49,937 - modelscope - INFO - transformer.encoder.layers.1.self_attention.dense.weight: requires_grad=False\n", + "2023-07-02 20:34:49,938 - modelscope - INFO - transformer.encoder.layers.1.post_attention_layernorm.weight: requires_grad=False\n", + "2023-07-02 20:34:49,938 - modelscope - INFO - transformer.encoder.layers.1.mlp.dense_h_to_4h.weight: requires_grad=False\n", + "2023-07-02 20:34:49,938 - modelscope - INFO - transformer.encoder.layers.1.mlp.dense_4h_to_h.weight: requires_grad=False\n", + "2023-07-02 20:34:49,938 - modelscope - INFO - transformer.encoder.layers.2.input_layernorm.weight: requires_grad=False\n", + "2023-07-02 20:34:49,939 - modelscope - INFO - ...\n", + "2023-07-02 20:34:49,941 - modelscope - INFO - ChatGLM2ForConditionalGeneration: 6245.5337M Params (1.9497M Trainable), 0.0000M Buffers.\n", + "2023-07-02 20:34:49,942 - modelscope - INFO - device: cuda:0, dtype: torch.float16\n" + ] + }, + { + "data": { + "text/plain": [ + "ChatGLM2ForConditionalGeneration(\n", + " (transformer): ChatGLMModel(\n", + " (embedding): Embedding(\n", + " (word_embeddings): Embedding(65024, 4096)\n", + " )\n", + " (rotary_pos_emb): RotaryEmbedding()\n", + " (encoder): GLMTransformer(\n", + " (layers): ModuleList(\n", + " (0-27): 28 x GLMBlock(\n", + " (input_layernorm): RMSNorm()\n", + " (self_attention): SelfAttention(\n", + " (query_key_value): Linear(\n", + " in_features=4096, out_features=4608, bias=True\n", + " (lora_dropout): Dropout(p=0.1, inplace=False)\n", + " )\n", + " (core_attention): CoreAttention(\n", + " (attention_dropout): 
Dropout(p=0.0, inplace=False)\n", + " )\n", + " (dense): Linear(in_features=4096, out_features=4096, bias=False)\n", + " )\n", + " (post_attention_layernorm): RMSNorm()\n", + " (mlp): MLP(\n", + " (dense_h_to_4h): Linear(in_features=4096, out_features=27392, bias=False)\n", + " (dense_4h_to_h): Linear(in_features=13696, out_features=4096, bias=False)\n", + " )\n", + " )\n", + " )\n", + " (final_layernorm): RMSNorm()\n", + " )\n", + " (output_layer): Linear(in_features=4096, out_features=65024, bias=False)\n", + " )\n", + ")" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "LORA_RANK = 8\n", + "LORA_ALPHA = 32\n", + "LORA_DROPOUT_P = 0.1\n", + "lora_config = LoRAConfig(\n", + " replace_modules=LORA_TARGET_MODULES,\n", + " rank=LORA_RANK,\n", + " lora_alpha=LORA_ALPHA,\n", + " lora_dropout=LORA_DROPOUT_P)\n", + "logger.info(f\"lora_config: {lora_config}\")\n", + "Swift.prepare_model(model, lora_config)\n", + "#\n", + "show_freeze_layers(model)\n", + "print_model_info(model)\n", + "_p = list(model.parameters())[100]\n", + "logger.info(f\"device: {_p.device}, dtype: {_p.dtype}\")\n", + "model.bfloat16()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 导入Dataset\n", + "The following code is copied from baichuan_sft.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 20:34:50,040 - modelscope - INFO - No subset_name specified, defaulting to the default\n", + "2023-07-02 20:34:50,479 - modelscope - WARNING - Reusing dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 20:34:50,479 - modelscope - INFO - Generating dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 20:34:50,480 - modelscope - INFO - Reusing cached meta-data file: /home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files/8c9e7b1aa666c8840cb938d877f2b99f\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "dac0fb3841854f6f867f0c639c6b2176", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "beada7f3eb734a6485034e666e60285f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Extracting data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 5036/5036 [00:12<00:00, 403.83it/s]\n", + "2023-07-02 20:35:03,823 - modelscope - INFO - No subset_name specified, defaulting to the default\n", + "2023-07-02 20:35:04,269 - modelscope - WARNING - Reusing dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 20:35:04,270 - modelscope - INFO - Generating dataset ms_hackathon_23_agent_train_dev (/home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files)\n", + "2023-07-02 20:35:04,270 - modelscope - 
INFO - Reusing cached meta-data file: /home/hackathon/.cache/modelscope/hub/datasets/modelscope/ms_hackathon_23_agent_train_dev/master/data_files/941b733ec0354c2172a3386d8788bb37\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "82cacd1b06864eabb4e320a93d41691c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "37d5dbf851b745fb90b12cb1e4167732", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Extracting data files: 0it [00:00, ?it/s]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 285/285 [00:00<00:00, 380.76it/s]\n", + "2023-07-02 20:35:05,192 - modelscope - INFO - Dataset Token Length: 888.357487±349.060492, min=48.000000, max=2039.000000, size=4982\n", + "2023-07-02 20:35:05,192 - modelscope - INFO - Dataset Token Length: 928.654804±330.133929, min=74.000000, max=1959.000000, size=281\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[INPUT_IDS] 你是达摩院的ModelScopeGPT(魔搭助手),你是个大语言模型, 是2023年达摩院的工程师训练得到的。你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。1. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://109.199.101.10:1485/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "2. {\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://9.32.64.200:5873/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}}\n", + "\n", + "3. 
{\"plugin_name\": \"modelscope_text-ie\", \"plugin_owner\": \"ModelScopeGPT\", \"plugin_type\": \"default\", \"plugin_schema_for_model\": {\"name\": \"modelscope_text-ie\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"url\": \"http://54.149.78.185:3979/\", \"paths\": [{\"name\": \"modelscope_text-ie\", \"model_id\": \"/damo/nlp_structbert_siamese-uie_chinese-base\", \"method\": \"post\", \"description\": \"针对中文的文本,根据schema要抽取的内容,找出其中对应信息,并用json格式展示\", \"parameters\": [{\"name\": \"text\", \"description\": \"用户输入的文本\", \"required\": \"True\"}, {\"name\": \"schema\", \"description\": \"要抽取信息的json表示\", \"required\": \"True\"}]}]}} \n", + "\n", + "### 用户\n", + "按照给定的schema抽取出下面文本对应的信息\n", + "schema:{\"人物\": null, \"地理位置\": null, \"组织机构\": null}\n", + "近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。 \n", + "\n", + "### 助手\n", + " <|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://9.32.64.200:5873/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n", + "\n", + "[LABLES] ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ ⁇ <|startofthink|>```JSON\n", + "{\"api_name\": \"modelscope_text-ie\", \"url\": \"http://9.32.64.200:5873/damo/nlp_structbert_siamese-uie_chinese-base\", \"parameters\": {\"text\": \"近日,美国政府宣布将对中国1000多种商品加征关税,并威胁进一步加征关税。\", \"schema\": \"{\\\"人物\\\": null, \\\"地理位置\\\": null, \\\"组织机构\\\": null}\"}}\n", + "```<|endofthink|>\n", + "\n", + "<|startofexec|>```JSON\n", + "{\"人物\": [], \"地理位置\": [\"中国\", 
\"美国\"], \"组织机构\": []}\n", + "```<|endofexec|>\n", + "信息抽取结果:{\"人物\": [], \"地理位置\": [\"中国\", \"美国\"], \"组织机构\": []}。我使用的模型是ModelScope的'damo/nlp_structbert_siamese-uie_chinese-base'模型。这是一个基于StructBERT预训练模型微调训练的通用信息抽取模型。\n" + ] + } + ], + "source": [ + "tokenize_function = partial(tokenize_function, tokenizer=tokenizer)\n", + "train_dataset = make_dataset(\"train\", tokenize_function)\n", + "val_dataset = make_dataset(\"validation\", tokenize_function)\n", + "# Data analysis\n", + "stat_dataset(train_dataset)\n", + "stat_dataset(val_dataset)\n", + "data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer)\n", + "print_examples(train_dataset[0], tokenizer)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 配置Config\n", + "The following code is copied from baichuan_sft.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-07-02 20:35:05,244 - modelscope - INFO - work_dir: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505\n" + ] + } + ], + "source": [ + "cfg_file = os.path.join(model_dir, \"configuration.json\")\n", + "#\n", + "BATCH_SIZE = 1\n", + "MAX_EPOCHS = 1\n", + "T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True)\n", + "WORK_DIR = get_work_dir(WORK_DIR)\n", + "EVAL_INTERVAL = 200\n", + "CONFIG = Config({\n", + " \"train\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": BATCH_SIZE,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": True,\n", + " \"drop_last\": True,\n", + " \"pin_memory\": True\n", + " },\n", + " \"max_epochs\": MAX_EPOCHS,\n", + " \"work_dir\": WORK_DIR,\n", + " \"optimizer\": {\n", + " \"type\": \"AdamW\",\n", + " \"lr\": 1e-4,\n", + " \"weight_decay\": 0.01,\n", + " \"options\": {\n", + " \"cumulative_iters\": 16, \"grad_clip\": {\n", + " \"norm_type\": 2,\n", + " \"max_norm\": 2.0\n", + " }\n", + " }\n", + " },\n", + " \"lr_scheduler\": {\n", + " \"type\": \"CosineAnnealingLR\",\n", + " \"T_max\": T_max,\n", + " \"eta_min\": 1e-5,\n", + " \"options\": {\n", + " \"by_epoch\": False,\n", + " \"warmup\": {\n", + " 'type': 'LinearWarmup',\n", + " 'warmup_ratio': 0.1,\n", + " \"warmup_iters\": 200\n", + " }\n", + " }\n", + " },\n", + " \"hooks\": [\n", + " {\"type\": \"CheckpointHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL, \"max_checkpoint_num\": 1},\n", + " {\"type\": \"EvaluationHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL},\n", + " {\"type\": \"BestCkptSaverHook\",\n", + " \"metric_key\": \"acc\",\n", + " \"save_best\": True, \"rule\": \"max\", \"max_checkpoint_num\": 1},\n", + " {\"type\": \"TextLoggerHook\",\n", + " \"by_epoch\": True, # Whether EpochBasedTrainer is used\n", + " \"interval\": 5},\n", + " {\"type\": \"TensorboardHook\", \"by_epoch\": False, \"interval\": 5}\n", + " ]\n", + " },\n", + " \"evaluation\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": BATCH_SIZE,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": False,\n", + " \"drop_last\": False,\n", + " \"pin_memory\": True\n", + " },\n", + " \"metrics\": [\n", + " {\"type\": \"my_metric\", \"vocab_size\": tokenizer.vocab_size}\n", + " ]\n", + " }\n", + "})" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 微调\n", + "The following code is copied from baichuan_sft.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + 
"output_type": "stream", + "text": [ + "2023-07-02 20:35:05,284 - modelscope - INFO - ==========================Training Config Start==========================\n", + "2023-07-02 20:35:05,285 - modelscope - INFO - {\n", + " \"framework\": \"pytorch\",\n", + " \"task\": \"chat\",\n", + " \"pipeline\": {\n", + " \"type\": \"chatglm26b-text-generation\"\n", + " },\n", + " \"allow_remote\": true,\n", + " \"train\": {\n", + " \"hooks\": [\n", + " {\n", + " \"type\": \"TensorboardHook\",\n", + " \"by_epoch\": false,\n", + " \"interval\": 5\n", + " }\n", + " ],\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": 1,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": true,\n", + " \"drop_last\": true,\n", + " \"pin_memory\": true\n", + " },\n", + " \"max_epochs\": 1,\n", + " \"work_dir\": \"/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505\",\n", + " \"optimizer\": {\n", + " \"type\": \"AdamW\",\n", + " \"lr\": 0.0001,\n", + " \"weight_decay\": 0.01,\n", + " \"options\": {\n", + " \"cumulative_iters\": 16,\n", + " \"grad_clip\": {\n", + " \"norm_type\": 2,\n", + " \"max_norm\": 2.0\n", + " }\n", + " }\n", + " },\n", + " \"lr_scheduler\": {\n", + " \"type\": \"CosineAnnealingLR\",\n", + " \"T_max\": 4982,\n", + " \"eta_min\": 1e-05,\n", + " \"options\": {\n", + " \"by_epoch\": false,\n", + " \"warmup\": {\n", + " \"type\": \"LinearWarmup\",\n", + " \"warmup_ratio\": 0.1,\n", + " \"warmup_iters\": 200\n", + " }\n", + " }\n", + " },\n", + " \"checkpoint\": {\n", + " \"period\": {\n", + " \"by_epoch\": false,\n", + " \"interval\": 200,\n", + " \"max_checkpoint_num\": 1\n", + " },\n", + " \"best\": {\n", + " \"metric_key\": \"acc\",\n", + " \"save_best\": true,\n", + " \"rule\": \"max\",\n", + " \"max_checkpoint_num\": 1\n", + " }\n", + " },\n", + " \"logging\": {\n", + " \"by_epoch\": true,\n", + " \"interval\": 5\n", + " }\n", + " },\n", + " \"evaluation\": {\n", + " \"dataloader\": {\n", + " \"batch_size_per_gpu\": 1,\n", + " \"workers_per_gpu\": 1,\n", + " \"shuffle\": false,\n", + " \"drop_last\": false,\n", + " \"pin_memory\": true\n", + " },\n", + " \"metrics\": [\n", + " {\n", + " \"type\": \"my_metric\",\n", + " \"vocab_size\": 64794\n", + " }\n", + " ],\n", + " \"period\": {\n", + " \"by_epoch\": false,\n", + " \"interval\": 200\n", + " }\n", + " }\n", + "}\n", + "2023-07-02 20:35:05,285 - modelscope - INFO - ===========================Training Config End===========================\n", + "2023-07-02 20:35:05,286 - modelscope - WARNING - ('OPTIMIZER', 'default', 'AdamW') not found in ast index file\n", + "2023-07-02 20:35:05,287 - modelscope - WARNING - ('LR_SCHEDULER', 'default', 'CosineAnnealingLR') not found in ast index file\n", + "2023-07-02 20:35:05,289 - modelscope - INFO - Stage: before_run:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: before_train_epoch:\n", + " (LOW ) LrSchedulerHook \n", + " -------------------- \n", + "Stage: before_train_iter:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " -------------------- \n", + "Stage: after_train_iter:\n", + " (ABOVE_NORMAL) OptimizerHook \n", + " (NORMAL ) EvaluationHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: after_train_epoch:\n", + " (NORMAL ) 
EvaluationHook \n", + " (LOW ) LrSchedulerHook \n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: after_val_epoch:\n", + " (VERY_LOW ) TextLoggerHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "Stage: after_run:\n", + " (LOW ) BestCkptSaverHook \n", + " (LOW ) CheckpointHook \n", + " (VERY_LOW ) TensorboardHook \n", + " -------------------- \n", + "2023-07-02 20:35:05,293 - modelscope - INFO - Checkpoints will be saved to /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505\n", + "2023-07-02 20:35:05,296 - modelscope - INFO - Checkpoints will be saved to /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505\n", + "2023-07-02 20:35:05,296 - modelscope - INFO - Text logs will be saved to /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505\n", + "2023-07-02 20:35:05,296 - modelscope - INFO - tensorboard files will be saved to /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/tensorboard_output\n", + "2023-07-02 20:35:09,665 - modelscope - INFO - epoch [1][5/4982]\tlr: 1.000e-05, memory: 9310, loss: 4.4797\n", + "2023-07-02 20:35:11,753 - modelscope - INFO - epoch [1][10/4982]\tlr: 1.000e-05, memory: 9653, loss: 4.4281\n", + "2023-07-02 20:35:15,111 - modelscope - INFO - epoch [1][15/4982]\tlr: 1.000e-05, memory: 11498, loss: 5.4297\n", + "2023-07-02 20:35:18,142 - modelscope - INFO - epoch [1][20/4982]\tlr: 1.225e-05, memory: 12041, loss: 2.6703\n", + "2023-07-02 20:35:21,335 - modelscope - INFO - epoch [1][25/4982]\tlr: 1.450e-05, memory: 12041, loss: 2.5969\n", + "2023-07-02 20:35:24,524 - modelscope - INFO - epoch [1][30/4982]\tlr: 1.675e-05, memory: 12180, loss: 2.7797\n", + "2023-07-02 20:35:27,061 - modelscope - INFO - epoch [1][35/4982]\tlr: 1.900e-05, memory: 12180, loss: 5.0344\n", + "2023-07-02 20:35:29,749 - modelscope - INFO - epoch [1][40/4982]\tlr: 2.125e-05, memory: 12180, loss: 6.1875\n", + "2023-07-02 20:35:32,140 - modelscope - INFO - epoch [1][45/4982]\tlr: 2.350e-05, memory: 12180, loss: 4.5844\n", + "2023-07-02 20:35:35,367 - modelscope - INFO - epoch [1][50/4982]\tlr: 2.575e-05, memory: 12180, loss: 3.3578\n", + "2023-07-02 20:35:37,739 - modelscope - INFO - epoch [1][55/4982]\tlr: 2.800e-05, memory: 12180, loss: 3.0375\n", + "2023-07-02 20:35:41,595 - modelscope - INFO - epoch [1][60/4982]\tlr: 3.025e-05, memory: 12180, loss: 2.7219\n", + "2023-07-02 20:35:44,105 - modelscope - INFO - epoch [1][65/4982]\tlr: 3.250e-05, memory: 12180, loss: 4.8016\n", + "2023-07-02 20:35:46,069 - modelscope - INFO - epoch [1][70/4982]\tlr: 3.475e-05, memory: 12180, loss: 6.9406\n", + "2023-07-02 20:35:48,149 - modelscope - INFO - epoch [1][75/4982]\tlr: 3.700e-05, memory: 12180, loss: 3.2133\n", + "2023-07-02 20:35:50,371 - modelscope - INFO - epoch [1][80/4982]\tlr: 3.925e-05, memory: 12180, loss: 4.3719\n", + "2023-07-02 20:35:53,531 - modelscope - INFO - epoch [1][85/4982]\tlr: 4.150e-05, memory: 12180, loss: 5.8875\n", + "2023-07-02 20:35:55,682 - modelscope - INFO - epoch [1][90/4982]\tlr: 4.375e-05, memory: 12180, loss: 4.9297\n", + "2023-07-02 20:35:57,349 - modelscope - INFO - epoch [1][95/4982]\tlr: 4.600e-05, memory: 12180, loss: 5.8781\n", + "2023-07-02 20:36:00,218 - modelscope - INFO - epoch [1][100/4982]\tlr: 4.825e-05, memory: 12180, loss: 2.4125\n", + "2023-07-02 20:36:02,674 - modelscope - INFO - epoch [1][105/4982]\tlr: 5.050e-05, memory: 12180, loss: 6.7234\n", + 
"2023-07-02 20:36:05,443 - modelscope - INFO - epoch [1][110/4982]\tlr: 5.275e-05, memory: 12180, loss: 3.7437\n", + "2023-07-02 20:36:08,231 - modelscope - INFO - epoch [1][115/4982]\tlr: 5.500e-05, memory: 12180, loss: 4.5187\n", + "2023-07-02 20:36:10,992 - modelscope - INFO - epoch [1][120/4982]\tlr: 5.725e-05, memory: 12180, loss: 4.3281\n", + "2023-07-02 20:36:12,907 - modelscope - INFO - epoch [1][125/4982]\tlr: 5.950e-05, memory: 12180, loss: 4.4422\n", + "2023-07-02 20:36:16,210 - modelscope - INFO - epoch [1][130/4982]\tlr: 6.175e-05, memory: 12992, loss: 5.8688\n", + "2023-07-02 20:36:18,791 - modelscope - INFO - epoch [1][135/4982]\tlr: 6.400e-05, memory: 12992, loss: 3.2531\n", + "2023-07-02 20:36:19,911 - modelscope - INFO - epoch [1][140/4982]\tlr: 6.625e-05, memory: 12992, loss: 5.1781\n", + "2023-07-02 20:36:22,445 - modelscope - INFO - epoch [1][145/4982]\tlr: 6.850e-05, memory: 12992, loss: 3.4523\n", + "2023-07-02 20:36:24,826 - modelscope - INFO - epoch [1][150/4982]\tlr: 7.075e-05, memory: 12992, loss: 4.6125\n", + "2023-07-02 20:36:26,567 - modelscope - INFO - epoch [1][155/4982]\tlr: 7.300e-05, memory: 12992, loss: 4.0859\n", + "2023-07-02 20:36:29,936 - modelscope - INFO - epoch [1][160/4982]\tlr: 7.525e-05, memory: 12992, loss: 3.4937\n", + "2023-07-02 20:36:32,253 - modelscope - INFO - epoch [1][165/4982]\tlr: 7.750e-05, memory: 12992, loss: 5.8266\n", + "2023-07-02 20:36:34,867 - modelscope - INFO - epoch [1][170/4982]\tlr: 7.975e-05, memory: 12992, loss: 2.7047\n", + "2023-07-02 20:36:38,118 - modelscope - INFO - epoch [1][175/4982]\tlr: 8.200e-05, memory: 12992, loss: 2.5844\n", + "2023-07-02 20:36:40,913 - modelscope - INFO - epoch [1][180/4982]\tlr: 8.425e-05, memory: 12992, loss: 3.9641\n", + "2023-07-02 20:36:43,807 - modelscope - INFO - epoch [1][185/4982]\tlr: 8.650e-05, memory: 12992, loss: 3.1375\n", + "2023-07-02 20:36:46,624 - modelscope - INFO - epoch [1][190/4982]\tlr: 8.875e-05, memory: 12992, loss: 3.8813\n", + "2023-07-02 20:36:49,527 - modelscope - INFO - epoch [1][195/4982]\tlr: 9.100e-05, memory: 12992, loss: 3.6156\n", + "2023-07-02 20:36:51,833 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:05<00:00, 4.29it/s]\n", + "2023-07-02 20:37:57,381 - modelscope - INFO - Saving checkpoint at 200 iter\n", + "2023-07-02 20:37:57,410 - modelscope - INFO - Saving checkpoint at 200 iter\n", + "2023-07-02 20:37:57,436 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 12992, evaluation/acc: 0.6542, evaluation/loss: 3.4747, loss: 4.5406\n", + "2023-07-02 20:38:00,375 - modelscope - INFO - epoch [1][205/4982]\tlr: 9.550e-05, memory: 12992, loss: 3.8125\n", + "2023-07-02 20:38:03,071 - modelscope - INFO - epoch [1][210/4982]\tlr: 9.775e-05, memory: 12992, loss: 4.4109\n", + "2023-07-02 20:38:06,715 - modelscope - INFO - epoch [1][215/4982]\tlr: 1.000e-04, memory: 12992, loss: 2.2437\n", + "2023-07-02 20:38:09,499 - modelscope - INFO - epoch [1][220/4982]\tlr: 9.998e-05, memory: 12992, loss: 3.2750\n", + "2023-07-02 20:38:13,188 - modelscope - INFO - epoch [1][225/4982]\tlr: 9.996e-05, memory: 13730, loss: 3.2656\n", + "2023-07-02 20:38:15,237 - modelscope - INFO - epoch [1][230/4982]\tlr: 9.994e-05, memory: 13730, loss: 4.3750\n", + "2023-07-02 20:38:17,706 - modelscope - INFO - epoch [1][235/4982]\tlr: 9.992e-05, memory: 13730, loss: 3.2844\n", + "2023-07-02 20:38:20,429 - modelscope - INFO - epoch [1][240/4982]\tlr: 9.990e-05, memory: 13730, loss: 2.9766\n", 
+ "2023-07-02 20:38:23,127 - modelscope - INFO - epoch [1][245/4982]\tlr: 9.988e-05, memory: 13730, loss: 4.4125\n", + "2023-07-02 20:38:26,058 - modelscope - INFO - epoch [1][250/4982]\tlr: 9.986e-05, memory: 13730, loss: 2.3047\n", + "2023-07-02 20:38:28,740 - modelscope - INFO - epoch [1][255/4982]\tlr: 9.984e-05, memory: 13730, loss: 3.5484\n", + "2023-07-02 20:38:31,332 - modelscope - INFO - epoch [1][260/4982]\tlr: 9.982e-05, memory: 13730, loss: 4.4297\n", + "2023-07-02 20:38:33,632 - modelscope - INFO - epoch [1][265/4982]\tlr: 9.980e-05, memory: 13730, loss: 5.1078\n", + "2023-07-02 20:38:35,634 - modelscope - INFO - epoch [1][270/4982]\tlr: 9.977e-05, memory: 13730, loss: 4.2250\n", + "2023-07-02 20:38:37,731 - modelscope - INFO - epoch [1][275/4982]\tlr: 9.975e-05, memory: 13730, loss: 4.5984\n", + "2023-07-02 20:38:39,950 - modelscope - INFO - epoch [1][280/4982]\tlr: 9.973e-05, memory: 13730, loss: 4.0594\n", + "2023-07-02 20:38:42,470 - modelscope - INFO - epoch [1][285/4982]\tlr: 9.970e-05, memory: 13730, loss: 2.6523\n", + "2023-07-02 20:38:45,483 - modelscope - INFO - epoch [1][290/4982]\tlr: 9.968e-05, memory: 13730, loss: 2.5766\n", + "2023-07-02 20:38:47,773 - modelscope - INFO - epoch [1][295/4982]\tlr: 9.965e-05, memory: 13730, loss: 2.7078\n", + "2023-07-02 20:38:51,126 - modelscope - INFO - epoch [1][300/4982]\tlr: 9.963e-05, memory: 13730, loss: 5.0844\n", + "2023-07-02 20:38:53,948 - modelscope - INFO - epoch [1][305/4982]\tlr: 9.960e-05, memory: 13730, loss: 3.3844\n", + "2023-07-02 20:38:56,666 - modelscope - INFO - epoch [1][310/4982]\tlr: 9.958e-05, memory: 13730, loss: 3.1812\n", + "2023-07-02 20:38:59,269 - modelscope - INFO - epoch [1][315/4982]\tlr: 9.955e-05, memory: 13730, loss: 3.3219\n", + "2023-07-02 20:39:02,576 - modelscope - INFO - epoch [1][320/4982]\tlr: 9.952e-05, memory: 13730, loss: 2.0031\n", + "2023-07-02 20:39:04,494 - modelscope - INFO - epoch [1][325/4982]\tlr: 9.949e-05, memory: 13730, loss: 3.7469\n", + "2023-07-02 20:39:07,068 - modelscope - INFO - epoch [1][330/4982]\tlr: 9.947e-05, memory: 13730, loss: 3.0187\n", + "2023-07-02 20:39:09,719 - modelscope - INFO - epoch [1][335/4982]\tlr: 9.944e-05, memory: 13730, loss: 2.5828\n", + "2023-07-02 20:39:11,755 - modelscope - INFO - epoch [1][340/4982]\tlr: 9.941e-05, memory: 13730, loss: 4.1156\n", + "2023-07-02 20:39:14,258 - modelscope - INFO - epoch [1][345/4982]\tlr: 9.938e-05, memory: 13730, loss: 5.1594\n", + "2023-07-02 20:39:16,436 - modelscope - INFO - epoch [1][350/4982]\tlr: 9.935e-05, memory: 13730, loss: 4.0859\n", + "2023-07-02 20:39:19,643 - modelscope - INFO - epoch [1][355/4982]\tlr: 9.932e-05, memory: 13730, loss: 1.8391\n", + "2023-07-02 20:39:22,779 - modelscope - INFO - epoch [1][360/4982]\tlr: 9.929e-05, memory: 13730, loss: 2.0641\n", + "2023-07-02 20:39:25,402 - modelscope - INFO - epoch [1][365/4982]\tlr: 9.926e-05, memory: 13730, loss: 1.9453\n", + "2023-07-02 20:39:27,813 - modelscope - INFO - epoch [1][370/4982]\tlr: 9.923e-05, memory: 13730, loss: 3.8641\n", + "2023-07-02 20:39:30,315 - modelscope - INFO - epoch [1][375/4982]\tlr: 9.920e-05, memory: 13730, loss: 3.0281\n", + "2023-07-02 20:39:33,075 - modelscope - INFO - epoch [1][380/4982]\tlr: 9.916e-05, memory: 13730, loss: 1.9109\n", + "2023-07-02 20:39:35,539 - modelscope - INFO - epoch [1][385/4982]\tlr: 9.913e-05, memory: 13730, loss: 3.9797\n", + "2023-07-02 20:39:37,804 - modelscope - INFO - epoch [1][390/4982]\tlr: 9.910e-05, memory: 13730, loss: 4.4547\n", + "2023-07-02 20:39:40,277 - modelscope - 
INFO - epoch [1][395/4982]\tlr: 9.906e-05, memory: 13730, loss: 2.4516\n", + "2023-07-02 20:39:43,900 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.25it/s]\n", + "2023-07-02 20:40:50,049 - modelscope - INFO - Saving checkpoint at 400 iter\n", + "2023-07-02 20:40:50,080 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter200_acc0.6542276740074158\n", + "2023-07-02 20:40:50,083 - modelscope - INFO - Saving checkpoint at 400 iter\n", + "2023-07-02 20:40:50,113 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_200\n", + "2023-07-02 20:40:50,115 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 13730, evaluation/acc: 0.6604, evaluation/loss: 3.0119, loss: 2.8062\n", + "2023-07-02 20:40:53,254 - modelscope - INFO - epoch [1][405/4982]\tlr: 9.900e-05, memory: 13730, loss: 3.2422\n", + "2023-07-02 20:40:55,618 - modelscope - INFO - epoch [1][410/4982]\tlr: 9.896e-05, memory: 13730, loss: 4.2297\n", + "2023-07-02 20:40:58,448 - modelscope - INFO - epoch [1][415/4982]\tlr: 9.893e-05, memory: 13730, loss: 3.6063\n", + "2023-07-02 20:41:00,872 - modelscope - INFO - epoch [1][420/4982]\tlr: 9.889e-05, memory: 13730, loss: 4.6141\n", + "2023-07-02 20:41:02,997 - modelscope - INFO - epoch [1][425/4982]\tlr: 9.885e-05, memory: 13730, loss: 5.2875\n", + "2023-07-02 20:41:06,866 - modelscope - INFO - epoch [1][430/4982]\tlr: 9.882e-05, memory: 13730, loss: 2.2109\n", + "2023-07-02 20:41:09,155 - modelscope - INFO - epoch [1][435/4982]\tlr: 9.878e-05, memory: 13730, loss: 2.5969\n", + "2023-07-02 20:41:11,158 - modelscope - INFO - epoch [1][440/4982]\tlr: 9.874e-05, memory: 13730, loss: 3.1453\n", + "2023-07-02 20:41:13,695 - modelscope - INFO - epoch [1][445/4982]\tlr: 9.870e-05, memory: 13730, loss: 4.1219\n", + "2023-07-02 20:41:16,481 - modelscope - INFO - epoch [1][450/4982]\tlr: 9.867e-05, memory: 13730, loss: 3.0016\n", + "2023-07-02 20:41:19,595 - modelscope - INFO - epoch [1][455/4982]\tlr: 9.863e-05, memory: 13730, loss: 2.0086\n", + "2023-07-02 20:41:22,798 - modelscope - INFO - epoch [1][460/4982]\tlr: 9.859e-05, memory: 13730, loss: 1.6477\n", + "2023-07-02 20:41:24,516 - modelscope - INFO - epoch [1][465/4982]\tlr: 9.855e-05, memory: 13730, loss: 5.0250\n", + "2023-07-02 20:41:26,807 - modelscope - INFO - epoch [1][470/4982]\tlr: 9.851e-05, memory: 13730, loss: 5.0906\n", + "2023-07-02 20:41:29,550 - modelscope - INFO - epoch [1][475/4982]\tlr: 9.847e-05, memory: 13730, loss: 3.1719\n", + "2023-07-02 20:41:31,558 - modelscope - INFO - epoch [1][480/4982]\tlr: 9.843e-05, memory: 13730, loss: 3.0094\n", + "2023-07-02 20:41:34,367 - modelscope - INFO - epoch [1][485/4982]\tlr: 9.839e-05, memory: 13730, loss: 1.8000\n", + "2023-07-02 20:41:37,084 - modelscope - INFO - epoch [1][490/4982]\tlr: 9.834e-05, memory: 13730, loss: 3.2406\n", + "2023-07-02 20:41:39,602 - modelscope - INFO - epoch [1][495/4982]\tlr: 9.830e-05, memory: 13730, loss: 2.9141\n", + "2023-07-02 20:41:42,010 - modelscope - INFO - epoch [1][500/4982]\tlr: 9.826e-05, memory: 13730, loss: 3.1969\n", + "2023-07-02 20:41:44,328 - modelscope - INFO - epoch [1][505/4982]\tlr: 9.822e-05, memory: 13730, loss: 2.4125\n", + "2023-07-02 20:41:47,138 - modelscope - INFO - epoch [1][510/4982]\tlr: 9.817e-05, memory: 13730, loss: 2.3031\n", + "2023-07-02 20:41:50,494 - modelscope - INFO - epoch [1][515/4982]\tlr: 
9.813e-05, memory: 13730, loss: 2.2938\n", + "2023-07-02 20:41:52,746 - modelscope - INFO - epoch [1][520/4982]\tlr: 9.808e-05, memory: 13730, loss: 3.8672\n", + "2023-07-02 20:41:54,958 - modelscope - INFO - epoch [1][525/4982]\tlr: 9.804e-05, memory: 13730, loss: 3.2156\n", + "2023-07-02 20:41:57,466 - modelscope - INFO - epoch [1][530/4982]\tlr: 9.799e-05, memory: 13730, loss: 3.0344\n", + "2023-07-02 20:42:00,137 - modelscope - INFO - epoch [1][535/4982]\tlr: 9.795e-05, memory: 13730, loss: 4.9406\n", + "2023-07-02 20:42:02,774 - modelscope - INFO - epoch [1][540/4982]\tlr: 9.790e-05, memory: 13730, loss: 3.3563\n", + "2023-07-02 20:42:05,715 - modelscope - INFO - epoch [1][545/4982]\tlr: 9.786e-05, memory: 13730, loss: 1.4797\n", + "2023-07-02 20:42:07,960 - modelscope - INFO - epoch [1][550/4982]\tlr: 9.781e-05, memory: 13730, loss: 3.8781\n", + "2023-07-02 20:42:11,011 - modelscope - INFO - epoch [1][555/4982]\tlr: 9.776e-05, memory: 13730, loss: 2.9297\n", + "2023-07-02 20:42:13,456 - modelscope - INFO - epoch [1][560/4982]\tlr: 9.771e-05, memory: 13730, loss: 3.8203\n", + "2023-07-02 20:42:15,443 - modelscope - INFO - epoch [1][565/4982]\tlr: 9.767e-05, memory: 13730, loss: 2.0219\n", + "2023-07-02 20:42:18,846 - modelscope - INFO - epoch [1][570/4982]\tlr: 9.762e-05, memory: 13730, loss: 1.9281\n", + "2023-07-02 20:42:22,121 - modelscope - INFO - epoch [1][575/4982]\tlr: 9.757e-05, memory: 13730, loss: 2.6750\n", + "2023-07-02 20:42:25,145 - modelscope - INFO - epoch [1][580/4982]\tlr: 9.752e-05, memory: 13730, loss: 1.7852\n", + "2023-07-02 20:42:27,316 - modelscope - INFO - epoch [1][585/4982]\tlr: 9.747e-05, memory: 13730, loss: 2.8047\n", + "2023-07-02 20:42:29,441 - modelscope - INFO - epoch [1][590/4982]\tlr: 9.742e-05, memory: 13730, loss: 2.6773\n", + "2023-07-02 20:42:32,360 - modelscope - INFO - epoch [1][595/4982]\tlr: 9.737e-05, memory: 13730, loss: 1.9812\n", + "2023-07-02 20:42:35,221 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.24it/s]\n", + "2023-07-02 20:43:41,520 - modelscope - INFO - Saving checkpoint at 600 iter\n", + "2023-07-02 20:43:41,550 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter400_acc0.6604225635528564\n", + "2023-07-02 20:43:41,552 - modelscope - INFO - Saving checkpoint at 600 iter\n", + "2023-07-02 20:43:41,582 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_400\n", + "2023-07-02 20:43:41,584 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 13730, evaluation/acc: 0.6708, evaluation/loss: 2.5856, loss: 2.3328\n", + "2023-07-02 20:43:43,999 - modelscope - INFO - epoch [1][605/4982]\tlr: 9.726e-05, memory: 13730, loss: 2.6875\n", + "2023-07-02 20:43:47,119 - modelscope - INFO - epoch [1][610/4982]\tlr: 9.721e-05, memory: 13730, loss: 1.4031\n", + "2023-07-02 20:43:48,961 - modelscope - INFO - epoch [1][615/4982]\tlr: 9.716e-05, memory: 13730, loss: 2.9422\n", + "2023-07-02 20:43:51,931 - modelscope - INFO - epoch [1][620/4982]\tlr: 9.711e-05, memory: 13730, loss: 2.2016\n", + "2023-07-02 20:43:55,085 - modelscope - INFO - epoch [1][625/4982]\tlr: 9.705e-05, memory: 13730, loss: 2.4344\n", + "2023-07-02 20:43:57,859 - modelscope - INFO - epoch [1][630/4982]\tlr: 9.700e-05, memory: 13730, loss: 1.9727\n", + "2023-07-02 20:44:00,652 - modelscope - INFO - epoch [1][635/4982]\tlr: 9.695e-05, memory: 13730, loss: 
3.5047\n", + "2023-07-02 20:44:03,525 - modelscope - INFO - epoch [1][640/4982]\tlr: 9.689e-05, memory: 13730, loss: 2.3672\n", + "2023-07-02 20:44:06,457 - modelscope - INFO - epoch [1][645/4982]\tlr: 9.684e-05, memory: 13730, loss: 2.7797\n", + "2023-07-02 20:44:08,691 - modelscope - INFO - epoch [1][650/4982]\tlr: 9.678e-05, memory: 13730, loss: 1.9734\n", + "2023-07-02 20:44:11,608 - modelscope - INFO - epoch [1][655/4982]\tlr: 9.673e-05, memory: 13730, loss: 2.0531\n", + "2023-07-02 20:44:13,499 - modelscope - INFO - epoch [1][660/4982]\tlr: 9.667e-05, memory: 13730, loss: 2.8078\n", + "2023-07-02 20:44:15,767 - modelscope - INFO - epoch [1][665/4982]\tlr: 9.661e-05, memory: 13730, loss: 3.3703\n", + "2023-07-02 20:44:18,064 - modelscope - INFO - epoch [1][670/4982]\tlr: 9.656e-05, memory: 13730, loss: 3.2156\n", + "2023-07-02 20:44:20,955 - modelscope - INFO - epoch [1][675/4982]\tlr: 9.650e-05, memory: 13830, loss: 3.4172\n", + "2023-07-02 20:44:24,557 - modelscope - INFO - epoch [1][680/4982]\tlr: 9.644e-05, memory: 13830, loss: 1.4219\n", + "2023-07-02 20:44:27,433 - modelscope - INFO - epoch [1][685/4982]\tlr: 9.638e-05, memory: 13830, loss: 3.5094\n", + "2023-07-02 20:44:30,177 - modelscope - INFO - epoch [1][690/4982]\tlr: 9.632e-05, memory: 13830, loss: 2.3234\n", + "2023-07-02 20:44:32,790 - modelscope - INFO - epoch [1][695/4982]\tlr: 9.627e-05, memory: 13830, loss: 1.7906\n", + "2023-07-02 20:44:35,003 - modelscope - INFO - epoch [1][700/4982]\tlr: 9.621e-05, memory: 13830, loss: 3.4016\n", + "2023-07-02 20:44:38,237 - modelscope - INFO - epoch [1][705/4982]\tlr: 9.615e-05, memory: 13830, loss: 2.1484\n", + "2023-07-02 20:44:42,304 - modelscope - INFO - epoch [1][710/4982]\tlr: 9.609e-05, memory: 13830, loss: 1.9828\n", + "2023-07-02 20:44:45,293 - modelscope - INFO - epoch [1][715/4982]\tlr: 9.602e-05, memory: 13830, loss: 1.6828\n", + "2023-07-02 20:44:48,385 - modelscope - INFO - epoch [1][720/4982]\tlr: 9.596e-05, memory: 13830, loss: 2.0969\n", + "2023-07-02 20:44:50,846 - modelscope - INFO - epoch [1][725/4982]\tlr: 9.590e-05, memory: 13830, loss: 3.2031\n", + "2023-07-02 20:44:53,572 - modelscope - INFO - epoch [1][730/4982]\tlr: 9.584e-05, memory: 13830, loss: 2.8055\n", + "2023-07-02 20:44:54,918 - modelscope - INFO - epoch [1][735/4982]\tlr: 9.578e-05, memory: 13830, loss: 5.0641\n", + "2023-07-02 20:44:58,220 - modelscope - INFO - epoch [1][740/4982]\tlr: 9.572e-05, memory: 13830, loss: 2.5125\n", + "2023-07-02 20:45:01,363 - modelscope - INFO - epoch [1][745/4982]\tlr: 9.565e-05, memory: 13830, loss: 1.5758\n", + "2023-07-02 20:45:03,990 - modelscope - INFO - epoch [1][750/4982]\tlr: 9.559e-05, memory: 13830, loss: 2.3664\n", + "2023-07-02 20:45:06,603 - modelscope - INFO - epoch [1][755/4982]\tlr: 9.553e-05, memory: 13830, loss: 1.8188\n", + "2023-07-02 20:45:09,658 - modelscope - INFO - epoch [1][760/4982]\tlr: 9.546e-05, memory: 13830, loss: 2.6125\n", + "2023-07-02 20:45:12,102 - modelscope - INFO - epoch [1][765/4982]\tlr: 9.540e-05, memory: 13830, loss: 1.7031\n", + "2023-07-02 20:45:14,836 - modelscope - INFO - epoch [1][770/4982]\tlr: 9.533e-05, memory: 13830, loss: 1.7359\n", + "2023-07-02 20:45:17,436 - modelscope - INFO - epoch [1][775/4982]\tlr: 9.527e-05, memory: 13830, loss: 1.4336\n", + "2023-07-02 20:45:20,163 - modelscope - INFO - epoch [1][780/4982]\tlr: 9.520e-05, memory: 13830, loss: 2.5672\n", + "2023-07-02 20:45:23,429 - modelscope - INFO - epoch [1][785/4982]\tlr: 9.513e-05, memory: 13830, loss: 1.9164\n", + "2023-07-02 20:45:26,285 - 
modelscope - INFO - epoch [1][790/4982]\tlr: 9.507e-05, memory: 13830, loss: 2.3203\n", + "2023-07-02 20:45:28,656 - modelscope - INFO - epoch [1][795/4982]\tlr: 9.500e-05, memory: 13830, loss: 2.7672\n", + "2023-07-02 20:45:31,279 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 20:46:37,656 - modelscope - INFO - Saving checkpoint at 800 iter\n", + "2023-07-02 20:46:37,685 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter600_acc0.6708211898803711\n", + "2023-07-02 20:46:37,687 - modelscope - INFO - Saving checkpoint at 800 iter\n", + "2023-07-02 20:46:37,715 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_600\n", + "2023-07-02 20:46:37,718 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 13830, evaluation/acc: 0.6881, evaluation/loss: 2.2625, loss: 2.6750\n", + "2023-07-02 20:46:40,639 - modelscope - INFO - epoch [1][805/4982]\tlr: 9.486e-05, memory: 13830, loss: 1.8695\n", + "2023-07-02 20:46:43,092 - modelscope - INFO - epoch [1][810/4982]\tlr: 9.480e-05, memory: 13830, loss: 2.8734\n", + "2023-07-02 20:46:46,484 - modelscope - INFO - epoch [1][815/4982]\tlr: 9.473e-05, memory: 13830, loss: 1.7906\n", + "2023-07-02 20:46:49,542 - modelscope - INFO - epoch [1][820/4982]\tlr: 9.466e-05, memory: 13830, loss: 2.6391\n", + "2023-07-02 20:46:52,581 - modelscope - INFO - epoch [1][825/4982]\tlr: 9.459e-05, memory: 13830, loss: 2.3250\n", + "2023-07-02 20:46:55,248 - modelscope - INFO - epoch [1][830/4982]\tlr: 9.452e-05, memory: 13830, loss: 2.3188\n", + "2023-07-02 20:46:58,323 - modelscope - INFO - epoch [1][835/4982]\tlr: 9.445e-05, memory: 13830, loss: 1.8852\n", + "2023-07-02 20:47:00,885 - modelscope - INFO - epoch [1][840/4982]\tlr: 9.438e-05, memory: 13830, loss: 2.5203\n", + "2023-07-02 20:47:03,739 - modelscope - INFO - epoch [1][845/4982]\tlr: 9.431e-05, memory: 13830, loss: 2.2563\n", + "2023-07-02 20:47:06,494 - modelscope - INFO - epoch [1][850/4982]\tlr: 9.424e-05, memory: 13830, loss: 2.4937\n", + "2023-07-02 20:47:08,653 - modelscope - INFO - epoch [1][855/4982]\tlr: 9.416e-05, memory: 13830, loss: 2.1844\n", + "2023-07-02 20:47:12,100 - modelscope - INFO - epoch [1][860/4982]\tlr: 9.409e-05, memory: 13830, loss: 2.6281\n", + "2023-07-02 20:47:14,954 - modelscope - INFO - epoch [1][865/4982]\tlr: 9.402e-05, memory: 13830, loss: 1.7703\n", + "2023-07-02 20:47:17,549 - modelscope - INFO - epoch [1][870/4982]\tlr: 9.395e-05, memory: 13830, loss: 3.3172\n", + "2023-07-02 20:47:20,094 - modelscope - INFO - epoch [1][875/4982]\tlr: 9.387e-05, memory: 13830, loss: 2.2594\n", + "2023-07-02 20:47:23,556 - modelscope - INFO - epoch [1][880/4982]\tlr: 9.380e-05, memory: 13830, loss: 2.6352\n", + "2023-07-02 20:47:25,327 - modelscope - INFO - epoch [1][885/4982]\tlr: 9.373e-05, memory: 13830, loss: 2.7180\n", + "2023-07-02 20:47:28,177 - modelscope - INFO - epoch [1][890/4982]\tlr: 9.365e-05, memory: 13830, loss: 2.3750\n", + "2023-07-02 20:47:30,955 - modelscope - INFO - epoch [1][895/4982]\tlr: 9.358e-05, memory: 13830, loss: 1.7266\n", + "2023-07-02 20:47:34,940 - modelscope - INFO - epoch [1][900/4982]\tlr: 9.350e-05, memory: 13830, loss: 2.1984\n", + "2023-07-02 20:47:37,402 - modelscope - INFO - epoch [1][905/4982]\tlr: 9.343e-05, memory: 13830, loss: 2.2336\n", + "2023-07-02 20:47:40,011 - modelscope - INFO - epoch 
[1][910/4982]\tlr: 9.335e-05, memory: 13830, loss: 2.7844\n", + "2023-07-02 20:47:42,601 - modelscope - INFO - epoch [1][915/4982]\tlr: 9.327e-05, memory: 13830, loss: 3.2297\n", + "2023-07-02 20:47:44,837 - modelscope - INFO - epoch [1][920/4982]\tlr: 9.320e-05, memory: 13830, loss: 2.4188\n", + "2023-07-02 20:47:47,897 - modelscope - INFO - epoch [1][925/4982]\tlr: 9.312e-05, memory: 13830, loss: 1.6863\n", + "2023-07-02 20:47:50,418 - modelscope - INFO - epoch [1][930/4982]\tlr: 9.304e-05, memory: 13830, loss: 3.9219\n", + "2023-07-02 20:47:52,672 - modelscope - INFO - epoch [1][935/4982]\tlr: 9.296e-05, memory: 13830, loss: 1.6926\n", + "2023-07-02 20:47:55,286 - modelscope - INFO - epoch [1][940/4982]\tlr: 9.289e-05, memory: 13830, loss: 1.7281\n", + "2023-07-02 20:47:59,111 - modelscope - INFO - epoch [1][945/4982]\tlr: 9.281e-05, memory: 13830, loss: 1.1969\n", + "2023-07-02 20:48:01,843 - modelscope - INFO - epoch [1][950/4982]\tlr: 9.273e-05, memory: 13830, loss: 1.6633\n", + "2023-07-02 20:48:04,387 - modelscope - INFO - epoch [1][955/4982]\tlr: 9.265e-05, memory: 13830, loss: 2.2094\n", + "2023-07-02 20:48:06,681 - modelscope - INFO - epoch [1][960/4982]\tlr: 9.257e-05, memory: 13830, loss: 2.1922\n", + "2023-07-02 20:48:09,850 - modelscope - INFO - epoch [1][965/4982]\tlr: 9.249e-05, memory: 13830, loss: 1.3594\n", + "2023-07-02 20:48:12,651 - modelscope - INFO - epoch [1][970/4982]\tlr: 9.241e-05, memory: 13830, loss: 1.7945\n", + "2023-07-02 20:48:15,819 - modelscope - INFO - epoch [1][975/4982]\tlr: 9.233e-05, memory: 13830, loss: 1.7203\n", + "2023-07-02 20:48:18,453 - modelscope - INFO - epoch [1][980/4982]\tlr: 9.225e-05, memory: 13830, loss: 1.8453\n", + "2023-07-02 20:48:20,628 - modelscope - INFO - epoch [1][985/4982]\tlr: 9.216e-05, memory: 13830, loss: 1.8086\n", + "2023-07-02 20:48:22,947 - modelscope - INFO - epoch [1][990/4982]\tlr: 9.208e-05, memory: 13830, loss: 2.6445\n", + "2023-07-02 20:48:25,309 - modelscope - INFO - epoch [1][995/4982]\tlr: 9.200e-05, memory: 13830, loss: 3.2172\n", + "2023-07-02 20:48:28,028 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 20:49:34,496 - modelscope - INFO - Saving checkpoint at 1000 iter\n", + "2023-07-02 20:49:34,522 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter800_acc0.6881153583526611\n", + "2023-07-02 20:49:34,524 - modelscope - INFO - Saving checkpoint at 1000 iter\n", + "2023-07-02 20:49:34,548 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_800\n", + "2023-07-02 20:49:34,551 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 13830, evaluation/acc: 0.7003, evaluation/loss: 2.0893, loss: 2.7594\n", + "2023-07-02 20:49:37,631 - modelscope - INFO - epoch [1][1005/4982]\tlr: 9.183e-05, memory: 13830, loss: 1.3188\n", + "2023-07-02 20:49:40,106 - modelscope - INFO - epoch [1][1010/4982]\tlr: 9.175e-05, memory: 13830, loss: 2.3094\n", + "2023-07-02 20:49:42,559 - modelscope - INFO - epoch [1][1015/4982]\tlr: 9.167e-05, memory: 13830, loss: 2.4734\n", + "2023-07-02 20:49:44,919 - modelscope - INFO - epoch [1][1020/4982]\tlr: 9.158e-05, memory: 13830, loss: 2.0336\n", + "2023-07-02 20:49:49,264 - modelscope - INFO - epoch [1][1025/4982]\tlr: 9.150e-05, memory: 13861, loss: 1.0523\n", + "2023-07-02 20:49:51,204 - modelscope - INFO - epoch [1][1030/4982]\tlr: 
9.141e-05, memory: 13861, loss: 3.1086\n", + "2023-07-02 20:49:53,066 - modelscope - INFO - epoch [1][1035/4982]\tlr: 9.133e-05, memory: 13861, loss: 2.3414\n", + "2023-07-02 20:49:56,035 - modelscope - INFO - epoch [1][1040/4982]\tlr: 9.124e-05, memory: 13861, loss: 2.2359\n", + "2023-07-02 20:49:59,351 - modelscope - INFO - epoch [1][1045/4982]\tlr: 9.116e-05, memory: 13861, loss: 1.9051\n", + "2023-07-02 20:50:01,989 - modelscope - INFO - epoch [1][1050/4982]\tlr: 9.107e-05, memory: 13861, loss: 1.5266\n", + "2023-07-02 20:50:04,982 - modelscope - INFO - epoch [1][1055/4982]\tlr: 9.098e-05, memory: 13861, loss: 2.5000\n", + "2023-07-02 20:50:07,348 - modelscope - INFO - epoch [1][1060/4982]\tlr: 9.090e-05, memory: 13861, loss: 2.9164\n", + "2023-07-02 20:50:10,149 - modelscope - INFO - epoch [1][1065/4982]\tlr: 9.081e-05, memory: 13861, loss: 2.1641\n", + "2023-07-02 20:50:13,289 - modelscope - INFO - epoch [1][1070/4982]\tlr: 9.072e-05, memory: 13861, loss: 2.7469\n", + "2023-07-02 20:50:16,220 - modelscope - INFO - epoch [1][1075/4982]\tlr: 9.063e-05, memory: 13861, loss: 2.2922\n", + "2023-07-02 20:50:18,255 - modelscope - INFO - epoch [1][1080/4982]\tlr: 9.054e-05, memory: 13861, loss: 3.7016\n", + "2023-07-02 20:50:21,566 - modelscope - INFO - epoch [1][1085/4982]\tlr: 9.046e-05, memory: 13861, loss: 1.1164\n", + "2023-07-02 20:50:24,961 - modelscope - INFO - epoch [1][1090/4982]\tlr: 9.037e-05, memory: 13861, loss: 1.5523\n", + "2023-07-02 20:50:28,072 - modelscope - INFO - epoch [1][1095/4982]\tlr: 9.028e-05, memory: 13861, loss: 1.9781\n", + "2023-07-02 20:50:31,178 - modelscope - INFO - epoch [1][1100/4982]\tlr: 9.019e-05, memory: 13861, loss: 2.0867\n", + "2023-07-02 20:50:33,103 - modelscope - INFO - epoch [1][1105/4982]\tlr: 9.010e-05, memory: 13861, loss: 2.9258\n", + "2023-07-02 20:50:37,069 - modelscope - INFO - epoch [1][1110/4982]\tlr: 9.001e-05, memory: 14281, loss: 1.8297\n", + "2023-07-02 20:50:39,077 - modelscope - INFO - epoch [1][1115/4982]\tlr: 8.992e-05, memory: 14281, loss: 2.1539\n", + "2023-07-02 20:50:41,028 - modelscope - INFO - epoch [1][1120/4982]\tlr: 8.982e-05, memory: 14281, loss: 2.4891\n", + "2023-07-02 20:50:43,285 - modelscope - INFO - epoch [1][1125/4982]\tlr: 8.973e-05, memory: 14281, loss: 1.7930\n", + "2023-07-02 20:50:46,047 - modelscope - INFO - epoch [1][1130/4982]\tlr: 8.964e-05, memory: 14281, loss: 1.1984\n", + "2023-07-02 20:50:49,011 - modelscope - INFO - epoch [1][1135/4982]\tlr: 8.955e-05, memory: 14281, loss: 3.1102\n", + "2023-07-02 20:50:51,386 - modelscope - INFO - epoch [1][1140/4982]\tlr: 8.946e-05, memory: 14281, loss: 2.2969\n", + "2023-07-02 20:50:54,463 - modelscope - INFO - epoch [1][1145/4982]\tlr: 8.936e-05, memory: 14281, loss: 1.7891\n", + "2023-07-02 20:50:56,539 - modelscope - INFO - epoch [1][1150/4982]\tlr: 8.927e-05, memory: 14281, loss: 2.6641\n", + "2023-07-02 20:50:58,715 - modelscope - INFO - epoch [1][1155/4982]\tlr: 8.918e-05, memory: 14281, loss: 2.5141\n", + "2023-07-02 20:51:01,359 - modelscope - INFO - epoch [1][1160/4982]\tlr: 8.908e-05, memory: 14281, loss: 1.7031\n", + "2023-07-02 20:51:04,218 - modelscope - INFO - epoch [1][1165/4982]\tlr: 8.899e-05, memory: 14281, loss: 2.7891\n", + "2023-07-02 20:51:07,009 - modelscope - INFO - epoch [1][1170/4982]\tlr: 8.889e-05, memory: 14281, loss: 1.6977\n", + "2023-07-02 20:51:09,989 - modelscope - INFO - epoch [1][1175/4982]\tlr: 8.880e-05, memory: 14281, loss: 1.7984\n", + "2023-07-02 20:51:13,347 - modelscope - INFO - epoch [1][1180/4982]\tlr: 8.870e-05, 
memory: 14281, loss: 1.7750\n", + "2023-07-02 20:51:16,349 - modelscope - INFO - epoch [1][1185/4982]\tlr: 8.861e-05, memory: 14281, loss: 2.2219\n", + "2023-07-02 20:51:18,901 - modelscope - INFO - epoch [1][1190/4982]\tlr: 8.851e-05, memory: 14281, loss: 2.1070\n", + "2023-07-02 20:51:22,332 - modelscope - INFO - epoch [1][1195/4982]\tlr: 8.841e-05, memory: 14281, loss: 1.3805\n", + "2023-07-02 20:51:25,298 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 20:52:31,792 - modelscope - INFO - Saving checkpoint at 1200 iter\n", + "2023-07-02 20:52:31,820 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter1000_acc0.7003207802772522\n", + "2023-07-02 20:52:31,822 - modelscope - INFO - Saving checkpoint at 1200 iter\n", + "2023-07-02 20:52:31,848 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_1000\n", + "2023-07-02 20:52:31,851 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14281, evaluation/acc: 0.7126, evaluation/loss: 1.9764, loss: 1.4297\n", + "2023-07-02 20:52:35,250 - modelscope - INFO - epoch [1][1205/4982]\tlr: 8.822e-05, memory: 14281, loss: 1.4805\n", + "2023-07-02 20:52:38,308 - modelscope - INFO - epoch [1][1210/4982]\tlr: 8.812e-05, memory: 14281, loss: 1.6289\n", + "2023-07-02 20:52:40,236 - modelscope - INFO - epoch [1][1215/4982]\tlr: 8.803e-05, memory: 14281, loss: 1.6109\n", + "2023-07-02 20:52:42,979 - modelscope - INFO - epoch [1][1220/4982]\tlr: 8.793e-05, memory: 14281, loss: 1.8672\n", + "2023-07-02 20:52:45,670 - modelscope - INFO - epoch [1][1225/4982]\tlr: 8.783e-05, memory: 14281, loss: 1.7875\n", + "2023-07-02 20:52:48,769 - modelscope - INFO - epoch [1][1230/4982]\tlr: 8.773e-05, memory: 14281, loss: 2.9453\n", + "2023-07-02 20:52:51,329 - modelscope - INFO - epoch [1][1235/4982]\tlr: 8.763e-05, memory: 14281, loss: 3.7453\n", + "2023-07-02 20:52:54,457 - modelscope - INFO - epoch [1][1240/4982]\tlr: 8.753e-05, memory: 14281, loss: 1.6602\n", + "2023-07-02 20:52:57,272 - modelscope - INFO - epoch [1][1245/4982]\tlr: 8.743e-05, memory: 14281, loss: 1.9398\n", + "2023-07-02 20:52:59,875 - modelscope - INFO - epoch [1][1250/4982]\tlr: 8.733e-05, memory: 14281, loss: 2.6437\n", + "2023-07-02 20:53:03,234 - modelscope - INFO - epoch [1][1255/4982]\tlr: 8.723e-05, memory: 14281, loss: 1.9438\n", + "2023-07-02 20:53:05,817 - modelscope - INFO - epoch [1][1260/4982]\tlr: 8.713e-05, memory: 14281, loss: 2.0344\n", + "2023-07-02 20:53:07,576 - modelscope - INFO - epoch [1][1265/4982]\tlr: 8.703e-05, memory: 14281, loss: 3.1516\n", + "2023-07-02 20:53:10,222 - modelscope - INFO - epoch [1][1270/4982]\tlr: 8.693e-05, memory: 14281, loss: 1.7117\n", + "2023-07-02 20:53:14,014 - modelscope - INFO - epoch [1][1275/4982]\tlr: 8.683e-05, memory: 14281, loss: 1.1664\n", + "2023-07-02 20:53:16,657 - modelscope - INFO - epoch [1][1280/4982]\tlr: 8.673e-05, memory: 14281, loss: 2.4438\n", + "2023-07-02 20:53:19,474 - modelscope - INFO - epoch [1][1285/4982]\tlr: 8.663e-05, memory: 14281, loss: 1.6219\n", + "2023-07-02 20:53:22,505 - modelscope - INFO - epoch [1][1290/4982]\tlr: 8.652e-05, memory: 14281, loss: 1.4367\n", + "2023-07-02 20:53:25,260 - modelscope - INFO - epoch [1][1295/4982]\tlr: 8.642e-05, memory: 14281, loss: 2.8367\n", + "2023-07-02 20:53:27,856 - modelscope - INFO - epoch [1][1300/4982]\tlr: 8.632e-05, 
memory: 14281, loss: 2.7094\n", + "2023-07-02 20:53:30,269 - modelscope - INFO - epoch [1][1305/4982]\tlr: 8.621e-05, memory: 14281, loss: 2.2687\n", + "2023-07-02 20:53:32,850 - modelscope - INFO - epoch [1][1310/4982]\tlr: 8.611e-05, memory: 14281, loss: 1.6922\n", + "2023-07-02 20:53:35,441 - modelscope - INFO - epoch [1][1315/4982]\tlr: 8.601e-05, memory: 14281, loss: 1.6664\n", + "2023-07-02 20:53:38,415 - modelscope - INFO - epoch [1][1320/4982]\tlr: 8.590e-05, memory: 14281, loss: 1.8898\n", + "2023-07-02 20:53:41,871 - modelscope - INFO - epoch [1][1325/4982]\tlr: 8.580e-05, memory: 14281, loss: 1.3605\n", + "2023-07-02 20:53:44,517 - modelscope - INFO - epoch [1][1330/4982]\tlr: 8.569e-05, memory: 14281, loss: 1.8219\n", + "2023-07-02 20:53:46,642 - modelscope - INFO - epoch [1][1335/4982]\tlr: 8.559e-05, memory: 14281, loss: 2.2359\n", + "2023-07-02 20:53:49,682 - modelscope - INFO - epoch [1][1340/4982]\tlr: 8.548e-05, memory: 14281, loss: 1.8867\n", + "2023-07-02 20:53:52,314 - modelscope - INFO - epoch [1][1345/4982]\tlr: 8.538e-05, memory: 14281, loss: 1.0359\n", + "2023-07-02 20:53:53,796 - modelscope - INFO - epoch [1][1350/4982]\tlr: 8.527e-05, memory: 14281, loss: 3.0266\n", + "2023-07-02 20:53:55,582 - modelscope - INFO - epoch [1][1355/4982]\tlr: 8.516e-05, memory: 14281, loss: 3.4328\n", + "2023-07-02 20:53:57,793 - modelscope - INFO - epoch [1][1360/4982]\tlr: 8.506e-05, memory: 14281, loss: 1.6180\n", + "2023-07-02 20:54:00,871 - modelscope - INFO - epoch [1][1365/4982]\tlr: 8.495e-05, memory: 14281, loss: 1.6867\n", + "2023-07-02 20:54:03,738 - modelscope - INFO - epoch [1][1370/4982]\tlr: 8.484e-05, memory: 14281, loss: 1.8242\n", + "2023-07-02 20:54:05,352 - modelscope - INFO - epoch [1][1375/4982]\tlr: 8.474e-05, memory: 14281, loss: 3.2016\n", + "2023-07-02 20:54:08,417 - modelscope - INFO - epoch [1][1380/4982]\tlr: 8.463e-05, memory: 14281, loss: 1.9574\n", + "2023-07-02 20:54:11,057 - modelscope - INFO - epoch [1][1385/4982]\tlr: 8.452e-05, memory: 14281, loss: 2.2539\n", + "2023-07-02 20:54:13,691 - modelscope - INFO - epoch [1][1390/4982]\tlr: 8.441e-05, memory: 14281, loss: 1.7277\n", + "2023-07-02 20:54:17,235 - modelscope - INFO - epoch [1][1395/4982]\tlr: 8.430e-05, memory: 14281, loss: 1.1039\n", + "2023-07-02 20:54:18,839 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 20:55:25,409 - modelscope - INFO - Saving checkpoint at 1400 iter\n", + "2023-07-02 20:55:25,440 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter1200_acc0.7125999927520752\n", + "2023-07-02 20:55:25,442 - modelscope - INFO - Saving checkpoint at 1400 iter\n", + "2023-07-02 20:55:25,472 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_1200\n", + "2023-07-02 20:55:25,475 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14281, evaluation/acc: 0.7218, evaluation/loss: 1.9104, loss: 1.8773\n", + "2023-07-02 20:55:28,676 - modelscope - INFO - epoch [1][1405/4982]\tlr: 8.408e-05, memory: 14281, loss: 2.2473\n", + "2023-07-02 20:55:32,047 - modelscope - INFO - epoch [1][1410/4982]\tlr: 8.397e-05, memory: 14281, loss: 1.2844\n", + "2023-07-02 20:55:34,358 - modelscope - INFO - epoch [1][1415/4982]\tlr: 8.386e-05, memory: 14281, loss: 2.6406\n", + "2023-07-02 20:55:37,290 - modelscope - INFO - epoch [1][1420/4982]\tlr: 8.375e-05, 
memory: 14281, loss: 1.2020\n", + "2023-07-02 20:55:39,572 - modelscope - INFO - epoch [1][1425/4982]\tlr: 8.364e-05, memory: 14281, loss: 2.3109\n", + "2023-07-02 20:55:41,133 - modelscope - INFO - epoch [1][1430/4982]\tlr: 8.353e-05, memory: 14281, loss: 3.6844\n", + "2023-07-02 20:55:44,293 - modelscope - INFO - epoch [1][1435/4982]\tlr: 8.342e-05, memory: 14281, loss: 1.2117\n", + "2023-07-02 20:55:47,573 - modelscope - INFO - epoch [1][1440/4982]\tlr: 8.331e-05, memory: 14281, loss: 1.3582\n", + "2023-07-02 20:55:49,943 - modelscope - INFO - epoch [1][1445/4982]\tlr: 8.320e-05, memory: 14281, loss: 1.8289\n", + "2023-07-02 20:55:52,281 - modelscope - INFO - epoch [1][1450/4982]\tlr: 8.309e-05, memory: 14281, loss: 1.6055\n", + "2023-07-02 20:55:55,483 - modelscope - INFO - epoch [1][1455/4982]\tlr: 8.297e-05, memory: 14281, loss: 0.7688\n", + "2023-07-02 20:55:57,759 - modelscope - INFO - epoch [1][1460/4982]\tlr: 8.286e-05, memory: 14281, loss: 2.2945\n", + "2023-07-02 20:56:00,237 - modelscope - INFO - epoch [1][1465/4982]\tlr: 8.275e-05, memory: 14281, loss: 1.8000\n", + "2023-07-02 20:56:03,402 - modelscope - INFO - epoch [1][1470/4982]\tlr: 8.264e-05, memory: 14281, loss: 1.0266\n", + "2023-07-02 20:56:04,994 - modelscope - INFO - epoch [1][1475/4982]\tlr: 8.252e-05, memory: 14281, loss: 2.0094\n", + "2023-07-02 20:56:06,787 - modelscope - INFO - epoch [1][1480/4982]\tlr: 8.241e-05, memory: 14281, loss: 1.9977\n", + "2023-07-02 20:56:09,900 - modelscope - INFO - epoch [1][1485/4982]\tlr: 8.230e-05, memory: 14281, loss: 2.0945\n", + "2023-07-02 20:56:12,226 - modelscope - INFO - epoch [1][1490/4982]\tlr: 8.218e-05, memory: 14281, loss: 2.9172\n", + "2023-07-02 20:56:14,763 - modelscope - INFO - epoch [1][1495/4982]\tlr: 8.207e-05, memory: 14281, loss: 1.8367\n", + "2023-07-02 20:56:17,535 - modelscope - INFO - epoch [1][1500/4982]\tlr: 8.195e-05, memory: 14281, loss: 1.4617\n", + "2023-07-02 20:56:19,733 - modelscope - INFO - epoch [1][1505/4982]\tlr: 8.184e-05, memory: 14281, loss: 1.9328\n", + "2023-07-02 20:56:22,653 - modelscope - INFO - epoch [1][1510/4982]\tlr: 8.172e-05, memory: 14281, loss: 1.5078\n", + "2023-07-02 20:56:26,133 - modelscope - INFO - epoch [1][1515/4982]\tlr: 8.161e-05, memory: 14281, loss: 2.1977\n", + "2023-07-02 20:56:28,551 - modelscope - INFO - epoch [1][1520/4982]\tlr: 8.149e-05, memory: 14281, loss: 2.2246\n", + "2023-07-02 20:56:31,182 - modelscope - INFO - epoch [1][1525/4982]\tlr: 8.138e-05, memory: 14281, loss: 1.9840\n", + "2023-07-02 20:56:33,710 - modelscope - INFO - epoch [1][1530/4982]\tlr: 8.126e-05, memory: 14281, loss: 1.5406\n", + "2023-07-02 20:56:36,337 - modelscope - INFO - epoch [1][1535/4982]\tlr: 8.114e-05, memory: 14281, loss: 1.9930\n", + "2023-07-02 20:56:39,530 - modelscope - INFO - epoch [1][1540/4982]\tlr: 8.103e-05, memory: 14281, loss: 1.8547\n", + "2023-07-02 20:56:42,288 - modelscope - INFO - epoch [1][1545/4982]\tlr: 8.091e-05, memory: 14281, loss: 1.2977\n", + "2023-07-02 20:56:44,838 - modelscope - INFO - epoch [1][1550/4982]\tlr: 8.079e-05, memory: 14281, loss: 1.9984\n", + "2023-07-02 20:56:46,590 - modelscope - INFO - epoch [1][1555/4982]\tlr: 8.068e-05, memory: 14281, loss: 3.7969\n", + "2023-07-02 20:56:49,311 - modelscope - INFO - epoch [1][1560/4982]\tlr: 8.056e-05, memory: 14281, loss: 3.0336\n", + "2023-07-02 20:56:52,158 - modelscope - INFO - epoch [1][1565/4982]\tlr: 8.044e-05, memory: 14281, loss: 1.2789\n", + "2023-07-02 20:56:54,583 - modelscope - INFO - epoch [1][1570/4982]\tlr: 8.032e-05, memory: 14281, 
loss: 2.0461\n", + "2023-07-02 20:56:57,318 - modelscope - INFO - epoch [1][1575/4982]\tlr: 8.020e-05, memory: 14281, loss: 1.3301\n", + "2023-07-02 20:57:00,187 - modelscope - INFO - epoch [1][1580/4982]\tlr: 8.008e-05, memory: 14281, loss: 1.4945\n", + "2023-07-02 20:57:02,809 - modelscope - INFO - epoch [1][1585/4982]\tlr: 7.997e-05, memory: 14281, loss: 1.7984\n", + "2023-07-02 20:57:05,103 - modelscope - INFO - epoch [1][1590/4982]\tlr: 7.985e-05, memory: 14281, loss: 2.2133\n", + "2023-07-02 20:57:07,880 - modelscope - INFO - epoch [1][1595/4982]\tlr: 7.973e-05, memory: 14281, loss: 1.4664\n", + "2023-07-02 20:57:10,754 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 20:58:17,336 - modelscope - INFO - Saving checkpoint at 1600 iter\n", + "2023-07-02 20:58:17,364 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter1400_acc0.7218371033668518\n", + "2023-07-02 20:58:17,366 - modelscope - INFO - Saving checkpoint at 1600 iter\n", + "2023-07-02 20:58:17,392 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_1400\n", + "2023-07-02 20:58:17,395 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14281, evaluation/acc: 0.7349, evaluation/loss: 1.8596, loss: 0.7406\n", + "2023-07-02 20:58:19,762 - modelscope - INFO - epoch [1][1605/4982]\tlr: 7.949e-05, memory: 14281, loss: 2.4625\n", + "2023-07-02 20:58:22,187 - modelscope - INFO - epoch [1][1610/4982]\tlr: 7.937e-05, memory: 14281, loss: 2.0211\n", + "2023-07-02 20:58:24,593 - modelscope - INFO - epoch [1][1615/4982]\tlr: 7.925e-05, memory: 14281, loss: 1.8141\n", + "2023-07-02 20:58:26,348 - modelscope - INFO - epoch [1][1620/4982]\tlr: 7.913e-05, memory: 14281, loss: 2.8254\n", + "2023-07-02 20:58:28,996 - modelscope - INFO - epoch [1][1625/4982]\tlr: 7.900e-05, memory: 14281, loss: 1.3973\n", + "2023-07-02 20:58:31,382 - modelscope - INFO - epoch [1][1630/4982]\tlr: 7.888e-05, memory: 14281, loss: 2.4805\n", + "2023-07-02 20:58:34,123 - modelscope - INFO - epoch [1][1635/4982]\tlr: 7.876e-05, memory: 14281, loss: 1.2414\n", + "2023-07-02 20:58:37,249 - modelscope - INFO - epoch [1][1640/4982]\tlr: 7.864e-05, memory: 14281, loss: 1.7254\n", + "2023-07-02 20:58:40,060 - modelscope - INFO - epoch [1][1645/4982]\tlr: 7.852e-05, memory: 14281, loss: 2.1672\n", + "2023-07-02 20:58:42,200 - modelscope - INFO - epoch [1][1650/4982]\tlr: 7.840e-05, memory: 14281, loss: 2.4047\n", + "2023-07-02 20:58:44,560 - modelscope - INFO - epoch [1][1655/4982]\tlr: 7.827e-05, memory: 14281, loss: 1.7063\n", + "2023-07-02 20:58:47,535 - modelscope - INFO - epoch [1][1660/4982]\tlr: 7.815e-05, memory: 14281, loss: 1.3406\n", + "2023-07-02 20:58:50,161 - modelscope - INFO - epoch [1][1665/4982]\tlr: 7.803e-05, memory: 14281, loss: 2.4453\n", + "2023-07-02 20:58:52,380 - modelscope - INFO - epoch [1][1670/4982]\tlr: 7.791e-05, memory: 14281, loss: 1.7500\n", + "2023-07-02 20:58:54,351 - modelscope - INFO - epoch [1][1675/4982]\tlr: 7.778e-05, memory: 14281, loss: 2.8453\n", + "2023-07-02 20:58:55,966 - modelscope - INFO - epoch [1][1680/4982]\tlr: 7.766e-05, memory: 14281, loss: 1.8719\n", + "2023-07-02 20:58:58,457 - modelscope - INFO - epoch [1][1685/4982]\tlr: 7.754e-05, memory: 14281, loss: 2.1156\n", + "2023-07-02 20:59:01,212 - modelscope - INFO - epoch [1][1690/4982]\tlr: 7.741e-05, memory: 14281, loss: 
1.7188\n", + "2023-07-02 20:59:04,057 - modelscope - INFO - epoch [1][1695/4982]\tlr: 7.729e-05, memory: 14281, loss: 2.5672\n", + "2023-07-02 20:59:07,177 - modelscope - INFO - epoch [1][1700/4982]\tlr: 7.716e-05, memory: 14281, loss: 1.0508\n", + "2023-07-02 20:59:09,355 - modelscope - INFO - epoch [1][1705/4982]\tlr: 7.704e-05, memory: 14281, loss: 1.8687\n", + "2023-07-02 20:59:11,209 - modelscope - INFO - epoch [1][1710/4982]\tlr: 7.691e-05, memory: 14281, loss: 2.7281\n", + "2023-07-02 20:59:14,101 - modelscope - INFO - epoch [1][1715/4982]\tlr: 7.679e-05, memory: 14281, loss: 1.0727\n", + "2023-07-02 20:59:16,660 - modelscope - INFO - epoch [1][1720/4982]\tlr: 7.666e-05, memory: 14281, loss: 1.6773\n", + "2023-07-02 20:59:18,798 - modelscope - INFO - epoch [1][1725/4982]\tlr: 7.654e-05, memory: 14281, loss: 2.3687\n", + "2023-07-02 20:59:20,724 - modelscope - INFO - epoch [1][1730/4982]\tlr: 7.641e-05, memory: 14281, loss: 1.9219\n", + "2023-07-02 20:59:23,591 - modelscope - INFO - epoch [1][1735/4982]\tlr: 7.629e-05, memory: 14281, loss: 1.5344\n", + "2023-07-02 20:59:27,214 - modelscope - INFO - epoch [1][1740/4982]\tlr: 7.616e-05, memory: 14281, loss: 0.5793\n", + "2023-07-02 20:59:29,708 - modelscope - INFO - epoch [1][1745/4982]\tlr: 7.603e-05, memory: 14281, loss: 1.4609\n", + "2023-07-02 20:59:32,082 - modelscope - INFO - epoch [1][1750/4982]\tlr: 7.591e-05, memory: 14281, loss: 1.0852\n", + "2023-07-02 20:59:34,683 - modelscope - INFO - epoch [1][1755/4982]\tlr: 7.578e-05, memory: 14281, loss: 1.5297\n", + "2023-07-02 20:59:36,962 - modelscope - INFO - epoch [1][1760/4982]\tlr: 7.565e-05, memory: 14281, loss: 2.9937\n", + "2023-07-02 20:59:39,715 - modelscope - INFO - epoch [1][1765/4982]\tlr: 7.553e-05, memory: 14281, loss: 2.1242\n", + "2023-07-02 20:59:42,455 - modelscope - INFO - epoch [1][1770/4982]\tlr: 7.540e-05, memory: 14281, loss: 2.3789\n", + "2023-07-02 20:59:45,020 - modelscope - INFO - epoch [1][1775/4982]\tlr: 7.527e-05, memory: 14281, loss: 1.8289\n", + "2023-07-02 20:59:46,865 - modelscope - INFO - epoch [1][1780/4982]\tlr: 7.515e-05, memory: 14281, loss: 2.0219\n", + "2023-07-02 20:59:50,367 - modelscope - INFO - epoch [1][1785/4982]\tlr: 7.502e-05, memory: 14281, loss: 2.6187\n", + "2023-07-02 20:59:52,626 - modelscope - INFO - epoch [1][1790/4982]\tlr: 7.489e-05, memory: 14281, loss: 2.3051\n", + "2023-07-02 20:59:54,711 - modelscope - INFO - epoch [1][1795/4982]\tlr: 7.476e-05, memory: 14281, loss: 2.3953\n", + "2023-07-02 20:59:56,419 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 21:01:03,053 - modelscope - INFO - Saving checkpoint at 1800 iter\n", + "2023-07-02 21:01:03,080 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter1600_acc0.7349275350570679\n", + "2023-07-02 21:01:03,082 - modelscope - INFO - Saving checkpoint at 1800 iter\n", + "2023-07-02 21:01:03,106 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_1600\n", + "2023-07-02 21:01:03,109 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14281, evaluation/acc: 0.7401, evaluation/loss: 1.8176, loss: 2.8625\n", + "2023-07-02 21:01:05,753 - modelscope - INFO - epoch [1][1805/4982]\tlr: 7.450e-05, memory: 14281, loss: 1.8352\n", + "2023-07-02 21:01:08,030 - modelscope - INFO - epoch [1][1810/4982]\tlr: 7.438e-05, memory: 14281, loss: 
2.1453\n", + "2023-07-02 21:01:10,702 - modelscope - INFO - epoch [1][1815/4982]\tlr: 7.425e-05, memory: 14281, loss: 1.6281\n", + "2023-07-02 21:01:13,348 - modelscope - INFO - epoch [1][1820/4982]\tlr: 7.412e-05, memory: 14281, loss: 2.3008\n", + "2023-07-02 21:01:16,272 - modelscope - INFO - epoch [1][1825/4982]\tlr: 7.399e-05, memory: 14281, loss: 2.2414\n", + "2023-07-02 21:01:19,067 - modelscope - INFO - epoch [1][1830/4982]\tlr: 7.386e-05, memory: 14281, loss: 2.8672\n", + "2023-07-02 21:01:21,555 - modelscope - INFO - epoch [1][1835/4982]\tlr: 7.373e-05, memory: 14281, loss: 2.3172\n", + "2023-07-02 21:01:24,755 - modelscope - INFO - epoch [1][1840/4982]\tlr: 7.360e-05, memory: 14281, loss: 0.9746\n", + "2023-07-02 21:01:27,186 - modelscope - INFO - epoch [1][1845/4982]\tlr: 7.347e-05, memory: 14281, loss: 1.4992\n", + "2023-07-02 21:01:30,804 - modelscope - INFO - epoch [1][1850/4982]\tlr: 7.334e-05, memory: 14281, loss: 2.0031\n", + "2023-07-02 21:01:34,075 - modelscope - INFO - epoch [1][1855/4982]\tlr: 7.321e-05, memory: 14281, loss: 1.3766\n", + "2023-07-02 21:01:36,465 - modelscope - INFO - epoch [1][1860/4982]\tlr: 7.308e-05, memory: 14281, loss: 2.3203\n", + "2023-07-02 21:01:39,721 - modelscope - INFO - epoch [1][1865/4982]\tlr: 7.295e-05, memory: 14281, loss: 2.5617\n", + "2023-07-02 21:01:43,444 - modelscope - INFO - epoch [1][1870/4982]\tlr: 7.281e-05, memory: 14281, loss: 0.8551\n", + "2023-07-02 21:01:46,641 - modelscope - INFO - epoch [1][1875/4982]\tlr: 7.268e-05, memory: 14281, loss: 2.1117\n", + "2023-07-02 21:01:49,075 - modelscope - INFO - epoch [1][1880/4982]\tlr: 7.255e-05, memory: 14281, loss: 1.9414\n", + "2023-07-02 21:01:51,733 - modelscope - INFO - epoch [1][1885/4982]\tlr: 7.242e-05, memory: 14281, loss: 1.3805\n", + "2023-07-02 21:01:54,863 - modelscope - INFO - epoch [1][1890/4982]\tlr: 7.229e-05, memory: 14281, loss: 2.0562\n", + "2023-07-02 21:01:56,818 - modelscope - INFO - epoch [1][1895/4982]\tlr: 7.216e-05, memory: 14281, loss: 2.2391\n", + "2023-07-02 21:01:59,267 - modelscope - INFO - epoch [1][1900/4982]\tlr: 7.202e-05, memory: 14281, loss: 2.3027\n", + "2023-07-02 21:02:01,900 - modelscope - INFO - epoch [1][1905/4982]\tlr: 7.189e-05, memory: 14281, loss: 1.8711\n", + "2023-07-02 21:02:05,392 - modelscope - INFO - epoch [1][1910/4982]\tlr: 7.176e-05, memory: 14281, loss: 1.0352\n", + "2023-07-02 21:02:07,808 - modelscope - INFO - epoch [1][1915/4982]\tlr: 7.163e-05, memory: 14281, loss: 1.9133\n", + "2023-07-02 21:02:10,597 - modelscope - INFO - epoch [1][1920/4982]\tlr: 7.149e-05, memory: 14281, loss: 1.5922\n", + "2023-07-02 21:02:13,358 - modelscope - INFO - epoch [1][1925/4982]\tlr: 7.136e-05, memory: 14281, loss: 2.3203\n", + "2023-07-02 21:02:15,288 - modelscope - INFO - epoch [1][1930/4982]\tlr: 7.123e-05, memory: 14281, loss: 1.5707\n", + "2023-07-02 21:02:17,292 - modelscope - INFO - epoch [1][1935/4982]\tlr: 7.110e-05, memory: 14281, loss: 2.6484\n", + "2023-07-02 21:02:20,830 - modelscope - INFO - epoch [1][1940/4982]\tlr: 7.096e-05, memory: 14281, loss: 0.7172\n", + "2023-07-02 21:02:22,944 - modelscope - INFO - epoch [1][1945/4982]\tlr: 7.083e-05, memory: 14281, loss: 2.1992\n", + "2023-07-02 21:02:25,967 - modelscope - INFO - epoch [1][1950/4982]\tlr: 7.069e-05, memory: 14281, loss: 1.1105\n", + "2023-07-02 21:02:28,446 - modelscope - INFO - epoch [1][1955/4982]\tlr: 7.056e-05, memory: 14281, loss: 1.2781\n", + "2023-07-02 21:02:31,222 - modelscope - INFO - epoch [1][1960/4982]\tlr: 7.043e-05, memory: 14281, loss: 2.7156\n", + 
"2023-07-02 21:02:33,689 - modelscope - INFO - epoch [1][1965/4982]\tlr: 7.029e-05, memory: 14281, loss: 2.1977\n", + "2023-07-02 21:02:36,277 - modelscope - INFO - epoch [1][1970/4982]\tlr: 7.016e-05, memory: 14281, loss: 1.8652\n", + "2023-07-02 21:02:39,628 - modelscope - INFO - epoch [1][1975/4982]\tlr: 7.002e-05, memory: 14281, loss: 0.9414\n", + "2023-07-02 21:02:41,404 - modelscope - INFO - epoch [1][1980/4982]\tlr: 6.989e-05, memory: 14281, loss: 2.2672\n", + "2023-07-02 21:02:44,260 - modelscope - INFO - epoch [1][1985/4982]\tlr: 6.975e-05, memory: 14281, loss: 2.0039\n", + "2023-07-02 21:02:46,214 - modelscope - INFO - epoch [1][1990/4982]\tlr: 6.962e-05, memory: 14281, loss: 2.1391\n", + "2023-07-02 21:02:48,596 - modelscope - INFO - epoch [1][1995/4982]\tlr: 6.948e-05, memory: 14281, loss: 2.2766\n", + "2023-07-02 21:02:51,578 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.24it/s]\n", + "2023-07-02 21:03:57,832 - modelscope - INFO - Saving checkpoint at 2000 iter\n", + "2023-07-02 21:03:57,857 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter1800_acc0.7400715351104736\n", + "2023-07-02 21:03:57,860 - modelscope - INFO - Saving checkpoint at 2000 iter\n", + "2023-07-02 21:03:57,883 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_1800\n", + "2023-07-02 21:03:57,885 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14281, evaluation/acc: 0.7442, evaluation/loss: 1.7936, loss: 1.5309\n", + "2023-07-02 21:04:00,725 - modelscope - INFO - epoch [1][2005/4982]\tlr: 6.921e-05, memory: 14281, loss: 1.2211\n", + "2023-07-02 21:04:02,917 - modelscope - INFO - epoch [1][2010/4982]\tlr: 6.908e-05, memory: 14281, loss: 2.4078\n", + "2023-07-02 21:04:05,194 - modelscope - INFO - epoch [1][2015/4982]\tlr: 6.894e-05, memory: 14281, loss: 2.0891\n", + "2023-07-02 21:04:06,825 - modelscope - INFO - epoch [1][2020/4982]\tlr: 6.881e-05, memory: 14281, loss: 2.4773\n", + "2023-07-02 21:04:09,109 - modelscope - INFO - epoch [1][2025/4982]\tlr: 6.867e-05, memory: 14281, loss: 1.7293\n", + "2023-07-02 21:04:12,824 - modelscope - INFO - epoch [1][2030/4982]\tlr: 6.854e-05, memory: 14281, loss: 0.9602\n", + "2023-07-02 21:04:15,460 - modelscope - INFO - epoch [1][2035/4982]\tlr: 6.840e-05, memory: 14281, loss: 1.4973\n", + "2023-07-02 21:04:18,540 - modelscope - INFO - epoch [1][2040/4982]\tlr: 6.826e-05, memory: 14281, loss: 2.0359\n", + "2023-07-02 21:04:21,265 - modelscope - INFO - epoch [1][2045/4982]\tlr: 6.813e-05, memory: 14281, loss: 1.5586\n", + "2023-07-02 21:04:24,566 - modelscope - INFO - epoch [1][2050/4982]\tlr: 6.799e-05, memory: 14281, loss: 1.3984\n", + "2023-07-02 21:04:27,716 - modelscope - INFO - epoch [1][2055/4982]\tlr: 6.785e-05, memory: 14281, loss: 1.6156\n", + "2023-07-02 21:04:29,775 - modelscope - INFO - epoch [1][2060/4982]\tlr: 6.772e-05, memory: 14281, loss: 2.4398\n", + "2023-07-02 21:04:33,407 - modelscope - INFO - epoch [1][2065/4982]\tlr: 6.758e-05, memory: 14281, loss: 1.2191\n", + "2023-07-02 21:04:35,873 - modelscope - INFO - epoch [1][2070/4982]\tlr: 6.744e-05, memory: 14281, loss: 1.5117\n", + "2023-07-02 21:04:38,406 - modelscope - INFO - epoch [1][2075/4982]\tlr: 6.731e-05, memory: 14281, loss: 1.5688\n", + "2023-07-02 21:04:40,452 - modelscope - INFO - epoch [1][2080/4982]\tlr: 6.717e-05, memory: 14281, loss: 1.3535\n", + 
"2023-07-02 21:04:42,464 - modelscope - INFO - epoch [1][2085/4982]\tlr: 6.703e-05, memory: 14281, loss: 3.2313\n", + "2023-07-02 21:04:44,395 - modelscope - INFO - epoch [1][2090/4982]\tlr: 6.689e-05, memory: 14281, loss: 1.8109\n", + "2023-07-02 21:04:47,097 - modelscope - INFO - epoch [1][2095/4982]\tlr: 6.676e-05, memory: 14281, loss: 2.6109\n", + "2023-07-02 21:04:50,488 - modelscope - INFO - epoch [1][2100/4982]\tlr: 6.662e-05, memory: 14281, loss: 2.3133\n", + "2023-07-02 21:04:53,478 - modelscope - INFO - epoch [1][2105/4982]\tlr: 6.648e-05, memory: 14281, loss: 1.5336\n", + "2023-07-02 21:04:56,669 - modelscope - INFO - epoch [1][2110/4982]\tlr: 6.634e-05, memory: 14281, loss: 1.8234\n", + "2023-07-02 21:05:00,502 - modelscope - INFO - epoch [1][2115/4982]\tlr: 6.620e-05, memory: 14329, loss: 3.0766\n", + "2023-07-02 21:05:02,541 - modelscope - INFO - epoch [1][2120/4982]\tlr: 6.607e-05, memory: 14329, loss: 1.3789\n", + "2023-07-02 21:05:05,161 - modelscope - INFO - epoch [1][2125/4982]\tlr: 6.593e-05, memory: 14329, loss: 1.5391\n", + "2023-07-02 21:05:07,009 - modelscope - INFO - epoch [1][2130/4982]\tlr: 6.579e-05, memory: 14329, loss: 2.6172\n", + "2023-07-02 21:05:10,521 - modelscope - INFO - epoch [1][2135/4982]\tlr: 6.565e-05, memory: 14329, loss: 1.7750\n", + "2023-07-02 21:05:13,068 - modelscope - INFO - epoch [1][2140/4982]\tlr: 6.551e-05, memory: 14329, loss: 2.1238\n", + "2023-07-02 21:05:15,637 - modelscope - INFO - epoch [1][2145/4982]\tlr: 6.537e-05, memory: 14329, loss: 2.5039\n", + "2023-07-02 21:05:18,628 - modelscope - INFO - epoch [1][2150/4982]\tlr: 6.523e-05, memory: 14329, loss: 1.6203\n", + "2023-07-02 21:05:21,523 - modelscope - INFO - epoch [1][2155/4982]\tlr: 6.510e-05, memory: 14329, loss: 0.9555\n", + "2023-07-02 21:05:24,213 - modelscope - INFO - epoch [1][2160/4982]\tlr: 6.496e-05, memory: 14329, loss: 2.1133\n", + "2023-07-02 21:05:27,402 - modelscope - INFO - epoch [1][2165/4982]\tlr: 6.482e-05, memory: 14329, loss: 1.1963\n", + "2023-07-02 21:05:29,840 - modelscope - INFO - epoch [1][2170/4982]\tlr: 6.468e-05, memory: 14329, loss: 1.3637\n", + "2023-07-02 21:05:32,853 - modelscope - INFO - epoch [1][2175/4982]\tlr: 6.454e-05, memory: 14329, loss: 1.7201\n", + "2023-07-02 21:05:35,628 - modelscope - INFO - epoch [1][2180/4982]\tlr: 6.440e-05, memory: 14329, loss: 2.0109\n", + "2023-07-02 21:05:38,589 - modelscope - INFO - epoch [1][2185/4982]\tlr: 6.426e-05, memory: 14329, loss: 1.2418\n", + "2023-07-02 21:05:40,918 - modelscope - INFO - epoch [1][2190/4982]\tlr: 6.412e-05, memory: 14329, loss: 2.0758\n", + "2023-07-02 21:05:43,421 - modelscope - INFO - epoch [1][2195/4982]\tlr: 6.398e-05, memory: 14329, loss: 1.7094\n", + "2023-07-02 21:05:46,523 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.21it/s]\n", + "2023-07-02 21:06:53,212 - modelscope - INFO - Saving checkpoint at 2200 iter\n", + "2023-07-02 21:06:53,240 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter2000_acc0.7442383766174316\n", + "2023-07-02 21:06:53,243 - modelscope - INFO - Saving checkpoint at 2200 iter\n", + "2023-07-02 21:06:53,269 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_2000\n", + "2023-07-02 21:06:53,272 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14329, evaluation/acc: 0.7494, evaluation/loss: 1.7767, loss: 2.1570\n", + 
"2023-07-02 21:06:55,998 - modelscope - INFO - epoch [1][2205/4982]\tlr: 6.370e-05, memory: 14329, loss: 1.3469\n", + "2023-07-02 21:06:59,535 - modelscope - INFO - epoch [1][2210/4982]\tlr: 6.356e-05, memory: 14329, loss: 1.3730\n", + "2023-07-02 21:07:01,992 - modelscope - INFO - epoch [1][2215/4982]\tlr: 6.342e-05, memory: 14329, loss: 2.2066\n", + "2023-07-02 21:07:04,789 - modelscope - INFO - epoch [1][2220/4982]\tlr: 6.328e-05, memory: 14329, loss: 1.7098\n", + "2023-07-02 21:07:07,714 - modelscope - INFO - epoch [1][2225/4982]\tlr: 6.314e-05, memory: 14329, loss: 2.0953\n", + "2023-07-02 21:07:09,812 - modelscope - INFO - epoch [1][2230/4982]\tlr: 6.300e-05, memory: 14329, loss: 2.3914\n", + "2023-07-02 21:07:12,315 - modelscope - INFO - epoch [1][2235/4982]\tlr: 6.286e-05, memory: 14329, loss: 2.6797\n", + "2023-07-02 21:07:15,918 - modelscope - INFO - epoch [1][2240/4982]\tlr: 6.272e-05, memory: 14329, loss: 1.3217\n", + "2023-07-02 21:07:19,044 - modelscope - INFO - epoch [1][2245/4982]\tlr: 6.258e-05, memory: 14329, loss: 1.4527\n", + "2023-07-02 21:07:21,636 - modelscope - INFO - epoch [1][2250/4982]\tlr: 6.244e-05, memory: 14329, loss: 2.1770\n", + "2023-07-02 21:07:23,761 - modelscope - INFO - epoch [1][2255/4982]\tlr: 6.230e-05, memory: 14329, loss: 1.8191\n", + "2023-07-02 21:07:25,994 - modelscope - INFO - epoch [1][2260/4982]\tlr: 6.216e-05, memory: 14329, loss: 1.3582\n", + "2023-07-02 21:07:28,770 - modelscope - INFO - epoch [1][2265/4982]\tlr: 6.202e-05, memory: 14329, loss: 1.0121\n", + "2023-07-02 21:07:32,193 - modelscope - INFO - epoch [1][2270/4982]\tlr: 6.188e-05, memory: 14329, loss: 1.0039\n", + "2023-07-02 21:07:34,881 - modelscope - INFO - epoch [1][2275/4982]\tlr: 6.174e-05, memory: 14329, loss: 1.2828\n", + "2023-07-02 21:07:37,688 - modelscope - INFO - epoch [1][2280/4982]\tlr: 6.159e-05, memory: 14329, loss: 1.4516\n", + "2023-07-02 21:07:40,006 - modelscope - INFO - epoch [1][2285/4982]\tlr: 6.145e-05, memory: 14329, loss: 1.5963\n", + "2023-07-02 21:07:42,993 - modelscope - INFO - epoch [1][2290/4982]\tlr: 6.131e-05, memory: 14329, loss: 2.7687\n", + "2023-07-02 21:07:46,133 - modelscope - INFO - epoch [1][2295/4982]\tlr: 6.117e-05, memory: 14329, loss: 1.5977\n", + "2023-07-02 21:07:47,508 - modelscope - INFO - epoch [1][2300/4982]\tlr: 6.103e-05, memory: 14329, loss: 2.5945\n", + "2023-07-02 21:07:50,902 - modelscope - INFO - epoch [1][2305/4982]\tlr: 6.089e-05, memory: 14329, loss: 1.2125\n", + "2023-07-02 21:07:53,059 - modelscope - INFO - epoch [1][2310/4982]\tlr: 6.075e-05, memory: 14329, loss: 2.2883\n", + "2023-07-02 21:07:56,237 - modelscope - INFO - epoch [1][2315/4982]\tlr: 6.061e-05, memory: 14329, loss: 0.8787\n", + "2023-07-02 21:07:59,345 - modelscope - INFO - epoch [1][2320/4982]\tlr: 6.046e-05, memory: 14329, loss: 2.6320\n", + "2023-07-02 21:08:02,587 - modelscope - INFO - epoch [1][2325/4982]\tlr: 6.032e-05, memory: 14329, loss: 1.4213\n", + "2023-07-02 21:08:04,652 - modelscope - INFO - epoch [1][2330/4982]\tlr: 6.018e-05, memory: 14329, loss: 2.7547\n", + "2023-07-02 21:08:07,208 - modelscope - INFO - epoch [1][2335/4982]\tlr: 6.004e-05, memory: 14329, loss: 2.1891\n", + "2023-07-02 21:08:09,836 - modelscope - INFO - epoch [1][2340/4982]\tlr: 5.990e-05, memory: 14329, loss: 1.9711\n", + "2023-07-02 21:08:12,642 - modelscope - INFO - epoch [1][2345/4982]\tlr: 5.976e-05, memory: 14329, loss: 1.2281\n", + "2023-07-02 21:08:15,772 - modelscope - INFO - epoch [1][2350/4982]\tlr: 5.961e-05, memory: 14329, loss: 1.1650\n", + "2023-07-02 
21:08:18,568 - modelscope - INFO - epoch [1][2355/4982]\tlr: 5.947e-05, memory: 14329, loss: 1.0545\n", + "2023-07-02 21:08:21,580 - modelscope - INFO - epoch [1][2360/4982]\tlr: 5.933e-05, memory: 14329, loss: 2.3699\n", + "2023-07-02 21:08:24,345 - modelscope - INFO - epoch [1][2365/4982]\tlr: 5.919e-05, memory: 14329, loss: 1.7188\n", + "2023-07-02 21:08:27,132 - modelscope - INFO - epoch [1][2370/4982]\tlr: 5.905e-05, memory: 14329, loss: 0.8174\n", + "2023-07-02 21:08:28,995 - modelscope - INFO - epoch [1][2375/4982]\tlr: 5.891e-05, memory: 14329, loss: 2.0500\n", + "2023-07-02 21:08:32,221 - modelscope - INFO - epoch [1][2380/4982]\tlr: 5.876e-05, memory: 14329, loss: 0.8354\n", + "2023-07-02 21:08:34,747 - modelscope - INFO - epoch [1][2385/4982]\tlr: 5.862e-05, memory: 14329, loss: 1.3457\n", + "2023-07-02 21:08:38,256 - modelscope - INFO - epoch [1][2390/4982]\tlr: 5.848e-05, memory: 14329, loss: 1.9180\n", + "2023-07-02 21:08:40,701 - modelscope - INFO - epoch [1][2395/4982]\tlr: 5.834e-05, memory: 14329, loss: 1.1666\n", + "2023-07-02 21:08:43,933 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:09:50,373 - modelscope - INFO - Saving checkpoint at 2400 iter\n", + "2023-07-02 21:09:50,402 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter2200_acc0.749400794506073\n", + "2023-07-02 21:09:50,404 - modelscope - INFO - Saving checkpoint at 2400 iter\n", + "2023-07-02 21:09:50,432 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_2200\n", + "2023-07-02 21:09:50,435 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14329, evaluation/acc: 0.7535, evaluation/loss: 1.7703, loss: 1.5938\n", + "2023-07-02 21:09:53,136 - modelscope - INFO - epoch [1][2405/4982]\tlr: 5.805e-05, memory: 14329, loss: 3.0355\n", + "2023-07-02 21:09:55,673 - modelscope - INFO - epoch [1][2410/4982]\tlr: 5.791e-05, memory: 14329, loss: 1.9070\n", + "2023-07-02 21:09:58,239 - modelscope - INFO - epoch [1][2415/4982]\tlr: 5.777e-05, memory: 14329, loss: 1.1090\n", + "2023-07-02 21:10:00,413 - modelscope - INFO - epoch [1][2420/4982]\tlr: 5.763e-05, memory: 14329, loss: 1.3535\n", + "2023-07-02 21:10:02,887 - modelscope - INFO - epoch [1][2425/4982]\tlr: 5.748e-05, memory: 14329, loss: 1.4563\n", + "2023-07-02 21:10:05,462 - modelscope - INFO - epoch [1][2430/4982]\tlr: 5.734e-05, memory: 14329, loss: 2.2436\n", + "2023-07-02 21:10:08,549 - modelscope - INFO - epoch [1][2435/4982]\tlr: 5.720e-05, memory: 14329, loss: 1.8266\n", + "2023-07-02 21:10:11,226 - modelscope - INFO - epoch [1][2440/4982]\tlr: 5.706e-05, memory: 14329, loss: 1.8402\n", + "2023-07-02 21:10:13,579 - modelscope - INFO - epoch [1][2445/4982]\tlr: 5.691e-05, memory: 14329, loss: 2.0742\n", + "2023-07-02 21:10:15,828 - modelscope - INFO - epoch [1][2450/4982]\tlr: 5.677e-05, memory: 14329, loss: 1.5211\n", + "2023-07-02 21:10:18,658 - modelscope - INFO - epoch [1][2455/4982]\tlr: 5.663e-05, memory: 14329, loss: 0.9520\n", + "2023-07-02 21:10:21,705 - modelscope - INFO - epoch [1][2460/4982]\tlr: 5.649e-05, memory: 14329, loss: 1.4098\n", + "2023-07-02 21:10:24,494 - modelscope - INFO - epoch [1][2465/4982]\tlr: 5.635e-05, memory: 14329, loss: 1.5748\n", + "2023-07-02 21:10:27,349 - modelscope - INFO - epoch [1][2470/4982]\tlr: 5.620e-05, memory: 14329, loss: 2.5328\n", + "2023-07-02 
21:10:29,516 - modelscope - INFO - epoch [1][2475/4982]\tlr: 5.606e-05, memory: 14329, loss: 1.2904\n", + "2023-07-02 21:10:32,690 - modelscope - INFO - epoch [1][2480/4982]\tlr: 5.592e-05, memory: 14329, loss: 0.5270\n", + "2023-07-02 21:10:35,469 - modelscope - INFO - epoch [1][2485/4982]\tlr: 5.578e-05, memory: 14329, loss: 0.9842\n", + "2023-07-02 21:10:37,617 - modelscope - INFO - epoch [1][2490/4982]\tlr: 5.563e-05, memory: 14329, loss: 2.4695\n", + "2023-07-02 21:10:40,562 - modelscope - INFO - epoch [1][2495/4982]\tlr: 5.549e-05, memory: 14329, loss: 1.2441\n", + "2023-07-02 21:10:42,074 - modelscope - INFO - epoch [1][2500/4982]\tlr: 5.535e-05, memory: 14329, loss: 2.1055\n", + "2023-07-02 21:10:44,402 - modelscope - INFO - epoch [1][2505/4982]\tlr: 5.521e-05, memory: 14329, loss: 1.5461\n", + "2023-07-02 21:10:47,254 - modelscope - INFO - epoch [1][2510/4982]\tlr: 5.506e-05, memory: 14329, loss: 2.3160\n", + "2023-07-02 21:10:50,538 - modelscope - INFO - epoch [1][2515/4982]\tlr: 5.492e-05, memory: 14329, loss: 1.4293\n", + "2023-07-02 21:10:53,161 - modelscope - INFO - epoch [1][2520/4982]\tlr: 5.478e-05, memory: 14329, loss: 2.6732\n", + "2023-07-02 21:10:55,975 - modelscope - INFO - epoch [1][2525/4982]\tlr: 5.464e-05, memory: 14329, loss: 1.1059\n", + "2023-07-02 21:10:59,325 - modelscope - INFO - epoch [1][2530/4982]\tlr: 5.449e-05, memory: 14329, loss: 0.7672\n", + "2023-07-02 21:11:02,511 - modelscope - INFO - epoch [1][2535/4982]\tlr: 5.435e-05, memory: 14329, loss: 1.0480\n", + "2023-07-02 21:11:04,652 - modelscope - INFO - epoch [1][2540/4982]\tlr: 5.421e-05, memory: 14329, loss: 1.4984\n", + "2023-07-02 21:11:08,281 - modelscope - INFO - epoch [1][2545/4982]\tlr: 5.407e-05, memory: 14329, loss: 1.1805\n", + "2023-07-02 21:11:10,297 - modelscope - INFO - epoch [1][2550/4982]\tlr: 5.392e-05, memory: 14329, loss: 2.0984\n", + "2023-07-02 21:11:13,563 - modelscope - INFO - epoch [1][2555/4982]\tlr: 5.378e-05, memory: 14329, loss: 0.5590\n", + "2023-07-02 21:11:15,666 - modelscope - INFO - epoch [1][2560/4982]\tlr: 5.364e-05, memory: 14329, loss: 1.8969\n", + "2023-07-02 21:11:17,895 - modelscope - INFO - epoch [1][2565/4982]\tlr: 5.350e-05, memory: 14329, loss: 2.2344\n", + "2023-07-02 21:11:20,533 - modelscope - INFO - epoch [1][2570/4982]\tlr: 5.335e-05, memory: 14329, loss: 1.2381\n", + "2023-07-02 21:11:23,834 - modelscope - INFO - epoch [1][2575/4982]\tlr: 5.321e-05, memory: 14329, loss: 1.7533\n", + "2023-07-02 21:11:26,883 - modelscope - INFO - epoch [1][2580/4982]\tlr: 5.307e-05, memory: 14329, loss: 0.9559\n", + "2023-07-02 21:11:29,602 - modelscope - INFO - epoch [1][2585/4982]\tlr: 5.293e-05, memory: 14329, loss: 1.1484\n", + "2023-07-02 21:11:31,820 - modelscope - INFO - epoch [1][2590/4982]\tlr: 5.279e-05, memory: 14329, loss: 1.4527\n", + "2023-07-02 21:11:33,946 - modelscope - INFO - epoch [1][2595/4982]\tlr: 5.264e-05, memory: 14329, loss: 2.1156\n", + "2023-07-02 21:11:36,808 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:12:43,304 - modelscope - INFO - Saving checkpoint at 2600 iter\n", + "2023-07-02 21:12:43,335 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter2400_acc0.7534938454627991\n", + "2023-07-02 21:12:43,337 - modelscope - INFO - Saving checkpoint at 2600 iter\n", + "2023-07-02 21:12:43,366 - modelscope - INFO - deleting checkpoint: 
/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_2400\n", + "2023-07-02 21:12:43,369 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14329, evaluation/acc: 0.7577, evaluation/loss: 1.7432, loss: 1.3414\n", + "2023-07-02 21:12:45,632 - modelscope - INFO - epoch [1][2605/4982]\tlr: 5.236e-05, memory: 14329, loss: 1.1031\n", + "2023-07-02 21:12:47,931 - modelscope - INFO - epoch [1][2610/4982]\tlr: 5.222e-05, memory: 14329, loss: 2.4422\n", + "2023-07-02 21:12:50,545 - modelscope - INFO - epoch [1][2615/4982]\tlr: 5.207e-05, memory: 14329, loss: 1.2281\n", + "2023-07-02 21:12:53,002 - modelscope - INFO - epoch [1][2620/4982]\tlr: 5.193e-05, memory: 14329, loss: 1.9912\n", + "2023-07-02 21:12:55,893 - modelscope - INFO - epoch [1][2625/4982]\tlr: 5.179e-05, memory: 14329, loss: 1.7354\n", + "2023-07-02 21:12:58,266 - modelscope - INFO - epoch [1][2630/4982]\tlr: 5.165e-05, memory: 14329, loss: 3.0562\n", + "2023-07-02 21:13:00,767 - modelscope - INFO - epoch [1][2635/4982]\tlr: 5.151e-05, memory: 14329, loss: 1.7664\n", + "2023-07-02 21:13:04,043 - modelscope - INFO - epoch [1][2640/4982]\tlr: 5.136e-05, memory: 14329, loss: 1.7547\n", + "2023-07-02 21:13:06,487 - modelscope - INFO - epoch [1][2645/4982]\tlr: 5.122e-05, memory: 14329, loss: 2.0453\n", + "2023-07-02 21:13:09,480 - modelscope - INFO - epoch [1][2650/4982]\tlr: 5.108e-05, memory: 14329, loss: 1.5508\n", + "2023-07-02 21:13:11,484 - modelscope - INFO - epoch [1][2655/4982]\tlr: 5.094e-05, memory: 14329, loss: 2.8527\n", + "2023-07-02 21:13:14,637 - modelscope - INFO - epoch [1][2660/4982]\tlr: 5.080e-05, memory: 14329, loss: 0.4787\n", + "2023-07-02 21:13:17,215 - modelscope - INFO - epoch [1][2665/4982]\tlr: 5.066e-05, memory: 14329, loss: 1.1926\n", + "2023-07-02 21:13:19,892 - modelscope - INFO - epoch [1][2670/4982]\tlr: 5.051e-05, memory: 14329, loss: 2.3055\n", + "2023-07-02 21:13:21,987 - modelscope - INFO - epoch [1][2675/4982]\tlr: 5.037e-05, memory: 14329, loss: 1.6938\n", + "2023-07-02 21:13:24,761 - modelscope - INFO - epoch [1][2680/4982]\tlr: 5.023e-05, memory: 14329, loss: 2.2922\n", + "2023-07-02 21:13:26,815 - modelscope - INFO - epoch [1][2685/4982]\tlr: 5.009e-05, memory: 14329, loss: 1.6898\n", + "2023-07-02 21:13:29,236 - modelscope - INFO - epoch [1][2690/4982]\tlr: 4.995e-05, memory: 14329, loss: 2.2826\n", + "2023-07-02 21:13:31,582 - modelscope - INFO - epoch [1][2695/4982]\tlr: 4.981e-05, memory: 14329, loss: 1.7828\n", + "2023-07-02 21:13:33,912 - modelscope - INFO - epoch [1][2700/4982]\tlr: 4.966e-05, memory: 14329, loss: 1.8785\n", + "2023-07-02 21:13:36,729 - modelscope - INFO - epoch [1][2705/4982]\tlr: 4.952e-05, memory: 14329, loss: 1.4273\n", + "2023-07-02 21:13:38,262 - modelscope - INFO - epoch [1][2710/4982]\tlr: 4.938e-05, memory: 14329, loss: 1.5227\n", + "2023-07-02 21:13:40,572 - modelscope - INFO - epoch [1][2715/4982]\tlr: 4.924e-05, memory: 14329, loss: 2.0828\n", + "2023-07-02 21:13:43,610 - modelscope - INFO - epoch [1][2720/4982]\tlr: 4.910e-05, memory: 14329, loss: 1.7301\n", + "2023-07-02 21:13:46,147 - modelscope - INFO - epoch [1][2725/4982]\tlr: 4.896e-05, memory: 14329, loss: 1.8305\n", + "2023-07-02 21:13:49,457 - modelscope - INFO - epoch [1][2730/4982]\tlr: 4.882e-05, memory: 14329, loss: 1.6883\n", + "2023-07-02 21:13:51,690 - modelscope - INFO - epoch [1][2735/4982]\tlr: 4.868e-05, memory: 14329, loss: 1.3963\n", + "2023-07-02 21:13:54,487 - modelscope - INFO - epoch [1][2740/4982]\tlr: 4.854e-05, memory: 14329, loss: 1.2293\n", + "2023-07-02 
21:13:56,303 - modelscope - INFO - epoch [1][2745/4982]\tlr: 4.839e-05, memory: 14329, loss: 1.7289\n", + "2023-07-02 21:13:59,073 - modelscope - INFO - epoch [1][2750/4982]\tlr: 4.825e-05, memory: 14329, loss: 1.1637\n", + "2023-07-02 21:14:02,327 - modelscope - INFO - epoch [1][2755/4982]\tlr: 4.811e-05, memory: 14329, loss: 1.3336\n", + "2023-07-02 21:14:05,192 - modelscope - INFO - epoch [1][2760/4982]\tlr: 4.797e-05, memory: 14329, loss: 0.9352\n", + "2023-07-02 21:14:07,032 - modelscope - INFO - epoch [1][2765/4982]\tlr: 4.783e-05, memory: 14329, loss: 1.9258\n", + "2023-07-02 21:14:10,206 - modelscope - INFO - epoch [1][2770/4982]\tlr: 4.769e-05, memory: 14329, loss: 2.0555\n", + "2023-07-02 21:14:12,659 - modelscope - INFO - epoch [1][2775/4982]\tlr: 4.755e-05, memory: 14329, loss: 1.5836\n", + "2023-07-02 21:14:15,156 - modelscope - INFO - epoch [1][2780/4982]\tlr: 4.741e-05, memory: 14329, loss: 1.6203\n", + "2023-07-02 21:14:18,171 - modelscope - INFO - epoch [1][2785/4982]\tlr: 4.727e-05, memory: 14329, loss: 2.1402\n", + "2023-07-02 21:14:20,575 - modelscope - INFO - epoch [1][2790/4982]\tlr: 4.713e-05, memory: 14329, loss: 1.6504\n", + "2023-07-02 21:14:23,247 - modelscope - INFO - epoch [1][2795/4982]\tlr: 4.699e-05, memory: 14329, loss: 1.7109\n", + "2023-07-02 21:14:26,026 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:15:32,451 - modelscope - INFO - Saving checkpoint at 2800 iter\n", + "2023-07-02 21:15:32,483 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter2600_acc0.7577160000801086\n", + "2023-07-02 21:15:32,485 - modelscope - INFO - Saving checkpoint at 2800 iter\n", + "2023-07-02 21:15:32,515 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_2600\n", + "2023-07-02 21:15:32,518 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14329, evaluation/acc: 0.7621, evaluation/loss: 1.7451, loss: 2.2227\n", + "2023-07-02 21:15:34,950 - modelscope - INFO - epoch [1][2805/4982]\tlr: 4.671e-05, memory: 14329, loss: 2.0086\n", + "2023-07-02 21:15:38,272 - modelscope - INFO - epoch [1][2810/4982]\tlr: 4.657e-05, memory: 14329, loss: 0.8770\n", + "2023-07-02 21:15:41,346 - modelscope - INFO - epoch [1][2815/4982]\tlr: 4.643e-05, memory: 14329, loss: 0.7887\n", + "2023-07-02 21:15:43,033 - modelscope - INFO - epoch [1][2820/4982]\tlr: 4.629e-05, memory: 14329, loss: 2.8648\n", + "2023-07-02 21:15:45,965 - modelscope - INFO - epoch [1][2825/4982]\tlr: 4.615e-05, memory: 14329, loss: 1.9832\n", + "2023-07-02 21:15:48,381 - modelscope - INFO - epoch [1][2830/4982]\tlr: 4.601e-05, memory: 14329, loss: 1.4816\n", + "2023-07-02 21:15:51,262 - modelscope - INFO - epoch [1][2835/4982]\tlr: 4.587e-05, memory: 14329, loss: 1.3080\n", + "2023-07-02 21:15:53,969 - modelscope - INFO - epoch [1][2840/4982]\tlr: 4.573e-05, memory: 14329, loss: 1.2664\n", + "2023-07-02 21:15:56,145 - modelscope - INFO - epoch [1][2845/4982]\tlr: 4.559e-05, memory: 14329, loss: 2.4719\n", + "2023-07-02 21:15:58,623 - modelscope - INFO - epoch [1][2850/4982]\tlr: 4.545e-05, memory: 14329, loss: 1.0096\n", + "2023-07-02 21:16:01,537 - modelscope - INFO - epoch [1][2855/4982]\tlr: 4.532e-05, memory: 14329, loss: 1.7023\n", + "2023-07-02 21:16:05,216 - modelscope - INFO - epoch [1][2860/4982]\tlr: 4.518e-05, memory: 14329, loss: 1.8641\n", + "2023-07-02 
21:16:08,050 - modelscope - INFO - epoch [1][2865/4982]\tlr: 4.504e-05, memory: 14329, loss: 2.1398\n", + "2023-07-02 21:16:10,270 - modelscope - INFO - epoch [1][2870/4982]\tlr: 4.490e-05, memory: 14329, loss: 1.9180\n", + "2023-07-02 21:16:12,856 - modelscope - INFO - epoch [1][2875/4982]\tlr: 4.476e-05, memory: 14329, loss: 1.6426\n", + "2023-07-02 21:16:15,831 - modelscope - INFO - epoch [1][2880/4982]\tlr: 4.462e-05, memory: 14329, loss: 1.9609\n", + "2023-07-02 21:16:18,475 - modelscope - INFO - epoch [1][2885/4982]\tlr: 4.448e-05, memory: 14329, loss: 1.3818\n", + "2023-07-02 21:16:21,513 - modelscope - INFO - epoch [1][2890/4982]\tlr: 4.434e-05, memory: 14329, loss: 1.8543\n", + "2023-07-02 21:16:23,561 - modelscope - INFO - epoch [1][2895/4982]\tlr: 4.421e-05, memory: 14329, loss: 1.6133\n", + "2023-07-02 21:16:25,999 - modelscope - INFO - epoch [1][2900/4982]\tlr: 4.407e-05, memory: 14329, loss: 2.2039\n", + "2023-07-02 21:16:28,248 - modelscope - INFO - epoch [1][2905/4982]\tlr: 4.393e-05, memory: 14329, loss: 1.5797\n", + "2023-07-02 21:16:31,059 - modelscope - INFO - epoch [1][2910/4982]\tlr: 4.379e-05, memory: 14329, loss: 1.0002\n", + "2023-07-02 21:16:33,522 - modelscope - INFO - epoch [1][2915/4982]\tlr: 4.365e-05, memory: 14329, loss: 1.5379\n", + "2023-07-02 21:16:35,881 - modelscope - INFO - epoch [1][2920/4982]\tlr: 4.352e-05, memory: 14329, loss: 2.8797\n", + "2023-07-02 21:16:38,582 - modelscope - INFO - epoch [1][2925/4982]\tlr: 4.338e-05, memory: 14329, loss: 2.2234\n", + "2023-07-02 21:16:41,105 - modelscope - INFO - epoch [1][2930/4982]\tlr: 4.324e-05, memory: 14329, loss: 0.9779\n", + "2023-07-02 21:16:43,610 - modelscope - INFO - epoch [1][2935/4982]\tlr: 4.310e-05, memory: 14329, loss: 1.1336\n", + "2023-07-02 21:16:46,978 - modelscope - INFO - epoch [1][2940/4982]\tlr: 4.297e-05, memory: 14329, loss: 1.7703\n", + "2023-07-02 21:16:49,719 - modelscope - INFO - epoch [1][2945/4982]\tlr: 4.283e-05, memory: 14329, loss: 2.1102\n", + "2023-07-02 21:16:52,425 - modelscope - INFO - epoch [1][2950/4982]\tlr: 4.269e-05, memory: 14329, loss: 1.6873\n", + "2023-07-02 21:16:54,893 - modelscope - INFO - epoch [1][2955/4982]\tlr: 4.256e-05, memory: 14329, loss: 1.8313\n", + "2023-07-02 21:16:58,211 - modelscope - INFO - epoch [1][2960/4982]\tlr: 4.242e-05, memory: 14329, loss: 1.2132\n", + "2023-07-02 21:17:01,430 - modelscope - INFO - epoch [1][2965/4982]\tlr: 4.228e-05, memory: 14329, loss: 1.5578\n", + "2023-07-02 21:17:04,190 - modelscope - INFO - epoch [1][2970/4982]\tlr: 4.215e-05, memory: 14329, loss: 1.1242\n", + "2023-07-02 21:17:07,777 - modelscope - INFO - epoch [1][2975/4982]\tlr: 4.201e-05, memory: 14329, loss: 1.3516\n", + "2023-07-02 21:17:11,666 - modelscope - INFO - epoch [1][2980/4982]\tlr: 4.187e-05, memory: 14329, loss: 1.2953\n", + "2023-07-02 21:17:14,548 - modelscope - INFO - epoch [1][2985/4982]\tlr: 4.174e-05, memory: 14329, loss: 2.3777\n", + "2023-07-02 21:17:17,244 - modelscope - INFO - epoch [1][2990/4982]\tlr: 4.160e-05, memory: 14329, loss: 1.8803\n", + "2023-07-02 21:17:20,544 - modelscope - INFO - epoch [1][2995/4982]\tlr: 4.147e-05, memory: 14329, loss: 1.1699\n", + "2023-07-02 21:17:22,682 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 21:18:29,245 - modelscope - INFO - Saving checkpoint at 3000 iter\n", + "2023-07-02 21:18:29,273 - modelscope - INFO - deleting checkpoint: 
/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter2800_acc0.7621409296989441\n", + "2023-07-02 21:18:29,275 - modelscope - INFO - Saving checkpoint at 3000 iter\n", + "2023-07-02 21:18:29,301 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_2800\n", + "2023-07-02 21:18:29,303 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14329, evaluation/acc: 0.7655, evaluation/loss: 1.7432, loss: 1.2258\n", + "2023-07-02 21:18:31,804 - modelscope - INFO - epoch [1][3005/4982]\tlr: 4.120e-05, memory: 14329, loss: 2.2777\n", + "2023-07-02 21:18:35,465 - modelscope - INFO - epoch [1][3010/4982]\tlr: 4.106e-05, memory: 14329, loss: 1.4781\n", + "2023-07-02 21:18:38,255 - modelscope - INFO - epoch [1][3015/4982]\tlr: 4.092e-05, memory: 14329, loss: 1.4242\n", + "2023-07-02 21:18:41,641 - modelscope - INFO - epoch [1][3020/4982]\tlr: 4.079e-05, memory: 14449, loss: 2.5148\n", + "2023-07-02 21:18:44,184 - modelscope - INFO - epoch [1][3025/4982]\tlr: 4.065e-05, memory: 14449, loss: 1.9086\n", + "2023-07-02 21:18:47,235 - modelscope - INFO - epoch [1][3030/4982]\tlr: 4.052e-05, memory: 14449, loss: 2.3363\n", + "2023-07-02 21:18:50,005 - modelscope - INFO - epoch [1][3035/4982]\tlr: 4.039e-05, memory: 14449, loss: 1.4543\n", + "2023-07-02 21:18:52,482 - modelscope - INFO - epoch [1][3040/4982]\tlr: 4.025e-05, memory: 14449, loss: 2.1744\n", + "2023-07-02 21:18:55,300 - modelscope - INFO - epoch [1][3045/4982]\tlr: 4.012e-05, memory: 14449, loss: 1.8871\n", + "2023-07-02 21:18:58,643 - modelscope - INFO - epoch [1][3050/4982]\tlr: 3.998e-05, memory: 14449, loss: 1.6809\n", + "2023-07-02 21:19:01,867 - modelscope - INFO - epoch [1][3055/4982]\tlr: 3.985e-05, memory: 14449, loss: 2.7977\n", + "2023-07-02 21:19:05,785 - modelscope - INFO - epoch [1][3060/4982]\tlr: 3.971e-05, memory: 14449, loss: 1.6258\n", + "2023-07-02 21:19:09,029 - modelscope - INFO - epoch [1][3065/4982]\tlr: 3.958e-05, memory: 14449, loss: 0.9796\n", + "2023-07-02 21:19:11,551 - modelscope - INFO - epoch [1][3070/4982]\tlr: 3.945e-05, memory: 14449, loss: 2.2262\n", + "2023-07-02 21:19:14,238 - modelscope - INFO - epoch [1][3075/4982]\tlr: 3.931e-05, memory: 14449, loss: 1.3527\n", + "2023-07-02 21:19:16,361 - modelscope - INFO - epoch [1][3080/4982]\tlr: 3.918e-05, memory: 14449, loss: 1.6689\n", + "2023-07-02 21:19:18,345 - modelscope - INFO - epoch [1][3085/4982]\tlr: 3.905e-05, memory: 14449, loss: 2.9641\n", + "2023-07-02 21:19:20,849 - modelscope - INFO - epoch [1][3090/4982]\tlr: 3.891e-05, memory: 14449, loss: 1.6723\n", + "2023-07-02 21:19:23,101 - modelscope - INFO - epoch [1][3095/4982]\tlr: 3.878e-05, memory: 14449, loss: 2.7703\n", + "2023-07-02 21:19:25,726 - modelscope - INFO - epoch [1][3100/4982]\tlr: 3.865e-05, memory: 14449, loss: 0.8043\n", + "2023-07-02 21:19:28,252 - modelscope - INFO - epoch [1][3105/4982]\tlr: 3.852e-05, memory: 14449, loss: 2.0820\n", + "2023-07-02 21:19:30,440 - modelscope - INFO - epoch [1][3110/4982]\tlr: 3.838e-05, memory: 14449, loss: 2.3492\n", + "2023-07-02 21:19:33,686 - modelscope - INFO - epoch [1][3115/4982]\tlr: 3.825e-05, memory: 14449, loss: 0.8090\n", + "2023-07-02 21:19:36,596 - modelscope - INFO - epoch [1][3120/4982]\tlr: 3.812e-05, memory: 14449, loss: 0.6620\n", + "2023-07-02 21:19:38,596 - modelscope - INFO - epoch [1][3125/4982]\tlr: 3.799e-05, memory: 14449, loss: 2.6781\n", + "2023-07-02 21:19:41,115 - modelscope - INFO - epoch [1][3130/4982]\tlr: 3.786e-05, memory: 14449, loss: 1.4328\n", + 
"2023-07-02 21:19:44,046 - modelscope - INFO - epoch [1][3135/4982]\tlr: 3.772e-05, memory: 14449, loss: 1.3764\n", + "2023-07-02 21:19:47,148 - modelscope - INFO - epoch [1][3140/4982]\tlr: 3.759e-05, memory: 14449, loss: 1.0316\n", + "2023-07-02 21:19:50,062 - modelscope - INFO - epoch [1][3145/4982]\tlr: 3.746e-05, memory: 14449, loss: 1.6078\n", + "2023-07-02 21:19:52,899 - modelscope - INFO - epoch [1][3150/4982]\tlr: 3.733e-05, memory: 14449, loss: 1.9883\n", + "2023-07-02 21:19:55,621 - modelscope - INFO - epoch [1][3155/4982]\tlr: 3.720e-05, memory: 14449, loss: 1.6697\n", + "2023-07-02 21:19:57,950 - modelscope - INFO - epoch [1][3160/4982]\tlr: 3.707e-05, memory: 14449, loss: 2.7109\n", + "2023-07-02 21:20:00,606 - modelscope - INFO - epoch [1][3165/4982]\tlr: 3.694e-05, memory: 14449, loss: 1.5930\n", + "2023-07-02 21:20:04,380 - modelscope - INFO - epoch [1][3170/4982]\tlr: 3.681e-05, memory: 14449, loss: 1.5211\n", + "2023-07-02 21:20:07,165 - modelscope - INFO - epoch [1][3175/4982]\tlr: 3.668e-05, memory: 14449, loss: 1.1980\n", + "2023-07-02 21:20:09,788 - modelscope - INFO - epoch [1][3180/4982]\tlr: 3.655e-05, memory: 14449, loss: 1.7625\n", + "2023-07-02 21:20:12,711 - modelscope - INFO - epoch [1][3185/4982]\tlr: 3.642e-05, memory: 14449, loss: 1.6734\n", + "2023-07-02 21:20:15,469 - modelscope - INFO - epoch [1][3190/4982]\tlr: 3.629e-05, memory: 14449, loss: 1.9477\n", + "2023-07-02 21:20:18,068 - modelscope - INFO - epoch [1][3195/4982]\tlr: 3.616e-05, memory: 14449, loss: 1.4062\n", + "2023-07-02 21:20:20,228 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:21:26,662 - modelscope - INFO - Saving checkpoint at 3200 iter\n", + "2023-07-02 21:21:26,689 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter3000_acc0.7654780745506287\n", + "2023-07-02 21:21:26,692 - modelscope - INFO - Saving checkpoint at 3200 iter\n", + "2023-07-02 21:21:26,718 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_3000\n", + "2023-07-02 21:21:26,721 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7670, evaluation/loss: 1.7173, loss: 2.3687\n", + "2023-07-02 21:21:29,912 - modelscope - INFO - epoch [1][3205/4982]\tlr: 3.590e-05, memory: 14449, loss: 1.7494\n", + "2023-07-02 21:21:32,447 - modelscope - INFO - epoch [1][3210/4982]\tlr: 3.577e-05, memory: 14449, loss: 2.1035\n", + "2023-07-02 21:21:35,773 - modelscope - INFO - epoch [1][3215/4982]\tlr: 3.565e-05, memory: 14449, loss: 0.8089\n", + "2023-07-02 21:21:38,867 - modelscope - INFO - epoch [1][3220/4982]\tlr: 3.552e-05, memory: 14449, loss: 1.5078\n", + "2023-07-02 21:21:42,117 - modelscope - INFO - epoch [1][3225/4982]\tlr: 3.539e-05, memory: 14449, loss: 0.6988\n", + "2023-07-02 21:21:44,231 - modelscope - INFO - epoch [1][3230/4982]\tlr: 3.526e-05, memory: 14449, loss: 2.9305\n", + "2023-07-02 21:21:46,826 - modelscope - INFO - epoch [1][3235/4982]\tlr: 3.513e-05, memory: 14449, loss: 1.9297\n", + "2023-07-02 21:21:49,591 - modelscope - INFO - epoch [1][3240/4982]\tlr: 3.501e-05, memory: 14449, loss: 0.5963\n", + "2023-07-02 21:21:51,805 - modelscope - INFO - epoch [1][3245/4982]\tlr: 3.488e-05, memory: 14449, loss: 3.5063\n", + "2023-07-02 21:21:54,641 - modelscope - INFO - epoch [1][3250/4982]\tlr: 3.475e-05, memory: 14449, loss: 2.2263\n", + 
"2023-07-02 21:21:56,972 - modelscope - INFO - epoch [1][3255/4982]\tlr: 3.462e-05, memory: 14449, loss: 2.3281\n", + "2023-07-02 21:21:59,236 - modelscope - INFO - epoch [1][3260/4982]\tlr: 3.450e-05, memory: 14449, loss: 1.6074\n", + "2023-07-02 21:22:02,735 - modelscope - INFO - epoch [1][3265/4982]\tlr: 3.437e-05, memory: 14449, loss: 0.7896\n", + "2023-07-02 21:22:05,850 - modelscope - INFO - epoch [1][3270/4982]\tlr: 3.424e-05, memory: 14449, loss: 2.6018\n", + "2023-07-02 21:22:07,890 - modelscope - INFO - epoch [1][3275/4982]\tlr: 3.412e-05, memory: 14449, loss: 1.3377\n", + "2023-07-02 21:22:10,846 - modelscope - INFO - epoch [1][3280/4982]\tlr: 3.399e-05, memory: 14449, loss: 1.4023\n", + "2023-07-02 21:22:13,203 - modelscope - INFO - epoch [1][3285/4982]\tlr: 3.387e-05, memory: 14449, loss: 2.1109\n", + "2023-07-02 21:22:15,914 - modelscope - INFO - epoch [1][3290/4982]\tlr: 3.374e-05, memory: 14449, loss: 1.3941\n", + "2023-07-02 21:22:18,753 - modelscope - INFO - epoch [1][3295/4982]\tlr: 3.362e-05, memory: 14449, loss: 2.0223\n", + "2023-07-02 21:22:21,131 - modelscope - INFO - epoch [1][3300/4982]\tlr: 3.349e-05, memory: 14449, loss: 1.3546\n", + "2023-07-02 21:22:22,563 - modelscope - INFO - epoch [1][3305/4982]\tlr: 3.337e-05, memory: 14449, loss: 2.2541\n", + "2023-07-02 21:22:26,351 - modelscope - INFO - epoch [1][3310/4982]\tlr: 3.324e-05, memory: 14449, loss: 2.1484\n", + "2023-07-02 21:22:29,794 - modelscope - INFO - epoch [1][3315/4982]\tlr: 3.312e-05, memory: 14449, loss: 0.9180\n", + "2023-07-02 21:22:31,954 - modelscope - INFO - epoch [1][3320/4982]\tlr: 3.299e-05, memory: 14449, loss: 2.4869\n", + "2023-07-02 21:22:34,848 - modelscope - INFO - epoch [1][3325/4982]\tlr: 3.287e-05, memory: 14449, loss: 1.0967\n", + "2023-07-02 21:22:37,229 - modelscope - INFO - epoch [1][3330/4982]\tlr: 3.275e-05, memory: 14449, loss: 2.1406\n", + "2023-07-02 21:22:39,882 - modelscope - INFO - epoch [1][3335/4982]\tlr: 3.262e-05, memory: 14449, loss: 1.9133\n", + "2023-07-02 21:22:42,375 - modelscope - INFO - epoch [1][3340/4982]\tlr: 3.250e-05, memory: 14449, loss: 2.0443\n", + "2023-07-02 21:22:45,140 - modelscope - INFO - epoch [1][3345/4982]\tlr: 3.238e-05, memory: 14449, loss: 2.7484\n", + "2023-07-02 21:22:48,235 - modelscope - INFO - epoch [1][3350/4982]\tlr: 3.225e-05, memory: 14449, loss: 1.3258\n", + "2023-07-02 21:22:50,145 - modelscope - INFO - epoch [1][3355/4982]\tlr: 3.213e-05, memory: 14449, loss: 2.4828\n", + "2023-07-02 21:22:53,373 - modelscope - INFO - epoch [1][3360/4982]\tlr: 3.201e-05, memory: 14449, loss: 1.3379\n", + "2023-07-02 21:22:55,667 - modelscope - INFO - epoch [1][3365/4982]\tlr: 3.189e-05, memory: 14449, loss: 2.0289\n", + "2023-07-02 21:22:57,577 - modelscope - INFO - epoch [1][3370/4982]\tlr: 3.176e-05, memory: 14449, loss: 2.0500\n", + "2023-07-02 21:23:00,744 - modelscope - INFO - epoch [1][3375/4982]\tlr: 3.164e-05, memory: 14449, loss: 1.0834\n", + "2023-07-02 21:23:04,128 - modelscope - INFO - epoch [1][3380/4982]\tlr: 3.152e-05, memory: 14449, loss: 0.8875\n", + "2023-07-02 21:23:07,233 - modelscope - INFO - epoch [1][3385/4982]\tlr: 3.140e-05, memory: 14449, loss: 1.1375\n", + "2023-07-02 21:23:09,464 - modelscope - INFO - epoch [1][3390/4982]\tlr: 3.128e-05, memory: 14449, loss: 2.3506\n", + "2023-07-02 21:23:12,230 - modelscope - INFO - epoch [1][3395/4982]\tlr: 3.116e-05, memory: 14449, loss: 1.0258\n", + "2023-07-02 21:23:15,891 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test 
samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:24:22,313 - modelscope - INFO - Saving checkpoint at 3400 iter\n", + "2023-07-02 21:24:22,343 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter3200_acc0.7669530510902405\n", + "2023-07-02 21:24:22,345 - modelscope - INFO - Saving checkpoint at 3400 iter\n", + "2023-07-02 21:24:22,373 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_3200\n", + "2023-07-02 21:24:22,376 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7689, evaluation/loss: 1.6972, loss: 1.1217\n", + "2023-07-02 21:24:25,324 - modelscope - INFO - epoch [1][3405/4982]\tlr: 3.092e-05, memory: 14449, loss: 1.3055\n", + "2023-07-02 21:24:28,008 - modelscope - INFO - epoch [1][3410/4982]\tlr: 3.080e-05, memory: 14449, loss: 1.8813\n", + "2023-07-02 21:24:30,896 - modelscope - INFO - epoch [1][3415/4982]\tlr: 3.068e-05, memory: 14449, loss: 1.8965\n", + "2023-07-02 21:24:33,316 - modelscope - INFO - epoch [1][3420/4982]\tlr: 3.056e-05, memory: 14449, loss: 2.1344\n", + "2023-07-02 21:24:35,511 - modelscope - INFO - epoch [1][3425/4982]\tlr: 3.044e-05, memory: 14449, loss: 2.6798\n", + "2023-07-02 21:24:38,328 - modelscope - INFO - epoch [1][3430/4982]\tlr: 3.032e-05, memory: 14449, loss: 0.9617\n", + "2023-07-02 21:24:41,517 - modelscope - INFO - epoch [1][3435/4982]\tlr: 3.020e-05, memory: 14449, loss: 1.7773\n", + "2023-07-02 21:24:44,031 - modelscope - INFO - epoch [1][3440/4982]\tlr: 3.008e-05, memory: 14449, loss: 0.9613\n", + "2023-07-02 21:24:46,636 - modelscope - INFO - epoch [1][3445/4982]\tlr: 2.996e-05, memory: 14449, loss: 2.5844\n", + "2023-07-02 21:24:49,249 - modelscope - INFO - epoch [1][3450/4982]\tlr: 2.984e-05, memory: 14449, loss: 1.5498\n", + "2023-07-02 21:24:51,312 - modelscope - INFO - epoch [1][3455/4982]\tlr: 2.973e-05, memory: 14449, loss: 3.1250\n", + "2023-07-02 21:24:53,950 - modelscope - INFO - epoch [1][3460/4982]\tlr: 2.961e-05, memory: 14449, loss: 1.4406\n", + "2023-07-02 21:24:58,115 - modelscope - INFO - epoch [1][3465/4982]\tlr: 2.949e-05, memory: 14449, loss: 1.8449\n", + "2023-07-02 21:25:01,189 - modelscope - INFO - epoch [1][3470/4982]\tlr: 2.938e-05, memory: 14449, loss: 1.5242\n", + "2023-07-02 21:25:04,395 - modelscope - INFO - epoch [1][3475/4982]\tlr: 2.926e-05, memory: 14449, loss: 1.7469\n", + "2023-07-02 21:25:06,700 - modelscope - INFO - epoch [1][3480/4982]\tlr: 2.914e-05, memory: 14449, loss: 2.0787\n", + "2023-07-02 21:25:09,262 - modelscope - INFO - epoch [1][3485/4982]\tlr: 2.903e-05, memory: 14449, loss: 2.8416\n", + "2023-07-02 21:25:11,210 - modelscope - INFO - epoch [1][3490/4982]\tlr: 2.891e-05, memory: 14449, loss: 1.3633\n", + "2023-07-02 21:25:13,408 - modelscope - INFO - epoch [1][3495/4982]\tlr: 2.879e-05, memory: 14449, loss: 2.1203\n", + "2023-07-02 21:25:16,422 - modelscope - INFO - epoch [1][3500/4982]\tlr: 2.868e-05, memory: 14449, loss: 1.2863\n", + "2023-07-02 21:25:19,311 - modelscope - INFO - epoch [1][3505/4982]\tlr: 2.856e-05, memory: 14449, loss: 2.5109\n", + "2023-07-02 21:25:22,759 - modelscope - INFO - epoch [1][3510/4982]\tlr: 2.845e-05, memory: 14449, loss: 1.1850\n", + "2023-07-02 21:25:25,501 - modelscope - INFO - epoch [1][3515/4982]\tlr: 2.833e-05, memory: 14449, loss: 1.2992\n", + "2023-07-02 21:25:27,731 - modelscope - INFO - epoch [1][3520/4982]\tlr: 2.822e-05, memory: 14449, loss: 1.6945\n", + "2023-07-02 
21:25:30,093 - modelscope - INFO - epoch [1][3525/4982]\tlr: 2.810e-05, memory: 14449, loss: 1.4635\n", + "2023-07-02 21:25:32,786 - modelscope - INFO - epoch [1][3530/4982]\tlr: 2.799e-05, memory: 14449, loss: 1.3238\n", + "2023-07-02 21:25:35,630 - modelscope - INFO - epoch [1][3535/4982]\tlr: 2.788e-05, memory: 14449, loss: 1.7512\n", + "2023-07-02 21:25:38,803 - modelscope - INFO - epoch [1][3540/4982]\tlr: 2.776e-05, memory: 14449, loss: 0.5063\n", + "2023-07-02 21:25:41,431 - modelscope - INFO - epoch [1][3545/4982]\tlr: 2.765e-05, memory: 14449, loss: 2.9984\n", + "2023-07-02 21:25:44,590 - modelscope - INFO - epoch [1][3550/4982]\tlr: 2.754e-05, memory: 14449, loss: 1.9760\n", + "2023-07-02 21:25:47,035 - modelscope - INFO - epoch [1][3555/4982]\tlr: 2.743e-05, memory: 14449, loss: 1.2375\n", + "2023-07-02 21:25:49,304 - modelscope - INFO - epoch [1][3560/4982]\tlr: 2.731e-05, memory: 14449, loss: 2.3781\n", + "2023-07-02 21:25:51,809 - modelscope - INFO - epoch [1][3565/4982]\tlr: 2.720e-05, memory: 14449, loss: 1.3707\n", + "2023-07-02 21:25:55,272 - modelscope - INFO - epoch [1][3570/4982]\tlr: 2.709e-05, memory: 14449, loss: 2.1244\n", + "2023-07-02 21:25:57,747 - modelscope - INFO - epoch [1][3575/4982]\tlr: 2.698e-05, memory: 14449, loss: 0.8705\n", + "2023-07-02 21:26:00,593 - modelscope - INFO - epoch [1][3580/4982]\tlr: 2.687e-05, memory: 14449, loss: 2.1484\n", + "2023-07-02 21:26:02,783 - modelscope - INFO - epoch [1][3585/4982]\tlr: 2.676e-05, memory: 14449, loss: 1.3639\n", + "2023-07-02 21:26:04,331 - modelscope - INFO - epoch [1][3590/4982]\tlr: 2.665e-05, memory: 14449, loss: 1.5500\n", + "2023-07-02 21:26:07,565 - modelscope - INFO - epoch [1][3595/4982]\tlr: 2.654e-05, memory: 14449, loss: 1.4891\n", + "2023-07-02 21:26:09,515 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 21:27:16,035 - modelscope - INFO - Saving checkpoint at 3600 iter\n", + "2023-07-02 21:27:16,062 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter3400_acc0.768944263458252\n", + "2023-07-02 21:27:16,065 - modelscope - INFO - Saving checkpoint at 3600 iter\n", + "2023-07-02 21:27:16,090 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_3400\n", + "2023-07-02 21:27:16,092 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7704, evaluation/loss: 1.6898, loss: 2.3109\n", + "2023-07-02 21:27:17,958 - modelscope - INFO - epoch [1][3605/4982]\tlr: 2.632e-05, memory: 14449, loss: 1.5484\n", + "2023-07-02 21:27:20,844 - modelscope - INFO - epoch [1][3610/4982]\tlr: 2.621e-05, memory: 14449, loss: 1.7049\n", + "2023-07-02 21:27:24,038 - modelscope - INFO - epoch [1][3615/4982]\tlr: 2.610e-05, memory: 14449, loss: 1.1580\n", + "2023-07-02 21:27:26,611 - modelscope - INFO - epoch [1][3620/4982]\tlr: 2.599e-05, memory: 14449, loss: 1.1926\n", + "2023-07-02 21:27:29,270 - modelscope - INFO - epoch [1][3625/4982]\tlr: 2.588e-05, memory: 14449, loss: 1.9445\n", + "2023-07-02 21:27:32,570 - modelscope - INFO - epoch [1][3630/4982]\tlr: 2.577e-05, memory: 14449, loss: 0.8320\n", + "2023-07-02 21:27:34,890 - modelscope - INFO - epoch [1][3635/4982]\tlr: 2.566e-05, memory: 14449, loss: 1.8961\n", + "2023-07-02 21:27:37,762 - modelscope - INFO - epoch [1][3640/4982]\tlr: 2.556e-05, memory: 14449, loss: 1.3434\n", + "2023-07-02 
21:27:40,862 - modelscope - INFO - epoch [1][3645/4982]\tlr: 2.545e-05, memory: 14449, loss: 1.6516\n", + "2023-07-02 21:27:43,323 - modelscope - INFO - epoch [1][3650/4982]\tlr: 2.534e-05, memory: 14449, loss: 3.4539\n", + "2023-07-02 21:27:46,306 - modelscope - INFO - epoch [1][3655/4982]\tlr: 2.523e-05, memory: 14449, loss: 1.5139\n", + "2023-07-02 21:27:48,976 - modelscope - INFO - epoch [1][3660/4982]\tlr: 2.513e-05, memory: 14449, loss: 1.6055\n", + "2023-07-02 21:27:52,023 - modelscope - INFO - epoch [1][3665/4982]\tlr: 2.502e-05, memory: 14449, loss: 0.5375\n", + "2023-07-02 21:27:55,459 - modelscope - INFO - epoch [1][3670/4982]\tlr: 2.492e-05, memory: 14449, loss: 1.8552\n", + "2023-07-02 21:27:58,311 - modelscope - INFO - epoch [1][3675/4982]\tlr: 2.481e-05, memory: 14449, loss: 1.0477\n", + "2023-07-02 21:28:00,477 - modelscope - INFO - epoch [1][3680/4982]\tlr: 2.470e-05, memory: 14449, loss: 1.8646\n", + "2023-07-02 21:28:02,402 - modelscope - INFO - epoch [1][3685/4982]\tlr: 2.460e-05, memory: 14449, loss: 2.7117\n", + "2023-07-02 21:28:05,217 - modelscope - INFO - epoch [1][3690/4982]\tlr: 2.449e-05, memory: 14449, loss: 2.6594\n", + "2023-07-02 21:28:07,697 - modelscope - INFO - epoch [1][3695/4982]\tlr: 2.439e-05, memory: 14449, loss: 1.9680\n", + "2023-07-02 21:28:11,289 - modelscope - INFO - epoch [1][3700/4982]\tlr: 2.429e-05, memory: 14449, loss: 1.4680\n", + "2023-07-02 21:28:14,322 - modelscope - INFO - epoch [1][3705/4982]\tlr: 2.418e-05, memory: 14449, loss: 2.1742\n", + "2023-07-02 21:28:16,434 - modelscope - INFO - epoch [1][3710/4982]\tlr: 2.408e-05, memory: 14449, loss: 2.0691\n", + "2023-07-02 21:28:19,150 - modelscope - INFO - epoch [1][3715/4982]\tlr: 2.398e-05, memory: 14449, loss: 1.6078\n", + "2023-07-02 21:28:22,166 - modelscope - INFO - epoch [1][3720/4982]\tlr: 2.387e-05, memory: 14449, loss: 0.9880\n", + "2023-07-02 21:28:24,924 - modelscope - INFO - epoch [1][3725/4982]\tlr: 2.377e-05, memory: 14449, loss: 1.1384\n", + "2023-07-02 21:28:28,212 - modelscope - INFO - epoch [1][3730/4982]\tlr: 2.367e-05, memory: 14449, loss: 1.3064\n", + "2023-07-02 21:28:30,391 - modelscope - INFO - epoch [1][3735/4982]\tlr: 2.357e-05, memory: 14449, loss: 2.5031\n", + "2023-07-02 21:28:32,316 - modelscope - INFO - epoch [1][3740/4982]\tlr: 2.346e-05, memory: 14449, loss: 1.1914\n", + "2023-07-02 21:28:35,087 - modelscope - INFO - epoch [1][3745/4982]\tlr: 2.336e-05, memory: 14449, loss: 1.5630\n", + "2023-07-02 21:28:38,274 - modelscope - INFO - epoch [1][3750/4982]\tlr: 2.326e-05, memory: 14449, loss: 1.5844\n", + "2023-07-02 21:28:40,649 - modelscope - INFO - epoch [1][3755/4982]\tlr: 2.316e-05, memory: 14449, loss: 2.6648\n", + "2023-07-02 21:28:43,226 - modelscope - INFO - epoch [1][3760/4982]\tlr: 2.306e-05, memory: 14449, loss: 1.3648\n", + "2023-07-02 21:28:45,433 - modelscope - INFO - epoch [1][3765/4982]\tlr: 2.296e-05, memory: 14449, loss: 2.8930\n", + "2023-07-02 21:28:48,571 - modelscope - INFO - epoch [1][3770/4982]\tlr: 2.286e-05, memory: 14449, loss: 1.8161\n", + "2023-07-02 21:28:51,247 - modelscope - INFO - epoch [1][3775/4982]\tlr: 2.276e-05, memory: 14449, loss: 2.2783\n", + "2023-07-02 21:28:53,364 - modelscope - INFO - epoch [1][3780/4982]\tlr: 2.266e-05, memory: 14449, loss: 2.4652\n", + "2023-07-02 21:28:56,459 - modelscope - INFO - epoch [1][3785/4982]\tlr: 2.256e-05, memory: 14449, loss: 0.5556\n", + "2023-07-02 21:28:58,529 - modelscope - INFO - epoch [1][3790/4982]\tlr: 2.247e-05, memory: 14449, loss: 1.4350\n", + "2023-07-02 21:29:01,457 - 
modelscope - INFO - epoch [1][3795/4982]\tlr: 2.237e-05, memory: 14449, loss: 2.3062\n", + "2023-07-02 21:29:03,885 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 21:30:10,496 - modelscope - INFO - Saving checkpoint at 3800 iter\n", + "2023-07-02 21:30:10,522 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter3600_acc0.7704192399978638\n", + "2023-07-02 21:30:10,525 - modelscope - INFO - Saving checkpoint at 3800 iter\n", + "2023-07-02 21:30:10,549 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_3600\n", + "2023-07-02 21:30:10,552 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7714, evaluation/loss: 1.6864, loss: 1.6359\n", + "2023-07-02 21:30:12,897 - modelscope - INFO - epoch [1][3805/4982]\tlr: 2.217e-05, memory: 14449, loss: 2.1727\n", + "2023-07-02 21:30:15,703 - modelscope - INFO - epoch [1][3810/4982]\tlr: 2.208e-05, memory: 14449, loss: 1.7061\n", + "2023-07-02 21:30:18,582 - modelscope - INFO - epoch [1][3815/4982]\tlr: 2.198e-05, memory: 14449, loss: 0.9371\n", + "2023-07-02 21:30:21,148 - modelscope - INFO - epoch [1][3820/4982]\tlr: 2.188e-05, memory: 14449, loss: 1.7875\n", + "2023-07-02 21:30:23,806 - modelscope - INFO - epoch [1][3825/4982]\tlr: 2.179e-05, memory: 14449, loss: 2.2953\n", + "2023-07-02 21:30:26,426 - modelscope - INFO - epoch [1][3830/4982]\tlr: 2.169e-05, memory: 14449, loss: 2.3281\n", + "2023-07-02 21:30:28,893 - modelscope - INFO - epoch [1][3835/4982]\tlr: 2.160e-05, memory: 14449, loss: 1.5443\n", + "2023-07-02 21:30:31,735 - modelscope - INFO - epoch [1][3840/4982]\tlr: 2.150e-05, memory: 14449, loss: 2.0406\n", + "2023-07-02 21:30:33,879 - modelscope - INFO - epoch [1][3845/4982]\tlr: 2.141e-05, memory: 14449, loss: 2.1980\n", + "2023-07-02 21:30:36,598 - modelscope - INFO - epoch [1][3850/4982]\tlr: 2.131e-05, memory: 14449, loss: 1.5972\n", + "2023-07-02 21:30:39,142 - modelscope - INFO - epoch [1][3855/4982]\tlr: 2.122e-05, memory: 14449, loss: 2.2004\n", + "2023-07-02 21:30:41,541 - modelscope - INFO - epoch [1][3860/4982]\tlr: 2.112e-05, memory: 14449, loss: 1.5225\n", + "2023-07-02 21:30:44,206 - modelscope - INFO - epoch [1][3865/4982]\tlr: 2.103e-05, memory: 14449, loss: 2.0740\n", + "2023-07-02 21:30:47,318 - modelscope - INFO - epoch [1][3870/4982]\tlr: 2.094e-05, memory: 14449, loss: 2.7250\n", + "2023-07-02 21:30:50,059 - modelscope - INFO - epoch [1][3875/4982]\tlr: 2.084e-05, memory: 14449, loss: 2.2059\n", + "2023-07-02 21:30:52,045 - modelscope - INFO - epoch [1][3880/4982]\tlr: 2.075e-05, memory: 14449, loss: 1.7930\n", + "2023-07-02 21:30:54,716 - modelscope - INFO - epoch [1][3885/4982]\tlr: 2.066e-05, memory: 14449, loss: 1.6184\n", + "2023-07-02 21:30:56,979 - modelscope - INFO - epoch [1][3890/4982]\tlr: 2.057e-05, memory: 14449, loss: 2.1453\n", + "2023-07-02 21:31:01,437 - modelscope - INFO - epoch [1][3895/4982]\tlr: 2.048e-05, memory: 14449, loss: 1.2229\n", + "2023-07-02 21:31:05,207 - modelscope - INFO - epoch [1][3900/4982]\tlr: 2.039e-05, memory: 14449, loss: 1.7156\n", + "2023-07-02 21:31:07,873 - modelscope - INFO - epoch [1][3905/4982]\tlr: 2.029e-05, memory: 14449, loss: 1.8084\n", + "2023-07-02 21:31:10,896 - modelscope - INFO - epoch [1][3910/4982]\tlr: 2.020e-05, memory: 14449, loss: 0.4583\n", + "2023-07-02 21:31:13,623 - 
modelscope - INFO - epoch [1][3915/4982]\tlr: 2.011e-05, memory: 14449, loss: 3.1516\n", + "2023-07-02 21:31:16,647 - modelscope - INFO - epoch [1][3920/4982]\tlr: 2.002e-05, memory: 14449, loss: 1.0519\n", + "2023-07-02 21:31:19,431 - modelscope - INFO - epoch [1][3925/4982]\tlr: 1.994e-05, memory: 14449, loss: 2.3402\n", + "2023-07-02 21:31:21,995 - modelscope - INFO - epoch [1][3930/4982]\tlr: 1.985e-05, memory: 14449, loss: 2.3391\n", + "2023-07-02 21:31:24,439 - modelscope - INFO - epoch [1][3935/4982]\tlr: 1.976e-05, memory: 14449, loss: 2.4483\n", + "2023-07-02 21:31:26,586 - modelscope - INFO - epoch [1][3940/4982]\tlr: 1.967e-05, memory: 14449, loss: 2.2727\n", + "2023-07-02 21:31:28,897 - modelscope - INFO - epoch [1][3945/4982]\tlr: 1.958e-05, memory: 14449, loss: 3.0383\n", + "2023-07-02 21:31:31,754 - modelscope - INFO - epoch [1][3950/4982]\tlr: 1.949e-05, memory: 14449, loss: 1.5698\n", + "2023-07-02 21:31:35,256 - modelscope - INFO - epoch [1][3955/4982]\tlr: 1.941e-05, memory: 14449, loss: 1.2930\n", + "2023-07-02 21:31:37,474 - modelscope - INFO - epoch [1][3960/4982]\tlr: 1.932e-05, memory: 14449, loss: 1.4481\n", + "2023-07-02 21:31:40,154 - modelscope - INFO - epoch [1][3965/4982]\tlr: 1.923e-05, memory: 14449, loss: 1.6508\n", + "2023-07-02 21:31:42,215 - modelscope - INFO - epoch [1][3970/4982]\tlr: 1.915e-05, memory: 14449, loss: 1.6758\n", + "2023-07-02 21:31:44,996 - modelscope - INFO - epoch [1][3975/4982]\tlr: 1.906e-05, memory: 14449, loss: 3.0355\n", + "2023-07-02 21:31:47,982 - modelscope - INFO - epoch [1][3980/4982]\tlr: 1.898e-05, memory: 14449, loss: 2.0975\n", + "2023-07-02 21:31:50,425 - modelscope - INFO - epoch [1][3985/4982]\tlr: 1.889e-05, memory: 14449, loss: 2.7559\n", + "2023-07-02 21:31:53,599 - modelscope - INFO - epoch [1][3990/4982]\tlr: 1.881e-05, memory: 14449, loss: 0.6062\n", + "2023-07-02 21:31:56,806 - modelscope - INFO - epoch [1][3995/4982]\tlr: 1.872e-05, memory: 14449, loss: 1.8811\n", + "2023-07-02 21:31:59,002 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.24it/s]\n", + "2023-07-02 21:33:05,226 - modelscope - INFO - Saving checkpoint at 4000 iter\n", + "2023-07-02 21:33:05,253 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter3800_acc0.7713964581489563\n", + "2023-07-02 21:33:05,255 - modelscope - INFO - Saving checkpoint at 4000 iter\n", + "2023-07-02 21:33:05,280 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_3800\n", + "2023-07-02 21:33:05,283 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7721, evaluation/loss: 1.6809, loss: 2.3164\n", + "2023-07-02 21:33:07,641 - modelscope - INFO - epoch [1][4005/4982]\tlr: 1.855e-05, memory: 14449, loss: 1.3918\n", + "2023-07-02 21:33:10,090 - modelscope - INFO - epoch [1][4010/4982]\tlr: 1.847e-05, memory: 14449, loss: 1.7758\n", + "2023-07-02 21:33:13,438 - modelscope - INFO - epoch [1][4015/4982]\tlr: 1.839e-05, memory: 14449, loss: 0.8627\n", + "2023-07-02 21:33:16,653 - modelscope - INFO - epoch [1][4020/4982]\tlr: 1.831e-05, memory: 14449, loss: 1.2715\n", + "2023-07-02 21:33:20,248 - modelscope - INFO - epoch [1][4025/4982]\tlr: 1.822e-05, memory: 14449, loss: 2.1164\n", + "2023-07-02 21:33:23,029 - modelscope - INFO - epoch [1][4030/4982]\tlr: 1.814e-05, memory: 14449, loss: 1.0982\n", + "2023-07-02 21:33:25,384 - 
modelscope - INFO - epoch [1][4035/4982]\tlr: 1.806e-05, memory: 14449, loss: 1.3770\n", + "2023-07-02 21:33:27,542 - modelscope - INFO - epoch [1][4040/4982]\tlr: 1.798e-05, memory: 14449, loss: 1.4436\n", + "2023-07-02 21:33:29,897 - modelscope - INFO - epoch [1][4045/4982]\tlr: 1.790e-05, memory: 14449, loss: 1.6316\n", + "2023-07-02 21:33:32,478 - modelscope - INFO - epoch [1][4050/4982]\tlr: 1.782e-05, memory: 14449, loss: 0.8738\n", + "2023-07-02 21:33:35,228 - modelscope - INFO - epoch [1][4055/4982]\tlr: 1.774e-05, memory: 14449, loss: 1.9016\n", + "2023-07-02 21:33:37,569 - modelscope - INFO - epoch [1][4060/4982]\tlr: 1.766e-05, memory: 14449, loss: 1.6512\n", + "2023-07-02 21:33:40,234 - modelscope - INFO - epoch [1][4065/4982]\tlr: 1.758e-05, memory: 14449, loss: 1.3039\n", + "2023-07-02 21:33:42,749 - modelscope - INFO - epoch [1][4070/4982]\tlr: 1.750e-05, memory: 14449, loss: 1.2514\n", + "2023-07-02 21:33:45,340 - modelscope - INFO - epoch [1][4075/4982]\tlr: 1.742e-05, memory: 14449, loss: 2.8492\n", + "2023-07-02 21:33:47,472 - modelscope - INFO - epoch [1][4080/4982]\tlr: 1.734e-05, memory: 14449, loss: 2.0809\n", + "2023-07-02 21:33:50,149 - modelscope - INFO - epoch [1][4085/4982]\tlr: 1.727e-05, memory: 14449, loss: 1.1375\n", + "2023-07-02 21:33:53,306 - modelscope - INFO - epoch [1][4090/4982]\tlr: 1.719e-05, memory: 14449, loss: 0.4272\n", + "2023-07-02 21:33:55,772 - modelscope - INFO - epoch [1][4095/4982]\tlr: 1.711e-05, memory: 14449, loss: 3.0484\n", + "2023-07-02 21:33:58,344 - modelscope - INFO - epoch [1][4100/4982]\tlr: 1.704e-05, memory: 14449, loss: 1.9910\n", + "2023-07-02 21:34:00,903 - modelscope - INFO - epoch [1][4105/4982]\tlr: 1.696e-05, memory: 14449, loss: 1.7889\n", + "2023-07-02 21:34:03,059 - modelscope - INFO - epoch [1][4110/4982]\tlr: 1.688e-05, memory: 14449, loss: 1.2016\n", + "2023-07-02 21:34:05,621 - modelscope - INFO - epoch [1][4115/4982]\tlr: 1.681e-05, memory: 14449, loss: 1.8453\n", + "2023-07-02 21:34:09,027 - modelscope - INFO - epoch [1][4120/4982]\tlr: 1.673e-05, memory: 14449, loss: 1.5453\n", + "2023-07-02 21:34:11,741 - modelscope - INFO - epoch [1][4125/4982]\tlr: 1.666e-05, memory: 14449, loss: 1.9316\n", + "2023-07-02 21:34:13,865 - modelscope - INFO - epoch [1][4130/4982]\tlr: 1.659e-05, memory: 14449, loss: 2.3094\n", + "2023-07-02 21:34:16,258 - modelscope - INFO - epoch [1][4135/4982]\tlr: 1.651e-05, memory: 14449, loss: 2.5703\n", + "2023-07-02 21:34:20,487 - modelscope - INFO - epoch [1][4140/4982]\tlr: 1.644e-05, memory: 14449, loss: 1.3984\n", + "2023-07-02 21:34:23,365 - modelscope - INFO - epoch [1][4145/4982]\tlr: 1.636e-05, memory: 14449, loss: 1.5207\n", + "2023-07-02 21:34:26,448 - modelscope - INFO - epoch [1][4150/4982]\tlr: 1.629e-05, memory: 14449, loss: 1.3838\n", + "2023-07-02 21:34:28,356 - modelscope - INFO - epoch [1][4155/4982]\tlr: 1.622e-05, memory: 14449, loss: 1.5562\n", + "2023-07-02 21:34:30,276 - modelscope - INFO - epoch [1][4160/4982]\tlr: 1.615e-05, memory: 14449, loss: 2.0258\n", + "2023-07-02 21:34:33,019 - modelscope - INFO - epoch [1][4165/4982]\tlr: 1.608e-05, memory: 14449, loss: 1.0586\n", + "2023-07-02 21:34:35,587 - modelscope - INFO - epoch [1][4170/4982]\tlr: 1.601e-05, memory: 14449, loss: 2.0258\n", + "2023-07-02 21:34:38,118 - modelscope - INFO - epoch [1][4175/4982]\tlr: 1.593e-05, memory: 14449, loss: 1.7780\n", + "2023-07-02 21:34:40,812 - modelscope - INFO - epoch [1][4180/4982]\tlr: 1.586e-05, memory: 14449, loss: 1.4871\n", + "2023-07-02 21:34:43,689 - modelscope - 
INFO - epoch [1][4185/4982]\tlr: 1.579e-05, memory: 14449, loss: 2.4375\n", + "2023-07-02 21:34:45,571 - modelscope - INFO - epoch [1][4190/4982]\tlr: 1.572e-05, memory: 14449, loss: 2.8734\n", + "2023-07-02 21:34:47,974 - modelscope - INFO - epoch [1][4195/4982]\tlr: 1.566e-05, memory: 14449, loss: 1.9576\n", + "2023-07-02 21:34:50,431 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.24it/s]\n", + "2023-07-02 21:35:56,740 - modelscope - INFO - Saving checkpoint at 4200 iter\n", + "2023-07-02 21:35:56,767 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_4000\n", + "2023-07-02 21:35:56,770 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7719, evaluation/loss: 1.6805, loss: 3.5922\n", + "2023-07-02 21:35:58,922 - modelscope - INFO - epoch [1][4205/4982]\tlr: 1.552e-05, memory: 14449, loss: 2.2658\n", + "2023-07-02 21:36:01,295 - modelscope - INFO - epoch [1][4210/4982]\tlr: 1.545e-05, memory: 14449, loss: 1.6580\n", + "2023-07-02 21:36:04,097 - modelscope - INFO - epoch [1][4215/4982]\tlr: 1.538e-05, memory: 14449, loss: 1.6982\n", + "2023-07-02 21:36:06,731 - modelscope - INFO - epoch [1][4220/4982]\tlr: 1.532e-05, memory: 14449, loss: 1.9359\n", + "2023-07-02 21:36:08,551 - modelscope - INFO - epoch [1][4225/4982]\tlr: 1.525e-05, memory: 14449, loss: 2.5812\n", + "2023-07-02 21:36:11,911 - modelscope - INFO - epoch [1][4230/4982]\tlr: 1.518e-05, memory: 14449, loss: 1.9195\n", + "2023-07-02 21:36:14,506 - modelscope - INFO - epoch [1][4235/4982]\tlr: 1.512e-05, memory: 14449, loss: 1.2545\n", + "2023-07-02 21:36:17,733 - modelscope - INFO - epoch [1][4240/4982]\tlr: 1.505e-05, memory: 14449, loss: 1.9451\n", + "2023-07-02 21:36:20,470 - modelscope - INFO - epoch [1][4245/4982]\tlr: 1.499e-05, memory: 14449, loss: 1.4648\n", + "2023-07-02 21:36:22,770 - modelscope - INFO - epoch [1][4250/4982]\tlr: 1.492e-05, memory: 14449, loss: 1.6961\n", + "2023-07-02 21:36:25,378 - modelscope - INFO - epoch [1][4255/4982]\tlr: 1.486e-05, memory: 14449, loss: 2.4164\n", + "2023-07-02 21:36:27,752 - modelscope - INFO - epoch [1][4260/4982]\tlr: 1.479e-05, memory: 14449, loss: 1.9963\n", + "2023-07-02 21:36:30,118 - modelscope - INFO - epoch [1][4265/4982]\tlr: 1.473e-05, memory: 14449, loss: 2.1148\n", + "2023-07-02 21:36:33,660 - modelscope - INFO - epoch [1][4270/4982]\tlr: 1.466e-05, memory: 14449, loss: 1.0082\n", + "2023-07-02 21:36:37,177 - modelscope - INFO - epoch [1][4275/4982]\tlr: 1.460e-05, memory: 14449, loss: 1.0070\n", + "2023-07-02 21:36:39,794 - modelscope - INFO - epoch [1][4280/4982]\tlr: 1.454e-05, memory: 14449, loss: 2.2496\n", + "2023-07-02 21:36:42,033 - modelscope - INFO - epoch [1][4285/4982]\tlr: 1.448e-05, memory: 14449, loss: 2.6797\n", + "2023-07-02 21:36:45,045 - modelscope - INFO - epoch [1][4290/4982]\tlr: 1.442e-05, memory: 14449, loss: 1.7584\n", + "2023-07-02 21:36:47,854 - modelscope - INFO - epoch [1][4295/4982]\tlr: 1.435e-05, memory: 14449, loss: 0.8922\n", + "2023-07-02 21:36:50,056 - modelscope - INFO - epoch [1][4300/4982]\tlr: 1.429e-05, memory: 14449, loss: 0.9248\n", + "2023-07-02 21:36:52,432 - modelscope - INFO - epoch [1][4305/4982]\tlr: 1.423e-05, memory: 14449, loss: 2.2406\n", + "2023-07-02 21:36:55,320 - modelscope - INFO - epoch [1][4310/4982]\tlr: 1.417e-05, memory: 14449, loss: 2.6234\n", + "2023-07-02 21:36:57,625 - modelscope - INFO - epoch 
[1][4315/4982]\tlr: 1.411e-05, memory: 14449, loss: 2.5016\n", + "2023-07-02 21:36:59,666 - modelscope - INFO - epoch [1][4320/4982]\tlr: 1.405e-05, memory: 14449, loss: 2.4305\n", + "2023-07-02 21:37:01,862 - modelscope - INFO - epoch [1][4325/4982]\tlr: 1.400e-05, memory: 14449, loss: 2.3391\n", + "2023-07-02 21:37:03,730 - modelscope - INFO - epoch [1][4330/4982]\tlr: 1.394e-05, memory: 14449, loss: 2.1297\n", + "2023-07-02 21:37:06,491 - modelscope - INFO - epoch [1][4335/4982]\tlr: 1.388e-05, memory: 14449, loss: 1.5926\n", + "2023-07-02 21:37:08,327 - modelscope - INFO - epoch [1][4340/4982]\tlr: 1.382e-05, memory: 14449, loss: 2.0867\n", + "2023-07-02 21:37:10,978 - modelscope - INFO - epoch [1][4345/4982]\tlr: 1.376e-05, memory: 14449, loss: 1.5793\n", + "2023-07-02 21:37:13,418 - modelscope - INFO - epoch [1][4350/4982]\tlr: 1.371e-05, memory: 14449, loss: 1.3965\n", + "2023-07-02 21:37:16,097 - modelscope - INFO - epoch [1][4355/4982]\tlr: 1.365e-05, memory: 14449, loss: 1.6531\n", + "2023-07-02 21:37:18,922 - modelscope - INFO - epoch [1][4360/4982]\tlr: 1.360e-05, memory: 14449, loss: 1.2753\n", + "2023-07-02 21:37:21,708 - modelscope - INFO - epoch [1][4365/4982]\tlr: 1.354e-05, memory: 14449, loss: 1.6145\n", + "2023-07-02 21:37:23,716 - modelscope - INFO - epoch [1][4370/4982]\tlr: 1.349e-05, memory: 14449, loss: 2.6463\n", + "2023-07-02 21:37:27,213 - modelscope - INFO - epoch [1][4375/4982]\tlr: 1.343e-05, memory: 14449, loss: 0.6934\n", + "2023-07-02 21:37:30,031 - modelscope - INFO - epoch [1][4380/4982]\tlr: 1.338e-05, memory: 14449, loss: 2.2023\n", + "2023-07-02 21:37:33,441 - modelscope - INFO - epoch [1][4385/4982]\tlr: 1.332e-05, memory: 14449, loss: 1.6848\n", + "2023-07-02 21:37:35,797 - modelscope - INFO - epoch [1][4390/4982]\tlr: 1.327e-05, memory: 14449, loss: 1.6936\n", + "2023-07-02 21:37:39,329 - modelscope - INFO - epoch [1][4395/4982]\tlr: 1.322e-05, memory: 14449, loss: 0.5190\n", + "2023-07-02 21:37:41,815 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:38:48,264 - modelscope - INFO - Saving checkpoint at 4400 iter\n", + "2023-07-02 21:38:48,291 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter4000_acc0.7720601558685303\n", + "2023-07-02 21:38:48,293 - modelscope - INFO - Saving checkpoint at 4400 iter\n", + "2023-07-02 21:38:48,319 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_4200\n", + "2023-07-02 21:38:48,321 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7722, evaluation/loss: 1.6760, loss: 2.0141\n", + "2023-07-02 21:38:52,426 - modelscope - INFO - epoch [1][4405/4982]\tlr: 1.311e-05, memory: 14449, loss: 1.0922\n", + "2023-07-02 21:38:54,940 - modelscope - INFO - epoch [1][4410/4982]\tlr: 1.306e-05, memory: 14449, loss: 1.1858\n", + "2023-07-02 21:38:57,631 - modelscope - INFO - epoch [1][4415/4982]\tlr: 1.301e-05, memory: 14449, loss: 2.2687\n", + "2023-07-02 21:39:01,287 - modelscope - INFO - epoch [1][4420/4982]\tlr: 1.296e-05, memory: 14449, loss: 1.2707\n", + "2023-07-02 21:39:04,825 - modelscope - INFO - epoch [1][4425/4982]\tlr: 1.291e-05, memory: 14449, loss: 2.9891\n", + "2023-07-02 21:39:07,641 - modelscope - INFO - epoch [1][4430/4982]\tlr: 1.286e-05, memory: 14449, loss: 1.6935\n", + "2023-07-02 21:39:10,432 - modelscope - INFO - epoch 
[1][4435/4982]\tlr: 1.281e-05, memory: 14449, loss: 1.4844\n", + "2023-07-02 21:39:13,413 - modelscope - INFO - epoch [1][4440/4982]\tlr: 1.276e-05, memory: 14449, loss: 1.8453\n", + "2023-07-02 21:39:17,035 - modelscope - INFO - epoch [1][4445/4982]\tlr: 1.271e-05, memory: 14449, loss: 1.4854\n", + "2023-07-02 21:39:20,194 - modelscope - INFO - epoch [1][4450/4982]\tlr: 1.266e-05, memory: 14449, loss: 1.2645\n", + "2023-07-02 21:39:23,060 - modelscope - INFO - epoch [1][4455/4982]\tlr: 1.261e-05, memory: 14449, loss: 1.7969\n", + "2023-07-02 21:39:25,473 - modelscope - INFO - epoch [1][4460/4982]\tlr: 1.257e-05, memory: 14449, loss: 2.3201\n", + "2023-07-02 21:39:28,124 - modelscope - INFO - epoch [1][4465/4982]\tlr: 1.252e-05, memory: 14449, loss: 1.7680\n", + "2023-07-02 21:39:30,849 - modelscope - INFO - epoch [1][4470/4982]\tlr: 1.247e-05, memory: 14449, loss: 1.6301\n", + "2023-07-02 21:39:33,762 - modelscope - INFO - epoch [1][4475/4982]\tlr: 1.243e-05, memory: 14449, loss: 2.1186\n", + "2023-07-02 21:39:36,085 - modelscope - INFO - epoch [1][4480/4982]\tlr: 1.238e-05, memory: 14449, loss: 1.4234\n", + "2023-07-02 21:39:38,762 - modelscope - INFO - epoch [1][4485/4982]\tlr: 1.233e-05, memory: 14449, loss: 1.7797\n", + "2023-07-02 21:39:41,748 - modelscope - INFO - epoch [1][4490/4982]\tlr: 1.229e-05, memory: 14449, loss: 1.6820\n", + "2023-07-02 21:39:44,541 - modelscope - INFO - epoch [1][4495/4982]\tlr: 1.224e-05, memory: 14449, loss: 1.0109\n", + "2023-07-02 21:39:47,053 - modelscope - INFO - epoch [1][4500/4982]\tlr: 1.220e-05, memory: 14449, loss: 2.4484\n", + "2023-07-02 21:39:49,590 - modelscope - INFO - epoch [1][4505/4982]\tlr: 1.216e-05, memory: 14449, loss: 1.8258\n", + "2023-07-02 21:39:52,526 - modelscope - INFO - epoch [1][4510/4982]\tlr: 1.211e-05, memory: 14449, loss: 2.8773\n", + "2023-07-02 21:39:55,867 - modelscope - INFO - epoch [1][4515/4982]\tlr: 1.207e-05, memory: 14449, loss: 1.6246\n", + "2023-07-02 21:39:58,627 - modelscope - INFO - epoch [1][4520/4982]\tlr: 1.203e-05, memory: 14449, loss: 2.5562\n", + "2023-07-02 21:40:01,603 - modelscope - INFO - epoch [1][4525/4982]\tlr: 1.199e-05, memory: 14449, loss: 1.4436\n", + "2023-07-02 21:40:04,193 - modelscope - INFO - epoch [1][4530/4982]\tlr: 1.194e-05, memory: 14449, loss: 1.3711\n", + "2023-07-02 21:40:07,773 - modelscope - INFO - epoch [1][4535/4982]\tlr: 1.190e-05, memory: 14449, loss: 1.8023\n", + "2023-07-02 21:40:10,054 - modelscope - INFO - epoch [1][4540/4982]\tlr: 1.186e-05, memory: 14449, loss: 2.0508\n", + "2023-07-02 21:40:12,973 - modelscope - INFO - epoch [1][4545/4982]\tlr: 1.182e-05, memory: 14449, loss: 2.5195\n", + "2023-07-02 21:40:16,038 - modelscope - INFO - epoch [1][4550/4982]\tlr: 1.178e-05, memory: 14449, loss: 1.7164\n", + "2023-07-02 21:40:18,581 - modelscope - INFO - epoch [1][4555/4982]\tlr: 1.174e-05, memory: 14449, loss: 1.5645\n", + "2023-07-02 21:40:20,963 - modelscope - INFO - epoch [1][4560/4982]\tlr: 1.170e-05, memory: 14449, loss: 2.0105\n", + "2023-07-02 21:40:23,706 - modelscope - INFO - epoch [1][4565/4982]\tlr: 1.167e-05, memory: 14449, loss: 1.3252\n", + "2023-07-02 21:40:25,962 - modelscope - INFO - epoch [1][4570/4982]\tlr: 1.163e-05, memory: 14449, loss: 1.8855\n", + "2023-07-02 21:40:29,182 - modelscope - INFO - epoch [1][4575/4982]\tlr: 1.159e-05, memory: 14449, loss: 1.2594\n", + "2023-07-02 21:40:31,408 - modelscope - INFO - epoch [1][4580/4982]\tlr: 1.155e-05, memory: 14449, loss: 2.0570\n", + "2023-07-02 21:40:34,024 - modelscope - INFO - epoch 
[1][4585/4982]\tlr: 1.152e-05, memory: 14449, loss: 2.6170\n", + "2023-07-02 21:40:36,599 - modelscope - INFO - epoch [1][4590/4982]\tlr: 1.148e-05, memory: 14449, loss: 1.6721\n", + "2023-07-02 21:40:39,014 - modelscope - INFO - epoch [1][4595/4982]\tlr: 1.144e-05, memory: 14449, loss: 1.1687\n", + "2023-07-02 21:40:41,965 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.22it/s]\n", + "2023-07-02 21:41:48,497 - modelscope - INFO - Saving checkpoint at 4600 iter\n", + "2023-07-02 21:41:48,524 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter4400_acc0.7721523642539978\n", + "2023-07-02 21:41:48,526 - modelscope - INFO - Saving checkpoint at 4600 iter\n", + "2023-07-02 21:41:48,552 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_4400\n", + "2023-07-02 21:41:48,555 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7725, evaluation/loss: 1.6727, loss: 1.6291\n", + "2023-07-02 21:41:51,846 - modelscope - INFO - epoch [1][4605/4982]\tlr: 1.137e-05, memory: 14449, loss: 0.3742\n", + "2023-07-02 21:41:54,432 - modelscope - INFO - epoch [1][4610/4982]\tlr: 1.134e-05, memory: 14449, loss: 1.9832\n", + "2023-07-02 21:41:56,756 - modelscope - INFO - epoch [1][4615/4982]\tlr: 1.130e-05, memory: 14449, loss: 1.6234\n", + "2023-07-02 21:41:59,635 - modelscope - INFO - epoch [1][4620/4982]\tlr: 1.127e-05, memory: 14449, loss: 1.2416\n", + "2023-07-02 21:42:02,440 - modelscope - INFO - epoch [1][4625/4982]\tlr: 1.124e-05, memory: 14449, loss: 1.9668\n", + "2023-07-02 21:42:04,595 - modelscope - INFO - epoch [1][4630/4982]\tlr: 1.120e-05, memory: 14449, loss: 1.1527\n", + "2023-07-02 21:42:07,367 - modelscope - INFO - epoch [1][4635/4982]\tlr: 1.117e-05, memory: 14449, loss: 2.0367\n", + "2023-07-02 21:42:09,781 - modelscope - INFO - epoch [1][4640/4982]\tlr: 1.114e-05, memory: 14449, loss: 1.6268\n", + "2023-07-02 21:42:12,158 - modelscope - INFO - epoch [1][4645/4982]\tlr: 1.111e-05, memory: 14449, loss: 2.4633\n", + "2023-07-02 21:42:14,206 - modelscope - INFO - epoch [1][4650/4982]\tlr: 1.108e-05, memory: 14449, loss: 2.8531\n", + "2023-07-02 21:42:16,879 - modelscope - INFO - epoch [1][4655/4982]\tlr: 1.105e-05, memory: 14449, loss: 2.2703\n", + "2023-07-02 21:42:20,006 - modelscope - INFO - epoch [1][4660/4982]\tlr: 1.102e-05, memory: 14449, loss: 0.8350\n", + "2023-07-02 21:42:22,598 - modelscope - INFO - epoch [1][4665/4982]\tlr: 1.099e-05, memory: 14449, loss: 1.9375\n", + "2023-07-02 21:42:26,607 - modelscope - INFO - epoch [1][4670/4982]\tlr: 1.096e-05, memory: 14449, loss: 0.9594\n", + "2023-07-02 21:42:30,336 - modelscope - INFO - epoch [1][4675/4982]\tlr: 1.093e-05, memory: 14449, loss: 1.2943\n", + "2023-07-02 21:42:32,894 - modelscope - INFO - epoch [1][4680/4982]\tlr: 1.090e-05, memory: 14449, loss: 1.4293\n", + "2023-07-02 21:42:37,079 - modelscope - INFO - epoch [1][4685/4982]\tlr: 1.087e-05, memory: 14449, loss: 1.4109\n", + "2023-07-02 21:42:40,878 - modelscope - INFO - epoch [1][4690/4982]\tlr: 1.084e-05, memory: 14449, loss: 0.6270\n", + "2023-07-02 21:42:43,202 - modelscope - INFO - epoch [1][4695/4982]\tlr: 1.082e-05, memory: 14449, loss: 1.4430\n", + "2023-07-02 21:42:45,786 - modelscope - INFO - epoch [1][4700/4982]\tlr: 1.079e-05, memory: 14449, loss: 1.2656\n", + "2023-07-02 21:42:47,371 - modelscope - INFO - epoch 
[1][4705/4982]\tlr: 1.076e-05, memory: 14449, loss: 1.9141\n", + "2023-07-02 21:42:50,147 - modelscope - INFO - epoch [1][4710/4982]\tlr: 1.074e-05, memory: 14449, loss: 1.1176\n", + "2023-07-02 21:42:52,690 - modelscope - INFO - epoch [1][4715/4982]\tlr: 1.071e-05, memory: 14449, loss: 2.7781\n", + "2023-07-02 21:42:55,645 - modelscope - INFO - epoch [1][4720/4982]\tlr: 1.069e-05, memory: 14449, loss: 0.4620\n", + "2023-07-02 21:42:58,615 - modelscope - INFO - epoch [1][4725/4982]\tlr: 1.066e-05, memory: 14449, loss: 1.2354\n", + "2023-07-02 21:43:00,944 - modelscope - INFO - epoch [1][4730/4982]\tlr: 1.064e-05, memory: 14449, loss: 1.4683\n", + "2023-07-02 21:43:04,011 - modelscope - INFO - epoch [1][4735/4982]\tlr: 1.062e-05, memory: 14449, loss: 1.3249\n", + "2023-07-02 21:43:06,962 - modelscope - INFO - epoch [1][4740/4982]\tlr: 1.059e-05, memory: 14449, loss: 1.0039\n", + "2023-07-02 21:43:10,074 - modelscope - INFO - epoch [1][4745/4982]\tlr: 1.057e-05, memory: 14449, loss: 1.9678\n", + "2023-07-02 21:43:12,406 - modelscope - INFO - epoch [1][4750/4982]\tlr: 1.055e-05, memory: 14449, loss: 0.6996\n", + "2023-07-02 21:43:15,125 - modelscope - INFO - epoch [1][4755/4982]\tlr: 1.053e-05, memory: 14449, loss: 0.9693\n", + "2023-07-02 21:43:17,919 - modelscope - INFO - epoch [1][4760/4982]\tlr: 1.050e-05, memory: 14449, loss: 2.0680\n", + "2023-07-02 21:43:20,500 - modelscope - INFO - epoch [1][4765/4982]\tlr: 1.048e-05, memory: 14449, loss: 1.6277\n", + "2023-07-02 21:43:22,713 - modelscope - INFO - epoch [1][4770/4982]\tlr: 1.046e-05, memory: 14449, loss: 1.9484\n", + "2023-07-02 21:43:24,366 - modelscope - INFO - epoch [1][4775/4982]\tlr: 1.044e-05, memory: 14449, loss: 2.6502\n", + "2023-07-02 21:43:27,079 - modelscope - INFO - epoch [1][4780/4982]\tlr: 1.042e-05, memory: 14449, loss: 1.2715\n", + "2023-07-02 21:43:29,023 - modelscope - INFO - epoch [1][4785/4982]\tlr: 1.040e-05, memory: 14449, loss: 1.8383\n", + "2023-07-02 21:43:31,660 - modelscope - INFO - epoch [1][4790/4982]\tlr: 1.038e-05, memory: 14449, loss: 1.6623\n", + "2023-07-02 21:43:34,660 - modelscope - INFO - epoch [1][4795/4982]\tlr: 1.037e-05, memory: 14449, loss: 1.2914\n", + "2023-07-02 21:43:37,720 - modelscope - WARNING - ('METRICS', 'default', 'my_metric') not found in ast index file\n", + "Total test samples: 100%|██████████| 281/281 [01:06<00:00, 4.23it/s]\n", + "2023-07-02 21:44:44,218 - modelscope - INFO - Saving checkpoint at 4800 iter\n", + "2023-07-02 21:44:44,248 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/best_iter4600_acc0.7724842429161072\n", + "2023-07-02 21:44:44,250 - modelscope - INFO - Saving checkpoint at 4800 iter\n", + "2023-07-02 21:44:44,279 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_4600\n", + "2023-07-02 21:44:44,282 - modelscope - INFO - epoch(eval) [1][281]\tmemory: 14449, evaluation/acc: 0.7729, evaluation/loss: 1.6707, loss: 1.1414\n", + "2023-07-02 21:44:46,870 - modelscope - INFO - epoch [1][4805/4982]\tlr: 1.033e-05, memory: 14449, loss: 0.6551\n", + "2023-07-02 21:44:49,076 - modelscope - INFO - epoch [1][4810/4982]\tlr: 1.031e-05, memory: 14449, loss: 1.6857\n", + "2023-07-02 21:44:51,074 - modelscope - INFO - epoch [1][4815/4982]\tlr: 1.030e-05, memory: 14449, loss: 1.9123\n", + "2023-07-02 21:44:53,385 - modelscope - INFO - epoch [1][4820/4982]\tlr: 1.028e-05, memory: 14449, loss: 1.4424\n", + "2023-07-02 21:44:55,581 - modelscope - INFO - epoch 
[1][4825/4982]\tlr: 1.027e-05, memory: 14449, loss: 2.2789\n", + "2023-07-02 21:44:58,108 - modelscope - INFO - epoch [1][4830/4982]\tlr: 1.025e-05, memory: 14449, loss: 1.9641\n", + "2023-07-02 21:45:00,888 - modelscope - INFO - epoch [1][4835/4982]\tlr: 1.024e-05, memory: 14449, loss: 1.6689\n", + "2023-07-02 21:45:02,999 - modelscope - INFO - epoch [1][4840/4982]\tlr: 1.022e-05, memory: 14449, loss: 1.9693\n", + "2023-07-02 21:45:06,302 - modelscope - INFO - epoch [1][4845/4982]\tlr: 1.021e-05, memory: 14449, loss: 1.3166\n", + "2023-07-02 21:45:09,602 - modelscope - INFO - epoch [1][4850/4982]\tlr: 1.019e-05, memory: 14449, loss: 1.5213\n", + "2023-07-02 21:45:12,571 - modelscope - INFO - epoch [1][4855/4982]\tlr: 1.018e-05, memory: 14449, loss: 1.8047\n", + "2023-07-02 21:45:14,672 - modelscope - INFO - epoch [1][4860/4982]\tlr: 1.017e-05, memory: 14449, loss: 1.5372\n", + "2023-07-02 21:45:17,717 - modelscope - INFO - epoch [1][4865/4982]\tlr: 1.016e-05, memory: 14449, loss: 1.3180\n", + "2023-07-02 21:45:20,504 - modelscope - INFO - epoch [1][4870/4982]\tlr: 1.014e-05, memory: 14449, loss: 1.3500\n", + "2023-07-02 21:45:23,506 - modelscope - INFO - epoch [1][4875/4982]\tlr: 1.013e-05, memory: 14449, loss: 2.2521\n", + "2023-07-02 21:45:25,399 - modelscope - INFO - epoch [1][4880/4982]\tlr: 1.012e-05, memory: 14449, loss: 1.9281\n", + "2023-07-02 21:45:28,444 - modelscope - INFO - epoch [1][4885/4982]\tlr: 1.011e-05, memory: 14449, loss: 1.4693\n", + "2023-07-02 21:45:31,381 - modelscope - INFO - epoch [1][4890/4982]\tlr: 1.010e-05, memory: 14449, loss: 2.0117\n", + "2023-07-02 21:45:35,557 - modelscope - INFO - epoch [1][4895/4982]\tlr: 1.009e-05, memory: 14449, loss: 0.5264\n", + "2023-07-02 21:45:39,804 - modelscope - INFO - epoch [1][4900/4982]\tlr: 1.008e-05, memory: 14449, loss: 1.2449\n", + "2023-07-02 21:45:42,752 - modelscope - INFO - epoch [1][4905/4982]\tlr: 1.008e-05, memory: 14449, loss: 1.3134\n", + "2023-07-02 21:45:45,007 - modelscope - INFO - epoch [1][4910/4982]\tlr: 1.007e-05, memory: 14449, loss: 0.9836\n", + "2023-07-02 21:45:47,247 - modelscope - INFO - epoch [1][4915/4982]\tlr: 1.006e-05, memory: 14449, loss: 1.8653\n", + "2023-07-02 21:45:49,545 - modelscope - INFO - epoch [1][4920/4982]\tlr: 1.005e-05, memory: 14449, loss: 1.9227\n", + "2023-07-02 21:45:52,533 - modelscope - INFO - epoch [1][4925/4982]\tlr: 1.005e-05, memory: 14449, loss: 1.1875\n", + "2023-07-02 21:45:55,303 - modelscope - INFO - epoch [1][4930/4982]\tlr: 1.004e-05, memory: 14449, loss: 1.9453\n", + "2023-07-02 21:45:58,165 - modelscope - INFO - epoch [1][4935/4982]\tlr: 1.003e-05, memory: 14449, loss: 0.6951\n", + "2023-07-02 21:46:01,430 - modelscope - INFO - epoch [1][4940/4982]\tlr: 1.003e-05, memory: 14449, loss: 0.7973\n", + "2023-07-02 21:46:04,313 - modelscope - INFO - epoch [1][4945/4982]\tlr: 1.002e-05, memory: 14449, loss: 1.8844\n", + "2023-07-02 21:46:06,392 - modelscope - INFO - epoch [1][4950/4982]\tlr: 1.002e-05, memory: 14449, loss: 1.5102\n", + "2023-07-02 21:46:08,801 - modelscope - INFO - epoch [1][4955/4982]\tlr: 1.002e-05, memory: 14449, loss: 2.2773\n", + "2023-07-02 21:46:11,500 - modelscope - INFO - epoch [1][4960/4982]\tlr: 1.001e-05, memory: 14449, loss: 1.6867\n", + "2023-07-02 21:46:13,716 - modelscope - INFO - epoch [1][4965/4982]\tlr: 1.001e-05, memory: 14449, loss: 2.5187\n", + "2023-07-02 21:46:16,514 - modelscope - INFO - epoch [1][4970/4982]\tlr: 1.001e-05, memory: 14449, loss: 1.1453\n", + "2023-07-02 21:46:19,686 - modelscope - INFO - epoch 
[1][4975/4982]\tlr: 1.000e-05, memory: 14449, loss: 1.6125\n", + "2023-07-02 21:46:23,065 - modelscope - INFO - epoch [1][4980/4982]\tlr: 1.000e-05, memory: 14449, loss: 2.1379\n", + "2023-07-02 21:46:24,007 - modelscope - INFO - Saving checkpoint at 4982 iter\n", + "2023-07-02 21:46:24,163 - modelscope - INFO - deleting checkpoint: /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/iter_4800\n", + "2023-07-02 21:46:24,209 - modelscope - INFO - Train finished. Uploading models, waiting...\n", + "2023-07-02 21:46:24,299 - modelscope - INFO - {'done': True}\n" + ] + } + ], + "source": [ + "def cfg_modify_fn(cfg: Config) -> Config:\n", + " cfg.update(CONFIG)\n", + " return cfg\n", + "\n", + "\n", + "trainer = EpochBasedTrainer(\n", + " model=model,\n", + " cfg_file=cfg_file,\n", + " data_collator=data_collate_fn,\n", + " train_dataset=train_dataset,\n", + " eval_dataset=val_dataset,\n", + " remove_unused_data=True,\n", + " seed=42,\n", + " device='cpu', # No placement for model, leave the model to `device_map`\n", + " cfg_modify_fn=cfg_modify_fn,\n", + ")\n", + "\n", + "trainer.train()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 可视化\n", + "tensorboard 命令: (e.g.) \n", + "`tensorboard --logdir /home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505 --port 6006`\n", + "\n", + "\n", + "The following code is copied from baichuan_sft.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "dict_keys(['lr', 'loss', 'evaluation/acc', 'evaluation/loss'])\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAApIAAAHDCAYAAACXsvqpAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACdkElEQVR4nO29d5wcV5X+/XRP6MlB0oykkUbJQc45CRtssLEwNjZg+2e8BkxeWHnBC7vLGlgwsCCz+y5LNtE2yXjBiw0YB5xzzlG2bMmSlUZhcp7pev84XVOhK9xKHaaf7+cjTXdX1b230q2nzj3n3JSmaRoIIYQQQggJSLrYDSCEEEIIIeUJhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhOS46qqrkEqlsHHjxmI3hRBCygIKSUIIIYQQEgoKSUIIIYQQEgoKSUIIIYQQEgoKSUII8eBHP/oRDjzwQGQyGXR1dWHNmjXo6+uzrPPKK6/g7LPPxoIFC1BXV4fFixfjfe97H/r7+2fWufXWW3HCCSegra0NTU1NWLlyJb7whS8UeG8IISReqovdAEIIKVUuvfRSfPWrX8Upp5yCT33qU1i3bh0uv/xyPProo7j//vtRU1ODiYkJrF69GuPj4/jHf/xHLFiwAFu2bMENN9yAvr4+tLa24vnnn8cZZ5yBQw45BF/72teQyWSwfv163H///cXeRUIIiQSFJCGEOLBz506sXbsWp556Km666Sak0zKAs99+++Giiy7Cb37zG3z4wx/GCy+8gA0bNuAPf/gDzjnnnJntv/zlL898vvXWWzExMYGbbroJ8+bNK/i+EEJIUnBomxBCHLjtttswMTGBiy++eEZEAsDHP/5xtLS04K9//SsAoLW1FQBwyy23YGRkxLGstrY2AMCf/vQnZLPZZBtOCCEFhEKSEEIceP311wEAK1eutPxeW1uLFStWzCxfvnw5PvvZz+LnP/855s2bh9WrV+OHP/yhxT/yvPPOw/HHH4+PfexjmD9/Pt73vvfh97//PUUlIaTsoZAkhJCI/Pd//zeeeeYZfOELX8Do6Cg+/elP48ADD8Qbb7wBAKivr8c999yD2267DR/4wAfwzDPP4LzzzsPb3/52TE9PF7n1hBASHgpJQghxYOnSpQCAdevWWX6fmJjAhg0bZpbrHHzwwfjSl76Ee+65B/feey+2bNmCH//4xzPL0+k0Tj75ZHz729/GCy+8gG984xu44447cOeddya/M4QQkhAUkoQQ4sApp5yC2tpafO9734OmaTO//+IXv0B/fz9OP/10AMDAwACmpqYs2x588MFIp9MYHx8HAOzZsyev/MMOOwwAZtYhhJByhFHbhBDiQEdHBy655BJ89atfxTve8Q6ceeaZWLduHX70ox/h6KOPxvvf/34AwB133IGLLroI5557Lvbdd19MTU3h17/+NaqqqnD22WcDAL72ta/hnnvuwemnn46lS5eip6cHP/rRj7B48WKccMIJxdxNQgiJBIUkIYS4cOmll6KjowM/+MEP8E//9E+YM2cOPvGJT+Cb3/wmampqAACHHnooVq9ejb/85S/YsmULGhoacOihh+Kmm27CcccdBwA488wzsXHjRlxxxRXYtWsX5s2bhxNPPBFf/epXZ6K+CSGkHElp5jEbQgghhBBCFKGPJCGEEEIICQWFJCGEEEIICQWFJCGEEEIICQWFJCGEEEIICQWFJCGEEEIICQWFJCGEEEIICUXB80hms1ls3boVzc3NSKVSha6eEEIIIYT4oGkaBgcH0dXVhXTa3e5Yc
CG5detWdHd3F7paQgghhBASkM2bN2Px4sWuywsuJJubmwFIw1paWgpdPSGEEEII8WFgYADd3d0zus2NggtJfTi7paWFQpIQQgghpITxc0NksA0hhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAkFhSQhhBBCCAlFICG5bNkypFKpvH9r1qxJqn2EEEIIIaRECZSQ/NFHH8X09PTM9+eeew5vf/vbce6558beMEIIIYQQUtoEEpIdHR2W75dddhn22msvnHjiibE2ihBCCCGElD6hfSQnJibwm9/8Bh/5yEd8p88hhBBCCCGzj9BzbV9//fXo6+vDhz70Ic/1xsfHMT4+PvN9YGAgbJWEEEIIIaSECG2R/MUvfoHTTjsNXV1dnuutXbsWra2tM/+6u7vDVkkIIYQQQkqIlKZpWtCNXn/9daxYsQJ//OMfcdZZZ3mu62SR7O7uRn9/P1paWoK3OE6y00A2C1TXFLcdhBBCCCElxMDAAFpbW331Wqih7SuvvBKdnZ04/fTTfdfNZDLIZDJhqkmeLa8B0ICFyykmCSGEEEICEnhoO5vN4sorr8SFF16I6urQLpYlQs4YOz5a3GYQQgghhJQhgYXkbbfdhk2bNuEjH/lIEu0hhBBCCCFlQmCT4qmnnooQbpWEEEIIIWSWwbm2CSGEEEJIKCgkCSGEEEJIKCgkCSGEEEJIKCgkCSGEEEJIKCgkCSGEEEJIKCgkCSGEEEJIKCgkASBV7AYQQgghhJQfFJIAZpTk8ADQv7u4TSGEEEIIKRPKfY7DeNmzXf7WNQCZ+uK2hRBCCCGkxKFF0onsdLFbQAghhBBS8lBIEkIIIYSQUFBIEkIIIYSQUFBIEkIIIYSQUFBIEkIIIYSQUFBIEkIIIYSQUFBIEkIIIYSQUFSukNS0YreAEEIIIaSsqVwhSQghhBBCIkEhSQghhBBCQkEhSQghhBBCQkEhCQBIFbsBhBBCCCFlRwULSVOwDXUkIYQQQkhgKlhIEkIIIYSQKFSukPTK/sPMQIQQQgghvlSukCSEEEIIIZGgkCSEEEIIIaGgkHSEY9uEEEIIIX5UsJCkWCSEEEIIiUIFC0kvmA+IEEIIIcQPCklHaK0khBBCCPGjcoUktSIhhBBCSCQqV0ha4FA2IYQQQkhQKlhI0iRJCCGEEBKFChaSZigqCSGEEEKCQiFJCCGEEEJCUXlCMpsFNAcLpNNvhBBCCCHElcoSkpMTwJb1wJ7txW4JIYQQQkjZU1lCcrBX/o4M0gJJCCGEEBKRyhKShBBCCCEkNipMSNIKSQghhBASFxUmJAkhhBBCSFxQSAI0VBJCCCGEhCCwkNyyZQve//73Y+7cuaivr8fBBx+Mxx57LIm2JQzVIyGEEEJIFKqDrNzb24vjjz8eb33rW3HTTTeho6MDr7zyCtrb25NqX4GgqCSEEEIICUogIfmtb30L3d3duPLKK2d+W758eeyNKgh52pFikhBCCCEkCIGGtv/85z/jqKOOwrnnnovOzk4cfvjh+NnPfpZU2+KHWpEQQgghJDYCCcnXXnsNl19+OfbZZx/ccsst+NSnPoVPf/rT+OUvf+m6zfj4OAYGBiz/CCGEEEJI+RNoaDubzeKoo47CN7/5TQDA4Ycfjueeew4//vGPceGFFzpus3btWnz1q1+N3tKkobWSEEIIISQQgSySCxcuxAEHHGD5bf/998emTZtct7nkkkvQ398/82/z5s3hWho7VI6EEEIIIVEIZJE8/vjjsW7dOstvL7/8MpYuXeq6TSaTQSaTCdc6QgghhBBSsgSySP7TP/0THnroIXzzm9/E+vXrcfXVV+OnP/0p1qxZk1T7YkZz/EgIIYQQQoITSEgeffTRuO666/C73/0OBx10EL7+9a/jO9/5Di644IKk2lcYtGyxW0AIIYQQUnYEGtoGgDPOOANnnHFGEm0pHnt2ANhh/U3LAkgBqVQxWkQIIYQQUvJU8FzbHmPbWhbY8iqwbWPBWkMIIYQQUm5UsJD0YGIc0DRgerLYLSGEEEIIKVkoJFXQNGDXFqBvZ7FbQgghhBBSMlBIqjA+CowOA4O9xW4JIYQQQkjJULlCMkj6H425ggghhBBC7FSWkNRcvxBCCCGEkIBUlpBUxpy4nIKTEEIIIcSJChOSFIWEEEIIIXFRYUJSEepNQgghhBBfKCQJIYQQQkgoKlhIBgrbTqwVhBBCCCHlSuUKSWpDQgghhJBIVK6Q9ERz+UwIIYQQQnQoJAkhhBBCSCgoJP2gQZIQQgghxBEKSUIIIYQQEooKE5KKvo+0QhJCCCGE+FJhQtKEp1ikkiSEEEII8aNyhSTFIiGEEEJIJCpLSIbSjraNNApQQgghhBCg0oSkGS89SK1ICCGEEOJL5QpJKklCCCGEkEhUsJBUhJqSEEIIIcSR6mI3IHE0DejfBVTX5P9OCCGEEEJCM/uF5OQ4MNgrn+sai9sWQgghhJBZxOwf2s5OuyxQ9ZGk5ZIQQgghxInZLyTddGDoWBsKS0IIIYQQoBKEpAVaGgkhhBBC4qIChGQYwch5uAkhhBBC/KgAIekC00gSQgghhESisoSk5vrFa8Xi4hosRAghhBBSXGa/kFQWj27bF1FU9vYAW14FxoaL1wZCCCGEEBdmv5B0E49eAtG+qFhacqhP/vbtKlIDCCGEEELcqQAhGYYSGtoGgFSq2C0ghBBCCMmjsoSkFjb9D9MGEUIIIYTYqSwhacYz1qbExCItkoQQQggpQSpASIaZ2sbE1ERsLSGEEEIImU3MfiFpGZVWtTSa1tu5JZkUPNksMDGm2CZaJAkhhBBSesx+IelGkDSSk5Nq2wVhxyb5Nzrkvy51JCGEEEJKkAoTkiUUNKMPmY8MFrcdhBBCCCEhqQAhqTl+VN4m2IbCxBgw2Btj0A5NkoQQQggpPaqL3YDEcZvZJkhC8qDs2CR/02mgsRWYngamp4DaTLjyzDpyckISlTe3A9U1ERtKCCGEEBKe2S8kzYS2SIYqREQfAGx9TbbrXAJk6tS3n8GkJHdsArQsMD0JzFsUoqyQaJrUWVXDdESEEEIIARBwaPvSSy9FKpWy/Ntvv/2SalsCFMtHMldXHHNma1n5O1ngtET9u4BtG4GBPYWtlxBCCCElS2CL5IEHHojbbrvNKKC61I2abnNth9imWOgWQPNwfE1tYdsw2Ct/B3YDrXMLWzchhBBCSpLAKrC6uhoLFixIoi3JozpFYonpyBnM+SyrCywkCSGEEEJsBI7afuWVV9DV1YUVK1bgggsuwKZNmzzXHx8fx8DAgOUfCUrOIpnNFrcZhBBCCCEmAgnJY489FldddRVuvvlmXH755diwYQPe/OY3Y3DQPRfi2rVr0draOvOvu7s7cqMDEWqGxIjpf+KGsS2EEEIIKUECCcnTTjsN5557Lg455BCsXr0aN954I/r6+vD73//edZtLLrkE/f39M/82b94cudHBcBvODjC0XfShbl1JFr0hhBBCCCEzRIqUaWtrw7777ov169e7rpPJZJDJ
hMyfGDdxpP8JlGS8wKZETZNgmNo6oL6psHUTQgghpOKINLPN0NAQXn31VSxcuDCu9hSO2GadKQCqeRtHhyQ9z66tybaHEEIIIQQBheQ///M/4+6778bGjRvxwAMP4D3veQ+qqqpw/vnnJ9W+mFEUj+Uws6GTEJ6eSrBCQgghhBArgYa233jjDZx//vnYvXs3Ojo6cMIJJ+Chhx5CR0dHUu2LjqvlMcjsNXEpywDlmNs9k0cypmYQQgghhMRAICF5zTXXJNWOwlCIiW3KacicEEIIISQCkXwky49iTZEYAosgZf4fQgghhJQeFSYkQxJIcyYwDE4dSQghhJASZPYLyew0cMPPgfv+ZP09saFt85cICtC3fSVuUSWEEELIrGf2C8lN64CXnwAeucW2IKwQ89suicAch4Tk1JGEEEIIKTKzX0hOjMdQSJBo6xiqI4QQQggpA2a/kNSmXX4PqfgKNeTM6G9CCCGElDizX0hms8ZnLeu+nhdBNJ0WV2R4GUWYE0IIIaQiqQAhabJITps+B7L4JWW9jLItxSUhhBBCiksFCMkYLJKBSMIiSQghhBBSesx+IWkWj2ZRGWy8Op5Vo1bJ0W5CCCGElBCzX0iah7bNn52E2OgQ0LdTPj98E/DMfe7rumJO0eOyodKwup+SJIQQQggpLoHm2i5LpqeMz1mPCO6//QZ4/kH5fs5ngPv/Ip8PfhMCmQJjSyPplzOSopIQQgghxWX2WySnJo3PbkPbA7sNEQkArz5tfB4fs23nh4ropAgkhBBCSPlTYULSxSI5OWH9PthrfB4bBibG1OszWxJHhpzTASmNbFNsEkIIIaS0qQAhaR7aNkdwm4SaXSjqfpIA8PxD1mVB9F12GhjcE2ADNygqCSGEEFJ6VICQNFkb3SySYyPW770mIfnwTVYLpR92S+LIkNNKKgWp10kIIYQQUgQqQEi6DG2bBd+4TUhOT1q/D/Xllzsxbg3kCUIcOpJD34QQQggpMrNfSE67BduYsFsk7YzarIqTE8CO14Gtr+WvG5vAo1AkhBBCSGkz+4Wka7CNh0XSzsig9fukV/CNTQCmUv7rJAEtloQQQghJmNkvJPt3GZ8twTamdewWyX2PsH4fHgBu+TXwyC2IXwQWYM5vQgghhJAEmN0JyR/8K7DelBPSzSJpF5JnfEwiua/9HrB9I3D/n41l7/o4ACcro16sgtjzTTbusg51JCGEEEJKiNltkVx2oPW7m0Vyz/b8bWvrgBUH5/8+NZn/mxeOQ9sujI8Cu7c51MHpEgkhhBBSesxuIblgKZBpML47WSS1rIg3J/Y/Ov83v8CcwL6JpvV7Nos/Zu+OQJuFXIEQQgghJBKzW0imUsBHv2Z81xyitvt3A5Pjztu3zpN5t82MDVu/j48an0eH8pc74Sc2pyYZLEMIIYSQkmd2C0kA6N4X6Fohn6cdEpIP93tv3zLH+n3UZpHs2SxD5tPTwK6t+RHeXv6UgIvhMMBweKByCSGEEELiY3YH2wBAXQOQrpLPThbJcVsqn6oa6/emNut3J4vj5Hh+rklPHObfNpNy+Z0QQgghpISY/UISMISkk0VSzwnZ1CaWy2NWW5dX24Slk5Ds2azWDuXo6xR9IAkhhBBS8lSIkMyN4Gs2IXn3/wGP3y6fO7sl7Y8fKj6QZvxGqZ30YCrlsoA5JwkhhBBSOsx+H0nAsEhms8C2jcC918twtC4iAUn340Zzu/E5qJB0xGdo23P9AJsRQgghhCRIhQnJaeB3/wk8+jfgqXus63gJyfP/BcjUy2e/9D92NNcv7qRUhraD1EsIIYQQEj+VJSTNib4H91jX8RKSTW3AwSfI58AWSSdroub8eQa3oW2fcgkhhBBCCkiFCMncbva8YfxW12hdx0tIAhEskiEEn92vMtQUiRSahBBCCEmWChGSOYtk/y7jt1FbvkdlITkcTByGSiweQx5JQgghhJCEqQwhWZUTkuYZbIYHrOtkfISkLjSDWiT9hradlmezQN/OgPUQQgghhBSWyhCSqdxuegrJeu8yzBbJIPhZJJ0WT4w6/EgIIYQQUlpUhpDUh7YnJ4zf7EKye6V3GWYfycSHtl0LUy+Xc3UTQgghJGEqQ0g6Dm2b5th+70XxWyRrMvLXUdDFkEeSEEIIIaTIVIiQzE3gM2GaV3t6Sv7WNQLLDvAvo9Ycta0g7FJ6wIxPMvFYDZYUnIQQQggpHJUlJJ2EVpXiLJG6RXJqApic9F4XMPwyHbWdg5LkUDUhhBBCyowKEZI1HssUhWRtnbHua8/6r69bJJUFoMd6E+PA1teAoX73dYKURwghhBASAxUiJD3EoqqQTKeN2W2eust/fdWh7ZnfPITf5LhM72j2z6SFkhBCCCFFpjKEZHUMQhIAFu8tf/t3+6+bckoq7jBFjS4I49aF1JmEEEIISZhIQvKyyy5DKpXCxRdfHFNzEiKOoW0AaGqXv/Z5up1wFJJexKH8qB4JIYQQUjhCC8lHH30UP/nJT3DIIYfE2Z5k8LJIei2z09wqfwf2AFoWePhm4KEbndcNKiTjGKqmjiSEEEJIAQklJIeGhnDBBRfgZz/7Gdrb2+NuU/zE4SMJAA2tIhCz08ANvwDu/zPwwA3AuNNMNA5C0kvoxe7zSFVJCCGEkGQJJSTXrFmD008/Haecckrc7UmGuIRkVRXQnBPOrzxp/O4kJL0sko5zbQcVfhSKhBBCCCkuAVSUcM011+CJJ57Ao48+qrT++Pg4xseNGWUGBgY81k6IPB/JFGaEWBAhCQBHvA2461rrb0GFpBOxWCQ1x4+EEEIIIUkQyCK5efNmfOYzn8Fvf/tb1NXVKW2zdu1atLa2zvzr7u4O1dBI2MViQ7NpmUcgjhOHvSX/twnFoW2vVEBBhSSFIiGEEEKKTCAh+fjjj6OnpwdHHHEEqqurUV1djbvvvhvf+973UF1djenp6bxtLrnkEvT398/827x5c2yNV8YuJOub3Jf5ka5Gnkh0skgO9QK//Rbw1yuClR8bVJqEEEIISZZAKurkk0/Gs89aZ3X58Ic/jP322w+f//znUVVVlbdNJpNBJpOJ1sqo2COzG5oBPRVkXUPw8j7078DrLwKvPAW88QowPpa/zo1XATtel3/vuFD8K2e0XUIirxy1o6aJEK+tk6TvhBBCCCkbAgnJ5uZmHHTQQZbfGhsbMXfu3LzfS4pq2/C1eWjbbJ1UZc4C+bf5ZfnuNLS9Z5vxeXQIaGo1vsci+MpRNTrQv1vyctY1AB2Li90aQgghhASgMkxAeT6SJvEYRkjqZOrlr31oW9OAPTuM76OD+gKHQuIUhGUYbDPUJ3/HRoraDEIIIYQEJ3DUtp277rorhmYkjFewTUMEIVnrIiRHBoAJ03D3yJBtQ5vKy04DU5Ph2wEAE+NWMdazCZjbFW3/CCGEEEI8iCwkywJ7ZHZ9s/PnoOgWSfvQ9u7t1u8zFkkXtrwavG67xXHH6/nr7N4KNOwbvGxCCCGEEAUqY2jb7iPZ2GJ8jmKxa2qTv327rL9vXmf9PuIjJENRLmPXhBBCCJmtVIaQtA9
tN5oCX+oaw5fbmQsOef1FI/AGkEhuAKjJRauP5oa29VyRThPbJMVwf8IVEEIIIaRSqUwh2dAEnHQO8Jb3Rgu2mdtlzGDzh+8Arz4jnwf75O+iveVvnkWygErSHPRDyouJMTl/01PFbgkhhBDiSGUIyXSVdcrC6lqZ6vCoiHOF19QC7fON7889KH/HhuXvnNyyvGAbQhTYsUksyr09xW4JIYQQ4khlCMlUCkiZdrWmNr6yzRHgPZuB3h1GxPacBfJ31CQkY5lTm1QUk+P+6xBCCCFFoEKEJKxC0h58EwXz0PjgHuDKr+bqTAFtHfJ566vAFZfKtIk7twB9O6PXS0EaP709YgUstWNbYs0hhBBCdCpDSCJlnX4v5bPbQQJwTjjLOmw+U0aTNTq8rwe481pgnIm3S5ahPrEmj5aaKwKVJCGEkNKkQoQknMWeG/O61Ndt7wQu/kH+7w3NQEOL9TdzZDeJj+kpoH8XMDVR7JYQQgghFQWFZNR13dZvaAbqG6y/9e4ovWHT2cCe7cDAHhmWLkVGh2TYnOeeEELILKOChGTCu2r3u2xqlTrtItPsH5nNUlzEgT5FZTYbU4EBXyT82LVVhs2HQub05DVCCCGkRKkcIZlOeFff98/W722d8rfaFiG+faP8HRsBfv4l4Iafu5c5MSbJzTUngURxkRgx68gZmA+SEELILKNyhGTQ4eqgdHYDK480vs+ZDyCVb6l87Vn5+8x9YqV65Un3Mv/0Y+D3/yPrekGLFSGEEEKKQIUIyVTyQ9uANdpbT0ZuF5KvvyR/d281fnO0OMIIznnqnvxl1I7xUggxHvpdhiebEEJIaVIZQjKF5C2SgHUO7/YFUq89cntsWHz5+ncbv034JJzWpmNr4qwkdp1VgGuFEEJmE/27OQtXhVIZQhKwTmUYB01t+f6Ph50IHLgKWHkU0LUMQAo49f1Acztw8vuM9cZHrLOV6MEibsQWREKUKDUdSYMkIaSU0TRgYLe4a00yDVulUV3sBhSM1e8H7vyDzLEdB6lUvuCoawBWf0A+p3OHtmMR8PFvyOd7/ig32fgoMGESj75C0skiqbl8rkRi2H/L0HapKUlCCCkT6LNfcVSOkGyeA5z598VtQ6ZRhOTYCDBmEo8TES2SvG9nOTzBhBBCSpPKGdqOm1QKnpYrp8V1uQTlY8P+FslpkxUyqaFtTXMue3wUmJpMps6ShWKNkJJieso9EJEQUjJQSCaGg5LUheSml6zm//Gx/M3NQtNpaDsO3bPzDWDLeqtoHR8FejYD2zbEUEEZUZCR7ZAFU+OSSmNqEtj6GrBtY7FbQpRgJ1XJVIiQTEIZhChTT0H02G3W352Gts1WyvERQ0zu2QHc+ltgcI9p5ZA3sV7H6JCpLQ6itiIo5Y6wlNtGSAKMjchfJvEnpOSpECGZAGHSCe143fn38RGH30xCUtNEfI4Mioh89n7gt98KXj9xx2whpm4jhIRB0+TFvNIEMPvMioZCUhV9WFoVJx/KE85yXnfUR0gCwH1/Av7wHaB/l3wfGQCmcmkWIt/E7AUiMTHOFE2ExEm5Jk4YHQR2bZVh+YqFz5NKozKEZNROqXOxcyF+5dqXH3aSc/qhseH830YH83/bvQ2Y22V83/SyTwMComli9axIQnZ+o8Niae7Z5L9uuT4cCSFqOBkFCJnlVIaQjErGzRoZUBmk08CSlfm/OwnJkaH83wCrYHntmdyHmN4Ah/qsPpJ9uyonejvsIRwZkL9MwktIjPCtq7ygFbKSoZBMDKeOUANa5hpfW+fJ31EH0ej0m/33uIdP7OJ1cI9EdlccATpFJt8lhBBSwVBIhsUv2CY185+BpgEtc4zvcxbI31GPoe2DT3Cvw81qGQb7bDs6FWORDCkIA21HKwshZBbC9+mKpkKEpOkBHibaOi40DaitM77PXSh/nayPukictxA4+9NwFCFuVssw7N4WX1kVBXtQQgghlUuFCEkzcQpJr7Ic5uLWOfFsYK9DgcPeIt/HR2QGBz11BGD8rW8Glu4HHPSm/HJ0q2Ucw6tJD9FmsyU+DBwy/U8p7xIhhBQEdoSVTOXMta2TTgEOE8X44iQKfTWpywpHniz/9FxjmiZzbz99N/DADcC7PiEpJACguV3+OqUfinNoO5VGuAOjwNSkzJRT3wjMW5RMHVEJLXLZgRISO/QCIaRsqDyLZKpAu6zSEVZVG0PdY0MiIgHgLz+VSO7GVmDhcvmtrjF/+9GhmKyRkIhy1+UR6xjqk79OvqAlCYNtCCkuVJJlC/vEiqMChWTYDirodorr1+cEopO/47IDDIFntkhW18jf7HT+lIa9O2QWnKBTHSYqsMvhoVCIYBtCCJmFsB+saCpjaNusYwplkbTX60ZdE9C/G7jmv/OXmVMFmS2Sja3AYK8IyaE+oDW33tgIcOVX5XNtHXCIR8S3nXQ5iL0ECd0PsgMlhBBSuVSeRVKVxhb529AcoRAFcWaO4raj+0cCVotkpt4Qlr/8D0PLmHM+DvcrtxLQCiuwdUaHJMioJAhrkfRbTqGpzNSk8YJEiA7vIUJKmgoRkjZBpyKa2juBjkXAnPnq5YZh3GNKrVaTRbLeZJGsrQNWHiGfN6+TQBbAOpw9Mgg8fjvQ26PWjiTTIjkVPTFuBBS5oWkyVN+/O5FmWety+RxoQxKJns1A305gz45it4QQEhYK/4qjQoSkmZSaaEqlxeoX1lKnKsy85rY2Jy+3WyRPOhdo65Dv+nD2uCmh+NP3AHf/H/D7b6u1w4vB3uhl2JlSmFJwagIY6gcGCiAkwypJc6e5cwuwa4v7usXMYVoO6FkMxjhfMSGElAuVJyRTsD7Qm9uBhcvCl+W5XEE4dO/rvqzdZA01+0imcmL4hHfL976dMizoFGAzPOBctv2t0estsn+X+7KwqLy1mtcp1bdcc7vGhiUynUOzhETE3HeW6L1PTPAcVTKVEWxjxyzwGlqA6triteWkc2So+ul75HtdI7BoL2D5QTK8rs84Y26jLl72OQyoyQCT48DAHqtFMjCl3hFoSDT620lYT47L8Y1kSSz140pICWLXkTTm+zMxLhk9vFK5JYXm+oVUAJVnkQSsw9XKHVTEnkxP2WOnvgl467nG985u4KxP5iKuXaZ21C1eqRTQOk8+9+9yF5JK1j//VYpKoS2S/buAHZv8fUz92lXqx7Uk4UEjJBBjI8CO14HtG4vdElKBVKiQDCEK7ZvUBLVietSZrnIu122TrOlB25YTkn073XNHqszJnahQc9qRgPXpqwdtp6YpbmNbR/cL9Y1+p+ghpOyYmpQXRS8f9XJC7+N1P+Niwi6x4qCQDGNprG/K+Sz6OkkGL9syzO6yvTlljopF0jFYxn63F/juD1xdThDueN0Y7vdjegp4Yz2we3sC7dG3883/E7JgQoiQwD20Z4e8eKv2JcSH0GkvyCygQoSkTThGzZmo55gMWK0nnd3y96A35dZ32ODIk+Xv8Wcav5mF5BuvWNfXA3SchGSejizBm98ebDM+AkxOqFsRhvoBaMCoyvoxBvaU4KEsSybHGbhUsZj6vy
TuJ15XhMRGIEV1+eWX45BDDkFLSwtaWlqwatUq3HTTTUm1LT7smixyGpYw2/tsc+7FwAX/BizdL7e6w/pveS9w0betUeZ6CqBXn7EOw3btBcxdIJ+TSN8TmYDDzWHEXZDTFOvDSnP8yIiBAEyOA9tfB7a8WuyWJI+WLY0hSULCwpfniiaQkFy8eDEuu+wyPP7443jsscfwtre9DWeddRaef/75pNoXP/b0P5Hwu3sC1JOpB+YvsW7rJIDtM+HoFkkzH/0acN5nJSIdUBOSJWmRNH8OE7pZNCVJolIquSSzBZh5adtGYOtrarlVK5JyujdLpK3ZrPiBmr8HZbBPhv4D+6UHr4qUN4HS/7zrXe+yfP/GN76Byy+/HA899BAOPPDAWBuWKGYhmYBx0bfOONc3Jy0HgH/4/4zk5bqQHPLxkdQ0JHr3h9Xtlg6sXHuncm03wfgY0LMJaGqTVFxJoVsjR4eB5iKmIiOzh62vSv+5cJmkBdq9TUavzNPu+tGXy1hR36QwVfBs6KtJWEI7C05PT+Oaa67B8PAwVq1aFWebkifyvNIFGKJUFZJVtneBTL3xuTF38ytZJNWqiw2l+mxi15JbLuYGx1leKVp3SXAGcon4h/qK2ozKpITuoWy2/O5pvb1jo0ZAUd/OcGUVwipPyprACcmfffZZrFq1CmNjY2hqasJ1112HAw44wHX98fFxjI+Pz3wfGHCZaaVg2KdIVBWFDusFGdkOPJyecq7Tj6Y2w1dyZmi7L3+9vLaXoJK0D20XbIrBpI5FhHLj3v/JCWDPdrFo1zfFVy4hcVNMETc9JW4HtXU216NKooRE9PSUGEaaWos7kQixENg0t3LlSjz11FN4+OGH8alPfQoXXnghXnjhBdf1165di9bW1pl/3d3dkRocC6XoI5m3achtzZvpwxGDexxWDDBFYmRCj217LFJob5BjaIkQV98sULmlxO5tkv5k19Zit4TMJjRNrqvZYsXS8zO65eglQp4/e0Ls2ipCsueN5OoggQksJGtra7H33nvjyCOPxNq1a3HooYfiu9/9ruv6l1xyCfr7+2f+bd68OVKDw2ETFFGHtp30SevcaGXm1RFABB2zWv7udzQsjdOjtt9Yb43odurkCyV49Hpmy1zbjpRBW2fLg56UFmPDkuh7x+vFbgkBco+DQozkFKjP0wU9sxyUFJHn2s5ms5ahazuZTAaZTCZqNfESeWYbh+2b58ibkusDOqFgGwA47p1A1wpgqS3gqX0+0LEY2PkG8MIjwNFvl/xpW161zqZTDALPNmMf2nXZXtMk+jXwsEecPpLxFVWR8PiVL3qOV3PEcFDGx4Bhc+5XXhDhiUFE8vATHwIJyUsuuQSnnXYalixZgsHBQVx99dW46667cMsttyTVvmRIwtcuZfe9jFpPgG2ra4AVBzuLp85uEZJ6BJ4++40lIa/qNIJxoKfxCeEjaV82OgyM2Hxud2+XBORtnQh0DBPbfZ+C+3ZJHsEko4KdYEpLUqr0bLJ+L6qQmQU3imJ3Gx9UnpVGICHZ09ODD37wg9i2bRtaW1txyCGH4JZbbsHb3/72pNoXD/bYGkv6nxg7Cq/7J3CsTQzzgQPGLDxekdvazH/JM6MjoyYk14BdW/I30WexGdyTnxpJmajHQtHfMps1/Fdb5uRH4OeVOQseaoQQK5oGDA8AmTqgpsRG7wAE7g+pIyuOQELyF7/4RVLtKCAxTJGoP9BLMT7FvmGDSgogLdi+uEUQj41I9ZkGhTIU63H6nAhFHtr2279i6MhsFkgXYxbVQlrIvShz4R4k0r9/l5zvQlvGlSjitVCIS2BkEOjdIZ+7941envneKdQlrAcllQojg+JP2TqvgNk+KpcKmWvbRtQLa2bzBKO2nWa2mVnkuiB/mzyLpNu2ETvr7LQMofe84SMCNNtfVSJGmZeCMHGyqhYNh+ugf5dx/oYHgC3rS3R6zVnM5Lj8i8rUhKStGXDK2GBD02S9ob5ovo0kHIlGhId4BmWng/WXUxO2fqIE+trd26RNo8Phtp8YZ6R+ACgk1TeKvRne1YXMI2lHzyWpJ6V1Ig6RZQ4yChqRrbKOZrdSqbQ5pI+kU9EjgwGm7SviLA+aJoLeadjfj4E9wPiIWBf2bJffwiYxJsHRNJlffPvr0aPq+3aJIOjfFbwNpUYhmjSw2+VarzBr1nQuGHPbBvVtSvnlIxsiulvTJOvAjk22WALiRoUIybxJq2MuT2W1OKO2A5SlWyT7dgKP3OK8adIPD/O+aHkfPPAY2g5s0PTbwGP51KQI8Z1+1tZgxQbbB9vK09PS2dkthtNTInhHh8OLkVIUE5WAlnX+nHzFBayrBNE0oH+33EtFEUUxi9W8+zdA+XowZpD0OrOtvzDvD9OkKVEhQtJGqJltCo1HuzxGtvNo6wSW7Cefr/2e0VFYiKMjUC0jZB7JMPWFPrV20ZrUAz7CcR/YI8MvSVgM6VNUWWiuX9SYnhKLfWKCooBCpRiiqJRutzgMDWWvK8t+BwpOBQpJhzQ9IYoQ/HzekprZxktJ2pal08C5nwbqGsX3apfDEPeQ37SVKXlbv//PuTxxAa17Th1RQINkcIuk/VhoMty3baPzcIVnxL3pNlEZ6lAdgo/y0IoqaEvp4UVKhzCX5PbXxWJfrnOSzzaLmuUkFuBGzzt8ZX48/a6H7LS4/8y66yY8lSckU7AKgyj3WaJR2zFMkaijwYjG1KMD81bwIJ0G7r0eePhm4NffzF8+MgjsNE2152lIDBJs45P+JwgaJNXO1AQw1O+ygkI7ppPymVGI2o67TMuq5kjPWagypybF52nElOha00pr6CrO/iTQOYwh0A7ID2zo7YlWrk4xH9gWt5wyEQ5eUduJ7EOZHBdV/LKF9LwhUzUO7C5cm0qcChSS6RgelDHlePRcP8TQthctuSkcHYWkCcc3Lc2Yk3m4X2aeMLN7mwg08/p+BB7aDthZ5R2jCGWZV4/kfO1lnYxQbChmoVj0oneHRGGag852viGR6SUZLFCs8xPjhVhIC+VQn/RRoYSSh/AqGxQbvmurzwplewDiw++5o2dVsMy+VNlUhpA03xtpm5BU7XcKHejtmesyiJMkZDpEfS7wPR5CcutrwOX/Ctz1h/xlcxcan6P45c04cYcQc4nmlVQsT8kiqdjOQPsQwiKrr9K/yz+NT9zHs9SsN06WR91feKSID4QoL0uxtaE41cZKb4+8BA/7uek4YNn/MhBS2az4SE9OOC+39JOw7tJYmHQ4fqMlCq5M5YT9+LlRBpdKoagMIWkmlY4+tK2cijFCUE8YH8mUy7J0WubdBoCtr7oXe8918vfJu6y/a7BaHAPnFjQdmJ7NMrSctEXSsyyn5V7rmqP4VHwkA/7uvzAkmjxsBvb4i/84h7b7dkoKkSmXBx1RYBaLyuF+sYwl4VYQync4YKBgEKanpM8LI3DdGNgtL4fbNyqsHHUkJ3gR5U8JvNyVGRUiJE13Rzot/9o6gbYOsdYFLWPmc4IXmVceyaDP+XSVMWPCpnXAy084r2eO6L77j6YFmjVJst+Ql
V9gTO8OxbdWL2tekXzbYs0r5vHmG/qt3rad8sM6xmt5sFfOT79CMuwkCHqO3I51WVscwvpIJtSnmY/xnh1iPVQd+o7LwqVpLoF2qsFxIeoc6pN+Vc/LGgdOmTcspzvK8Qpx0Qc9P0ETnheags6oNjuoECFpQheOzW1Ac3vw7aLWq4zHzDZBmRiTuZz1qQsfucV5vXFTwu3Hb7Nm9p+MYpG04TmntAmloB3FAhzTHilW5mmtDFhWiNUCkdfWEMPssbUrRgGgSv9usYYWa7g6Oy3J4JXrr4AHVe9OZ1/URAKdPDrNXVvl2vCaOch+OqJY5zXNlvHBYX81TaFvClqv/Yek34gCuN2Mj8o5iCsQKwkoHgNTGUIyZbNIhqHKQQj6XW/mep22V902f6H7716bnXqB/HUbZrF3aD/4rKT20GATkkEtTbYDVVUVQpCFtbTlCGIR8ErGG2lqRq8ON6AfUlTiDEYC5JgN9ccoDkLurx5JGehBFeOx7d8t0cteM0klVLWFsNohiYfoUC9mpmFMGq/91v0D87I2JDi0be73nQTsUF+Iqfj8Tm6R/MdVVtdnWhp2ypxhYnpaJlYohqiLYpEcH82NyFSWGK0MIVlVY3z2DGLxQMmimPc6G3B786Zho7Y9Fi5YKn+dIrMnxpw7uj98BzO+djoDPhbJ7a8b5WtaflqQdHVwH0lt5r/8Za7b+6/iuG7ekFtAkaWvMjokkcEqdQbFdVvVttquk6gd384t4rLQZxJwpZK2xQkli2EINRZkRhA7xTpepRDw40ncL1H2az/BZpi3cbo2HFORRSTwCErU+iLU4eaGsuN16TvDjCxY2hN1qD7gvvVsFh/xUEFN5UtlCMkak5AMbZE0DceqDndUm7aJU0iG9Z2sb5a/2en8t2A3C4ouLqdMIvOVJyS5t5ufopY1xONgb75ATaUQ+eHg6yMZtANRHY4OYEn1TbXhJYxDHp8wfeDYsIj/KFXr53hkKMTGDpij43duyU855YffPWq+3mel9SCkSbIUD0XibVK9aaIqyZjwPbUR6oxlFFzRhWa4X4a5B/vy19NF96hdSMY0TD89Jf3KqFN/FYObj1tE/SylMoRkda3xOVaLpM9VZq438NB2AqemusYQxHf8rwz/vfqM3MxeomdsxHpj9PaImPS0TOaEntMbpaYFt0gCyfkqBimm5B2xQwyf79ziU0ZcbQmA2So8PgL0bApYgMcDx+wLXHRcjlHRrq0C1quqCaYmJDWZkm+2SqGphHyCfSjUOY17pMOvvLD7paei6yuCv2Rvj7xAOz334rDQz8ZJHTxQjHooc8zWxKCCzmk71Run2mwJLcTQtsLFq7/pvfiIRHDrvioLlrlvs2uLISTbO+Um7O0Rq6bb0ISW98G2MOgNaheVhYzaTmjozzP4xfcHlQqsPosaArzQx2E1jrZ5JLzunx4Pd4OiEuMBC/QcK9TLUciy9dRVfTuDBUi6MTbkPoFCrLuv+MIcO3H3VyY3pYkxoCZjHdlzyiO58w2gpg5om5e/LBBhBJlCHZ5+8OY+sxSNBaVHZVgkUylg4TLxEQwbfZ1KA3UNQG2detSxRcDatkmlvMvxSv/jdXMFeRMyOzzrOclOOgf4yFet6/VsNm6ueYvkb/8ueaPb4pKXcsZH0mWZ0v3pYV0rVgJtJUOom5UpQv2qwtL8884tHpZmr+vEoezxMWDH5oARpiXsI2mmmA+LxNwTg+x/nPUWAa8pAZ0IO+wYONDO94fCEJdxbGRQngU9m62/24/L6LCMYvkFZUbNhOKGuTl6KrJA25f7DVF4KkNIAjLMXJMJv30qBXQsBjq71R9SqRQwf4lsZ/exXLS3940UKmpbgcNO9F7eMkfya77pDON4bdtoLJ/XJX8fuMEn2tDHT0a/WScngJcedfZV8XrIFiuPZKydTBIdlqlMr7fugDoSPZuAiVHv4CF7uaVqkYyLqckYfKESU5LhKPuHaIGDK4KUXYwhdA2ITUnqhoe8oEwHi6QrpmVh4hWC7srUhItLhKIfZ+j7obKGtitHSMaF1/SKTtdcbZ1YMp3KCRt9HcJQOcNb3gt84Avuy/U5uY97J/C28+TzjlwgRrrKmCEHAF55yr2ciTEZinJMAJzFzMF66EbgxiuBP/3EoRCPDj7rc4PHMDKr1I7Q5fgVVeQHuuvsTUHaVSYWybDt3LZBLPmxJqkvBgH3P1vE1CxOxN0O8/m0FB21nhI5Xp4oCizP3xVddpKIA3Cqf2JMrKRmQana93peW2EzqwRET5014ZH/tMhQSEYiwA3kSExD1Crl6VTXiIXUjeY5xueW3GfdR2nOAmDZAcZyr2n3hgfkxnWbSUK/QV98RP56Td3ohJJFMibRY9GRcVovCm2NUq2jRB94u7ep56kMNLQdoi3m62AqQsoftzKLhUobet4Qy7Q9TVa5GmHM+7x7u7jrzLhwRLUouoiS8dHc9Wy7dmK5BiL0K46uSPpfVSGpWEHaJ1l7WPKakxI//76daq45pZYOa7BXXMl0g04JQiFZqtjf1izfY+ixa+ucf69vND63zrUuW3mkWFdPOEu+68mfg2K+UTMO1lqn9TTYAkd8bvDJCf/5pS11qa6n8GAJ42/pu4nHCubOMY4HkWcRftee2WLvUNDURPg365FB/0TGM5RJ1+Z6vqKexwB5QoNa3fShTfvEBn6bquxSkOt3ZBDYsck6Y07/rmiiRA/CiSV5uua+zz2bpf2JzDcet/jJlef68h6gPotF0nSNurrhxBxso5LntRjuCF6UsCVSp0x62wrEPvRtvunCDombOfPvJVH7ye8DulfKbwcfb12nqc0qYOcvkb+6wAwrJPXO84WHgd2mYBAvf7PRQevsNCWRvDlyYS6f879aGB2yplWyO7+rVBfYSTJHOuJLzLaN8mYdNnG31zCyJegi5qFtr+G74X7ZL/sUgEGJ8wFm3v3RIeCNV9xntLK0IUglRX7K7t4mw5a9O4zfpqec+yVVvz1PQpmuFduQY892hesoSWt7DEPbnj6GLsumXPr+xKzcqvsZ9hqPdWw7xrKSoTLS/5Qj9gdhKg1Af4jG4JuxZCXwj98Wv8e9DgVefhw4/K3WddJVIib16Lu2Tvmr+1H2hxCSkxNATa3kELv5l9Zle7YbYhWIqfNXxaM8i7N2sQSs6bNbJLamhRf3nnWbBVqAd0+vB8z0lASg6XMNu1nIgxBWSEbUBzNDvEEs4IDDDFMJXVv69bJnO9DY4rBCTG0o1tC2/aXE8YVU1SLrQSFu/ZFBEVXzl4Yvw3IdxtjoWIa2XbaL+hJmpm9XxAICiv8w684yKCRLkZra/N/SKUNHuhHUr1KPGm9qBY54m0S2298Mm1oNIan7TLbmcoMN9UsHYM6X6cWz9wO3/hY44Dhg70Pzl+/aahWSXsSe/sdjmXm6qyiJ0PNezkMIZa9txkZ8fIBCDLnbiRQN7VDwwB4Rv5l6tSK86jcPvRUi2MapflWhPTmR/0JQbs+hOK1dSeNZdUJKUoOPdc6FIJkANC3/Ws8TZTEp/DBC0jxVbjrtfjzimglG04AR
+    [base64-encoded PNG data omitted: several "display_data" notebook outputs containing matplotlib (v3.7.1) figures, each with an accompanying "text/plain" entry of the form "<Figure size ... with 1 Axes>"]
qXevXvrgQceUKtWrXTw4EHNnTtXkydPVosWLQpdNwB42TYfAwBchszMTDN06FCTkJBgQkNDTUxMjOnatat54403fPplZ2eb8PBwI8m8++67+c5z4sQJ89RTT5nY2FgTHh5urr/+erNixYp8U3kVNN2XMca8++67pn79+sbpdJqWLVuazz77rMDpvlJSUkyDBg2My+UyjRo1MlOnTjWjR4825/8xvHHjRnPDDTd4az4z9df5032d8dprr5lGjRqZ0NBQEx0dbYYMGWIOHTrk06dTp06mSZMm+a793DrT0tKMJLNy5cp8/X799VfTq1cvExUVZSIjI03v3r3N7t27jSQzevRon77bt283AwYMMDVq1DAul8vUr1/fDB061OTm5nr7HDhwwDz66KMmPj7eOJ1OU6tWLTNw4MB807cBQGE5jOHvfgCgvHvppZc0fvx47dmzh2m3AJRZvGMLAFDdunX1j3/8g1ALoEzjiS0AAAACAk9sAQAAEBAItgAAAAgIBFsAAAAEhICZx9bj8Wj37t2qXLkygx8AAAD8kDFGR44cUVxcnIKCiv/5asAE2927dyshIcHuMgAAAHARO3fuVK1atYr9vAETbCtXrizJ+gcVERFhczUAAAA4X3Z2thISEry5rbgFTLA98/pBREQEwRYAAMCPldRrowweAwAAQEAg2AIAACAgEGwBAAAQEAi2AAAACAgEWwAAAAQEgi0AAAACAsEWAAAAAYFgCwAAgIBAsAUAAEBAINgCAAAgIBBsAQAAEBAItgAAAAgIBFsAAAAEhBC7CwAAAEAp8Lil3OPSiWOnf+ZYP2vES1Vj7K6uWBBsAQAA/JUxUt6JsyH0RE7+YHp+W77vj1nbeScK/h09HpE6312611VCCLYAAABFYYzkPinl5VqhMS9XOnninP0T0sncs/snL6HfyRNS7rnHHbd+T3EKDpHCKkiuClJYRSm8YvGe30YEWwAAULw8bikn2/ocPSzlZElHs3x/nsiRHEFSUJAUFGz9dJyz7fMzuHD9HEFScLDk8UinTkruU1YAPXWhn+f2OafdfeoC++dsG0/p/DN1BJ0Oo+FWGM23XcH6ee52QW1hFaQQZ+nUbAOCLQAAuLAzfxWek3U6pGaf97OA0Hr8aPE/ZfR3wSFSqEtyhklO1znbYae3C9gPDTun/fz9MCnUaQVSV7i173DYfZV+j2ALAEB55T4lHd4nHdprfQ7vPbt95JAVUnOypJN5RTt/hcpSxUjrU+m8nxUqSUbW012P23q66nFbT0Dd5/08853358W+P/0zKMgKnCGhUnCoFBJy+mfoOe0hZ7e9+wX0udA5zgTRYCKVP+AuAAAQiIyRjh3xDavnh9fsA5f+ZDUkVKoUdU5QjZAqRp0NqueH1woR1usAQCki2AIAUBadOill7S84sB7KtH5eaBT8uUJCpaiaUpVzPlE1pchqUsWIs2GWvwpHGUCwBQDAn+XlShnbpD1brc/uLdLeHVL2wUt72lopyjewnhtgq0RboTWI9ZoQGAi2AAD4A49HOphxNrye+bl/94VH3oeEFhxYo06H1qga1jugQDlBsAUAoLTlZJ0OrlulPadD7J5tF351oGKkFFdfiq13+lPXWimqUhSvBwDnINgCAFBSTuVJmTvPPoE9E2KzDhTcPyRUiq7jG2Lj6kuVqxBggUtAsAUAoDicOint3CRtSZN2/WwF2L07rWmnClI1RoqrJ8WeDrFx9aTqtZhJALgMBFsAAIoi97i0bb20ZZ312b6h4PlewyvlfwIbU9daAQpAsSLYAgBwKXKyrKexW9KkLWulXzdbA77OVTFSqt9Mqt3QCrBx9aXI6rxGAJQSgi0AAAU5tPfs09gtadaUW+erUlOq31yq31S6orlUM4EQC9iIYAsAgDHSvl+tEPvLWivIHszI3y+6tvVE9orTYbZKdOnXCuCCCLYAgPLH45Z2bTnniew66ehh3z6OICn+SumKZlaYrd/Uml4LgN8i2AIAApvbLe3/9eyUWzs3S9t+kk4c8+0XEirVudoKsPWbS3UbM8ALKGMItgCAwGCMtczsuUvP7tkqZW63puI6n6uCVK/J2SeytRtKIc7SrxtAsSHYAgDKnrwT1mCu3eeF2Jysgvs7w6zVumJPzxtbv6k1Y0EQc8YCgYRgCwDwXx63dGCPb3jdvVU6sNt6Qns+R5BUPe7svLFnflaNkYKCSr9+AKWKYAsA8A/GWIscbN94dvnZjO3W09mCVIryDa9x9a3laJ2uUi0bgP8g2AIA7JWTLX2/SFoxT8rckf/7UKcUXff08rPnrN5VuUqplwrAvxFsAQClzxhpa5r09Tzpx8/PDu5yhklXXSvFXXE6wNazXi3gXVgAl4BgCwAoPceOSN8vlr7+xJqt4Iz4K6R2t0utbpTCKtpXH4AyjWALAChZxkjb1lth9sfPpZN5VrszTLqmsxVoazdkKVoAl41gCwAoGcePWk9nV3wi7dl2tj2uvtSum9SqqxReybbyAAQegi0AoPicmdlgxTzph2XSyVyrPdR1+ulsN2t1L57OAigBBFsAwOU7flRalWoF2t1bzrbH1rVeNWidxNNZACWOYAsAKBpjpB3p1qsGPyw7O99sqFNq0Ulqf7tUtzFPZwGUGoItAKBwTuRIq5ZYgXbXL2fbo2uffTpbMcK++gCUWwRbAMClydwuLZslrV569ulsSOjpp7PdpHpNeToLwFYEWwDAbzuUKS34j/TdIsl4rLaaCdZAsDY3SRUj7a0PAE4j2AIACpaTJS3+QFr+37MrgzVtL3W6S7qiOU9nAfidoKIcNHHiRNWtW1dhYWFKTEzUypUrL9i3c+fOcjgc+T7dunXz6bdhwwZ1795dkZGRqlixotq0aaMdOwpYMxwAULJyj0sL35X+cp/16sGpk1aQHfaKNPh56coWhFoAfqnQT2ynT5+u4cOHa/LkyUpMTNSECROUnJys9PR01axZM1//2bNnKy8vz7t/4MABtWjRQr179/a2/fLLL+rQoYMGDx6sMWPGKCIiQj/99JPCwsKKeFkAgEI7ddKarmvhu9LRw1Zb/BVSt8FSozaEWQB+z2GMMYU5IDExUW3atNFrr70mSfJ4PEpISNBjjz2mESNGXPT4CRMmaNSoUdqzZ48qVrTWA+/bt69CQ0P1zjvvFOESLNnZ2YqMjFRWVpYiIhiNCwCXzOORflgqzZ8qHcyw2qrFSrcNklp2loKK9Jd7AJBPSee1Qv1plZeXp1WrVikpKensCYKClJSUpBUrVlzSOVJSUtS3b19vqPV4PJo3b56uuuoqJScnq2bNmkpMTNScOXMKUxoAoLCMkX76Rvr7I9K746xQW7mqdPfj0ogp0rU3EmoBlCmFehVh//79crvdio6O9mmPjo7Wxo0bL3r8ypUrlZaWppSUFG/b3r17dfToUb3wwgv6y1/+ohdffFELFizQnXfeqaVLl6pTp04Fnis3N1e5ubne/ezs7MJcCgCUb1t/kj75t7RlnbUfVkG6sa90Qy/JFW5vbQBQRKU6K0JKSoqaNWumtm3bets8HmvqmB49
eujJJ5+UJLVs2VJff/21Jk+efMFgO27cOI0ZM6bkiwaAQLJnqzR/ipR2+m/ZQkKljr2krn2YtgtAmVeoYFu9enUFBwcrMzPTpz0zM1MxMTG/eWxOTo6mTZum559/Pt85Q0JC1LhxY5/2q6++WsuXL7/g+UaOHKnhw4d797Ozs5WQkHCplwIA5cvBDOnTt6VVi61XEBxBUmKylDxAiqphd3UAUCwKFWydTqdatWql1NRU9ezZU5L1xDU1NVWPPvrobx47c+ZM5ebm6t577813zjZt2ig9Pd2nfdOmTapTp84Fz+dyueRyuQpTPgCUP0cOWXPRfvWx5D49F23zjtbAsOja9tYGAMWs0K8iDB8+XAMHDlTr1q3Vtm1bTZgwQTk5ORo0aJAkacCAAYqPj9e4ceN8jktJSVHPnj1VrVq1fOd8+umn1adPH91www3q0qWLFixYoI8//ljLli0r2lUBQHl3Iseag3bZLGteWklqcI10+2CpdiN7awOAElLoYNunTx/t27dPo0aNUkZGhlq2bKkFCxZ4B5Tt2LFDQeeNok1PT9fy5cu1cOHCAs/Zq1cvTZ48WePGjdPjjz+uhg0b6sMPP1SHDh2KcEkAUI6dypO++kRa9J61cpgk1Wog3f57qWEre2sDgBJW6Hls/RXz2AIo19xuaVWqtOBt6dDpcRA14qXbHrBePWDaLgB+oKTzWqnOigAAKGYet7Tmc+mzd6S9O622yGrSzfdJibdIwfwxD6D84E88ACiLPB5p7ZfSgv9ImduttgqVpRv7SB17Sk6WJAdQ/hBsAaAsMUZa95X1ysGerVZbeCWpS28r0IZVtLU8ALATwRYAyoIzy98ueFva9bPVFlZB6nSX9QmvZG99AOAHCLYA4M+MkTZ+J336lrRzk9XmCreWvu10t1SRwbIAcAbBFgD8kTHSptVWoN2+wWpzhkkdekhd7pEqsfwtAJyPYAsA/mbzGivQbk2z9kOd0vU9pBvvkSpXsbMyAPBrBFsA8Bdb1kmfvi39vMbaDwmV2t9hzXQQmX/VRgCAL4ItANht23or0G5aZe0Hh0rtbpO69pOiqttbGwCUIQRbALDLjo3WPLQbVlr7QcFS4q3STf2kKtH21gYAZRDBFgBK26+brUD70wprPyhIapMs3dxfqhpjb20AUIYRbAGgtOzeYgXadcutfUeQ1DpJuvleqXqcvbUBQAAg2AJASdu9RVr4rvTjF9a+wyFde6N0831SzVr21gYAAYRgCwAlZedmadG71hK4Z1zT2Qq0MXXsqgoAAhbBFgCK2/aN1hPa9d9Y+w6H1LKzdNPvpNh6tpYGAIGMYAsAxWXbeumzd6wlcCXrHdprb7RmOYjmCS0AlDSCLQBcrl/WSQvfsZbAlaxZDlolWU9oa/AOLQCUFoItABSFMdLPP1qB9ucfrbagYKnNzVJSP2Y5AAAbEGwBoDCMsZ7MLnzXWgJXkoJDpLa3SEl9mYcWAGxEsAWAS2GM9e7swnetd2mls0vf3thHqlLT3voAAARbAPhNxkjrv7VeOdiRbrWFOqV2t0td7pGiqttbHwDAi2ALAAXxeKwlbz97R9r1s9UW6pKu7y516S1FVLW3PgBAPgRbADiXxyOtXW4trLB7i9XmDJM69JA63y1VrmJvfQCACyLYAoAkedzSmi+sd2gzt1ttrgrSDT2lG+6SKkXaWh4A4OIItgCweY304StS5g5rP6yidMOd0g29pIoRtpYGALh0BFsA5dfRw9J/J0vfL7b2K1SWOt0ldewphVeyszIAQBEQbAGUPx6PtHKB9PGb0rEjksMhtb9D6vYAgRYAyjCCLYDyZc82aeYEaWuatR9/hdT7CanO1TYWBQAoDgRbAOVD3glrYNjSmdZAMWeYdOv9UsdeUnCw3dUBAIoBwRZA4NuwUpr1inQww9pv2l66c6hUJdreugAAxYpgCyBwZR2Q5rwurfnc2o+qId35qNTsenvrAgCUCIItgMDjcUtffyLNS5FOHJOCgqzpu24ZKLnC7a4OAFBCCLYAAsuvm63BYTvSrf3ajaR7npDir7SzKgBAKSDYAggMucelT9+SvvhIMh4prILUbbDU/nYpiMFhAFAeEGwBlH3rvpJmvyYd3mftX9NZ6jFEiqxmZ1UAgFJGsAVQdh3KtAJt2gprv2qMdPfj0tVt7a0LAGALgi2Assftlr6YLS1425qfNihYuvEe6ab+1vy0AIByiWALoGzZvkGaMUHa/Yu1X7+ZdPcwKbaunVUBAPwAwRZA2XD8qDV919efSMZIFSpL3R+S2iRb03kBAMo9gi0A/+Y+JX2/SJo3VTpy0Gprc7MVaitF2VoaAMC/EGwB+Cf3Ken7xdKi96QDe6y2mglS72HSlS1tLQ0A4J8ItgD8i9strTodaPfvttoqRUk39pE69pBCnLaWBwDwXwRbAP7B7ZZWpUqL3j0v0N4jtb+DpXABABdFsAVgLwItAKCYEGwB2MPtllanSgvfk/bvstoqRlqB9vruBFoAQKERbAGUrgsF2i73SB0ItACAoiPYAigdbre0eon1ysE+Ai0AoPgRbAGULLdb+mGptPBdad+vVlvFiNOBtgeBFgBQbIq0XM/EiRNVt25dhYWFKTExUStXrrxg386dO8vhcOT7dOvWrcD+jzzyiBwOhyZMmFCU0gD4C4/bmof2xcHSey9YobZihNRtsPS/70pd+xJqAQDFqtBPbKdPn67hw4dr8uTJSkxM1IQJE5ScnKz09HTVrFkzX//Zs2crLy/Pu3/gwAG1aNFCvXv3ztf3o48+0jfffKO4uLjClgXAX3jc0g/LpM/eOfuEtkJlqUtvqUNPKayCndUBAAJYoYPt+PHj9eCDD2rQoEGSpMmTJ2vevHmaMmWKRowYka9/1apVffanTZumChUq5Au2u3bt0mOPPabPPvvsgk9zAfixM4F24bvS3p1WG4EWAFCKChVs8/LytGrVKo0cOdLbFhQUpKSkJK1YseKSzpGSkqK+ffuqYsWK3jaPx6P77rtPTz/9tJo0aVKYkgDYzZjTT2j/4xtoO/e2VgoLq/ibhwMAUFwKFWz3798vt9ut6Ohon/bo6Ght3LjxosevXLlSaWlpSklJ8Wl/8cUXFRISoscff/ySa8nNzVVubq53Pzs7+5KPBVBMMrdLM/8p/bLW2q9QWep8t9SxJ4EWAFDqSnVWhJSUFDVr1kxt27b1tq1atUr//Oc/tXr1ajkcjks+17hx4zRmzJiSKBPAxeTlSovfk5bMkNynpFCXNRis050EWgCAbQo1K0L16tUVHByszMxMn/bMzEzFxMT85rE5OTmaNm2aBg8e7NP+5Zdfau/evapdu7ZCQkIUEhKi7du366mnnlLdunUveL6RI0cqKyvL+9m5c2dhLgVAUW38Tnrp99Ki961Q2zhRGpEiJd9HqAUA2KpQT2ydTqdatWql1NRU9ezZU5L1fmxqaqoeffTR3zx25syZys3N1b333uvTft999ykpKcmnLTk5Wffdd593gFpBXC6XXC5XYcoHcDmyDkhzXpfWfG7tR1aX7hwqNesgFeJ
vWwAAKCmFfhVh+PDhGjhwoFq3bq22bdtqwoQJysnJ8YbQAQMGKD4+XuPGjfM5LiUlRT179lS1atV82qtVq5avLTQ0VDExMWrYsGFhywNQ3Dxu6auPpflTpBPHJEeQdEMv6ZaBzHQAAPArhQ62ffr00b59+zRq1ChlZGSoZcuWWrBggXdA2Y4dOxQU5PuGQ3p6upYvX66FCxcWT9UASsfOzdLMf0g7N1n7tRtKvZ+QajWwtSwAAAriMMYYu4soDtnZ2YqMjFRWVpYiIiLsLgco207kSJ++LX05RzIe68lst8FS+9uloGC7qwMAlFElnddKdVYEAH7OGGntl9JHE613aiXpmi5Sj0ekyGq/fSwAADYj2AKwHNgjffiqtGGltV89TrrrcalRa3vrAgDgEhFsgfLu1Elp2SxrKdyTuVJwiDUnbdd+kpOZRwAAZQfBFijPtqyzVg7L2GbtX9lCunuYFF3b1rIAACgKgi1QHuVkSR//W/r2U2u/UpTU/WGpdRJz0gIAyiyCLVCeGCN9t1Ca+y8pJ9tqu+426fbfSxWZTQQAULYRbIHyInO79drBL2ut/di6Uu8npXpNbC0LAIDiQrAFAl1errT4PWnJDMl9SnKGScn3SZ3usgaKAQAQIPivGhDIjJH+8xfppxXWfpN20p2PSlWj7a0LAIASQLAFAtkPy6xQGxwqDfgfqVkHBocBAAIWwRYIVDnZ1gpiknRzf6l5R3vrAQCghAXZXQCAEjL3X9LRw1J0HenGPnZXAwBAiSPYAoFo8w/Sys+s1w76DJdCQu2uCACAEkewBQJNXq40Y4K13f4OpvMCAJQbBFsg0Cx6T9q/S4qsJnV7wO5qAAAoNQRbIJDs3iItmW5t3/W4FF7J3noAAChFBFsgUHjc0ozx1s9mHaRm19tdEQAApYpgCwSK5XOl7RulsArWIgwAAJQzBFsgEBzaK82fYm3f/nspqrq99QAAYAOCLVDWGSN9+KqUe1yq20Rqd7vdFQEAYAuCLVDW/fjl6WVzQ6w5a4P41xoAUD7xX0CgLDt2RJr9mrXdtZ8UU8feegAAsBHBFijLPn5TOnJQqpkg3dTP7moAALAVwRYoq35ZK30z39q+Z7gU4rS3HgAAbEawBcqik3nSjH9Y2+26SVc0s7ceAAD8AMEWKIsWfyDt3SlVrird8aDd1QAA4BcItkBZs2eblPqBtX3noyybCwDAaQRboCzxeKxXENynpCbtpBYd7a4IAAC/QbAFypIVn0jbfpJc4dJdj0kOh90VAQDgNwi2QFlxeL/0yb+t7dsekKrUtLceAAD8DMEWKCtmvyadOCbVaSR16G53NQAA+B2CLVAWrF0urVsuBQVbc9YGBdtdEQAAfodgC/i740el2a9a2zf2keLq21sPAAB+imAL+Lt5KVLWAal6vHRTf7urAQDAbxFsAX+2JU366mNr+54nJafL3noAAPBjBFvAX506Z9nctslSg5a2lgMAgL8j2AL+askMKXO7VClK6v6w3dUAAOD3CLaAP8rcIS18z9ru9QepYoS99QAAUAYQbAF/410296R0dVvpmi52VwQAQJlAsAX8zbefSlvWSc4w6e7HWTYXAIBLRLAF/EnWAWnuG9b2rfdLVWNsLQcAgLKEYAv4k48mSidypFpXSR172V0NAABlCsEW8BdpK6Qfv5CCgqQ+T0rBLJsLAEBhEGwBf3DimPThK9Z2p7ulWg3srQcAgDKIYAv4g/lTpMP7rHdqbxlgdzUAAJRJBFvAbts3SMv/a23f86Q1GwIAACg0gi1gp1N50vTxkjFS6ySpYSu7KwIAoMwi2AJ28Xik91+S9my1Vhbr8YjdFQEAUKYVKdhOnDhRdevWVVhYmBITE7Vy5coL9u3cubMcDke+T7du3SRJJ0+e1DPPPKNmzZqpYsWKiouL04ABA7R79+6iXRFQFhgj/Xey9MMyKThEuu/PUqUou6sCAKBMK3SwnT59uoYPH67Ro0dr9erVatGihZKTk7V3794C+8+ePVt79uzxftLS0hQcHKzevXtLko4dO6bVq1fr2Wef1erVqzV79mylp6ere/ful3dlgD9bMkP6Yra13e9pXkEAAKAYOIwxpjAHJCYmqk2bNnrttdckSR6PRwkJCXrsscc0YsSIix4/YcIEjRo1Snv27FHFihUL7PPdd9+pbdu22r59u2rXrn1JdWVnZysyMlJZWVmKiIi49AsCStt3C61XECTr9YPOd9tbDwAApaSk81qhntjm5eVp1apVSkpKOnuCoCAlJSVpxYoVl3SOlJQU9e3b94KhVpKysrLkcDgUFRVVmPIA/7dhpTTt79Z2596EWgAAilFIYTrv379fbrdb0dHRPu3R0dHauHHjRY9fuXKl0tLSlJKScsE+J06c0DPPPKN+/fr9ZpLPzc1Vbm6udz87O/sSrgCw0faN0lvPSx631KqrdMeDdlcEAEBAKdVZEVJSUtSsWTO1bdu2wO9Pnjype+65R8YYTZo06TfPNW7cOEVGRno/CQkJJVEyUDz2/Sq9+Wcp74R0VSup7x+tpXMBAECxKdR/WatXr67g4GBlZmb6tGdmZiomJuY3j83JydG0adM0ePDgAr8/E2q3b9+uRYsWXfS9i5EjRyorK8v72blzZ2EuBSg92QelySOknCyp1lXSoNFSSKjdVQEAEHAKFWydTqdatWql1NRUb5vH41FqaqratWv3m8fOnDlTubm5uvfee/N9dybUbt68WYsXL1a1atUuWovL5VJERITPB/A7J3KkN0ZKBzOk6nHSQ3+VwirYXRUAAAGpUO/YStLw4cM1cOBAtW7dWm3bttWECROUk5OjQYMGSZIGDBig+Ph4jRs3zue4lJQU9ezZM19oPXnypO6++26tXr1an3zyidxutzIyMiRJVatWldPpLOq1AfY6lSdNeU7a9Ys1R+3DL0iVq9hdFQAAAavQwbZPnz7at2+fRo0apYyMDLVs2VILFizwDijbsWOHgs57dzA9PV3Lly/XwoUL851v165dmjt3riSpZcuWPt8tXbpUnTt3LmyJgP3OrCq2+QfJFS49NNZ6YgsAAEpMoeex9VfMYwu/YYw0Z5K1AENQsBVqWYABAAD/mscWwCVYes6qYr/7E6EWAIBSQrAFitN3i6SP37S2uz9szVcLAABKBcEWKC4bVkrT/mZtd+4tdeltbz0AAJQzBFugOOw4Z1Wxa29kVTEAAGxAsAUu175fpTf/9+yqYv2eZlUxAABswH99gctxZlWxo4elWg1YVQwAABsRbIGiOpEjvfE/1qpi1WKtab1YVQwAANsQbIGiOHXy9KpiP7OqGAAAfoJgCxSWxyN9cHpVMWeY9aS2RrzdVQEAUO4RbIHCMEb672Rp9VJrVbFBz0kJV9ldFQAAEMEWKJxlM31XFWvU2t56AACAF8EWuFTfLZLmvmFts6oYAAB+h2ALXIqN352zqtjdrCoGAIAfItgCF7MjXZo65pxVxR6yuyIAAFAAgi3wW/btkt788+lVxa5lVTEAAPwY/4UGLuRUnjT1uXNWFXuOVcUAAPBjBFvgQj
57R9qz1VqA4cG/sqoYAAB+jmALFGT7Bil1urXde5gUUdXeegAAwEURbIHz5eVK778kGY81WKx5R7srAgAAl4BgC5xv/hRp704popp056N2VwMAAC4RwRY41y/rzq4sds+TUsUIe+sBAACXjGALnJF7XPrgJckYKfEWqcl1dlcEAAAKgWALnPHxm9KBPVJUDanHI3ZXAwAAColgC0hS+irpq7nWdt8/SuGV7K0HAAAUGsEWOH5UmvY3a/v67lLDVvbWAwAAioRgC/x3snR4n1QtVrrjQburAQAARUSwRfn20zfStwskh0Pq9yfJFW53RQAAoIgItii/crKl6eOt7U53SVc0s7ceAABwWQi2KL9mvyYdOSjVrC3dOsjuagAAwGUi2KJ8WvultHqJ5AiSfve05HTZXREAALhMBFuUP0cOSTP/aW137SPVudreegAAQLEg2KJ8MUaa9U/p6GEptp6UfJ/dFQEAgGJCsEX5snqJtHa5FBQs9X9GCnHaXREAACgmBFuUH1n7rQFjknTzvVL8lfbWAwAAihXBFuWDMdKMf0jHjki1rpKS+tldEQAAKGYEW5QPKxdI67+VgkOl3/1JCg6xuyIAAFDMCLYIfIcypY8mWdu33i/F1rWzGgAAUEIItghsxkjT/i7lHpPqNpa63G13RQAAoIQQbBHYvv5Y2rRaCnVJ/f5kzYYAAAACEsEWgWv/bmnuG9b27b+Xataytx4AAFCiCLYITB6P9MHLUt4J6coWUocedlcEAABKGMEWgemL2dKWdZIrXOr7RymI/6kDABDo+K89Ak/mDmn+FGu7+8NStVh76wEAAKWCYIvA4nZL778kncyTGrWW2nWzuyIAAFBKCLYILEunSzs2SmEVpT5PSQ6H3RUBAIBSQrBF4Ni9RVrwH2u711Apqoa99QAAgFJFsEVgOHVSev9FyX1KatpOanOT3RUBAIBSRrBFYFj0vrTrF6lCZan3k7yCAABAOVSkYDtx4kTVrVtXYWFhSkxM1MqVKy/Yt3PnznI4HPk+3bqdHdRjjNGoUaMUGxur8PBwJSUlafPmzUUpDeXRzk3S4ves7bsflyKq2lsPAACwRaGD7fTp0zV8+HCNHj1aq1evVosWLZScnKy9e/cW2H/27Nnas2eP95OWlqbg4GD17t3b2+ell17SK6+8osmTJ+vbb79VxYoVlZycrBMnThT9ylA+nMyzZkHweKQWN0jXdLG7IgAAYJNCB9vx48frwQcf1KBBg9S4cWNNnjxZFSpU0JQpUwrsX7VqVcXExHg/ixYtUoUKFbzB1hijCRMm6H//93/Vo0cPNW/eXP/5z3+0e/duzZkz57IuDuXAgreljG1SpSjp7mF2VwMAAGxUqGCbl5enVatWKSkp6ewJgoKUlJSkFStWXNI5UlJS1LdvX1WsWFGStHXrVmVkZPicMzIyUomJiZd8TpRT29ZLS2da2/c8KVWKtLceAABgq5DCdN6/f7/cbreio6N92qOjo7Vx48aLHr9y5UqlpaUpJSXF25aRkeE9x/nnPPNdQXJzc5Wbm+vdz87OvqRrQIDIPS6996JkPFLrJKnZ9XZXBAAAbFaqsyKkpKSoWbNmatu27WWfa9y4cYqMjPR+EhISiqFClBnzUqT9u6TI6tKdj9pdDQAA8AOFCrbVq1dXcHCwMjMzfdozMzMVExPzm8fm5ORo2rRpGjx4sE/7meMKe86RI0cqKyvL+9m5c2dhLgVl2abV0pdzrO2+f5TCK9laDgAA8A+FCrZOp1OtWrVSamqqt83j8Sg1NVXt2rX7zWNnzpyp3Nxc3XvvvT7t9erVU0xMjM85s7Oz9e233/7mOV0ulyIiInw+KAeOH5Wm/c3abn+71Ki1vfUAAAC/Uah3bCVp+PDhGjhwoFq3bq22bdtqwoQJysnJ0aBBgyRJAwYMUHx8vMaNG+dzXEpKinr27Klq1ar5tDscDj3xxBP6y1/+ogYNGqhevXp69tlnFRcXp549exb9yhCY/jtZOrRXqhYrdX/Y7moAAIAfKXSw7dOnj/bt26dRo0YpIyNDLVu21IIFC7yDv3bs2KGgIN8Hwenp6Vq+fLkWLlxY4Dn/9Kc/KScnRw899JAOHz6sDh06aMGCBQoLCyvCJSFgpa2Qvl1grSrW70+SK9zuigAAgB9xGGOM3UUUh+zsbEVGRiorK4vXEgJRTpb04u+lI4ekzr2lHjytBQCgrCnpvFaqsyIARfbhq1aoja4t3TbI7moAAIAfItjC//2wVPphmRQUJP3uGSnUaXdFAADADxFs4d+yDkizXrG2k/pLtRvaWw8AAPBbBFv4L2OkGf+Qjh2R4q+Ubu5vd0UAAMCPEWzhv1Z+Jq3/RgoOlfo/IwUXehIPAABQjhBs4Z8OZkofvW5t33q/FFvP1nIAAID/I9jC/3g81upiucekuk2kLnfbXREAACgDCLbwP1/NlTb/IDnDpN/9SQoKtrsiAABQBhBs4V/2/ip9/Ka1fceDUo14e+sBAABlBsEW/sPjlt5/UTqZKzW4Rmp/h90VAQCAMoRgC/+xdKa0fYMUVkHq90drQQYAAIBLRHKAf9i9Rfr0bWu711CpSrS99QAAgDKHYAv7nTppvYLgPik1aSe1udnuigAAQBlEsIX9Fr0n7fpFqhgh3fOk5HDYXREAACiDCLaw146N0uL3re27h0kRVe2tBwAAlFkEW9gnL1d67yVrQYZrukgtO9ldEQAAKMMItrDPp1OlvTukylWlux6zuxoAAFDGEWxhj1/WSp9/aG33GW69XwsAAHAZCLYofSeOSe+/JBkjJd4qNbnO7ooAAEAAINii9M19QzqYYc1V2/MRu6sBAAABgmCL0rXxO2nFJ9Z2v6elsIr21gMAAAIGwRal59gRadrfrO2OvaQGLW0tBwAABBaCLUrP7IlS1gGpRi3p9sF2VwMAAAIMwRalY+2X0qrFkiNI+t2fJGeY3RUBAIAAQ7BFyTtySJr5T2u7ax+pbmN76wEAAAGJYIuSZYw0c4J09LAUV19Kvs/uigAAQIAi2KJkrUqV1n0lBYdIv3tGCnHaXREAAAhQBFuUnMP7pA9ftbaT75Pir7C3HgAAENAItig5sydKJ3Kk2g2lG/vaXQ0AAAhwBFuUjN1bpHXLJYdD6vtHKTjY7ooAAECAI9iiZCz+wPrZvKMUW8/eWgAAQLlAsEXx2/ertOZza/um39lbCwAAKDcItih+qdMk45EaJ0rxV9pdDQAAKCcItihehzKl7xZZ2zf1t7cWAABQrhBsUbyWzJA8bunKlqwwBgAAShXBFsXnyCHp20+tbd6tBQAApYxgi+KzbJZ0Mk+q00hqcI3d1QAAgHKGYIvikZMtfTXX2k7qb81fCwAAUIoItigeX86Rco9LcfWlJtfZXQ0AACiHCLa4fCeOSV9+ZG0n9eNpLQAAsAXBFpfv64+lY0ekGrWkFjfYXQ0AACinCLa4PHm51qAxSeraVwoKtrceAABQbhFscXlWLrCm+apSU2qdZHc1AACgHCPYoujcp6Ql063tG/tIwSH21gMAAMo1gi2K7vvF0
qG9UuUqUttb7K4GAACUcwRbFI3HLaV+YG13vltyuuytBwAAlHsEWxTNmi+kfbukCpWl9nfYXQ0AAADBFkXg8UiL37e2O/aSwirYWw8AAICKGGwnTpyounXrKiwsTImJiVq5cuVv9j98+LCGDh2q2NhYuVwuXXXVVZo/f773e7fbrWeffVb16tVTeHi4rrjiCv3f//2fjDFFKQ8lbf030p6tkquC1LGn3dUAAABIkgo9jH369OkaPny4Jk+erMTERE2YMEHJyclKT09XzZo18/XPy8vTTTfdpJo1a2rWrFmKj4/X9u3bFRUV5e3z4osvatKkSXr77bfVpEkTff/99xo0aJAiIyP1+OOPX9YFopgZIy06/bT2+jukihH21gMAAHBaoYPt+PHj9eCDD2rQoEGSpMmTJ2vevHmaMmWKRowYka//lClTdPDgQX399dcKDQ2VJNWtW9enz9dff60ePXqoW7du3u8/+OCDiz4Jhg02rZZ2bJRCndagMQAAAD9RqFcR8vLytGrVKiUlnZ2IPygoSElJSVqxYkWBx8ydO1ft2rXT0KFDFR0draZNm2rs2LFyu93ePu3bt1dqaqo2bdokSfrxxx+1fPly3XrrrUW5JpSkM+/WXnebNc0XAACAnyjUE9v9+/fL7XYrOjrapz06OlobN24s8JgtW7ZoyZIl6t+/v+bPn6+ff/5Zf/jDH3Ty5EmNHj1akjRixAhlZ2erUaNGCg4Oltvt1l//+lf179//grXk5uYqNzfXu5+dnV2YS0FRbP1J+vlHayGGLvfYXQ0AAICPEl8qyuPxqGbNmnrjjTcUHBysVq1aadeuXXr55Ze9wXbGjBl677339P7776tJkyZas2aNnnjiCcXFxWngwIEFnnfcuHEaM2ZMSZePcy16z/rZ+iZrCV0AAAA/UqhgW716dQUHByszM9OnPTMzUzExMQUeExsbq9DQUAUHB3vbrr76amVkZCgvL09Op1NPP/20RowYob59+0qSmjVrpu3bt2vcuHEXDLYjR47U8OHDvfvZ2dlKSEgozOWgMH7dLG1YKTmCpK597a4GAAAgn0K9Y+t0OtWqVSulpqZ62zwej1JTU9WuXbsCj7n++uv1888/y+PxeNs2bdqk2NhYOZ1OSdKxY8cUFORbSnBwsM8x53O5XIqIiPD5oAQtPr3K2DWdpBrx9tYCAABQgELPYzt8+HC9+eabevvtt7VhwwYNGTJEOTk53lkSBgwYoJEjR3r7DxkyRAcPHtSwYcO0adMmzZs3T2PHjtXQoUO9fe644w799a9/1bx587Rt2zZ99NFHGj9+vHr16lUMl4jLlrldWvultZ30O3trAQAAuIBCv2Pbp08f7du3T6NGjVJGRoZatmypBQsWeAeU7dixw+fpa0JCgj777DM9+eSTat68ueLj4zVs2DA988wz3j6vvvqqnn32Wf3hD3/Q3r17FRcXp4cfflijRo0qhkvEZVs8zZq/tml7Kbae3dUAAAAUyGECZHmv7OxsRUZGKisri9cSitOBPdLYgdYyuk++JtVuZHdFAACgjCrpvFakJXVRjiyZboXaq1oRagEAgF8j2OLCsvZL335mbd/Eu7UAAMC/EWxxYctmSe6TUr0m0hXN7a4GAADgNxFsUbCjWdLXn1jbN/WXHA576wEAALgIgi0K9sVsKe+EVKuB1KiN3dUAAABcFMEW+R0/Kn05x9pO6sfTWgAAUCYQbJHfV3OlEzlSdG2pWQe7qwEAALgkBFv4yjshfT7b2u7aTwrifyIAAKBsILXA14r50tHDUtUY6dob7a4GAADgkhFscdapPGnpDGu7a18pONjeegAAAAqBYIuzvltkLcoQWU1qe7Pd1QAAABQKwRYWt1tKnWZtd+4thTjtrQcAAKCQCLawrFkmHdgjVYyU2nWzuxoAAIBCI9hC8nikRe9b253ulFzh9tYDAABQBARbSGlfS5nbpbAKUocedlcDAABQJATb8s6Ys09rO/SUwivZWg4AAEBREWzLu/TvpV83Sc4w6zUEAACAMopgW96deVp73W1SpShbSwEAALgcBNvy7Jd10pZ1UnCo1OUeu6sBAAC4LATb8mzxe9bPtjdLUdXtrQUAAOAyEWzLqw0rpY3fS0FB0o197K4GAADgshFsy6P9u6V3xlrb7e+QqsfZWw8AAEAxINiWN7nHpSmjpONHpTqNpB4P210RAABAsSDYlifGSNP+Ju3ZJlWuKt3/nBTitLsqAACAYkGwLU+WTJfWfC4Fh0j3j2LAGAAACCgE2/Ji43fSvBRru9dQqX5Te+sBAAAoZgTb8uDMYDFjpMRbpfa3210RAABAsSPYBrrc49KU0dKxI9ZgsbsfkxwOu6sCAAAodgTbQOYdLLZVqlyFwWIAACCgEWwD2ZIZ1mCxoGAGiwEAgIBHsA1UG78/O1jszqFS/Wb21gMAAFDCCLaBaP9u6Z2/SsYjJd5irS4GAAAQ4Ai2gSb3uDT1OWuwWO1G0l2PM1gMAACUCwTbQGKMNP3v0u4t1mCxQc9JoQwWAwAA5QPBNpAsnSH9sMwaLDaQwWIAAKB8IdgGivRV0idnVhb7g3QFg8UAAED5QrANBAf2SP/5y9nBYtd3t7siAACAUkewLevOXVmMwWIAAKAcI9iWZcZI08dbg8UqRUmDRjNYDAAAlFsE27Js2Szph6WnVxYbLUXVsLsiAAAA2xBsy6r0VdLHb1rbDBYDAAAg2JZJ5w4Wa5vMYDEAAAARbMuevBPSlOdODxZrKN09jMFiAAAAItiWLd6VxX45PVjsOQaLAQAAnEawLUs+/1BafWaw2CgGiwEAAJyDYFtWbFotzX3D2u45RLqiub31AAAA+BmCbVlwMMN3sFiHHnZXBAAA4HcItv4u74S1slhOtpRwFYPFAAAALqBIwXbixImqW7euwsLClJiYqJUrV/5m/8OHD2vo0KGKjY2Vy+XSVVddpfnz5/v02bVrl+69915Vq1ZN4eHhatasmb7//vuilBc4zqwstovBYgAAABcTUtgDpk+fruHDh2vy5MlKTEzUhAkTlJycrPT0dNWsWTNf/7y8PN10002qWbOmZs2apfj4eG3fvl1RUVHePocOHdL111+vLl266NNPP1WNGjW0efNmValS5bIursz7/ENp9ZKzg8Wq5P/nCwAAAIvDGGMKc0BiYqLatGmj1157TZLk8XiUkJCgxx57TCNGjMjXf/LkyXr55Ze1ceNGhYaGFnjOESNG6KuvvtKXX35ZhEuwZGdnKzIyUllZWYqIiCjyefzGxu+lN/9H8nikXkOlG3rZXREAAMBlKem8VqhXEfLy8rRq1SolJSWdPUFQkJKSkrRixYoCj5k7d67atWunoUOHKjo6Wk2bNtXYsWPldrt9+rRu3Vq9e/dWzZo1dc011+jNN98s4iWVcR6PlDr9bKhtc7PUsafdVQEAAPi9QgXb/fv3y+12Kzo62qc9OjpaGRkZBR6zZcsWzZo1S263W/Pnz9ezzz6rv//97/rLX/7i02fSpElq0KCBPvvsMw0ZMkSPP/643n777QvWkpubq+zs
bJ9PmXf0sPTv/5U+edMKtdd2kXo/wWAxAACAS1Dod2wLy+PxqGbNmnrjjTcUHBysVq1aadeuXXr55Zc1evRob5/WrVtr7NixkqRrrrlGaWlpmjx5sgYOHFjgeceNG6cxY8aUdPml55d10jt/kbIOWAPEeg2VrruNUAsAAHCJCvXEtnr16goODlZmZqZPe2ZmpmJiYgo8JjY2VldddZWCg4O9bVdffbUyMjKUl5fn7dO4cWOf466++mrt2LHjgrWMHDlSWVlZ3s/OnTsLcyn+w+ORFr0nTXzKCrU1E6QnXpPadSPUAgAAFEKhgq3T6VSrVq2UmprqbfN4PEpNTVW7du0KPOb666/Xzz//LI/H423btGmTYmNj5XQ6vX3S09N9jtu0aZPq1KlzwVpcLpciIiJ8PmXOkUPSv0ZK86daiy+0TpKGvy7F1be7MgAAgDKn0PPYDh8+XG+++abefvttbdiwQUOGDFFOTo4GDRokSRowYIBGjhzp7T9kyBAdPHhQw4YN06ZNmzRv3jyNHTtWQ4cO9fZ58skn9c0332js2LH6+eef9f777+uNN97w6RNwNq+RXn5Y2rRKCnVJ/Z6W+o+QXOF2VwYAAFAmFfod2z59+mjfvn0aNWqUMjIy1LJlSy1YsMA7oGzHjh0KCjqblxMSEvTZZ5/pySefVPPmzRUfH69hw4bpmWee8fZp06aNPvroI40cOVLPP/+86tWrpwkTJqh///7FcIl+xuOWFr4nLXzXekobU1ca+KwUc+Gn0wAAALi4Qs9j66/KxDy2WQekd8dJP6+x9hNvke58VHKG2VoWAABAaSjpvFbisyLgtPRVVqg9etgKsr2fsN6pBQAAQLEg2JY0t1v67G1p8QeSMdbAsIHPWrMfAAAAoNgQbEvS4X3SO2OlLeus/Xa3Sz2HSE6XvXUBAAAEIIJtSdmwUnrvBSknW3JVkPo8KV3Txe6qAAAAAhbBtri5T1nz0i6Zbu3XaiAN+F+pRry9dQEAAAQ4gm1xOpQp/eev0rb11n6HHlKPh6UQp711AQAAlAME2+KS9rX0wcvSsSNSWEWp71NSixvsrgoAAKDcINherlMnpU/+LX3+obVfu6H16kG1WHvrAgAAKGcItpfjwB7r1YMdG639TndJt/9eCgm1ty4AAIByiGBbVGu/lD74m3QiR6pQWer3tNS0vd1VAQAAlFsE26L4+hNp5gRru25j6b4/S1WjbS0JAACgvAuyu4Ayqdn1UmQ16cY+0qPjCbUAAAB+gCe2RVG5ivRMihReye5KAAAAcBpPbIuKUAsAAOBXCLYAAAAICARbAAAABASCLQAAAAICwRYAAAABgWALAACAgECwBQAAQEAg2AIAACAgEGwBAAAQEAi2AAAACAgEWwAAAAQEgi0AAAACAsEWAAAAAYFgCwAAgIAQYncBxcUYI0nKzs62uRIAAAAU5ExOO5PbilvABNsjR45IkhISEmyuBAAAAL/lyJEjioyMLPbzOkxJReZS5vF4tHv3blWuXFlHjhxRQkKCdu7cqYiICLtLQwnKzs7mXpcD3Ofyg3tdPnCfy4/z77UxRkeOHFFcXJyCgor/jdiAeWIbFBSkWrVqSZIcDockKSIign9hygnudfnAfS4/uNflA/e5/Dj3XpfEk9ozGDwGAACAgECwBQAAQEAIyGDrcrk0evRouVwuu0tBCeNelw/c5/KDe10+cJ/Lj9K+1wEzeAwAAADlW0A+sQUAAED5Q7AFAABAQCDYAgAAICAQbAEAABAQAjLYTpw4UXXr1lVYWJgSExO1cuVKu0vCb/jiiy90xx13KC4uTg6HQ3PmzPH53hijUaNGKTY2VuHh4UpKStLmzZt9+hw8eFD9+/dXRESEoqKiNHjwYB09etSnz9q1a9WxY0eFhYUpISFBL730UklfGs4xbtw4tWnTRpUrV1bNmjXVs2dPpaen+/Q5ceKEhg4dqmrVqqlSpUq66667lJmZ6dNnx44d6tatmypUqKCaNWvq6aef1qlTp3z6LFu2TNdee61cLpeuvPJKvfXWWyV9eTht0qRJat68uXcy9nbt2unTTz/1fs89DlwvvPCCHA6HnnjiCW8b97vse+655+RwOHw+jRo18n7vd/fYBJhp06YZp9NppkyZYn766Sfz4IMPmqioKJOZmWl3abiA+fPnmz//+c9m9uzZRpL56KOPfL5/4YUXTGRkpJkzZ4758ccfTffu3U29evXM8ePHvX1uueUW06JFC/PNN9+YL7/80lx55ZWmX79+3u+zsrJMdHS06d+/v0lLSzMffPCBCQ8PN//6179K6zLLveTkZDN16lSTlpZm1qxZY2677TZTu3Ztc/ToUW+fRx55xCQkJJjU1FTz/fffm+uuu860b9/e+/2pU6dM06ZNTVJSkvnhhx/M/PnzTfXq1c3IkSO9fbZs2WIqVKhghg8fbtavX29effVVExwcbBYsWFCq11tezZ0718ybN89s2rTJpKenm//5n/8xoaGhJi0tzRjDPQ5UK1euNHXr1jXNmzc3w4YN87Zzv8u+0aNHmyZNmpg9e/Z4P/v27fN+72/3OOCCbdu2bc3QoUO9+26328TFxZlx48bZWBUu1fnB1uPxmJiYGPPyyy972w4fPmxcLpf54IMPjDHGrF+/3kgy3333nbfPp59+ahwOh9m1a5cxxpjXX3/dVKlSxeTm5nr7PPPMM6Zhw4YlfEW4kL179xpJ5vPPPzfGWPc1NDTUzJw509tnw4YNRpJZsWKFMcb6P0FBQUEmIyPD22fSpEkmIiLCe2//9Kc/mSZNmvj8rj59+pjk5OSSviRcQJUqVcy///1v7nGAOnLkiGnQoIFZtGiR6dSpkzfYcr8Dw+jRo02LFi0K/M4f73FAvYqQl5enVatWKSkpydsWFBSkpKQkrVixwsbKUFRbt25VRkaGzz2NjIxUYmKi956uWLFCUVFRat26tbdPUlKSgoKC9O2333r73HDDDXI6nd4+ycnJSk9P16FDh0rpanCurKwsSVLVqlUlSatWrdLJkyd97nWjRo1Uu3Ztn3vdrFkzRUdHe/skJycrOztbP/30k7fPuec404c/A0qf2+3WtGnTlJOTo3bt2nGPA9TQoUPVrVu3fPeE+x04Nm/erLi4ONWvX1/9+/fXjh07JPnnPQ6oYLt//3653W6ff3iSFB0drYyMDJuqwuU4c99+655mZGSoZs2aPt+HhISoatWqPn0KOse5vwOlx+Px6IknntD111+vpk2bSrLug9PpVFRUlE/f8+/1xe7jhfpkZ2fr+PHjJXE5OM+6detUqVIluVwuPfLII/roo4/UuHFj7nEAmjZtmlavXq1x48bl+477HRgSExP11ltvacGCBZo0aZK2bt2qjh076siRI355j0MK1RsAisHQoUOVlpam5cuX210KSkDDhg21Zs0aZWVladasWRo4cKA+//xzu8tCMdu5c6eGDRumRYsWKSwszO5yUEJuvfVW73bz5s2VmJioOnXqaMaMGQoPD7exsoIF1BPb6tWrKzg4ON9ovMzMTMXExNhUFS7Hmfv2W/c
0JiZGe/fu9fn+1KlTOnjwoE+fgs5x7u9A6Xj00Uf1ySefaOnSpapVq5a3PSYmRnl5eTp8+LBP//Pv9cXu44X6RERE+OUfwoHI6XTqyiuvVKtWrTRu3Di1aNFC//znP7nHAWbVqlXau3evrr32WoWEhCgkJESff/65XnnlFYWEhCg6Opr7HYCioqJ01VVX6eeff/bLf6cDKtg6nU61atVKqamp3jaPx6PU1FS1a9fOxspQVPXq1VNMTIzPPc3Ozta3337rvaft2rXT4cOHtWrVKm+fJUuWyOPxKDEx0dvniy++0MmTJ719Fi1apIYNG6pKlSqldDXlmzFGjz76qD766CMtWbJE9erV8/m+VatWCg0N9bnX6enp2rFjh8+9Xrdunc//kVm0aJEiIiLUuHFjb59zz3GmD38G2Mfj8Sg3N5d7HGC6du2qdevWac2aNd5P69at1b9/f+829zvwHD16VL/88otiY2P989/pQg8383PTpk0zLpfLvPXWW2b9+vXmoYceMlFRUT6j8eBfjhw5Yn744Qfzww8/GElm/Pjx5ocffjDbt283xljTfUVFRZn//ve/Zu3ataZHjx4FTvd1zTXXmG+//dYsX77cNGjQwGe6r8OHD5vo6Ghz3333mbS0NDNt2jRToUIFpvsqRUOGDDGRkZFm2bJlPtPGHDt2zNvnkUceMbVr1zZLliwx33//vWnXrp1p166d9/sz08bcfPPNZs2aNWbBggWmRo0aBU4b8/TTT5sNGzaYiRMnMjVQKRoxYoT5/PPPzdatW83atWvNiBEjjMPhMAsXLjTGcI8D3bmzIhjD/Q4ETz31lFm2bJnZunWr+eqrr0xSUpKpXr262bt3rzHG/+5xwAVbY4x59dVXTe3atY3T6TRt27Y133zzjd0l4TcsXbrUSMr3GThwoDHGmvLr2WefNdHR0cblcpmuXbua9PR0n3McOHDA9OvXz1SqVMlERESYQYMGmSNHjvj0+fHHH02HDh2My+Uy8fHx5oUXXiitS4QxBd5jSWbq1KnePsePHzd/+MMfTJUqVUyFChVMr169zJ49e3zOs23bNnPrrbea8PBwU716dfPUU0+ZkydP+vRZunSpadmypXE6naZ+/fo+vwMl64EHHjB16tQxTqfT1KhRw3Tt2tUbao3hHge684Mt97vs69Onj4mNjTVOp9PEx8ebPn36mJ9//tn7vb/dY4cxxhT+OS8AAADgXwLqHVsAAACUXwRbAAAABASCLQAAAAICwRYAAAABgWALAACAgECwBQAAQEAg2AIAACAgEGwBAAAQEAi2AAAACAgEWwAAAAQEgi0AAAACAsEWAAAAAeH/AY3XyWwvO3YcAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAArYAAAHDCAYAAADRBFkDAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABNTUlEQVR4nO3deXxU5d3///dkmwRIhkDIRgKELQgIKCLEBVBSAlol1XorVQFFrRS+t/yq1tLbXdsgWlusFbUV0NtiXCrUG1lENjcWQZBVBNkhCQokIQGSkFy/P04yZEgC2c9k8no+Hqdz5pxrZj6HU+2bq9e5LocxxggAAABo4vzsLgAAAACoDwRbAAAA+ASCLQAAAHwCwRYAAAA+gWALAAAAn0CwBQAAgE8g2AIAAMAnEGwBAADgEwi2AAAA8AkEWwAAAPgEgi0AnMfevXvlcDg0e/ZsW35/9uzZcjgc2rt3b6P/9pNPPimHw9HovwsAtUWwBQAv8Kc//Unz5s2zuwzdfPPNuu666+wuAwBqhWALAF6gqmB755136tSpU+rYsWOD11BUVKQlS5bo+uuvb/DfAoCGEGB3AQCAqvn7+8vf379Rfuvzzz/XiRMnCLYAmix6bAE0KYcOHdLdd9+tqKgoOZ1O9erVSzNnzpQkZWVlKSAgQE899VSFz+3YsUMOh0Mvv/yyJOnYsWN66KGHdPHFF6tVq1YKCwvTyJEj9e23316whqFDh2ro0KEVjo8bN06dOnXyOPbCCy/oiiuuUNu2bRUSEqL+/fvrgw8+8GjjcDiUn5+vN998Uw6HQw6HQ+PGjZNU9RjbV155Rb169ZLT6VRsbKwmTpyo7OzsCnX27t1b27Zt0zXXXKMWLVqoffv2mjZtWqXX9fHHH6tnz54VrqG8M2fO6JlnnlGXLl3kdDrVqVMn/eEPf1BBQYFHu3Xr1iklJUUREREKCQlRQkKC7r77bo826enp6t+/v0JDQxUWFqaLL75Y06dPr/K3AeBC6LEF0GRkZWVp0KBBcjgcmjRpktq1a6eFCxdq/Pjxys3N1eTJkzVkyBC99957euKJJzw+++6778rf31+33HKLJGn37t2aN2+ebrnlFiUkJCgrK0uvvfaahgwZom3btik2NrZeap4+fbpuvPFG3X777SosLFR6erpuueUWzZ8/390z+r//+7+65557dPnll+u+++6TJHXp0qXK73zyySf11FNPKTk5WRMmTNCOHTs0Y8YMff311/ryyy8VGBjobnv8+HGNGDFCN910k/7rv/5LH3zwgR555BFdfPHFGjlypMf3LliwQD//+c/Pez333HOP3nzzTf3yl7/Ugw8+qDVr1igtLU3bt2/X3LlzJUlHjhzR8OHD1a5dO/3+979X69attXfvXn344Yfu71myZIlGjx6tYcOG6bnnnpMkbd++XV9++aUeeOCBGvwJA0A5BgCaiPHjx5uYmBjz008/eRy/7bbbjMvlMidPnjSvvfaakWQ2b97s0aZnz57m2muvdb8/ffq0KS4u9mizZ88e43Q6zdNPP+1xTJKZNWuW+9iQIUPMkCFDKtQ3duxY07FjR49jJ0+e9HhfWFhoevfu7VGLMca0bNnSjB07tsJ3zpo1y0gye/bsMcYYc+TIERMUFGSGDx/uUf/LL79sJJmZM2d61CnJvPXWW+5jBQUFJjo62tx8880ev7N7924jySxfvtx97IknnjDl/2di48aNRpK55557PD770EMPGUlm2bJlxhhj5s6daySZr7/+usL1lHnggQdMWFiYOXPmTJVtAKCmGIoAoEkwxujf//63brjhBhlj9NNPP7m3lJQU5eTk6JtvvtFNN92kgIAAvfvuu+7PbtmyRdu2bdOtt97qPuZ0OuXnZ/0rsLi4WEePHlWrVq2UmJiob775pt7qDgkJce8fP35cOTk5uvrqq2v9G59++qkKCws1efJkd/2SdO+99yosLEwff/yxR/tWrVrpjjvucL8PCgrS5Zdfrt27d3u0+/jjj+VyuXTVVVdV+dsLFiyQJP32t7/1OP7ggw+6v0OSWrduLUmaP3++ioqKKv2u1q1bKz8/X0uWLDnf5QJAjRBsATQJP/74o7Kzs/X666+rXbt2Httdd90lyfq/wCMiIjRs2DC999577s++++67CggI0E033eQ+VlJSor/85S/q1q2bnE6nIiIi1K5dO23atEk5OTn1Vvf8+fM1aNAgBQcHq02bNmrXrp1mzJhR69/Yt2+fJCkxMdHjeFBQkDp37uw+XyYuLq7CXLTh4eE6fvy4x7GPP/5Yw4cPV0BA1SPU9u3bJz8/P3Xt2tXjeHR0tFq3bu3+7SFDhujmm2/WU089pYiICI0aNUqzZs3yGIf7m9/8Rt27d9fIkSMVFxenu+++W4sWLarmnwIAVI5gC6BJKCkpkSTdcccdWrJkSaXblVdeKUm67bbb9P3332vjxo2SpPfee0/Dhg1TRESE+/v+9Kc/6be//a0GDx6st99+W4sXL9aSJUvUq1cv929VpapFC4qLiz3ef/7557rxxhsVHBysV155RQsWLNCSJUv0q1/9SsaY2v5R1EhVMyqU//2TJ09qxYoV1Z6/9kKLNjgcDn3wwQdatWqVJk2a5H7gr3///srLy5MkRUZGauPGjfroo4904403avny5Ro5cqTGjh1bzSsDgIp4eAxAk9CuXTuFhoaquLhYycnJ522bmpqqX//61+7hCN9//72mTJni0eaDDz7QNddcozfeeMPjeHZ2tkcArkx4eHiF/ytfUoXe0n//+98KDg7W4sWL5XQ63cdnzZpV4bPVXeGrbD7bHTt2qHPnzu7jhYWF2rNnzwX/bCqzbNkyFRQUVHiYrLLfLikp0c6dO3XRRRe5j2dlZSk7O7vCXLuDBg3SoEGD9Mc//lFz5szR7bffrvT0dN1zzz2SrF7mG264QTfccINKSkr0m9/8Rq+99poee+yxCr3CAFAd9NgCaBL8/f11880369///re2bNlS4fyPP/7o3m/durVSUlL03nvvKT09XUFBQUpNTa3wfef2mr7//vs6dOjQBWvp0qWLvvvuO4/f/Pbbb/Xll19W+A2Hw+HRk7t3795KF2Jo2bJlhem6KpOcnKygoCC99NJLHvW/8cYbysnJqdUctAsWLNBll12mqKio87Yr69H961//6nH8xRdflCT3bx8/frzCn22/fv0kyT0c4ejRox7n/fz81KdPH482AFBT9NgCaDKmTp2q5cuXa+DAgbr33nvVs2dPHTt2TN98840+/fRTHTt2zN321ltv1R133KFXXnlFKSkp7geayvz85z/X008/rbvuuktXXHGFNm/erH/9618evaBVufvuu/Xiiy8qJSVF48eP15EjR/Tqq6+qV69eys3Ndbe7/vrr9eKLL2rEiBH61a9+pSNHjujvf/+7unbtqk2bNnl8Z//+/fXpp5/qxRdfVGxsr
BISEjRw4MAKv92uXTtNmTJFTz31lEaMGKEbb7xRO3bs0CuvvKIBAwZ4PChWXQsWLHCPUz6fvn37auzYsXr99deVnZ2tIUOGaO3atXrzzTeVmpqqa665RpL05ptv6pVXXtEvfvELdenSRSdOnNA//vEPhYWFucPxPffco2PHjunaa69VXFyc9u3bp7/97W/q16+fR28wANSIjTMyAECNZWVlmYkTJ5r4+HgTGBhooqOjzbBhw8zrr7/u0S43N9eEhIQYSebtt9+u8D2nT582Dz74oImJiTEhISHmyiuvNKtWraowlVdl030ZY8zbb79tOnfubIKCgky/fv3M4sWLK53u64033jDdunUzTqfT9OjRw8yaNavCNFrGGPPdd9+ZwYMHu2sum/rr3Om+yrz88sumR48eJjAw0ERFRZkJEyaY48ePe7QZMmSI6dWrV4VrL1/nli1bjCSzdu3aCu0qq7OoqMg89dRTJiEhwQQGBpr4+HgzZcoUc/r0aXebb775xowePdp06NDBOJ1OExkZaX7+85+bdevWudt88MEHZvjw4SYyMtIEBQWZDh06mF//+tcmIyOjQh0AUF0OYxrpCQYAgNeZNm2aXnzxRWVkZFR7nC8AeCvG2AJAM9apUyf95S9/IdQC8An02AIAAMAn0GMLAAAAn0CwBQAAgE8g2AIAAMAn+MQ8tiUlJTp8+LBCQ0N5AAIAAMALGWN04sQJxcbGys+vYfpWfSLYHj58WPHx8XaXAQAAgAs4cOCA4uLiGuS7fSLYhoaGSrL+oMLCwmyuBgAAAOfKzc1VfHy8O7c1BJ8ItmXDD8LCwgi2AAAAXqwhh43y8BgAAAB8AsEWAAAAPoFgCwAAAJ9AsAUAAIBPINgCAADAJ9Qo2M6YMUN9+vRxzz6QlJSkhQsXVtl+9uzZcjgcHltwcLBHG2OMHn/8ccXExCgkJETJycnauXNn7a4GAAAAzVaNgm1cXJymTp2q9evXa926dbr22ms1atQobd26tcrPhIWFKSMjw73t27fP4/y0adP00ksv6dVXX9WaNWvUsmVLpaSk6PTp07W7IgAAADRLNZrH9oYbbvB4/8c//lEzZszQ6tWr1atXr0o/43A4FB0dXek5Y4z++te/6tFHH9WoUaMkSW+99ZaioqI0b9483XbbbTUpDwAAAM1YrcfYFhcXKz09Xfn5+UpKSqqyXV5enjp27Kj4+PgKvbt79uxRZmamkpOT3cdcLpcGDhyoVatW1bY0AAAANEM1Xnls8+bNSkpK0unTp9WqVSvNnTtXPXv2rLRtYmKiZs6cqT59+ignJ0cvvPCCrrjiCm3dulVxcXHKzMyUJEVFRXl8Lioqyn2uMgUFBSooKHC/z83NrellAAAAwMfUuMc2MTFRGzdu1Jo1azRhwgSNHTtW27Ztq7RtUlKSxowZo379+mnIkCH68MMP1a5dO7322mt1KjotLU0ul8u9xcfH1+n7AAAA0PTVONgGBQWpa9eu6t+/v9LS0tS3b19Nnz69Wp8NDAzUJZdcol27dkmSe+xtVlaWR7usrKwqx+VK0pQpU5STk+PeDhw4UNPLAAAAgI+p8zy2JSUlHsMCzqe4uFibN29WTEyMJCkhIUHR0dFaunSpu01ubq7WrFlz3nG7TqfTPeVY2QYAAIDmrUZjbKdMmaKRI0eqQ4cOOnHihObMmaMVK1Zo8eLFkqQxY8aoffv2SktLkyQ9/fTTGjRokLp27ars7Gw9//zz2rdvn+655x5J1owJkydP1rPPPqtu3bopISFBjz32mGJjY5Wamlq/V1qfigqlPVuk4BZShx52VwMAAADVMNgeOXJEY8aMUUZGhlwul/r06aPFixfrZz/7mSRp//798vM72wl8/Phx3XvvvcrMzFR4eLj69++vr776yuNhs9/97nfKz8/Xfffdp+zsbF111VVatGhRhYUcvMrSd6TF/ytdco005n/srgYAAACSHMYYY3cRdZWbmyuXy6WcnJzGGZawe4v0t8lSyzDp6Q8kP1YmBgAAOJ/GyGskstro2MMahpCfKx3aZXc1AAAAEMG2dvwDpG6XWPs71tlbCwAAACQRbGsvsb/1umO9vXUAAABAEsG29rqXBts9W6WCU/bWAgAAAIJtrUXESm2ipeIz0q5v7a4GAACg2SPY1pbDISVeZu0zHAEAAMB2BNu66FE6HOF7gi0AAIDdCLZ10e0SyeEnZe2Xjh+xuxoAAIBmjWBbFyGtrDltJXptAQAAbEawrSum/QIAAPAKBNu66l4u2JYU21sLAABAM0awrauy5XVPnpAOsrwuAACAXQi2deWxvC7DEQAAAOxCsK0PiUz7BQAAYDeCbX0oW6iB5XUBAABsQ7CtDxGxUtsYltcFAACwEcG2vjDtFwAAgK0ItvWFYAsAAGArgm19KVte98h+6XiW3dUAAAA0OwTb+lJ+ed0d39hbCwAAQDNEsK1P7uEI6+ytAwAAoBki2Nansmm/vv+G5XUBAAAaGcG2PnVgeV0AAAC7EGzrk7+/1O1Sa5/ZEQAAABoVwba+JZYFW8bZAgAANCaCbX0rG2e7d5t0+qS9tQAAADQjBNv6Vn553R822V0NAABAs0GwbQhM+wUAANDoCLYNoWw4Ags1AAAANBqCbUPo1k/yY3ldAACAxkSwbQghraQOF1n79NoCAAA0CoJtQ2GcLQAAQKMi2DaUsmDL8roAAACNgmDbUDyW191pdzUAAAA+j2DbUFheFwAAoFERbBuSe5wtwRYAAKChEWwbUlmwZXldAACABkewbUgRsdbG8roAAAANrkbBdsaMGerTp4/CwsIUFhampKQkLVy4sMr2//jHP3T11VcrPDxc4eHhSk5O1tq1az3ajBs3Tg6Hw2MbMWJE7a7GGzHtFwAAQKOoUbCNi4vT1KlTtX79eq1bt07XXnutRo0apa1bt1bafsWKFRo9erSWL1+uVatWKT4+XsOHD9ehQ4c82o0YMUIZGRnu7Z133qn9FXmb7oyzBQAAaAwOY4ypyxe0adNGzz//vMaPH3/BtsXFxQoPD9fLL7+sMWPGSLJ6bLOzszVv3rxa15CbmyuXy6WcnByFhYXV+nsaxKk86dGbpJIS6bF/SW2i7K4IAACg0TVGXqv1GNvi4mKlp6crPz9fSUlJ1frMyZMnVVRUpDZt2ngcX7FihSIjI5WYmKgJEybo6NGj5/2egoIC5ebmemxeq/zyut/TawsAANBQahxsN2/erFatWsnpdOr+++/X3Llz1bNnz2p99pFHHlFsbKySk5Pdx0aMGKG33npLS5cu1XPPPaeVK1dq5MiRKi6uerWutLQ0uVwu9xYfH1/Ty2hcZeNsv2OcLQAAQEOp8VCEwsJC7d+/Xzk5Ofrggw/0z3/+UytXrrxguJ06daqmTZumFStWqE+fPlW22717t7p06aJPP/1Uw4YNq7RNQUGBCgoK3O9zc3MVHx/vnUMRJGu6r+n/LbUIlZ75QPLzt7siAACARuWVQxGCgoLUtWtX9e/fX2lpaerbt6+mT59+3s+88MILmjp1qj755JPzhlpJ6ty5syIi
IrRr164q2zidTvfMDGWbV4tPlIJbsrwuAABAA6rzPLYlJSUevafnmjZtmp555hktWrRIl1122QW/7+DBgzp69KhiYmLqWpr38PeXul9i7TM7AgAAQIOoUbCdMmWKPvvsM+3du1ebN2/WlClTtGLFCt1+++2SpDFjxmjKlCnu9s8995wee+wxzZw5U506dVJmZqYyMzOVl5cnScrLy9PDDz+s1atXa+/evVq6dKlGjRqlrl27KiUlpR4v0wsw7RcAAECDCqhJ4yNHjmjMmDHKyMiQy+VSnz59tHjxYv3sZz+TJO3fv19+fmez8owZM1RYWKhf/vKXHt/zxBNP6Mknn5S/v782bdqkN998U9nZ2YqNjdXw4cP1zDPPyOl01sPleZGyB8j2bLWW1w1uYW89AAAAPqbO89h6A6+ex7a8P46RfjosjX9G6l29KdIAAAB8gVc+PIY6YHldAACABkOwbUyJpQ/PMc4WAACg3hFsG1PXvpKfn/TjQelYlt3VAAAA+BSCbWMKaSV1ZHldAACAhkCwbWzdWV4XAACgIRBsG1uP0nG2OzdIJcX21gIAAOBDCLaNrfzyugdYXhcAAKC+EGwbm8fyugxHAAAAqC8EWzsw7RcAAEC9I9jaoWyhhr3brOV1AQAAUGcEWzu0jZEi2lsPj+361u5qAAAAfALB1i6Jl1qvjLMFAACoFwRbuzDOFgAAoF4RbO3isbxupt3VAAAANHkEW7uUX16XXlsAAIA6I9jaieEIAAAA9YZga6eyab9YXhcAAKDOCLZ2ik+0hiSwvC4AAECdEWzt5O8vdWN5XQAAgPpAsLVb2XAExtkCAADUCcHWbh7L6+bbWwsAAEATRrC1G8vrAgAA1AuCrTdgOAIAAECdEWy9gTvY8gAZAABAbRFsvUG3fqXL6x5ieV0AAIBaIth6g+CWUsee1j7DEQAAAGqFYOstGGcLAABQJwRbb1EWbL//huV1AQAAaoFg6y3Kltc9lScd+N7uagAAAJocgq238Fhel+EIAAAANUWw9SZlwxG+Y9ovAACAmiLYepOyYLuP5XUBAABqimDrTdrGSO3aSyUlLK8LAABQQwRbb9Odab8AAABqg2DrbVheFwAAoFYItt6m/PK6RzPsrgYAAKDJINh6G5bXBQAAqJUaBdsZM2aoT58+CgsLU1hYmJKSkrRw4cLzfub9999Xjx49FBwcrIsvvlgLFizwOG+M0eOPP66YmBiFhIQoOTlZO3furPmV+JIel1mvBFsAAIBqq1GwjYuL09SpU7V+/XqtW7dO1157rUaNGqWtW7dW2v6rr77S6NGjNX78eG3YsEGpqalKTU3Vli1b3G2mTZuml156Sa+++qrWrFmjli1bKiUlRadPn67blTVlZeNsd26QilleFwAAoDocxhhTly9o06aNnn/+eY0fP77CuVtvvVX5+fmaP3+++9igQYPUr18/vfrqqzLGKDY2Vg8++KAeeughSVJOTo6ioqI0e/Zs3XbbbdWqITc3Vy6XSzk5OQoLC6vL5XiHkmLp0Zut5XX/e7qU0MvuigAAAOqkMfJarcfYFhcXKz09Xfn5+UpKSqq0zapVq5ScnOxxLCUlRatWrZIk7dmzR5mZmR5tXC6XBg4c6G7TLPn5nx2OsG2NvbUAAAA0ETUOtps3b1arVq3kdDp1//33a+7cuerZs2elbTMzMxUVFeVxLCoqSpmZme7zZceqalOZgoIC5ebmemw+p+cg63XbanvrAAAAaCJqHGwTExO1ceNGrVmzRhMmTNDYsWO1bdu2hqitSmlpaXK5XO4tPj6+UX+/UfQYIDn8pMO7peNH7K4GAADA69U42AYFBalr167q37+/0tLS1LdvX02fPr3SttHR0crKyvI4lpWVpejoaPf5smNVtanMlClTlJOT494OHDhQ08vwfq1cUqeLrH2GIwAAAFxQneexLSkpUUFBQaXnkpKStHTpUo9jS5YscY/JTUhIUHR0tEeb3NxcrVmzpspxu5LkdDrdU46VbT6J4QgAAADVFlCTxlOmTNHIkSPVoUMHnThxQnPmzNGKFSu0ePFiSdKYMWPUvn17paWlSZIeeOABDRkyRH/+8591/fXXKz09XevWrdPrr78uSXI4HJo8ebKeffZZdevWTQkJCXrssccUGxur1NTU+r3SpqjXIOnjN6xpvwpPS0HBdlcEAADgtWoUbI8cOaIxY8YoIyNDLpdLffr00eLFi/Wzn/1MkrR//375+Z3tBL7iiis0Z84cPfroo/rDH/6gbt26ad68eerdu7e7ze9+9zvl5+frvvvuU3Z2tq666iotWrRIwcGEOEV3ksKjpONZ0s6NVtAFAABApeo8j6038Ll5bMv74CXpy4+kpJ9L/zXZ7moAAABqxavnsUUj6VVunG3T/zsIAABAgyHYeruu/ayxtTk/SYd/sLsaAAAAr0Ww9XaBQVL3S6x9pv0CAACoEsG2KSib9msr034BAABUhWDbFFw00Hrd/5104ri9tQAAAHgpgm1T0DpCiutmPTy2/Wu7qwEAAPBKBNumomdpry2rkAEAAFSKYNtUlI2z/W6ddKbI3loAAAC8EMG2qYjvLrVqLRWclHZvtrsaAAAAr0OwbSr8/MoNR2DaLwAAgHMRbJuS8quQAQAAwAPBtinp3l/yD5B+PCQdOWh3NQAAAF6FYNuUBLeQuvS19um1BQAA8ECwbWrKxtmyChkAAIAHgm1TUzbOdvdm6VSevbUAAAB4EYJtUxMRK0V2kEqKrTltAQAAIIlg2zT1YtovAACAcxFsm6KyVci2r7V6bgEAAECwbZISekkhraT8HGn/DrurAQAA8AoE26bIP0DqMcDaZ3YEAAAASQTbpotVyAAAADwQbJuqHpdJDj/p8G7peJbd1QAAANiOYNtUtXRJnS6y9pkdAQAAgGDbpJXNjkCwBQAAINg2aWXjbHdukApP21sLAACAzQi2TVl0Jyk8SioqlHZutLsaAAAAWxFsmzKH42yvLdN+AQCAZo5g29T1LFted7VkjL21AAAA2Ihg29R17ScFBUs5P0mHf7C7GgAAANsQbJu6wCCp+yXWPsMRAABAM0aw9QU9WYUMAACAYOsLLiodZ7t/h3TiuL21AAAA2IRg6wtaR0hx3ayHx7Z/bXc1AAAAtiDY+gqGIwAAgGaOYOsrepUOR/hunXSmyN5aAAAAbECw9RVx3aXQcKngpLR7s93VAAAANDqCra/w85MuutzaZ9ovAADQDBFsfUnZ8rrb1thbBwAAgA1qFGzT0tI0YMAAhYaGKjIyUqmpqdqxY8d5PzN06FA5HI4K2/XXX+9uM27cuArnR4wYUbsras6695f8A6SfDklHDtpdDQAAQKOqUbBduXKlJk6cqNWrV2vJkiUqKirS8OHDlZ+fX+VnPvzwQ2VkZLi3LVu2yN/fX7fccotHuxEjRni0e+edd2p3Rc1ZcAupS19rn9kRAABAMxNQk8aLFi3yeD979mxFRkZq/fr1Gjx4cKWfadOmjcf79PR0tWjRokKwdTqdio6Orkk5qEyvQdL3661xtkN/aXc1AAA
AjaZOY2xzcnIkVQyv5/PGG2/otttuU8uWLT2Or1ixQpGRkUpMTNSECRN09OjRKr+joKBAubm5HhtK9Syd9mv3ZulUnr21AAAANKJaB9uSkhJNnjxZV155pXr37l2tz6xdu1ZbtmzRPffc43F8xIgReuutt7R06VI999xzWrlypUaOHKni4uJKvyctLU0ul8u9xcfH1/YyfE9ErBTVQSoptua0BQAAaCYcxhhTmw9OmDBBCxcu1BdffKG4uLhqfebXv/61Vq1apU2bNp233e7du9WlSxd9+umnGjZsWIXzBQUFKigocL/Pzc1VfHy8cnJyFBYWVrML8UUfvSYtf1+6LFm6/fd2VwMAAKDc3Fy5XK4GzWu16rGdNGmS5s+fr+XLl1c71Obn5ys9PV3jx4+/YNvOnTsrIiJCu3btqvS80+lUWFiYx4ZyypbX3b7W6rkFAABoBmoUbI0xmjRpkubOnatly5YpISGh2p99//33VVBQoDvuuOOCbQ8ePKijR48qJiamJuWhTEIvKaSVlJ8r7T//dGwAAAC+okbBduLEiXr77bc1Z84chYaGKjMzU5mZmTp16pS7zZgxYzRlypQKn33jjTeUmpqqtm3behzPy8vTww8/rNWrV2vv3r1aunSpRo0apa5duyolJaWWl9XM+QdIPQZY+6xCBgAAmokaBdsZM2YoJydHQ4cOVUxMjHt799133W3279+vjIwMj8/t2LFDX3zxRaXDEPz9/bVp0ybdeOON6t69u8aPH6/+/fvr888/l9PprOVl4ewqZARbAADQPNRoHtvqPGe2YsWKCscSExOr/GxISIgWL15ckzJQHT0ukxx+0uHd0vEsKTzK7ooAAAAaVJ3msYUXa+mSOvW09retsbcWAACARkCw9WVlizUwzhYAADQDBFtfVjbOducGqeDU+dsCAAA0cQRbXxbdyRpbe6ZI2rnR7moAAAAaFMHWlzkc5WZHYJwtAADwbQRbX1c2znbbaql2qycDAAA0CQRbX9e1nxQULOX8JB3+we5qAAAAGgzB1tcFBkndL7X2mR0BAAD4MIJtc1B+OAIAAICPItg2BxeVBtv9O6QTx+2tBQAAoIEQbJuD1hFSXDfr4bHta+2uBgAAoEEQbJuLnkz7BQAAfBvBtrnoVToc4bt11oINAAAAPoZg21zEdZdCw6WCk9LuzXZXAwAAUO8Its2Fn9/Z2RGY9gsAAPgggm1zwipkAADAhxFsm5Pu/SX/AOmnw9KPB+2uBgAAoF4RbJuT4BZSl77WPsMRAACAjyHYNje9mPYLAAD4JoJtc1M2znb3ZulUnr21AAAA1COCbXMTEStFdZBKiq05bQEAAHwEwbY5cq9CxjhbAADgOwi2zVHZONvta62eWwAAAB9AsG2OOvWSQlpJ+bnSvu/srgYAAKBeEGybI39/qccAa3/rKntrAQAAqCcE2+aqd5L1umGFVFJiaykAAAD1gWDbXPW+QgpuKR3LlHZttLsaAACAOiPYNldBwVL/Ydb+6oX21gIAAFAPCLbN2aCR1uumL6T8HHtrAQAAqCOCbXMW183aioukdZ/aXQ0AAECdEGybu7Je29ULJWPsrQUAAKAOCLbN3aXXSoFOKXOvtG+73dUAAADUGsG2uQtpJfUdbO3zEBkAAGjCCLaQBl1nvW5YLp0+aW8tAAAAtUSwhdS5t9QuTio8LW1cYXc1AAAAtUKwheRwlHuIbIG9tQAAANQSwRaWAcMlP39p33fS4d12VwMAAFBjNQq2aWlpGjBggEJDQxUZGanU1FTt2LHjvJ+ZPXu2HA6HxxYcHOzRxhijxx9/XDExMQoJCVFycrJ27txZ86tB7YWGW8vsStKaRfbWAgAAUAs1CrYrV67UxIkTtXr1ai1ZskRFRUUaPny48vPzz/u5sLAwZWRkuLd9+/Z5nJ82bZpeeuklvfrqq1qzZo1atmyplJQUnT59uuZXhNorG46wbolUVGhvLQAAADUUUJPGixZ59uTNnj1bkZGRWr9+vQYPHlzl5xwOh6Kjoys9Z4zRX//6Vz366KMaNWqUJOmtt95SVFSU5s2bp9tuu60mJaIuEvtLrdtJ2T9Km7+ULr3G7ooAAACqrU5jbHNyciRJbdq0OW+7vLw8dezYUfHx8Ro1apS2bt3qPrdnzx5lZmYqOTnZfczlcmngwIFatWpVXcpDTfn5SwNHWPs8RAYAAJqYWgfbkpISTZ48WVdeeaV69+5dZbvExETNnDlT//nPf/T222+rpKREV1xxhQ4ePChJyszMlCRFRUV5fC4qKsp97lwFBQXKzc312FBPLh9hzZKwc4P002G7qwEAAKi2WgfbiRMnasuWLUpPTz9vu6SkJI0ZM0b9+vXTkCFD9OGHH6pdu3Z67bXXavvTSktLk8vlcm/x8fG1/i6co02UNSRB4iEyAADQpNQq2E6aNEnz58/X8uXLFRcXV6PPBgYG6pJLLtGuXbskyT32Nisry6NdVlZWleNyp0yZopycHPd24MCBWlwFqjSwdCWytYul4mJ7awEAAKimGgVbY4wmTZqkuXPnatmyZUpISKjxDxYXF2vz5s2KiYmRJCUkJCg6OlpLly51t8nNzdWaNWuUlJRU6Xc4nU6FhYV5bKhHvZOkli4p96i0fa3d1QAAAFRLjYLtxIkT9fbbb2vOnDkKDQ1VZmamMjMzderUKXebMWPGaMqUKe73Tz/9tD755BPt3r1b33zzje644w7t27dP99xzjyRrxoTJkyfr2Wef1UcffaTNmzdrzJgxio2NVWpqav1cJWomINBasEGS1vAQGQAAaBpqNN3XjBkzJElDhw71OD5r1iyNGzdOkrR//375+Z3Ny8ePH9e9996rzMxMhYeHq3///vrqq6/Us2dPd5vf/e53ys/P13333afs7GxdddVVWrRoUYWFHNCIBo2QVrwvbVsj5fwkuSLsrggAAOC8HMYYY3cRdZWbmyuXy6WcnByGJdSnlx6Q9myVrr9bSv6V3dUAAIAmrDHyWp3msYWPG1T6ENmaRVJJib21AAAAXADBFlXrO1gKbmHNZ/vDJrurAQAAOC+CLarmDJEuvdbaZyUyAADg5Qi2OL+y4QibPpfyWeENAAB4L4Itzi+um9S+i3SmSFq/9MLtAQAAbEKwxfk5HGd7bVcvkJr+JBoAAMBHEWxxYZdeKwUGSRl7pP077K4GAACgUgRbXFiLUKnPYGufh8gAAICXItiiegaNtF43LJcKTp2/LQAAgA0ItqieLn2kiPZWqN24wu5qAAAAKiDYonocjrO9tqsX2lsLAABAJQi2qL4BwyU/P2nvNiljr93VAAAAeCDYovrC2ki9kqz9NfTaAgAA70KwRc2UzWm7bol0ptDeWgAAAMoh2KJmelwmuSKs5XU3f2V3NQAAAG4EW9SMn780cIS1z0NkAADAixBsUXMDR1izJHy/XjqaYXc1AAAAkgi2qI020VK3S639tYvtrQUAAKAUwRa1Uzan7ZpFUnGxvbUAAACIYIvauvgKqWWYlPOT9N3XdlcDAABAsEUtBQRJl/3M2mdOWwAA4AUItq
i9gaXDEbauknKP2VsLAABo9gi2qL2YTlKnnlJJifT1J3ZXAwAAmjmCLeqm7CGy1QslY+ytBQAANGsEW9RNv6GSs4X00yHph012VwMAAJoxgi3qxhkiXXqNtc9KZAAAwEYEW9Rd2XCETZ9JJ0/YWwsAAGi2CLaou/hEKbazVFQofbPM7moAAEAzRbBF3TkcZ6f+WrWAh8gAAIAtCLaoH/2HSQGB0uEfpIM77a4GAAA0QwRb1I+WYVKfq639VQvsrQUAADRLBFvUn0HXWa/fLJMKTtlbCwAAaHYItqg/XfpIEbFSwUnp28/srgYAADQzBFvUHz8/aeAIa381wxEAAEDjItiifg0YbgXcPVulrH12VwMAAJoRgi3qlytC6jnQ2l+9yN5aAABAs0KwRf0bdL31+vUn0pkie2sBAADNBsEW9a/HAMnVVsrPkbassrsaAADQTNQo2KalpWnAgAEKDQ1VZGSkUlNTtWPHjvN+5h//+IeuvvpqhYeHKzw8XMnJyVq7dq1Hm3HjxsnhcHhsI0aMqPnVwDv4+0uXp1j7a3iIDAAANI4aBduVK1dq4sSJWr16tZYsWaKioiINHz5c+fn5VX5mxYoVGj16tJYvX65Vq1YpPj5ew4cP16FDhzzajRgxQhkZGe7tnXfeqd0VwTtcXvoXkx3rpYw99tYCAACaBYcxxtT2wz/++KMiIyO1cuVKDR48uFqfKS4uVnh4uF5++WWNGTNGktVjm52drXnz5tWqjtzcXLlcLuXk5CgsLKxW34EGMPMJafOXUkwnafLfpSCn3RUBAACbNEZeq9MY25ycHElSmzZtqv2ZkydPqqioqMJnVqxYocjISCUmJmrChAk6evRold9RUFCg3Nxcjw1e6JbJUmi4lLFX+uhVu6sBAAA+rtbBtqSkRJMnT9aVV16p3r17V/tzjzzyiGJjY5WcnOw+NmLECL311ltaunSpnnvuOa1cuVIjR45UcXFxpd+RlpYml8vl3uLj42t7GWhIoeHSrx6x9r/8P2nT5/bWAwAAfFqthyJMmDBBCxcu1BdffKG4uLhqfWbq1KmaNm2aVqxYoT59+lTZbvfu3erSpYs+/fRTDRs2rML5goICFRQUuN/n5uYqPj6eoQje6v/+IS17VwppJT38mhQeZXdFAACgkXntUIRJkyZp/vz5Wr58ebVD7QsvvKCpU6fqk08+OW+olaTOnTsrIiJCu3btqvS80+lUWFiYxwYvdt1dUoce0qk86X/TpCp64gEAAOqiRsHWGKNJkyZp7ty5WrZsmRISEqr1uWnTpumZZ57RokWLdNlll12w/cGDB3X06FHFxMTUpDx4K/8A6c4/SM4W0p4t0pK37a4IAAD4oBoF24kTJ+rtt9/WnDlzFBoaqszMTGVmZurUqVPuNmPGjNGUKVPc75977jk99thjmjlzpjp16uT+TF5eniQpLy9PDz/8sFavXq29e/dq6dKlGjVqlLp27aqUlJR6ukzYLiJW+q/J1v4n/5J2fWtrOQAAwPfUKNjOmDFDOTk5Gjp0qGJiYtzbu+++626zf/9+ZWRkeHymsLBQv/zlLz0+88ILL0iS/P39tWnTJt14443q3r27xo8fr/79++vzzz+X08n0UD7l0muthRtMifR2mrUyGQAAQD2p0zy23oJ5bJuQglPSi7+RjhyQel8h3f2U5HDYXRUAAGhgXvvwGFBrzhBrvK1/oLTlK+nLj+yuCAAA+AiCLRpfXDfphnut/f+8Kh36wd56AACATyDYwh6DfyH1HCSdKZLe+qM1RAEAAKAOCLawh8MhjX5YCmsrHdkvzXvF7ooAAEATR7CFfVq5pDumWCF39UJpw3K7KwIAAE0YwRb26tZPSh5t7b/3F+loxnmbAwAAVIVgC/uljJU69ZROn5T+909S8Rm7KwIAAE0QwRb28/e3pgALbint2y4tetPuigAAQBNEsIV3aBMt3fqgtb80Xfr+G3vrAQAATQ7BFt6j32Ap6XrJGOlfz0l52XZXBAAAmhCCLbxL6gQpqqOUe1SaM00qKbG7IgAA0EQQbOFdgoKlMf8jBQZJ29dKn8+1uyIAANBEEGzhfWI7S6Put/b/7x/SgZ321gMAAJoEgi280xU3SBdfZU399b/PWlOBAQAAnAfBFt7J4ZBu/a3Uup304yHpw5ftrggAAHg5gi28V8sw6Y4/SA4/6etPpPVL7a4IAAB4MYItvFuXi6Xhd1j77/9V+umwreUAAADvRbCF9xt+u9Slj1RwSnrrWelMkd0VAQAAL0Swhffz85du/73UIlQ68L20YKbdFQEAAC9EsEXTEB4p3faQtb/8fem7r+2tBwAAeB2CLZqOi6+Urhpl7f/rOSn3mL31AAAAr0KwRdNyw31STIKUl22FW5bcBQAApQi2aFqCnNLYR6VAp/T9emnF+3ZXBAAAvATBFk1PVEfpponW/sczpX3b7a0HAAB4BYItmqaBI6W+g6WSYum1KdLuzXZXBAAAbEawRdNUtuRup17SqTxpxu+kTZ/bXRUAALARwRZNV0gracI0a7aEM0XS7KelL/5jd1UAAMAmBFs0bUFOadzj0hU/l4yR/v03a9ytMXZXBgAAGhnBFk2fn7/0ywekkeOs95/Okd55Xio+Y2tZAACgcRFs4RscDmn4HdJtD0p+ftLXn0j/fEwqOGV3ZQAAoJEQbOFbBo6U7n5aCgq2lt39+4PSieN2VwUAABoBwRa+p9cg6TcvSC1d0oHvpZcekH48ZHdVAACggRFs4Zs69pD+e7rUJlr66bAVbvfvsLsqAADQgAi28F2RcdLkv0lx3aS8bGtYwva1dlcFAAAaCMEWvi00XJr4Z6l7f6nwtPTPR6W1i+2uCgAANACCLXxfcAvp3mel/slSSYk1FdiSOcx1CwCAjyHYonkICJRuf0S69lbr/YKZ1mIOJcX21gUAAOpNjYJtWlqaBgwYoNDQUEVGRio1NVU7dlz4gZz3339fPXr0UHBwsC6++GItWLDA47wxRo8//rhiYmIUEhKi5ORk7dy5s2ZXAlyIwyHdcK/0i4nW/pcfSW8+IxUW2F0ZAACoBzUKtitXrtTEiRO1evVqLVmyREVFRRo+fLjy8/Or/MxXX32l0aNHa/z48dqwYYNSU1OVmpqqLVu2uNtMmzZNL730kl599VWtWbNGLVu2VEpKik6fPl37KwOqMvgX0phHJf9AadMX0quPSPm5dlcFAADqyGFM7Qca/vjjj4qMjNTKlSs1ePDgStvceuutys/P1/z5893HBg0apH79+unVV1+VMUaxsbF68MEH9dBDD0mScnJyFBUVpdmzZ+u22267YB25ublyuVzKyclRWFhYbS8Hzc2ub6U3HpdO50tRHaRfp0nhUXZXBQCAT2qMvFanMbY5OTmSpDZt2lTZZtWqVUpOTvY4lpKSolWrVkmS9uzZo8zMTI82LpdLAwcOdLcBGkTXvtL/+4vkipCy9kvTH5AO77a7KgAAUEu1DrYlJSWaPHmyrrzySvXu3bvKdpmZmYqK8uwFi4qKUmZmpvt82bGq2pyroKBAubm5HhtQK
7GdpQdekqI6Sjk/SX/7/6RdG+2uCgAA1EKtg+3EiRO1ZcsWpaen12c91ZKWliaXy+Xe4uPjG70G+JDwSKvnNqG3NSzh1SnSxpV2VwUAAGqoVsF20qRJmj9/vpYvX664uLjzto2OjlZWVpbHsaysLEVHR7vPlx2rqs25pkyZopycHPd24MCB2lwGcFbLMGnCNKnPVVJxkfTWs9Jnc+2uCgAA1ECNgq0xRpMmTdLcuXO1bNkyJSQkXPAzSUlJWrp0qcexJUuWKCkpSZKUkJCg6Ohojza5ublas2aNu825nE6nwsLCPDagzgKDpLGPSVfeaC3eMPfv0kevW4s6AAAArxdQk8YTJ07UnDlz9J///EehoaHuMbAul0shISGSpDFjxqh9+/ZKS0uTJD3wwAMaMmSI/vznP+v6669Xenq61q1bp9dff12S5HA4NHnyZD377LPq1q2bEhIS9Nhjjyk2Nlapqan1eKlANfj5Szf/P6l1hPTxTGn5e1LOj9JNk6SWLrurAwAA51Gj6b4cDkelx2fNmqVx48ZJkoYOHapOnTpp9uzZ7vPvv/++Hn30Ue3du1fdunXTtGnTdN1117nPG2P0xBNP6PXXX1d2drauuuoqvfLKK+revXu16mK6LzSItZ9I775g9dg6W0hDfykNvVkKbml3ZQAANDmNkdfqNI+ttyDYosHs+laa94p06Afrfcswadhoa7hCkNPe2gAAaEIIttVEsEWDKimRNn0uLZwtHSl9UNHVVhp+pzRwhORfoxE9AAA0SwTbaiLYolEUF0vrlkiL35KOH7GOtY2RRoyVLr3GGp8LAAAqRbCtJoItGtWZQmnVAmnJv6QTx61j0Z2k6+6Sel8hVTEWHQCA5oxgW00EW9ii4JT0+Txp2bvSqTzrWIdE6bq7pe6XEnABACiHYFtNBFvY6lSeNS3Yyg+lwtPWsa59rYCb0Mve2gAA8BIE22oi2MIrnDguffqO9OX/WauXSVLPQdYQhfZd7K0NAACbEWyriWALr3I8S/rkbWnt4rOrll0yVBoxToo8/xLUAAD4KoJtNRFs4ZWOHJQWvSltWG699/OTBqRIKXdI4VH21gYAQCMj2FYTwRZe7dAP1hy4W1dZ7/0DpStvkJJHS6HhtpYGAEBjIdhWE8EWTcLebdLHb1irmUlSULA0+CbpmlukFqH21gYAQAMj2FYTwRZNhjHS999IC2ZK+3dYx0JaWUv0Jl0ntYm2tz4AABoIwbaaCLZocoyRtnwlLZglZe61jjkcUuJl0hU/t2ZT8GclMwCA7yDYVhPBFk1WSbG0+Uvpq4+l79efPe5qKw0cKQ0ayYNmAACfQLCtJoItfMJPh6VVH1vThOVlW8ccDumiy6Wkn1uv9OICAJoogm01EWzhU84UWb24qz6Wdm44e7x1O2ngCGnQddY+AABNCMG2mgi28FlHDkqrP5bWfiLl51jHHH5Sz4FS0vXSRQMkP3pxAQDej2BbTQRb+LwzhdKmL6Sv5ks/bDp7PDzS6sEdOEJyRdhXHwAAF0CwrSaCLZqVrP3S6gXWWNyTJ6xjfn5SrySrFzfxMus9AABehGBbTQRbNEtFhdKmz61e3N2bzx5vE322FzesjX31AQBQDsG2mgi2aPYy91kPm339iXQqzzrm5y/1vsKaF7fbJfTiAgBsRbCtJoItUKqwQPp2pTUv7t6tZ4+3iZb6DbG2uG7WNGIAADQigm01EWyBSmTssYYprPtUOp1/9njbGKnvYGuL707IBQA0CoJtNRFsgfMoPC1tWyttXCFtX2u9L9Mm2gq4/YYQcgEADYpgW00EW6CaCk5J27+2hitsW1NFyB0sxScScgEA9YpgW00EW6AWynpyqwy5V5f25BJyAQB1R7CtJoItUEeFp61hChvPE3L7DpE6EHIBALVDsK0mgi1Qj8pC7refSVtXe4bc8KizwxU69CDkAgCqjWBbTQRboIEUnpa++9rqya0QciPPPnhGyAUAXADBtpoItkAjcIfcz6StqyqG3C59pbiuUvsuUvuuUkgr+2oFAHgdgm01EWyBRlZYUK4n95yQW6ZNtBVw47par+27SK4IenYBoJki2FYTwRawUWGBtHODdHCndGiXdHCXdDyr8rYtXWdDblngbdfeWv4XAODTCLbVRLAFvMzJE9KhH6ygW7Zl7ZdKSiq2DQqWYhLODmGI6ypFJ0hBzsavGwDQYBojrwU0yLcCaN5ahErd+llbmcICKXNvubD7g3R4tzWMYd92ayvj5ydFdvAMuzGdpZZhDGUAAFSJHlsA9ikpln48dLZ392Bp6M3Pqbx9cAupbYzUNrb0tXSLiLUeYPPn7+oA4K0YilBNBFvAhxgj5Rz1HMZwcJd0LPP8n3P4WeH23MBbtt8itHHqBwBUiqEIAJofh0NqHWFtvQadPV54WjqWJR3NsLafDp/dP5YhFRVa4fdYpvUw27lCWpX29EZXDL2tIyV/HmADgKaOYAugaQgKlqI7Wtu5SkqkE8fOBt2jGdJP5fZPHJNO5UkHv7e2c/n5W729LV2SM8Qa8uAMKd1anH0NPud9WZvgFtb7gEDGAAOAjQi2AJo+Pz9rjlxXhNT54ornC05ZPbkewfewdDTT6u09U3T2eJ3q8C8XiltIzuDS19Jjoa2l+ESp40VWkCYEA0C9qnGw/eyzz/T8889r/fr1ysjI0Ny5c5Wamlpl+3HjxunNN9+scLxnz57aunWrJOnJJ5/UU0895XE+MTFR3333XU3LA4CKnCHWlGIxCRXPlZRIucesgHsyTyo4aQXhgpPS6dLXsmOny50rOH32eNkCFSXF1lRnJ09cuKbQcKljD6nDRaWviVJwy/q9bgBoZmocbPPz89W3b1/dfffduummmy7Yfvr06Zo6dar7/ZkzZ9S3b1/dcsstHu169eqlTz/99GxhAXQmA2gEfn5nx/TWVklxaeA9NxCXheHSIHw8S9r3nXT4B+nEcWnLKmuTrN7byA5WyO1YGnajExj7CwA1UOP0OHLkSI0cObLa7V0ul1wul/v9vHnzdPz4cd11112ehQQEKDo6uqblAID9/Pyth9NCWlWvfWGBNdvDvu3S/u+ssHssU8raZ21rF1vtgoKluG6ePbut2zGEAQCq0Ojdom+88YaSk5PVsaPnAyA7d+5UbGysgoODlZSUpLS0NHXo0KHS7ygoKFBBQYH7fW5uboPWDAD1KsgpJfSytjInjlsBd//20tfvrN7e3ZutrUxY29KgW9qzG9/dGtcLAGjcYHv48GEtXLhQc+bM8Tg+cOBAzZ49W4mJicrIyNBTTz2lq6++Wlu2bFFoaMW5J9PS0iqMyQWAJi00XOqdZG2SNfb3xwNWyN273Qq8GXuk3KPS5i+tTbLm743uYPXodki05ustKZaKi63Xsu3c9yUlF25T4TPFkpE1fMPP/+zm71/FsfMdr+K9n78UEWONh2bBDQA1VKcFGhwOxwUfHisvLS1Nf/7zn3X48GEFBQVV2S47O1sdO3bUiy++qPHjx1c4X1mPbXx8PAs0APBthaelAzs9e3WPH7G7qoYREGgtqRyf
WLp1l6LireALoEnyqQUajDGaOXOm7rzzzvOGWklq3bq1unfvrl27dlV63ul0yul0NkSZAOC9goKlLhdbW5mco6XjdLdLB3daU5dVt2fU45jfhT9T9iBbScnZXt+qenbP2/tbUsW5Emuhjax91rzD+0rHH5e//rhuVsjtUBp228ZatQOAGjHYrly5Urt27aq0B/ZceXl5+uGHH3TnnXc2QmUA0IS52koXX2ltvsIYa57hAzukA9+f3QpPVxxzHNzSCrjusJvIHMFAM1bjYJuXl+fRk7pnzx5t3LhRbdq0UYcOHTRlyhQdOnRIb731lsfn3njjDQ0cOFC9e/eu8J0PPfSQbrjhBnXs2FGHDx/WE088IX9/f40ePboWlwQAaNIcDqlde2u79FrrWEmxdOSgFXb3lwbeQ7uk0/nWEsrll1Fu1fps2I3vboVdV1tbLgVA46pxsF23bp2uueYa9/vf/va3kqSxY8dq9uzZysjI0P79+z0+k5OTo3//+9+aPn16pd958OBBjR49WkePHlW7du101VVXafXq1WrXrl1NywMA+CI//7NLKg8Ybh0rPiNl7rVCblnYPbxbysuWtq+1tjKutmdDblw363taRzKMAfAxdXp4zFs0xmBkAEATUFRohVv3MIYdUuZ+yZRUbBvolCLjpKgOUlRHa4GMqHirpzjg/M+CAKg5n3p4DACABhcYVLp6W4+zxwpOWcMWynp2D++WfjwoFRVIh36wtvL8/KS2MVbgjexQGnw7SJHx1V+EA4AtCLYAAN/mDJE6X2xtZYqLpaMZ0pH9UtY5W8FJ6cdD1qZVnt/lausZdssCb1hbHlgDvABDEQAAKGOMtQhG+aB75ID1mnu06s8Ft7SGMUR1LA26baxjIS2tV/d+q7PTpgHNDEMRAABoTA6H5Iqwtu6Xep47lecZdMu2oxnW7AznzrtblaDgcmG3RcXgG9LSMxQTjoFqI9gCAFAdIa2kTj2trbwzhdawhfI9vCdzpVP5VuA9nW/tF5622heetrbz9QBfSGCQ5B9ordAWEGg97BYQWHHzP9+xcp/xD6h4LMhphfCgYCkoxHp1lr5nuWN4Kf6bCQBAXQQESTEJ1nY+xWfOhtzygbfCsbxz3p88u18WjosKrc0u/oFnQ25VmzOk3H4l5wOdpeOSHZJDKv0P65ij3L7k2absM2UfcX9HuTHOZccCAq3fCQw6+8pYaJ9GsAUAoDH4B0gtXdZWW2XhuOCUtXzymSKrx/hMkXXu3GPnbsWVHCtrf+65woKzvcuFp6zXktJp04qLpJNF0skT9fNn05g8gq7T6pku/z4wSAoM9gzDQcEV2wQFn+3ldvd4n9MD7u4dD7D2CdUNjmALAEBTUR/huLaMsQJtwelygfe0FbILzz12umIoPrdtUZEkY32vSp9jNzp7rPyz7WVtKj2mcsfLtTHGCuhFBdbKdWWKCqxNNoRyd+ANOCcQB0j+Qdbruef9A6wFSvz9PV/LNo/zfhXPndu+su+KSZBCwxv/z6MBEGwBAMCFORylY3CDpJZNbAai4uKzgbZsKyyUik6XDusoO1Z2vvDsa+E5bTzOF0hnzpT2lp/Tc15cdLaH211HaVtvM+ZR6ZKhdldRLwi2AADAt/n7S/4tpOAWjfu7JcWlwbf8cJAz5wwLKQ3FFY6XbiXF1lZcfHbf/b6kdP9M1e3KjlV1vKRYahHauH8uDYhgCwAA0BD8/KUgf0lOuytpNvzsLgAAAACoDwRbAAAA+ASCLQAAAHwCwRYAAAA+gWALAAAAn0CwBQAAgE8g2AIAAMAnEGwBAADgEwi2AAAA8AkEWwAAAPgEgi0AAAB8AsEWAAAAPoFgCwAAAJ9AsAUAAIBPCLC7gPpgjJEk5ebm2lwJAAAAKlOW08pyW0PwiWB74sQJSVJ8fLzNlQAAAOB8Tpw4IZfL1SDf7TANGZsbSUlJiQ4fPqzQ0FA5HA7l5uYqPj5eBw4cUFhYmN3loQFxr5sH7nPzwb1uHrjPzUf5ex0aGqoTJ04oNjZWfn4NMxrWJ3ps/fz8FBcXV+F4WFgY/8A0E9zr5oH73Hxwr5sH7nPzUXavG6qntgwPjwEAAMAnEGwBAADgE3wy2DqdTj3xxBNyOp12l4IGxr1uHrjPzQf3unngPjcfjX2vfeLhMQAAAMAne2wBAADQ/BBsAQAA4BMItgAAAPAJBFsAAAD4BJ8Mtn//+9/VqVMnBQcHa+DAgVq7dq3dJeE8PvvsM91www2KjY2Vw+HQvHnzPM4bY/T4448rJiZGISEhSk5O1s6dOz3aHDt2TLfffrvCwsLUunVrjR8/Xnl5eR5tNm3apKuvvlrBwcGKj4/XtGnTGvrSUE5aWpoGDBig0NBQRUZGKjU1VTt27PBoc/r0aU2cOFFt27ZVq1atdPPNNysrK8ujzf79+3X99derRYsWioyM1MMPP6wzZ854tFmxYoUuvfRSOZ1Ode3aVbNnz27oy0OpGTNmqE+fPu7J2JOSkrRw4UL3ee6xb5o6daocDocmT57sPsa99g1PPvmkHA6Hx9ajRw/3ea+7z8bHpKenm6CgIDNz5kyzdetWc++995rWrVubrKwsu0tDFRYsWGD+53/+x3z44YdGkpk7d67H+alTpxqXy2XmzZtnvv32W3PjjTeahIQEc+rUKXebESNGmL59+5rVq1ebzz//3HTt2tWMHj3afT4nJ8dERUWZ22+/3WzZssW88847JiQkxLz22muNdZnNXkpKipk1a5bZsmWL2bhxo7nuuutMhw4dTF5enrvN/fffb+Lj483SpUvNunXrzKBBg8wVV1zhPn/mzBnTu3dvk5ycbDZs2GAWLFhgIiIizJQpU9xtdu/ebVq0aGF++9vfmm3btpm//e1vxt/f3yxatKhRr7e5+uijj8zHH39svv/+e7Njxw7zhz/8wQQGBpotW7YYY7jHvmjt2rWmU6dOpk+fPuaBBx5wH+de+4YnnnjC9OrVy2RkZLi3H3/80X3e2+6zzwXbyy+/3EycONH9vri42MTGxpq0tDQbq0J1nRtsS0pKTHR0tHn++efdx7Kzs43T6TTvvPOOMcaYbdu2GUnm66+/drdZuHChcTgc5tChQ8YYY1555RUTHh5uCgoK3G0eeeQRk5iY2MBXhKocOXLESDIrV640xlj3NTAw0Lz//vvuNtu3bzeSzKpVq4wx1l+C/Pz8TGZmprvNjBkzTFhYmPve/u53vzO9evXy+K1bb73VpKSkNPQloQrh4eHmn//8J/fYB504ccJ069bNLFmyxAwZMsQdbLnXvuOJJ54wffv2rfScN95nnxqKUFhYqPXr1ys5Odl9zM/PT8nJyVq1apWNlaG29uzZo8zMTI976nK5NHDgQPc9XbVqlVq3bq3LLrvM3SY5OVl+fn5as2aNu83gwYMVFBTkbpOSkqIdO3bo+PHjjXQ1KC8nJ0eS1KZNG0nS+vXrVVRU5HGve/TooQ4dOnjc64svvlhRUVHuNikpKcrNzdXWrVvdbcp/R1kb/h3Q+IqLi5Wenq78/HwlJSVxj33QxIkTdf3111e4H9x
r37Jz507Fxsaqc+fOuv3227V//35J3nmffSrY/vTTTyouLvb4w5OkqKgoZWZm2lQV6qLsvp3vnmZmZioyMtLjfEBAgNq0aePRprLvKP8baDwlJSWaPHmyrrzySvXu3VuSdR+CgoLUunVrj7bn3usL3ceq2uTm5urUqVMNcTk4x+bNm9WqVSs5nU7df//9mjt3rnr27Mk99jHp6en65ptvlJaWVuEc99p3DBw4ULNnz9aiRYs0Y8YM7dmzR1dffbVOnDjhlfc5oEatAaAeTJw4UVu2bNEXX3xhdyloAImJidq4caNycnL0wQcfaOzYsVq5cqXdZaEeHThwQA888ICWLFmi4OBgu8tBAxo5cqR7v0+fPho4cKA6duyo9957TyEhITZWVjmf6rGNiIiQv79/hafxsrKyFB0dbVNVqIuy+3a+exodHa0jR454nD9z5oyOHTvm0aay7yj/G2gckyZN0vz587V8+XLFxcW5j0dHR6uwsFDZ2dke7c+91xe6j1W1CQsL88p/CfuioKAgde3aVf3791daWpr69u2r6dOnc499yPr163XkyBFdeumlCggIUEBAgFauXKmXXnpJAQEBioqK4l77qNatW6t79+7atWuXV/4z7VPBNigoSP3799fSpUvdx0pKSrR06VIlJSXZWBlqKyEhQdHR0R73NDc3V2vWrHHf06SkJGVnZ2v9+vXuNsuWLVNJSYkGDhzobvPZZ5+pqKjI3WbJkiVKTExUeHh4I11N82aM0aRJkzR37lwtW7ZMCQkJHuf79++vwMBAj3u9Y8cO7d+/3+Neb9682eMvMkuWLFFYWJh69uzpblP+O8ra8O8A+5SUlKigoIB77EOGDRumzZs3a+PGje7tsssu0+233+7e5177pry8PP3www+KiYnxzn+ma/y4mZdLT083TqfTzJ4922zbts3cd999pnXr1h5P48G7nDhxwmzYsMFs2LDBSDIvvvii2bBhg9m3b58xxpruq3Xr1uY///mP2bRpkxk1alSl031dcsklZs2aNeaLL74w3bp185juKzs720RFRZk777zTbNmyxaSnp5sWLVow3VcjmjBhgnG5XGbFihUe08acPHnS3eb+++83HTp0MMuWLTPr1q0zSUlJJikpyX2+bNqY4cOHm40bN5pFixaZdu3aVTptzMMPP2y2b99u/v73vzM9UCP6/e9/b1auXGn27NljNm3aZH7/+98bh8NhPvnkE2MM99iXlZ8VwRjuta948MEHzYoVK8yePXvMl19+aZKTk01ERIQ5cuSIMcb77rPPBVtjjPnb3/5mOnToYIKCgszll19uVq9ebXdJOI/ly5cbSRW2sWPHGmOsKb8ee+wxExUVZZxOpxk2bJjZsWOHx3ccPXrUjB492rRq1cqEhYWZu+66y5w4ccKjzbfffmuuuuoq43Q6Tfv27c3UqVMb6xJhTKX3WJKZNWuWu82pU6fMb37zGxMeHm5atGhhfvGLX5iMjAyP79m7d68ZOXKkCQkJMREREebBBx80RUVFHm2WL19u+vXrZ4KCgkznzp09fgMN6+677zYdO3Y0QUFBpl27dmbYsGHuUGsM99iXnRtsude+4dZbbzUxMTEmKCjItG/f3tx6661m165d7vPedp8dxhhT835eAAAAwLv41BhbAAAANF8EWwAAAPgEgi0AAAB8AsEWAAAAPoFgCwAAAJ9AsAUAAIBPINgCAADAJxBsAQAA4BMItgAAAPAJBFsAAAD4BIItAAAAfALBFgAAAD7h/wdtXsPv2SVEnwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tb_dir = os.path.join(WORK_DIR, \"tensorboard_output\")\n", + "fname = os.listdir(tb_dir)[0]\n", + "tb_path = os.path.join(tb_dir, fname)\n", + "#\n", + "data = read_tensorboard_file(tb_path)\n", + "print(data.keys())\n", + "_ = plot_image(data, \"loss\", 0.9)\n", + "_ = plot_image(data, \"lr\", 0)\n", + "_ = plot_image(data, \"evaluation/acc\", 0)\n", + "_ = plot_image(data, \"evaluation/loss\", 0)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 推理\n", + "推理部分见chatglm2_infer.ipynb" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "hackathon", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 45cf0035f414464cd8eabe099372893d72914e21 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Mon, 3 Jul 2023 23:16:38 +0800 Subject: [PATCH 05/87] fix chatglm2 evaluation error: hypothesis emtpy (#348) * fix evaluation error: hypothesis emtpy * fix pipeline * fix bug --- examples/pytorch/chatglm6b/chatglm_trainer.py | 6 ++++- examples/pytorch/chatglm6b/finetune.py | 14 +++++++---- .../chatglm6b/text_generation_metric.py | 2 +- .../pipelines/nlp/text_generation_pipeline.py | 23 ++++++++++++++----- 4 files changed, 33 insertions(+), 12 deletions(-) diff --git a/examples/pytorch/chatglm6b/chatglm_trainer.py b/examples/pytorch/chatglm6b/chatglm_trainer.py index b34563bd..efa4dfce 100644 --- a/examples/pytorch/chatglm6b/chatglm_trainer.py +++ b/examples/pytorch/chatglm6b/chatglm_trainer.py @@ -16,6 +16,8 @@ class Seq2SeqTrainer(EpochBasedTrainer): if ignore_pad_token_for_loss: tokens = np.where(tokens != -100, tokens, self.tokenizer.pad_token_id) + tokens = np.where(tokens < self.tokenizer.vocab_size, tokens, + self.tokenizer.pad_token_id) return [ t for t in self.tokenizer.batch_decode( tokens, skip_special_tokens=True) if t != '' @@ -59,7 +61,9 @@ class Seq2SeqTrainer(EpochBasedTrainer): gen_kwargs['input_ids'] = generation_inputs gen_kwargs['pad_token_id'] = self.tokenizer.pad_token_id - generated_tokens = self.model.generate(**gen_kwargs) + self.model.eval() + with torch.no_grad(): + generated_tokens = self.model.generate(**gen_kwargs) generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:] # in case the batch is shorter than max length, the output should be padded diff --git a/examples/pytorch/chatglm6b/finetune.py b/examples/pytorch/chatglm6b/finetune.py index 3fa73ba0..bf3953e8 100644 --- a/examples/pytorch/chatglm6b/finetune.py +++ b/examples/pytorch/chatglm6b/finetune.py @@ -192,8 +192,15 @@ if config['model']['type'] == 'chatglm6b': model_config['model']['prefix_projection'] = args.prefix_projection tokenizer = ChatGLMTokenizer.from_pretrained(model_dir, trust_remote_code=True) + +device_map_kwargs = {} +device_kwargs = {} +if args.use_lora != 0: + device_kwargs['device_map'] = 'auto' + # No placement for model, leave the model to `device_map` + device_kwargs['device'] = 'cpu' model = Model.from_pretrained( - model_dir, cfg_dict=model_config, device_map='auto') + model_dir, cfg_dict=model_config, **device_map_kwargs) if args.ptuning_checkpoint is not None: # 
Evaluation @@ -378,8 +385,7 @@ trainer = Seq2SeqTrainer( seed=args.seed, data_collator=data_collator, remove_unused_data=True, - # No placement for model, leave the model to `device_map` - device='cpu', - cfg_modify_fn=cfg_modify_fn) + cfg_modify_fn=cfg_modify_fn, + **device_kwargs) trainer.tokenizer = tokenizer trainer.train() diff --git a/examples/pytorch/chatglm6b/text_generation_metric.py b/examples/pytorch/chatglm6b/text_generation_metric.py index 2083453a..536bbe06 100644 --- a/examples/pytorch/chatglm6b/text_generation_metric.py +++ b/examples/pytorch/chatglm6b/text_generation_metric.py @@ -53,7 +53,7 @@ class TextGenerationMetric(Metric): } for pred, label in zip(preds, labels): hypothesis = list(jieba.cut(pred)) - if len(hypothesis) == 0: + if len(hypothesis) == 0 or ''.join(hypothesis) == '.': hypothesis = [''] reference = list(jieba.cut(label)) rouge = Rouge() diff --git a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index a0e8a0ee..a7806702 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -6,6 +6,7 @@ from typing import Any, Dict, Optional, Union import torch +from modelscope import snapshot_download from modelscope.metainfo import Pipelines from modelscope.models.base import Model from modelscope.outputs import (ModelOutputBase, OutputKeys, @@ -192,9 +193,14 @@ class ChatGLM6bTextGenerationPipeline(Pipeline): quantization_bit=None, use_bf16=False, **kwargs): - from modelscope.models.nlp.chatglm.text_generation import ChatGLMForConditionalGeneration - model = ChatGLMForConditionalGeneration(model) if isinstance( - model, str) else model + from modelscope.models.nlp.chatglm.text_generation import ChatGLMForConditionalGeneration, ChatGLMConfig + if isinstance(model, str): + model_dir = snapshot_download( + model) if not os.path.exists(model) else model + config = ChatGLMConfig.from_pretrained(model_dir) + model = ChatGLMForConditionalGeneration(config).half() + if torch.cuda.is_available(): + model = model.cuda() if quantization_bit is not None: model = model.quantize(quantization_bit) if use_bf16: @@ -225,9 +231,14 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): quantization_bit=None, use_bf16=False, **kwargs): - from modelscope.models.nlp import ChatGLM2ForConditionalGeneration, ChatGLM2Tokenizer - model = ChatGLM2ForConditionalGeneration(model) if isinstance( - model, str) else model + from modelscope.models.nlp import ChatGLM2ForConditionalGeneration, ChatGLM2Tokenizer, ChatGLM2Config + if isinstance(model, str): + model_dir = snapshot_download( + model) if not os.path.exists(model) else model + config = ChatGLM2Config.from_pretrained(model_dir) + model = ChatGLM2ForConditionalGeneration(config) + if torch.cuda.is_available(): + model = model.cuda() if quantization_bit is not None: model = model.quantize(quantization_bit) if use_bf16: From 08c71f1f3d160cdceb496f4f82d36ff7344da97a Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Tue, 4 Jul 2023 01:58:57 +0800 Subject: [PATCH 06/87] Fix/chatglm6b 2 (#354) --- examples/pytorch/chatglm6b/finetune.py | 2 +- modelscope/models/nlp/chatglm/text_generation.py | 1 - modelscope/pipelines/nlp/text_generation_pipeline.py | 7 +++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/examples/pytorch/chatglm6b/finetune.py b/examples/pytorch/chatglm6b/finetune.py index bf3953e8..6d04924c 100644 --- 
a/examples/pytorch/chatglm6b/finetune.py +++ b/examples/pytorch/chatglm6b/finetune.py @@ -196,7 +196,7 @@ tokenizer = ChatGLMTokenizer.from_pretrained(model_dir, trust_remote_code=True) device_map_kwargs = {} device_kwargs = {} if args.use_lora != 0: - device_kwargs['device_map'] = 'auto' + device_map_kwargs['device_map'] = 'auto' # No placement for model, leave the model to `device_map` device_kwargs['device'] = 'cpu' model = Model.from_pretrained( diff --git a/modelscope/models/nlp/chatglm/text_generation.py b/modelscope/models/nlp/chatglm/text_generation.py index 53f8f105..64b82862 100644 --- a/modelscope/models/nlp/chatglm/text_generation.py +++ b/modelscope/models/nlp/chatglm/text_generation.py @@ -931,7 +931,6 @@ class ChatGLMModel(ChatGLMPreTrainedModel): self.num_attention_heads, self.hidden_size // self.num_attention_heads) # seq_len, b, nh, hidden_size - print('#########################:', past_key_values.device) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) # past_key_values = [(v[0], v[1]) for v in past_key_values] diff --git a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index a7806702..cb9c89db 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -197,8 +197,8 @@ class ChatGLM6bTextGenerationPipeline(Pipeline): if isinstance(model, str): model_dir = snapshot_download( model) if not os.path.exists(model) else model - config = ChatGLMConfig.from_pretrained(model_dir) - model = ChatGLMForConditionalGeneration(config).half() + model = ChatGLMForConditionalGeneration.from_pretrained( + model_dir).half() if torch.cuda.is_available(): model = model.cuda() if quantization_bit is not None: @@ -235,8 +235,7 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): if isinstance(model, str): model_dir = snapshot_download( model) if not os.path.exists(model) else model - config = ChatGLM2Config.from_pretrained(model_dir) - model = ChatGLM2ForConditionalGeneration(config) + model = ChatGLM2ForConditionalGeneration.from_pretrained(model_dir) if torch.cuda.is_available(): model = model.cuda() if quantization_bit is not None: From 423e2ce9408967d0a10f32738c5f77d23fe8f330 Mon Sep 17 00:00:00 2001 From: Firmament-cyou <57580313+Firmament-cyou@users.noreply.github.com> Date: Tue, 4 Jul 2023 18:39:36 +0800 Subject: [PATCH 07/87] Add lora_inference for baichuan. 
(#352) * add lora_inference.py for baichuan * fix linttest * fix linttest --------- Co-authored-by: hemu --- examples/pytorch/baichuan/lora_inference.py | 28 +++ examples/pytorch/llm_agent/_common.py | 246 ++++++++++++-------- 2 files changed, 171 insertions(+), 103 deletions(-) create mode 100644 examples/pytorch/baichuan/lora_inference.py diff --git a/examples/pytorch/baichuan/lora_inference.py b/examples/pytorch/baichuan/lora_inference.py new file mode 100644 index 00000000..661e8493 --- /dev/null +++ b/examples/pytorch/baichuan/lora_inference.py @@ -0,0 +1,28 @@ +import os.path as osp + +import torch + +from modelscope.pipelines import pipeline +from modelscope.swift import Swift +from modelscope.swift.lora import LoRAConfig +from modelscope.utils.constant import Tasks + +# 使用源模型 model_id 初始化 pipeline +model_id = 'baichuan-inc/baichuan-7B' +pipe = pipeline( + task=Tasks.text_generation, model=model_id, model_revision='v1.0.2') +# lora 配置,replace_modules,rank,alpha 需与训练参数相同 +lora_config = LoRAConfig(replace_modules=['pack'], rank=32, lora_alpha=32) +# 转 bf16,需与训练精度相同 +model = pipe.model.bfloat16() +# model 转 lora +Swift.prepare_model(model, lora_config) +# 加载 lora 参数,默认 link 到于 output/model 路径 +work_dir = './tmp' +state_dict = torch.load(osp.join(work_dir, 'output/pytorch_model.bin')) +model.load_state_dict(state_dict) +# 使用 lora model 替换 pipeline 中的 model +pipe.model = model +# 使用 pipeline 推理 +result_zh = pipe('今天天气是真的') +print(result_zh) diff --git a/examples/pytorch/llm_agent/_common.py b/examples/pytorch/llm_agent/_common.py index 12e57eab..dd0cd7d4 100644 --- a/examples/pytorch/llm_agent/_common.py +++ b/examples/pytorch/llm_agent/_common.py @@ -1,46 +1,52 @@ +import ast +import datetime as dt +import math import os import random import re import sys -import math -import json -import ast -import datetime as dt -from typing import List, Tuple, Dict, Callable, Optional, Union, Any from functools import partial -# -from tqdm import tqdm -import numpy as np -from numpy import ndarray +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import json import matplotlib.pyplot as plt -from matplotlib.axes import Axes -from matplotlib.figure import Figure +import numpy as np # import torch import torch.nn as nn import torch.optim as optim -from torch import Tensor, device as Device, dtype as Dtype +from matplotlib.axes import Axes +from matplotlib.figure import Figure +from numpy import ndarray +from tensorboard.backend.event_processing.event_accumulator import \ + EventAccumulator +from torch import Tensor +from torch import device as Device +from torch import dtype as Dtype from torch.nn import Module -from torch.optim import Optimizer -from torch.utils.data import Dataset from torch.nn.parameter import Parameter +from torch.nn.utils.rnn import pad_sequence +from torch.optim import Optimizer from torch.optim import lr_scheduler as lrs from torch.optim.lr_scheduler import _LRScheduler as LRScheduler -from torch.nn.utils.rnn import pad_sequence +from torch.utils.data import Dataset # from torchmetrics import Accuracy, MeanMetric -from tensorboard.backend.event_processing.event_accumulator import EventAccumulator # -from modelscope import get_logger -from modelscope import MsDataset, snapshot_download, Model, read_config -from modelscope.utils.config import Config, ConfigDict -from modelscope.msdatasets.dataset_cls.custom_datasets import TorchCustomDataset -from modelscope.trainers import EpochBasedTrainer -from modelscope.swift import Swift, LoRAConfig +from tqdm import 
tqdm + +# +from modelscope import (Model, MsDataset, get_logger, read_config, + snapshot_download) from modelscope.metrics.base import Metric from modelscope.metrics.builder import METRICS -from modelscope.utils.registry import default_group from modelscope.models.nlp.chatglm2 import ChatGLM2Tokenizer +from modelscope.msdatasets.dataset_cls.custom_datasets import \ + TorchCustomDataset +from modelscope.swift import LoRAConfig, Swift +from modelscope.trainers import EpochBasedTrainer +from modelscope.utils.config import Config, ConfigDict +from modelscope.utils.registry import default_group # SYSTEM_TEXT = """{system}""" @@ -51,7 +57,7 @@ ASSISTANT_PROMPT = """\n\n### 助手 MAX_LENGTH = 2048 TEST_MAX_LENGTH = MAX_LENGTH -COLOR, COLOR_S = "#FFE2D9", "#FF7043" +COLOR, COLOR_S = '#FFE2D9', '#FF7043' logger = get_logger() # @@ -68,7 +74,7 @@ def _get_version(work_dir: str) -> int: fnames = [] v_list = [-1] for fname in fnames: - m = re.match(r"v(\d+)", fname) + m = re.match(r'v(\d+)', fname) if m is None: continue v = m.group(1) @@ -80,10 +86,10 @@ def get_work_dir(work_dir: str) -> str: """add version""" work_dir = os.path.abspath(work_dir) version = _get_version(work_dir) - time = dt.datetime.now().strftime("%Y%m%d-%H%M%S") + time = dt.datetime.now().strftime('%Y%m%d-%H%M%S') # - work_dir = os.path.join(work_dir, f"v{version}-{time}") - logger.info(f"work_dir: {work_dir}") + work_dir = os.path.join(work_dir, f'v{version}-{time}') + logger.info(f'work_dir: {work_dir}') return work_dir @@ -92,19 +98,21 @@ def select_device(device_ids: List[int]) -> Device: Return: master device """ if torch.cuda.is_initialized(): - logger.warning("CUDA has been initialized! Device selection fails!") - return torch.device("cuda:0") + logger.warning('CUDA has been initialized! Device selection fails!') + return torch.device('cuda:0') # - log_s = "Using device: " + log_s = 'Using device: ' if len(device_ids) == 0: # cpu - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - device: str = "cpu" + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + device: str = 'cpu' log_s += device else: - os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(d) for d in device_ids]) - assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device_ids) + os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( + [str(d) for d in device_ids]) + assert torch.cuda.is_available( + ) and torch.cuda.device_count() >= len(device_ids) log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 
"cuda:1,7,8" - device = "cuda:0" + device = 'cuda:0' logger.info(log_s) return torch.device(device) @@ -118,15 +126,16 @@ def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) - logger.info(f"Global seed set to {seed}") + logger.info(f'Global seed set to {seed}') if gpu_dtm: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False - logger.info(f"Setting deterministic: {True}, benchmark: {False}") + logger.info(f'Setting deterministic: {True}, benchmark: {False}') return seed -def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, drop_last: bool) -> int: +def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, + drop_last: bool) -> int: """Calculate T_max in CosineAnnealingLR""" if drop_last: T_max = dataset_len // batch_size @@ -136,25 +145,32 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, drop_last: boo return T_max -def tokenize_function(system: str, user: str, assistant: Optional[str], tokenizer) -> Dict[str, Any]: +def tokenize_function(system: str, user: str, assistant: Optional[str], + tokenizer) -> Dict[str, Any]: """Only applicable to baichuan and chatglm2. Other models need to be tested""" system_text = SYSTEM_TEXT.format(system=system) user_text = USER_TEXT.format(user=user) - system_text_ids: List[int] = tokenizer(system_text, return_attention_mask=False, - add_special_tokens=True)["input_ids"] - user_text_ids: List[int] = tokenizer(user_text, return_attention_mask=False, - add_special_tokens=False)["input_ids"] - assistant_p_input_ids: List[int] = tokenizer(ASSISTANT_PROMPT, return_attention_mask=False, - add_special_tokens=False)["input_ids"] + system_text_ids: List[int] = tokenizer( + system_text, return_attention_mask=False, + add_special_tokens=True)['input_ids'] + user_text_ids: List[int] = tokenizer( + user_text, return_attention_mask=False, + add_special_tokens=False)['input_ids'] + assistant_p_input_ids: List[int] = tokenizer( + ASSISTANT_PROMPT, + return_attention_mask=False, + add_special_tokens=False)['input_ids'] # tokenizer.bos_token_id: Avoid `assistant` being empty assistant_input_ids: List[int] = [tokenizer.bos_token_id] if assistant is not None: - assistant_input_ids += tokenizer(assistant, return_attention_mask=False, add_special_tokens=False)["input_ids"] + assistant_input_ids += tokenizer( + assistant, return_attention_mask=False, + add_special_tokens=False)['input_ids'] assistant_input_ids += [tokenizer.eos_token_id] # input_ids = system_text_ids + user_text_ids + assistant_p_input_ids + assistant_input_ids - if assistant is not None: # train, val + if assistant is not None: # train, val if len(input_ids) > MAX_LENGTH: return {} len_mask = len(input_ids) - len(assistant_input_ids) @@ -164,12 +180,13 @@ def tokenize_function(system: str, user: str, assistant: Optional[str], tokenize labels = None # - return {"input_ids": input_ids, "labels": labels} + return {'input_ids': input_ids, 'labels': labels} class MyDataset(TorchCustomDataset): - def __init__(self, system: List[str], user: List[str], assistant: List[str], - tokenize_function) -> None: + + def __init__(self, system: List[str], user: List[str], + assistant: List[str], tokenize_function) -> None: self._data = [] for i in tqdm(range(len(system))): _d = tokenize_function(system[i], user[i], assistant[i]) @@ -184,36 +201,48 @@ class MyDataset(TorchCustomDataset): return len(self._data) -def stat_dataset(dataset: "MyDataset") -> None: 
+def stat_dataset(dataset: 'MyDataset') -> None: """Statistical analysis was performed on the data set""" _token_len = [] for d in dataset: - _token_len.append(len(d["input_ids"])) + _token_len.append(len(d['input_ids'])) _token_len = np.array(_token_len) mean = _token_len.mean().item() std = _token_len.std().item() min_ = _token_len.min().item() max_ = _token_len.max().item() logger.info( - f"Dataset Token Length: {mean:.6f}±{std:.6f}, min={min_:.6f}, max={max_:.6f}, size={_token_len.shape[0]}") + f'Dataset Token Length: {mean:.6f}±{std:.6f}, min={min_:.6f}, max={max_:.6f}, size={_token_len.shape[0]}' + ) def print_examples(examples: Dict[str, Any], tokenizer) -> None: - input_ids, labels = examples["input_ids"], examples["labels"] - print(f"[INPUT_IDS] {tokenizer.decode(input_ids)}") + input_ids, labels = examples['input_ids'], examples['labels'] + print(f'[INPUT_IDS] {tokenizer.decode(input_ids)}') print() - print(f"[LABLES] {tokenizer.decode([l if l != -100 else 0 for l in labels])}") + print( + f'[LABLES] {tokenizer.decode([l if l != -100 else 0 for l in labels])}' + ) def data_collate_fn(batch: List[Dict[str, Any]], tokenizer) -> Dict[str, Any]: - input_ids = [torch.tensor(b["input_ids"]) for b in batch] - labels = [torch.tensor(b["labels"]) for b in batch] - attention_mask = [torch.ones(len(input_ids[i]), dtype=torch.int64) for i in range(len(input_ids))] + input_ids = [torch.tensor(b['input_ids']) for b in batch] + labels = [torch.tensor(b['labels']) for b in batch] + attention_mask = [ + torch.ones(len(input_ids[i]), dtype=torch.int64) + for i in range(len(input_ids)) + ] # - input_ids = pad_sequence(input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) - attention_mask = pad_sequence(attention_mask, batch_first=True, padding_value=0) + input_ids = pad_sequence( + input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) + attention_mask = pad_sequence( + attention_mask, batch_first=True, padding_value=0) labels = pad_sequence(labels, batch_first=True, padding_value=-100) - return {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels} + return { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'labels': labels + } def print_model_info(model: Module, name: Optional[str] = None) -> None: @@ -228,34 +257,35 @@ def print_model_info(model: Module, name: Optional[str] = None) -> None: n_grads /= 1e6 n_buffers /= 1e6 s = [ - f"{name}: ", - f"{n_params:.4f}M Params ({n_grads:.4f}M Trainable), ", - f"{n_buffers:.4f}M Buffers", + f'{name}: ', + f'{n_params:.4f}M Params ({n_grads:.4f}M Trainable), ', + f'{n_buffers:.4f}M Buffers', ] - s += "." - logger.info("".join(s)) + s += '.' 
+ logger.info(''.join(s)) def show_freeze_layers(model: Module, max_lines: int = 20) -> None: named_p = list(model.named_parameters()) for i, (n, p) in enumerate(named_p): if i >= max_lines: - logger.info("...") + logger.info('...') break - logger.info(f"{n}: requires_grad={p.requires_grad}") + logger.info(f'{n}: requires_grad={p.requires_grad}') @METRICS.register_module(group_key=default_group, module_name='my_metric') class MyMetric(Metric): + def __init__(self, vocab_size: int): - self.acc = Accuracy("multiclass", num_classes=vocab_size) + self.acc = Accuracy('multiclass', num_classes=vocab_size) self.loss = MeanMetric() def add(self, outputs: Dict[str, Any], inputs: Dict[str, Any]) -> None: loss: Tensor = outputs.loss self.loss.update(loss) # - labels: Tensor = inputs["labels"] + labels: Tensor = inputs['labels'] labels = labels[:, 1:] labels_mask = labels != -100 logits: Tensor = outputs.logits[:, :-1] @@ -266,18 +296,19 @@ class MyMetric(Metric): def evaluate(self): return { - "acc": self.acc.compute().item(), - "loss": self.loss.compute().item() + 'acc': self.acc.compute().item(), + 'loss': self.loss.compute().item() } - def merge(self, other: "MyMetric") -> None: + def merge(self, other: 'MyMetric') -> None: """This script does not support ddp""" raise NotImplementedError -def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, load_model: bool = True): +def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, + load_model: bool = True): if model_dir is None: - model_id = "baichuan-inc/baichuan-7B" + model_id = 'baichuan-inc/baichuan-7B' model_dir = get_model_dir(model_id, None) # sys.path.insert(0, model_dir) @@ -286,51 +317,59 @@ def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, load_model: bo from modeling_baichuan import BaiChuanForCausalLM model_config = BaiChuanConfig.from_pretrained(model_dir) model_config.torch_dtype = torch.float16 - logger.info(f"model_config: {model_config}") + logger.info(f'model_config: {model_config}') tokenizer = BaiChuanTokenizer.from_pretrained(model_dir) model = None if load_model: - model = BaiChuanForCausalLM.from_pretrained(model_dir, config=model_config, - device_map="auto", torch_dtype=torch.float16) + model = BaiChuanForCausalLM.from_pretrained( + model_dir, + config=model_config, + device_map='auto', + torch_dtype=torch.float16) # return model, tokenizer -def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, load_model: bool = True): +def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, + load_model: bool = True): if model_dir is None: - model_id = "ZhipuAI/chatglm2-6b" - model_revision = "v1.0.3" + model_id = 'ZhipuAI/chatglm2-6b' + model_revision = 'v1.0.3' model_dir = snapshot_download(model_id, model_revision) # config = read_config(model_dir) - config["model"] = ConfigDict({ - "type": "chatglm2-6b" - }) + config['model'] = ConfigDict({'type': 'chatglm2-6b'}) tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) model = None if load_model: model = Model.from_pretrained( - model_dir, cfg_dict=config, device_map='auto', torch_dtype=torch.float16) + model_dir, + cfg_dict=config, + device_map='auto', + torch_dtype=torch.float16) return model, tokenizer -def make_dataset(split: str, - tokenize_function: Callable[[str, str, Optional[str]], Dict[str, Any]]) -> MyDataset: +def make_dataset( + split: str, tokenize_function: Callable[[str, str, Optional[str]], + Dict[str, Any]] +) -> MyDataset: """ split: Literal["train", "validation"] """ - dataset = 
MsDataset.load('modelscope/ms_hackathon_23_agent_train_dev', split=split) + dataset = MsDataset.load( + 'modelscope/ms_hackathon_23_agent_train_dev', split=split) system = [] user = [] assistant = [] for d in dataset: - content = ast.literal_eval(d["conversations"]) - s = content[0]["value"] + content = ast.literal_eval(d['conversations']) + s = content[0]['value'] assert len(content) % 2 == 1 for i in range(len(content) // 2): system.append(s) - user.append(content[2 * i + 1]["value"]) - assistant.append(content[2 * i + 2]["value"]) + user.append(content[2 * i + 1]['value']) + assistant.append(content[2 * i + 2]['value']) return MyDataset(system, user, assistant, tokenize_function) @@ -339,21 +378,22 @@ Item = Dict[str, float] def read_tensorboard_file(fpath: str) -> Dict[str, List[Item]]: if not os.path.isfile(fpath): - raise FileNotFoundError(f"fpath: {fpath}") + raise FileNotFoundError(f'fpath: {fpath}') ea = EventAccumulator(fpath) ea.Reload() res = {} - tags = ea.Tags()["scalars"] + tags = ea.Tags()['scalars'] for tag in tags: values = ea.Scalars(tag) r = [] for v in values: - r.append({"step": v.step, "value": v.value}) + r.append({'step': v.step, 'value': v.value}) res[tag] = r return res -def tensorboard_smoothing(values: List[float], smooth: float = 0.9) -> List[float]: +def tensorboard_smoothing(values: List[float], + smooth: float = 0.9) -> List[float]: norm_factor = 1 x = 0 res = [] @@ -366,12 +406,12 @@ def tensorboard_smoothing(values: List[float], smooth: float = 0.9) -> List[floa return res -def plot_image(data: Dict[str, List[Item]], key_name: str, smooth: float) -> Figure: +def plot_image(data: Dict[str, List[Item]], key_name: str, + smooth: float) -> Figure: _data = data[key_name] - steps = [d["step"] for d in _data] - values = [d["value"] for d in _data] - fig, ax = plt.subplots(1, 1, squeeze=True, - figsize=(8, 5), dpi=100) + steps = [d['step'] for d in _data] + values = [d['value'] for d in _data] + fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(8, 5), dpi=100) ax.set_title(key_name) if smooth != 0: ax.plot(steps, values, color=COLOR) From 95907aed31dddd4ae26dcb01a3161a65fbe0db1d Mon Sep 17 00:00:00 2001 From: mushenL <125954878+mushenL@users.noreply.github.com> Date: Tue, 4 Jul 2023 18:42:09 +0800 Subject: [PATCH 08/87] Modify the parameter passing of the text_generation_pipeline class (#355) --- modelscope/pipelines/nlp/text_generation_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index cb9c89db..9e25ea95 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -66,7 +66,8 @@ class TextGenerationPipeline(Pipeline, PipelineStreamingOutputMixin): device=device, auto_collate=auto_collate, compile=kwargs.pop('compile', False), - compile_options=kwargs.pop('compile_options', {})) + compile_options=kwargs.pop('compile_options', {}), + **kwargs) assert isinstance(self.model, Model), \ f'please check whether model config exists in {ModelFile.CONFIGURATION}' From 645925cadd88e169aabe44a214047f98407919ca Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Tue, 4 Jul 2023 20:52:15 +0800 Subject: [PATCH 09/87] Fix/chatglm pipeline (#356) --- modelscope/pipelines/nlp/text_generation_pipeline.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git 
a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index 9e25ea95..e9987f8f 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -211,11 +211,15 @@ class ChatGLM6bTextGenerationPipeline(Pipeline): super().__init__(model=model, **kwargs) + def _sanitize_parameters(self, **pipeline_parameters): + return {}, pipeline_parameters, {} + def preprocess(self, inputs, **preprocess_params) -> Dict[str, Any]: return inputs # define the forward pass def forward(self, inputs: Dict, **forward_params) -> Dict[str, Any]: + inputs.update(forward_params) return self.model.chat(inputs) # format the outputs from pipeline @@ -250,12 +254,16 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): super().__init__(model=model, **kwargs) + def _sanitize_parameters(self, **pipeline_parameters): + return {}, pipeline_parameters, {} + def preprocess(self, inputs, **preprocess_params) -> Dict[str, Any]: return inputs # define the forward pass def forward(self, inputs: Dict, **forward_params) -> Dict[str, Any]: - return self.model.chat(self.tokenizer, inputs['text']) + return self.model.chat(self.tokenizer, inputs['text'], + **forward_params) # format the outputs from pipeline def postprocess(self, input, **kwargs) -> Dict[str, Any]: From d49953b9434eb457ad5f1c7ec1f68c146a717c40 Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Tue, 4 Jul 2023 22:01:21 +0800 Subject: [PATCH 10/87] fix bugs of loading local sd dataset (#357) --- .../finetune_stable_diffusion_dreambooth.py | 23 ++++++++++++------- .../lora/finetune_stable_diffusion_lora.py | 23 ++++++++++++------- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py b/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py index f852d752..760396d0 100644 --- a/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py +++ b/examples/pytorch/stable_diffusion/dreambooth/finetune_stable_diffusion_dreambooth.py @@ -1,3 +1,4 @@ +import os from dataclasses import dataclass, field import cv2 @@ -63,14 +64,20 @@ training_args = StableDiffusionDreamboothArguments( task='text-to-image-synthesis').parse_cli() config, args = training_args.to_config() -train_dataset = MsDataset.load( - args.train_dataset_name, - split='train', - download_mode=DownloadMode.FORCE_REDOWNLOAD) -validation_dataset = MsDataset.load( - args.train_dataset_name, - split='validation', - download_mode=DownloadMode.FORCE_REDOWNLOAD) +if os.path.exists(args.train_dataset_name): + # Load local dataset + train_dataset = MsDataset.load(args.train_dataset_name) + validation_dataset = MsDataset.load(args.train_dataset_name) +else: + # Load online dataset + train_dataset = MsDataset.load( + args.train_dataset_name, + split='train', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + validation_dataset = MsDataset.load( + args.train_dataset_name, + split='validation', + download_mode=DownloadMode.FORCE_REDOWNLOAD) def cfg_modify_fn(cfg): diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py index 2561a6a7..8ad3a28b 100644 --- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py +++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py @@ -1,3 +1,4 @@ +import 
os from dataclasses import dataclass, field import cv2 @@ -23,14 +24,20 @@ training_args = StableDiffusionLoraArguments( task='text-to-image-synthesis').parse_cli() config, args = training_args.to_config() -train_dataset = MsDataset.load( - args.train_dataset_name, - split='train', - download_mode=DownloadMode.FORCE_REDOWNLOAD) -validation_dataset = MsDataset.load( - args.train_dataset_name, - split='validation', - download_mode=DownloadMode.FORCE_REDOWNLOAD) +if os.path.exists(args.train_dataset_name): + # Load local dataset + train_dataset = MsDataset.load(args.train_dataset_name) + validation_dataset = MsDataset.load(args.train_dataset_name) +else: + # Load online dataset + train_dataset = MsDataset.load( + args.train_dataset_name, + split='train', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + validation_dataset = MsDataset.load( + args.train_dataset_name, + split='validation', + download_mode=DownloadMode.FORCE_REDOWNLOAD) def cfg_modify_fn(cfg): From 2a79a6cee799ccd57be72c4a08eb66d4f6fbc148 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Wed, 5 Jul 2023 00:02:20 +0800 Subject: [PATCH 11/87] fix history problem (#358) --- .../models/nlp/chatglm2/text_generation.py | 63 ++++++++++++++----- .../pipelines/nlp/text_generation_pipeline.py | 4 +- 2 files changed, 51 insertions(+), 16 deletions(-) diff --git a/modelscope/models/nlp/chatglm2/text_generation.py b/modelscope/models/nlp/chatglm2/text_generation.py index 3c510c7a..7772f34e 100644 --- a/modelscope/models/nlp/chatglm2/text_generation.py +++ b/modelscope/models/nlp/chatglm2/text_generation.py @@ -1076,17 +1076,17 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): return inputs @torch.no_grad() - def chat(self, - tokenizer, - query: str, - history: List[Tuple[str, str]] = None, - max_length: int = 2048, - num_beams=1, - do_sample=True, - top_p=0.8, - temperature=0.8, - logits_processor=None, - **kwargs): + def _chat(self, + tokenizer, + query: str, + history: List[Tuple[str, str]] = None, + max_length: int = 2048, + num_beams=1, + do_sample=True, + top_p=0.8, + temperature=0.8, + logits_processor=None, + **kwargs): if history is None: history = [] if logits_processor is None: @@ -1107,7 +1107,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): response = tokenizer.decode(outputs) response = self.process_response(response) history = history + [(query, response)] - return {OutputKeys.RESPONSE: response, OutputKeys.HISTORY: history} + return response, history @torch.no_grad() def stream_chat(self, @@ -1295,6 +1295,41 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): self.transformer.encoder, bits, empty_init=empty_init, - device=device, - **kwargs) + device=device) return self + + def chat(self, input: Dict, tokenizer) -> Dict: + text = input['text'] + history = input['history'] + # args + if 'max_length' in input: + max_length = input['max_length'] + else: + max_length = 2048 + + if 'temperature' in input: + temperature = input['temperature'] + else: + temperature = 0.95 + + if 'num_beams' in input: + num_beams = input['num_beams'] + else: + num_beams = 1 + + if 'do_sample' in input: + do_sample = input['do_sample'] + else: + do_sample = True + + if type(history) == torch.Tensor: + history = history.tolist() + response, history = self._chat( + tokenizer, + text, + history, + max_length=max_length, + temperature=temperature, + num_beams=num_beams, + do_sample=do_sample) + return {OutputKeys.RESPONSE: response, OutputKeys.HISTORY: 
history} diff --git a/modelscope/pipelines/nlp/text_generation_pipeline.py b/modelscope/pipelines/nlp/text_generation_pipeline.py index e9987f8f..b62d07bd 100644 --- a/modelscope/pipelines/nlp/text_generation_pipeline.py +++ b/modelscope/pipelines/nlp/text_generation_pipeline.py @@ -262,8 +262,8 @@ class ChatGLM6bV2TextGenerationPipeline(Pipeline): # define the forward pass def forward(self, inputs: Dict, **forward_params) -> Dict[str, Any]: - return self.model.chat(self.tokenizer, inputs['text'], - **forward_params) + inputs.update(forward_params) + return self.model.chat(inputs, self.tokenizer) # format the outputs from pipeline def postprocess(self, input, **kwargs) -> Dict[str, Any]: From db0f70bc1c9e1c94448d5a91b757c44c5bb685f5 Mon Sep 17 00:00:00 2001 From: "LingFeng.Chen.Cn" Date: Fri, 7 Jul 2023 22:14:13 +0800 Subject: [PATCH 12/87] text_in is required (#365) --- modelscope/pipelines/audio/timestamp_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelscope/pipelines/audio/timestamp_pipeline.py b/modelscope/pipelines/audio/timestamp_pipeline.py index 17cf9545..98e9eb05 100644 --- a/modelscope/pipelines/audio/timestamp_pipeline.py +++ b/modelscope/pipelines/audio/timestamp_pipeline.py @@ -93,7 +93,7 @@ class TimestampPipeline(Pipeline): def __call__(self, audio_in: Union[str, bytes], - text_in: str = None, + text_in: str, audio_fs: int = None, recog_type: str = None, audio_format: str = None, From 56eb0ab0c2023f33aecd64584e4160566492a33d Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 11:28:35 +0800 Subject: [PATCH 13/87] Update asr_dataset.py for download_mode when you need to re-download data --- .../custom_datasets/audio/asr_dataset.py | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index c0696615..326508e6 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -3,6 +3,8 @@ import os from modelscope.msdatasets.ms_dataset import MsDataset +from modelscope.utils.constant import DownloadMode +from typing import Optional class ASRDataset(MsDataset): @@ -33,16 +35,21 @@ class ASRDataset(MsDataset): dataset_name, namespace='speech_asr', train_set='train', - dev_set='validation'): - if os.path.exists(dataset_name): - data_dir = dataset_name - ds_dict = {} - ds_dict['train'] = cls.load_core(data_dir, train_set) - ds_dict['validation'] = cls.load_core(data_dir, dev_set) - ds_dict['raw_data_dir'] = data_dir + dev_set='validation', + download_mode: Optional[DownloadMode] = None): + if download_mode is not None: + ds_dict = MsDataset.load( + dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) return ds_dict else: - from modelscope.msdatasets import MsDataset - ds_dict = MsDataset.load( - dataset_name=dataset_name, namespace=namespace) - return ds_dict + if os.path.exists(dataset_name): + data_dir = dataset_name + ds_dict = {} + ds_dict['train'] = cls.load_core(data_dir, train_set) + ds_dict['validation'] = cls.load_core(data_dir, dev_set) + ds_dict['raw_data_dir'] = data_dir + return ds_dict + else: + ds_dict = MsDataset.load( + dataset_name=dataset_name, namespace=namespace) + return ds_dict From 20c15d3aaafd68848bc87c65d4ce002abf2332dd Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 11:33:18 +0800 Subject: [PATCH 14/87] Update 
finetune_speech_recognition.py using the newest ASRDataset, and add download_mode for re-download the dataset(dataset is broken and so on) --- .../auto_speech_recognition/finetune_speech_recognition.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index 4d62f66f..ff06d4de 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -1,15 +1,16 @@ import os -from modelscope.metainfo import Trainers -from modelscope.msdatasets.audio.asr_dataset import ASRDataset +from modelscope.msdatasets.dataset_cls.custom_datasets import ASRDataset +from modelscope.utils.constant import DownloadMode from modelscope.trainers import build_trainer +from modelscope.metainfo import Trainers def modelscope_finetune(params): if not os.path.exists(params.output_dir): os.makedirs(params.output_dir, exist_ok=True) # dataset split ["train", "validation"] - ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr') + ds_dict = MsDataset.load(params.data_path, namespace='speech_asr', download_mode=None) kwargs = dict( model=params.model, data_dir=ds_dict, From 2073f4fd55d4f0480c385390cbc9eeb6f4fc224f Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 11:54:05 +0800 Subject: [PATCH 15/87] Update finetune_speech_recognition.py MsDataset replace by ASRDataset. --- .../auto_speech_recognition/finetune_speech_recognition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index ff06d4de..5b930b3d 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -10,7 +10,7 @@ def modelscope_finetune(params): if not os.path.exists(params.output_dir): os.makedirs(params.output_dir, exist_ok=True) # dataset split ["train", "validation"] - ds_dict = MsDataset.load(params.data_path, namespace='speech_asr', download_mode=None) + ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr', download_mode=None) kwargs = dict( model=params.model, data_dir=ds_dict, From 61faedfb15709aecab19ad3a3457b107e0a752f2 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 15:18:59 +0800 Subject: [PATCH 16/87] Update finetune_speech_recognition.py add params.download_mode set from params, config --- .../auto_speech_recognition/finetune_speech_recognition.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index 5b930b3d..4775fc8b 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -10,7 +10,7 @@ def modelscope_finetune(params): if not os.path.exists(params.output_dir): os.makedirs(params.output_dir, exist_ok=True) # dataset split ["train", "validation"] - ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr', download_mode=None) + ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr', download_mode=params.download_mode) kwargs = dict( model=params.model, data_dir=ds_dict, @@ -37,5 +37,6 @@ 
if __name__ == '__main__': # 如果dataset_type="large",batch_bins单位为毫秒, params.max_epoch = 50 # 最大训练轮数 params.lr = 0.00005 # 设置学习率 + params.download_mode = DownloadMode.FORCE_REDOWNLOAD # 重新下载数据,否则设置为None modelscope_finetune(params) From fd6e352922f70de5c83783074e5ac8294c729c3f Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Tue, 11 Jul 2023 15:36:35 +0800 Subject: [PATCH 17/87] Add pipeline num_inference_steps and guidance_scale parameter for stable diffusion pipeline (#367) * add pipeline num_inference_steps and guidance_scale parameter * precommit --------- Co-authored-by: XDUWQ --- .../diffusers_wrapped/diffusers_pipeline.py | 2 +- .../stable_diffusion_pipeline.py | 69 ++++++++++++++++++- 2 files changed, 69 insertions(+), 2 deletions(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py index ce0455b6..3eed0947 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/diffusers_pipeline.py @@ -15,7 +15,7 @@ class DiffusersPipeline(Pipeline): """ use `model` to create a diffusers pipeline Args: - model: model id on modelscope hub. + model: model id on modelscope hub or local dir. device: str = 'gpu' """ diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py index f09d459d..1b75656e 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py @@ -48,6 +48,60 @@ class StableDiffusionPipeline(DiffusersPipeline): def forward(self, inputs: Dict[str, Any], **forward_params) -> Dict[str, Any]: + """ + Inputs Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*, defaults to 7.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. 
+ eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a + plain tuple. + callback (`Callable`, *optional*): + A function that will be called every `callback_steps` steps during inference. The function will be + called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. + callback_steps (`int`, *optional*, defaults to 1): + The frequency at which the `callback` function will be called. If not specified, the callback will be + called at every step. 
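+
+        Example (illustrative sketch; the model id and prompt below are placeholders):
+            >>> from modelscope.pipelines import pipeline
+            >>> from modelscope.utils.constant import Tasks
+            >>> pipe = pipeline(
+            ...     task=Tasks.text_to_image_synthesis,
+            ...     model='AI-ModelScope/stable-diffusion-v1-5')
+            >>> output = pipe({
+            ...     'text': 'a photo of an astronaut riding a horse on mars',
+            ...     'num_inference_steps': 30,
+            ...     'guidance_scale': 7.5,
+            ...     'negative_prompt': 'lowres, blurry'
+            ... })
+            >>> # generated images are returned under the 'output_imgs' key
+            >>> image = output['output_imgs'][0]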
+ """ if not isinstance(inputs, dict): raise ValueError( f'Expected the input to be a dictionary, but got {type(input)}' @@ -57,7 +111,20 @@ class StableDiffusionPipeline(DiffusersPipeline): raise ValueError('input should contain "text", but not found') images = self.pipeline( - inputs['text'], num_inference_steps=30, guidance_scale=7.5) + prompt=inputs.get('text'), + height=inputs.get('height'), + width=inputs.get('width'), + num_inference_steps=inputs.get('num_inference_steps', 50), + guidance_scale=inputs.get('guidance_scale', 7.5), + negative_prompt=inputs.get('negative_prompt'), + num_images_per_prompt=inputs.get('num_images_per_prompt', 1), + eta=inputs.get('eta', 0.0), + generator=inputs.get('generator'), + latents=inputs.get('latents'), + output_type=inputs.get('output_type', 'pil'), + return_dict=inputs.get('return_dict', True), + callback=inputs.get('callback'), + callback_steps=inputs.get('callback_steps', 1)) return images From 0c43a0e8ea97ec6cf936d4a3f8330aea9db7fa6d Mon Sep 17 00:00:00 2001 From: "xingjun.wang" Date: Tue, 11 Jul 2023 16:25:11 +0800 Subject: [PATCH 18/87] fix load meta-csv cathe paths --- .../msdatasets/download/dataset_builder.py | 17 +++++++++++++---- modelscope/version.py | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/modelscope/msdatasets/download/dataset_builder.py b/modelscope/msdatasets/download/dataset_builder.py index 140503f0..ad5ebbcf 100644 --- a/modelscope/msdatasets/download/dataset_builder.py +++ b/modelscope/msdatasets/download/dataset_builder.py @@ -223,11 +223,20 @@ class CsvDatasetBuilder(csv.Csv): if field_name.endswith(':FILE'): transform_fields.append(field_name) - base_extracted_dir = self.split_path_dict.get(split_name, '') + base_extracted_dir: Union[str, list] = self.split_path_dict.get(split_name, '') for field_name in transform_fields: - if base_extracted_dir: - df[field_name] = df[field_name].apply( - lambda x: os.path.join(base_extracted_dir, x)) + if isinstance(base_extracted_dir, list) and len(base_extracted_dir) > 0: + if df.shape[0] != len(base_extracted_dir): + logger.error( + f"Number of lines in meta-csv file for split '{split_name}' ({df.shape[0]}) " + f"does not match number of data-files({len(base_extracted_dir)})!" + ) + else: + df[field_name] = base_extracted_dir + elif isinstance(base_extracted_dir, str) and base_extracted_dir: + df[field_name] = df[field_name].apply(lambda x: os.path.join(base_extracted_dir, x)) + else: + logger.warning(f'Nothing to do for field {field_name}') pa_data = pa.Table.from_pandas(df) return Dataset(arrow_table=pa_data) diff --git a/modelscope/version.py b/modelscope/version.py index e4028ca2..fbb09a54 100644 --- a/modelscope/version.py +++ b/modelscope/version.py @@ -1,5 +1,5 @@ # Make sure to modify __release_datetime__ to release time when making official release. 
-__version__ = '1.7.0' +__version__ = '1.7.1' # default release datetime for branches under active development is set # to be a time far-far-away-into-the-future __release_datetime__ = '2099-10-13 08:56:12' From d8dd3989de59c34674a3209fdedae9d621abab25 Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Tue, 11 Jul 2023 16:28:35 +0800 Subject: [PATCH 19/87] Xformers accelerate memory efficient attention (#362) * xformers accelerate memory efficient attention * xformers * precommit --------- Co-authored-by: XDUWQ --- .../stable_diffusion/stable_diffusion.py | 14 ++++++++++++++ modelscope/utils/error.py | 6 ++++++ modelscope/utils/import_utils.py | 1 + 3 files changed, 21 insertions(+) diff --git a/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py b/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py index 88cb4969..6b829485 100644 --- a/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py +++ b/modelscope/models/multi_modal/stable_diffusion/stable_diffusion.py @@ -6,6 +6,7 @@ from typing import Callable, List, Optional, Union import torch import torch.nn.functional as F from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel +from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from modelscope.metainfo import Models @@ -34,6 +35,7 @@ class StableDiffusion(TorchModel): """ super().__init__(model_dir, *args, **kwargs) revision = kwargs.pop('revision', None) + xformers_enable = kwargs.pop('xformers_enable', False) self.lora_tune = kwargs.pop('lora_tune', False) self.dreambooth_tune = kwargs.pop('dreambooth_tune', False) @@ -66,6 +68,18 @@ class StableDiffusion(TorchModel): self.unet.requires_grad_(False) self.unet = self.unet.to(self.device) + # xformers accelerate memory efficient attention + if xformers_enable: + import xformers + + xformers_version = version.parse(xformers.__version__) + if xformers_version == version.parse('0.0.16'): + logger.warn( + 'xFormers 0.0.16 cannot be used for training in some GPUs. ' + 'If you observe problems during training, please update xFormers to at least 0.0.17.' + ) + self.unet.enable_xformers_memory_efficient_attention() + def tokenize_caption(self, captions): """ Convert caption text to token data. diff --git a/modelscope/utils/error.py b/modelscope/utils/error.py index 841662c0..8259c7ce 100644 --- a/modelscope/utils/error.py +++ b/modelscope/utils/error.py @@ -168,3 +168,9 @@ TAMING_IMPORT_ERROR = """ {0} requires the timm library but it was not found in your environment. You can install it with pip: `pip install taming-transformers-rom1504` """ + +# docstyle-ignore +XFORMERS_IMPORT_ERROR = """ +{0} requires the timm library but it was not found in your environment. 
You can install it with pip: +`pip install xformers>=0.0.17` +""" diff --git a/modelscope/utils/import_utils.py b/modelscope/utils/import_utils.py index 3e8be2e1..f2fc7e37 100644 --- a/modelscope/utils/import_utils.py +++ b/modelscope/utils/import_utils.py @@ -306,6 +306,7 @@ REQUIREMENTS_MAAPING = OrderedDict([ ('mpi4py', (is_package_available('mpi4py'), MPI4PY_IMPORT_ERROR)), ('open_clip', (is_package_available('open_clip'), OPENCLIP_IMPORT_ERROR)), ('taming', (is_package_available('taming'), TAMING_IMPORT_ERROR)), + ('xformers', (is_package_available('xformers'), XFORMERS_IMPORT_ERROR)), ]) SYSTEM_PACKAGE = set(['os', 'sys', 'typing']) From e6db4239d2cae7e7f8305d0d614309f6bb89c1d8 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 16:38:54 +0800 Subject: [PATCH 20/87] add download_mode param for load function when you want to re-download data --- .../custom_datasets/audio/asr_dataset.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index 326508e6..3f8c2219 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -4,7 +4,6 @@ import os from modelscope.msdatasets.ms_dataset import MsDataset from modelscope.utils.constant import DownloadMode -from typing import Optional class ASRDataset(MsDataset): @@ -37,19 +36,18 @@ class ASRDataset(MsDataset): train_set='train', dev_set='validation', download_mode: Optional[DownloadMode] = None): - if download_mode is not None: - ds_dict = MsDataset.load( - dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) - return ds_dict - else: - if os.path.exists(dataset_name): + if os.path.exists(dataset_name): + if download_mode != DownloadMode.FORCE_REDOWNLOAD: data_dir = dataset_name ds_dict = {} ds_dict['train'] = cls.load_core(data_dir, train_set) ds_dict['validation'] = cls.load_core(data_dir, dev_set) ds_dict['raw_data_dir'] = data_dir - return ds_dict else: ds_dict = MsDataset.load( - dataset_name=dataset_name, namespace=namespace) - return ds_dict + dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) + else: + ds_dict = MsDataset.load( + dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) + return ds_dict + From 4d77e57769988cc35d4b1a67ace345e57672d171 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 16:43:37 +0800 Subject: [PATCH 21/87] add download_mode param to params maybe set from funasr is better. 
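
With this change the finetune example exposes the dataset download behaviour through
`params.download_mode`, which is forwarded to `ASRDataset.load`. When `dataset_name`
points to an existing local directory the data is read in place and no download happens.
A minimal sketch of the intended hub usage (the dataset id below is only an example):

    from modelscope.msdatasets.dataset_cls.custom_datasets import ASRDataset
    from modelscope.utils.constant import DownloadMode

    # Use FORCE_REDOWNLOAD to fetch a fresh copy when the cached data is broken;
    # DownloadMode.REUSE_DATASET_IF_EXISTS keeps the existing cache.
    ds_dict = ASRDataset.load(
        'speech_asr_aishell1_trainsets',
        namespace='speech_asr',
        download_mode=DownloadMode.FORCE_REDOWNLOAD)
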
--- .../auto_speech_recognition/finetune_speech_recognition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index 4775fc8b..eaea6155 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -37,6 +37,6 @@ if __name__ == '__main__': # 如果dataset_type="large",batch_bins单位为毫秒, params.max_epoch = 50 # 最大训练轮数 params.lr = 0.00005 # 设置学习率 - params.download_mode = DownloadMode.FORCE_REDOWNLOAD # 重新下载数据,否则设置为None + params.download_mode = DownloadMode.FORCE_REDOWNLOAD # 重新下载数据,否则不设置,使用默认值DownloadMode.REUSE_DATASET_IF_EXISTS modelscope_finetune(params) From d062b4e8980d7600cd27b8e5edf28815abd92af6 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 17:09:09 +0800 Subject: [PATCH 22/87] space align --- .../msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index 3f8c2219..ef246d1f 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -35,7 +35,7 @@ class ASRDataset(MsDataset): namespace='speech_asr', train_set='train', dev_set='validation', - download_mode: Optional[DownloadMode] = None): + download_mode: Optional[DownloadMode] = None): if os.path.exists(dataset_name): if download_mode != DownloadMode.FORCE_REDOWNLOAD: data_dir = dataset_name From ae3e7946923f75f0cc133313933bba1c86211a6e Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 17:11:11 +0800 Subject: [PATCH 23/87] set download_mode default value --- .../msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index ef246d1f..ed2220dd 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -35,7 +35,7 @@ class ASRDataset(MsDataset): namespace='speech_asr', train_set='train', dev_set='validation', - download_mode: Optional[DownloadMode] = None): + download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS): if os.path.exists(dataset_name): if download_mode != DownloadMode.FORCE_REDOWNLOAD: data_dir = dataset_name From d47684de5be203fe9e01124a1a05af95eb88de36 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 17:12:59 +0800 Subject: [PATCH 24/87] Optimize comments --- .../auto_speech_recognition/finetune_speech_recognition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index eaea6155..2fee3a2e 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -37,6 +37,6 @@ if __name__ == '__main__': # 如果dataset_type="large",batch_bins单位为毫秒, params.max_epoch = 50 # 最大训练轮数 params.lr = 0.00005 # 设置学习率 - params.download_mode = DownloadMode.FORCE_REDOWNLOAD # 
重新下载数据,否则不设置,使用默认值DownloadMode.REUSE_DATASET_IF_EXISTS + params.download_mode = DownloadMode.FORCE_REDOWNLOAD # 重新下载数据,否则设置为默认值DownloadMode.REUSE_DATASET_IF_EXISTS modelscope_finetune(params) From 32db34631fbcb2869705c94b40a7febc951fa2f5 Mon Sep 17 00:00:00 2001 From: fq Date: Tue, 11 Jul 2023 17:33:00 +0800 Subject: [PATCH 25/87] make it more proper ignore user-define dataset. --- .../custom_datasets/audio/asr_dataset.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index ed2220dd..73a40813 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -37,17 +37,15 @@ class ASRDataset(MsDataset): dev_set='validation', download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS): if os.path.exists(dataset_name): - if download_mode != DownloadMode.FORCE_REDOWNLOAD: - data_dir = dataset_name - ds_dict = {} - ds_dict['train'] = cls.load_core(data_dir, train_set) - ds_dict['validation'] = cls.load_core(data_dir, dev_set) - ds_dict['raw_data_dir'] = data_dir - else: - ds_dict = MsDataset.load( - dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) + data_dir = dataset_name + ds_dict = {} + ds_dict['train'] = cls.load_core(data_dir, train_set) + ds_dict['validation'] = cls.load_core(data_dir, dev_set) + ds_dict['raw_data_dir'] = data_dir + return ds_dict else: + from modelscope.msdatasets import MsDataset ds_dict = MsDataset.load( dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) - return ds_dict + return ds_dict From d20d033e07fe8a3ace848528b2f2b23bd6873c36 Mon Sep 17 00:00:00 2001 From: Jintao Date: Tue, 11 Jul 2023 17:35:11 +0800 Subject: [PATCH 26/87] add example/llm (#372) * add example/llm * fix lint test --- examples/pytorch/llm/_common.py | 449 ++++++++++++++++++ examples/pytorch/llm/baichuan_infer.py | 62 +++ examples/pytorch/llm/baichuan_sft.py | 199 ++++++++ examples/pytorch/llm/chatglm2_infer.py | 60 +++ examples/pytorch/llm/chatglm2_sft.py | 188 ++++++++ examples/pytorch/llm_agent/_common.py | 9 +- .../pytorch/llm_agent/baichuan_infer.ipynb | 42 +- examples/pytorch/llm_agent/baichuan_sft.ipynb | 124 ++--- .../pytorch/llm_agent/chatglm2_infer.ipynb | 42 +- examples/pytorch/llm_agent/chatglm2_sft.ipynb | 127 ++--- setup.cfg | 2 +- 11 files changed, 1124 insertions(+), 180 deletions(-) create mode 100644 examples/pytorch/llm/_common.py create mode 100644 examples/pytorch/llm/baichuan_infer.py create mode 100644 examples/pytorch/llm/baichuan_sft.py create mode 100644 examples/pytorch/llm/chatglm2_infer.py create mode 100644 examples/pytorch/llm/chatglm2_sft.py diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py new file mode 100644 index 00000000..79a958ec --- /dev/null +++ b/examples/pytorch/llm/_common.py @@ -0,0 +1,449 @@ +import ast +import datetime as dt +import math +import os +import random +import re +import sys +from functools import partial +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import json +import matplotlib.pyplot as plt +import numpy as np +# +import torch +import torch.nn as nn +import torch.optim as optim +from datasets import Dataset as HFDataset +from datasets import concatenate_datasets +from matplotlib.axes import Axes +from matplotlib.figure import Figure +from numpy import ndarray 
+from tensorboard.backend.event_processing.event_accumulator import \ + EventAccumulator +from torch import Tensor +from torch import device as Device +from torch import dtype as Dtype +from torch.nn import Module +from torch.nn.parameter import Parameter +from torch.nn.utils.rnn import pad_sequence +from torch.optim import Optimizer +from torch.optim import lr_scheduler as lrs +from torch.optim.lr_scheduler import _LRScheduler as LRScheduler +from torch.utils.data import Dataset +# +from torchmetrics import Accuracy, MeanMetric +# +from tqdm import tqdm + +# +from modelscope import (Model, MsDataset, get_logger, read_config, + snapshot_download) +from modelscope.metrics.base import Metric +from modelscope.metrics.builder import METRICS +from modelscope.models.nlp.chatglm2 import ChatGLM2Tokenizer +from modelscope.msdatasets.dataset_cls.custom_datasets import \ + TorchCustomDataset +from modelscope.swift import LoRAConfig, Swift +from modelscope.trainers import EpochBasedTrainer +from modelscope.utils.config import Config, ConfigDict +from modelscope.utils.registry import default_group + +# +TEST_SPLIT_P = 0.01 +SPLIT_SEED = 42 +MAX_LENGTH: Optional[int] = 2048 +COLOR, COLOR_S = '#FFE2D9', '#FF7043' + +PROMPT = """### 用户 +{instruction} +### AI助手 +""" + +logger = get_logger() +# + + +def get_model_dir(model_id: str, model_revision: Optional[str] = None) -> str: + model_dir = snapshot_download(model_id, model_revision) + return model_dir + + +def _get_version(work_dir: str) -> int: + if os.path.isdir(work_dir): + fnames = os.listdir(work_dir) + else: + fnames = [] + v_list = [-1] + for fname in fnames: + m = re.match(r'v(\d+)', fname) + if m is None: + continue + v = m.group(1) + v_list.append(int(v)) + return max(v_list) + 1 + + +def get_work_dir(work_dir: str) -> str: + """add version""" + work_dir = os.path.abspath(work_dir) + version = _get_version(work_dir) + time = dt.datetime.now().strftime('%Y%m%d-%H%M%S') + # + work_dir = os.path.join(work_dir, f'v{version}-{time}') + logger.info(f'work_dir: {work_dir}') + return work_dir + + +def select_device(device_ids: List[int]) -> Device: + """Call this function before cuda is initialized. + Return: master device + """ + if torch.cuda.is_initialized(): + logger.warning('CUDA has been initialized! Device selection fails!') + return torch.device('cuda:0') + # + log_s = 'Using device: ' + if len(device_ids) == 0: # cpu + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + device: str = 'cpu' + log_s += device + else: + os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( + [str(d) for d in device_ids]) + assert torch.cuda.is_available( + ) and torch.cuda.device_count() >= len(device_ids) + log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 
'cuda:1,7,8' + device = 'cuda:0' + logger.info(log_s) + return torch.device(device) + + +def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: + if seed is None: + seed_max = np.iinfo(np.int32).max + seed = random.randint(0, seed_max) + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + logger.info(f'Global seed set to {seed}') + if gpu_dtm: + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + logger.info(f'Setting deterministic: {True}, benchmark: {False}') + return seed + + +def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, + drop_last: bool) -> int: + """Calculate T_max in CosineAnnealingLR""" + if drop_last: + T_max = dataset_len // batch_size + else: + T_max = math.ceil(dataset_len / batch_size) + T_max *= max_epochs + return T_max + + +def tokenize_function(example: Dict[str, str], tokenizer) -> Dict[str, Any]: + """Only applicable to baichuan and chatglm2. Other models need to be tested""" + instruction = example['instruction'] + input_: str = example['input'] + if input_ is not None and input_ != '': + # instruction = instruction + '\n' + if input_.startswith('输入:'): + instruction = instruction + input_[3:] + else: + instruction = instruction + input_ + output = example['output'] + src_text = PROMPT.format(instruction=instruction, add_special_tokens=False) + src_input_ids: List[int] = tokenizer( + src_text, return_attention_mask=False, + add_special_tokens=True)['input_ids'] + # tokenizer.bos_token_id: Avoid `tgt_input_ids` being empty + tgt_input_ids = [tokenizer.bos_token_id] + if output is not None: + tgt_input_ids += tokenizer( + output, return_attention_mask=False, + add_special_tokens=False)['input_ids'] + tgt_input_ids += [tokenizer.eos_token_id] + labels = [-100] * len(src_input_ids) + tgt_input_ids + else: + labels = None + input_ids = src_input_ids + tgt_input_ids + # + if MAX_LENGTH is not None: + input_ids = input_ids[-MAX_LENGTH:] + if labels is not None: + labels = labels[-MAX_LENGTH:] + # + return {'input_ids': input_ids, 'labels': labels} + + +def stat_dataset(dataset: HFDataset) -> None: + """Statistical analysis was performed on the data set""" + _token_len = [] + for d in dataset: + _token_len.append(len(d['input_ids'])) + _token_len = np.array(_token_len) + mean = _token_len.mean().item() + std = _token_len.std().item() + min_ = _token_len.min().item() + max_ = _token_len.max().item() + logger.info( + f'Dataset Token Length: {mean:.6f}±{std:.6f}, min={min_:.6f}, max={max_:.6f}, size={_token_len.shape[0]}' + ) + + +def print_examples(examples: Dict[str, Any], tokenizer) -> None: + input_ids, labels = examples['input_ids'], examples['labels'] + print(f'[INPUT_IDS] {tokenizer.decode(input_ids)}') + print() + print( + f'[LABLES] {tokenizer.decode([lb if lb != -100 else 0 for lb in labels])}' + ) + + +def data_collate_fn(batch: List[Dict[str, Any]], tokenizer) -> Dict[str, Any]: + input_ids = [torch.tensor(b['input_ids']) for b in batch] + labels = [torch.tensor(b['labels']) for b in batch] + attention_mask = [ + torch.ones(len(input_ids[i]), dtype=torch.int64) + for i in range(len(input_ids)) + ] + # + input_ids = pad_sequence( + input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) + attention_mask = pad_sequence( + attention_mask, batch_first=True, padding_value=0) + labels = pad_sequence(labels, batch_first=True, padding_value=-100) + return { + 'input_ids': input_ids, + 'attention_mask': attention_mask, + 'labels': labels 
+ } + + +def print_model_info(model: Module, name: Optional[str] = None) -> None: + if name is None: + name = model.__class__.__name__ + # + n_params = sum(p.numel() for p in model.parameters()) + n_grads = sum(p.numel() for p in model.parameters() if p.requires_grad) + n_buffers = sum(p.numel() for p in model.buffers()) + # + n_params /= 1e6 + n_grads /= 1e6 + n_buffers /= 1e6 + s = [ + f'{name}: ', + f'{n_params:.4f}M Params ({n_grads:.4f}M Trainable), ', + f'{n_buffers:.4f}M Buffers', + ] + s += '.' + logger.info(''.join(s)) + + +def show_freeze_layers(model: Module, max_lines: int = 20) -> None: + named_p = list(model.named_parameters()) + for i, (n, p) in enumerate(named_p): + if i >= max_lines: + logger.info('...') + break + logger.info(f'{n}: requires_grad={p.requires_grad}') + + +@METRICS.register_module(group_key=default_group, module_name='my_metric') +class MyMetric(Metric): + + def __init__(self, vocab_size: int): + self.acc = Accuracy('multiclass', num_classes=vocab_size) + self.loss = MeanMetric() + + def add(self, outputs: Dict[str, Any], inputs: Dict[str, Any]) -> None: + loss: Tensor = outputs.loss + self.loss.update(loss) + # + labels: Tensor = inputs['labels'] + labels = labels[:, 1:] + labels_mask = labels != -100 + logits: Tensor = outputs.logits[:, :-1] + logits = logits[labels_mask].contiguous().view(-1, logits.shape[-1]) + pred = logits.argmax(dim=-1) + labels = labels[labels_mask].to(logits.device) + self.acc.update(pred, labels) + + def evaluate(self): + return { + 'acc': self.acc.compute().item(), + 'loss': self.loss.compute().item() + } + + def merge(self, other: 'MyMetric') -> None: + """This script does not support ddp""" + raise NotImplementedError + + +def get_baichuan7B_model_tokenizer(model_dir: Optional[str] = None, + load_model: bool = True): + if model_dir is None: + model_id = 'baichuan-inc/baichuan-7B' + model_dir = get_model_dir(model_id, None) + # + sys.path.insert(0, model_dir) + from configuration_baichuan import BaiChuanConfig + from tokenization_baichuan import BaiChuanTokenizer + from modeling_baichuan import BaiChuanForCausalLM + model_config = BaiChuanConfig.from_pretrained(model_dir) + model_config.torch_dtype = torch.float16 + logger.info(f'model_config: {model_config}') + tokenizer = BaiChuanTokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = BaiChuanForCausalLM.from_pretrained( + model_dir, + config=model_config, + device_map='auto', + torch_dtype=torch.float16) + # + return model, tokenizer + + +def get_baichuan13B_model_tokenizer(model_dir: Optional[str] = None, + load_model: bool = True): + if model_dir is None: + model_id = 'baichuan-inc/Baichuan-13B-Base' + model_dir = get_model_dir(model_id, 'v1.0.1') + # + sys.path.insert(0, model_dir) + from configuration_baichuan import BaichuanConfig + from tokenization_baichuan import BaichuanTokenizer + from modeling_baichuan import BaichuanForCausalLM + model_config = BaichuanConfig.from_pretrained(model_dir) + model_config.torch_dtype = torch.float16 + logger.info(f'model_config: {model_config}') + tokenizer = BaichuanTokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = BaichuanForCausalLM.from_pretrained( + model_dir, + config=model_config, + device_map='auto', + torch_dtype=torch.float16) + # + return model, tokenizer + + +def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, + load_model: bool = True): + if model_dir is None: + model_id = 'ZhipuAI/chatglm2-6b' + model_dir = snapshot_download(model_id, None) + # + config = 
read_config(model_dir) + config['model'] = ConfigDict({'type': 'chatglm2-6b'}) + tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = Model.from_pretrained( + model_dir, + cfg_dict=config, + device_map='auto', + torch_dtype=torch.float16) + return model, tokenizer + + +def get_alpaca_en_zh_dataset( + tokenize_function, + only_val: bool = False) -> Tuple[HFDataset, HFDataset]: + """ + split: Literal['train', 'validation', None] + """ + + dataset_en: HFDataset = MsDataset.load( + 'AI-ModelScope/alpaca-gpt4-data-en', split='train').to_hf_dataset() + dataset_zh: HFDataset = MsDataset.load( + 'AI-ModelScope/alpaca-gpt4-data-zh', split='train').to_hf_dataset() + dataset_en = dataset_en.remove_columns(['text']) + dataset: HFDataset = concatenate_datasets([dataset_zh, dataset_en]) + # + # dataset = dataset.select(range(1000)) # for debug + dataset = dataset.train_test_split(TEST_SPLIT_P, seed=SPLIT_SEED) + if only_val: + dataset = dataset['test'] + if tokenize_function is not None: + dataset = dataset.map(tokenize_function) + dataset = dataset.remove_columns(['instruction', 'input', 'output']) + # + if only_val: + return None, dataset + else: + return dataset['train'], dataset['test'] + + +Item = Dict[str, float] + + +def read_tensorboard_file(fpath: str) -> Dict[str, List[Item]]: + if not os.path.isfile(fpath): + raise FileNotFoundError(f'fpath: {fpath}') + ea = EventAccumulator(fpath) + ea.Reload() + res = {} + tags = ea.Tags()['scalars'] + for tag in tags: + values = ea.Scalars(tag) + r = [] + for v in values: + r.append({'step': v.step, 'value': v.value}) + res[tag] = r + return res + + +def tensorboard_smoothing(values: List[float], + smooth: float = 0.9) -> List[float]: + norm_factor = 1 + x = 0 + res = [] + for i in range(len(values)): + x = x * smooth + values[i] # Exponential decay + res.append(x / norm_factor) + # + norm_factor *= smooth + norm_factor += 1 + return res + + +def plot_image(tb_dir: str, + smooth_key: List[str], + smooth_val: float = 0.9, + figsize: Tuple[int, int] = (8, 5), + dpi: int = 100) -> None: + image_dir = os.path.join(os.path.dirname(tb_dir), 'images') + os.makedirs(image_dir, exist_ok=True) + # + fname = os.listdir(tb_dir)[0] + tb_path = os.path.join(tb_dir, fname) + data = read_tensorboard_file(tb_path) + # + for k in data.keys(): + _data = data[k] + steps = [d['step'] for d in _data] + values = [d['value'] for d in _data] + if len(values) == 0: + continue + _, ax = plt.subplots(1, 1, squeeze=True, figsize=figsize, dpi=dpi) + ax.set_title(k) + if len(values) == 1: + ax.scatter(steps, values, color=COLOR_S) + elif k in smooth_key: + ax.plot(steps, values, color=COLOR) + values_s = tensorboard_smoothing(values, smooth_val) + ax.plot(steps, values_s, color=COLOR_S) + else: + ax.plot(steps, values, color=COLOR_S) + fpath = os.path.join(image_dir, k.replace('/', '_')) + plt.savefig(fpath, dpi=dpi, bbox_inches='tight') diff --git a/examples/pytorch/llm/baichuan_infer.py b/examples/pytorch/llm/baichuan_infer.py new file mode 100644 index 00000000..f9a49c09 --- /dev/null +++ b/examples/pytorch/llm/baichuan_infer.py @@ -0,0 +1,62 @@ +# ### Setting up experimental environment. 
+from _common import * +from transformers import TextStreamer + +device_ids = [0, 1] +logger.info(device_ids) +select_device(device_ids) + +# ### Loading Model and Tokenizer +# Note: You need to set the value of `CKPT_FPATH` +BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] +CKPT_FAPTH = '/path/to/your/xxx.pth' +LORA_TARGET_MODULES = ['W_pack'] + +if BAICHUAN_TYPE == '7B': + model, tokenizer = get_baichuan7B_model_tokenizer() +else: + model, tokenizer = get_baichuan13B_model_tokenizer() +if tokenizer.pad_token_id is None: + tokenizer.pad_token_id = tokenizer.eos_token_id +model.bfloat16() # Consistent with training + +# ### Preparing lora +LORA_RANK = 8 +LORA_ALPHA = 32 +LORA_DROPOUT_P = 0 # Arbitrary value +lora_config = LoRAConfig( + replace_modules=LORA_TARGET_MODULES, + rank=LORA_RANK, + lora_alpha=LORA_ALPHA, + lora_dropout=LORA_DROPOUT_P, + pretrained_weights=CKPT_FAPTH) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) + +# ### Loading Dataset +_, test_dataset = get_alpaca_en_zh_dataset(None, True) + +# ### Inference +streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) +for d in test_dataset[:5]: + output = d['output'] + d['output'] = None + input_ids = tokenize_function(d, tokenizer)['input_ids'] + print(f'[TEST]{tokenizer.decode(input_ids)}', end='') + input_ids = torch.tensor(input_ids)[None].cuda() + attention_mask = torch.ones_like(input_ids) + generate_ids = model.generate( + input_ids=input_ids, + max_new_tokens=512, + attention_mask=attention_mask, + streamer=streamer, + pad_token_id=tokenizer.pad_token_id, + temperature=0.7, + top_k=50, + do_sample=True) + print() + print(f'[LABELS]{output}') + print( + '-----------------------------------------------------------------------------------' + ) + # input('next[ENTER]') diff --git a/examples/pytorch/llm/baichuan_sft.py b/examples/pytorch/llm/baichuan_sft.py new file mode 100644 index 00000000..18f71d22 --- /dev/null +++ b/examples/pytorch/llm/baichuan_sft.py @@ -0,0 +1,199 @@ +# ### Setting up experimental environment. 
+""" +pip install modelscope +pip install numpy pandas matplotlib scikit-learn +pip install transformers datasets +pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +pip install tqdm +pip install tensorboard +pip install torchmetrics +pip install sentencepiece +pip install accelerate + +pip install numpy -U # Resolve torchmetrics dependencies and update numpy +""" + +from _common import * + +device_ids = [0, 1, 2, 3] +logger.info(device_ids) +select_device(device_ids) +seed_everything(42) + +# ### Loading Model and Tokenizer +BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] +WORK_DIR = f'runs/baichuan_{BAICHUAN_TYPE}' +LORA_TARGET_MODULES = ['W_pack'] +# +if BAICHUAN_TYPE == '7B': + model_id = 'baichuan-inc/baichuan-7B' + model_dir = get_model_dir(model_id, None) + model, tokenizer = get_baichuan7B_model_tokenizer(model_dir) +else: + model_id = 'baichuan-inc/Baichuan-13B-Base' + model_dir = get_model_dir(model_id, 'v1.0.1') + model, tokenizer = get_baichuan13B_model_tokenizer(model_dir) +# +GRADIENT_CHECKPOINTING = True +if GRADIENT_CHECKPOINTING: + # baichuan13B does not implement the `get_input_embeddings` function + if BAICHUAN_TYPE == '13B': + + def get_input_embeddings(self): + return self.model.embed_tokens + + model.__class__.get_input_embeddings = get_input_embeddings.__get__( + model) + model.gradient_checkpointing_enable() + model.enable_input_require_grads() +if tokenizer.pad_token_id is None: + tokenizer.pad_token_id = tokenizer.eos_token_id +# +logger.info( + f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, ' + f'pad_token_id: {tokenizer.pad_token_id}') + +# ### Preparing lora +LORA_RANK = 8 +LORA_ALPHA = 32 +LORA_DROPOUT_P = 0.1 +lora_config = LoRAConfig( + replace_modules=LORA_TARGET_MODULES, + rank=LORA_RANK, + lora_alpha=LORA_ALPHA, + lora_dropout=LORA_DROPOUT_P) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) +# +show_freeze_layers(model) +print_model_info(model) +_p = list(model.parameters())[100] +logger.info(f'device: {_p.device}, dtype: {_p.dtype}') +model.bfloat16() + +# ### Loading Dataset +tokenize_function = partial(tokenize_function, tokenizer=tokenizer) +train_dataset, val_dataset = get_alpaca_en_zh_dataset(tokenize_function) +# Data analysis +stat_dataset(train_dataset) +stat_dataset(val_dataset) +data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) +print_examples(train_dataset[0], tokenizer) + +# ### Setting Config +cfg_file = os.path.join(model_dir, 'configuration.json') +# +BATCH_SIZE = 1 +MAX_EPOCHS = 1 +T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True) +WORK_DIR = get_work_dir(WORK_DIR) +EVAL_INTERVAL = 500 +CONFIG = Config({ + 'train': { + 'dataloader': { + 'batch_size_per_gpu': BATCH_SIZE, + 'workers_per_gpu': 1, + 'shuffle': True, + 'drop_last': True, + 'pin_memory': True + }, + 'max_epochs': + MAX_EPOCHS, + 'work_dir': + WORK_DIR, + 'optimizer': { + 'type': 'AdamW', + 'lr': 1e-4, + 'weight_decay': 0.01, + 'options': { + 'cumulative_iters': 16, + 'grad_clip': { + 'norm_type': 2, + 'max_norm': 2.0 + } + } + }, + 'lr_scheduler': { + 'type': 'CosineAnnealingLR', + 'T_max': T_max, + 'eta_min': 1e-5, + 'options': { + 'by_epoch': False, + 'warmup': { + 'type': 'LinearWarmup', + 'warmup_ratio': 0.1, + 'warmup_iters': 200 + } + } + }, + 'hooks': [ + { + 'type': 'CheckpointHook', + 'by_epoch': False, + 'interval': EVAL_INTERVAL, + 'max_checkpoint_num': 1 + }, + { + 'type': 'EvaluationHook', + 'by_epoch': False, + 'interval': 
EVAL_INTERVAL + }, + { + 'type': 'BestCkptSaverHook', + 'metric_key': 'acc', + 'save_best': True, + 'rule': 'max', + 'max_checkpoint_num': 1 + }, + { + 'type': 'TextLoggerHook', + 'by_epoch': True, # Whether EpochBasedTrainer is used + 'interval': 5 + }, + { + 'type': 'TensorboardHook', + 'by_epoch': False, + 'interval': 5 + } + ] + }, + 'evaluation': { + 'dataloader': { + 'batch_size_per_gpu': BATCH_SIZE, + 'workers_per_gpu': 1, + 'shuffle': False, + 'drop_last': False, + 'pin_memory': True + }, + 'metrics': [{ + 'type': 'my_metric', + 'vocab_size': tokenizer.vocab_size + }] + } +}) + +# ### Finetuning + + +def cfg_modify_fn(cfg: Config) -> Config: + cfg.update(CONFIG) + return cfg + + +trainer = EpochBasedTrainer( + model=model, + cfg_file=cfg_file, + data_collator=data_collate_fn, + train_dataset=train_dataset, + eval_dataset=val_dataset, + remove_unused_data=True, + seed=42, + device='cpu', # No placement for model, leave the model to `device_map` + cfg_modify_fn=cfg_modify_fn, +) + +trainer.train() + +# ### Visualization +tb_dir = os.path.join(WORK_DIR, 'tensorboard_output') +plot_image(tb_dir, ['loss'], 0.9) diff --git a/examples/pytorch/llm/chatglm2_infer.py b/examples/pytorch/llm/chatglm2_infer.py new file mode 100644 index 00000000..741f9b18 --- /dev/null +++ b/examples/pytorch/llm/chatglm2_infer.py @@ -0,0 +1,60 @@ +# ### Setting up experimental environment. +from _common import * +from transformers import TextStreamer + +device_ids = [0, 1] +logger.info(device_ids) +select_device(device_ids) + +# ### Loading Model and Tokenizer +# Note: You need to set the value of `CKPT_FPATH` +CKPT_FAPTH = '/path/to/your/xxx.pth' +LORA_TARGET_MODULES = ['query_key_value'] + +model, tokenizer = get_chatglm2_model_tokenizer() +if tokenizer.eos_token_id is None: + tokenizer.eos_token_id = tokenizer.pad_token_id +if tokenizer.bos_token_id is None: + tokenizer.bos_token_id = 1 +model.bfloat16() # Consistent with training + +# ### Preparing lora +LORA_RANK = 8 +LORA_ALPHA = 32 +LORA_DROPOUT_P = 0 # Arbitrary value +lora_config = LoRAConfig( + replace_modules=LORA_TARGET_MODULES, + rank=LORA_RANK, + lora_alpha=LORA_ALPHA, + lora_dropout=LORA_DROPOUT_P, + pretrained_weights=CKPT_FAPTH) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) + +# ### Loading Dataset +_, test_dataset = get_alpaca_en_zh_dataset(None, True) + +# ### Inference +streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) +for d in test_dataset[:5]: + output = d['output'] + d['output'] = None + input_ids = tokenize_function(d, tokenizer)['input_ids'] + print(f'[TEST]{tokenizer.decode(input_ids)}', end='') + input_ids = torch.tensor(input_ids)[None].cuda() + attention_mask = torch.ones_like(input_ids) + generate_ids = model.generate( + input_ids=input_ids, + max_new_tokens=512, + attention_mask=attention_mask, + streamer=streamer, + pad_token_id=tokenizer.pad_token_id, + temperature=0.7, + top_k=50, + do_sample=True) + print() + print(f'[LABELS]{output}') + print( + '-----------------------------------------------------------------------------------' + ) + # input('next[ENTER]') diff --git a/examples/pytorch/llm/chatglm2_sft.py b/examples/pytorch/llm/chatglm2_sft.py new file mode 100644 index 00000000..ecd497a2 --- /dev/null +++ b/examples/pytorch/llm/chatglm2_sft.py @@ -0,0 +1,188 @@ +# ### Setting up experimental environment. 
+""" +pip install modelscope +pip install numpy pandas matplotlib scikit-learn +pip install transformers datasets +pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 +pip install tqdm +pip install tensorboard +pip install torchmetrics +pip install sentencepiece +pip install accelerate + +pip install numpy -U # Resolve torchmetrics dependencies and update numpy +""" + +from _common import * + +device_ids = [0, 1, 2, 3] +logger.info(device_ids) +select_device(device_ids) +seed_everything(42) + +# ### Loading Model and Tokenizer +model_id = 'ZhipuAI/chatglm2-6b' +WORK_DIR = 'runs/chatglm2' +LORA_TARGET_MODULES = ['query_key_value'] +# +model_dir = get_model_dir(model_id, None) +model, tokenizer = get_chatglm2_model_tokenizer(model_dir) +# chatglm2 does not support gradient_checkpointing +GRADIENT_CHECKPOINTING = False +if GRADIENT_CHECKPOINTING: + model.gradient_checkpointing_enable() + model.enable_input_require_grads() +logger.info(tokenizer.special_tokens) +if tokenizer.eos_token_id is None: + tokenizer.eos_token_id = tokenizer.pad_token_id +if tokenizer.bos_token_id is None: + tokenizer.bos_token_id = 1 +# +logger.info( + f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, ' + f'pad_token_id: {tokenizer.pad_token_id}') + +# ### Preparing lora +LORA_RANK = 8 +LORA_ALPHA = 32 +LORA_DROPOUT_P = 0.1 +lora_config = LoRAConfig( + replace_modules=LORA_TARGET_MODULES, + rank=LORA_RANK, + lora_alpha=LORA_ALPHA, + lora_dropout=LORA_DROPOUT_P) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) +# +show_freeze_layers(model) +print_model_info(model) +_p = list(model.parameters())[100] +logger.info(f'device: {_p.device}, dtype: {_p.dtype}') +model.bfloat16() + +# ### Loading Dataset +tokenize_function = partial(tokenize_function, tokenizer=tokenizer) +train_dataset, val_dataset = get_alpaca_en_zh_dataset(tokenize_function) +# Data analysis +stat_dataset(train_dataset) +stat_dataset(val_dataset) +data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) +print_examples(train_dataset[0], tokenizer) + +# ### Setting Config +cfg_file = os.path.join(model_dir, 'configuration.json') +# +BATCH_SIZE = 1 +MAX_EPOCHS = 1 +T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True) +WORK_DIR = get_work_dir(WORK_DIR) +EVAL_INTERVAL = 500 +CONFIG = Config({ + 'train': { + 'dataloader': { + 'batch_size_per_gpu': BATCH_SIZE, + 'workers_per_gpu': 1, + 'shuffle': True, + 'drop_last': True, + 'pin_memory': True + }, + 'max_epochs': + MAX_EPOCHS, + 'work_dir': + WORK_DIR, + 'optimizer': { + 'type': 'AdamW', + 'lr': 1e-4, + 'weight_decay': 0.01, + 'options': { + 'cumulative_iters': 16, + 'grad_clip': { + 'norm_type': 2, + 'max_norm': 2.0 + } + } + }, + 'lr_scheduler': { + 'type': 'CosineAnnealingLR', + 'T_max': T_max, + 'eta_min': 1e-5, + 'options': { + 'by_epoch': False, + 'warmup': { + 'type': 'LinearWarmup', + 'warmup_ratio': 0.1, + 'warmup_iters': 200 + } + } + }, + 'hooks': [ + { + 'type': 'CheckpointHook', + 'by_epoch': False, + 'interval': EVAL_INTERVAL, + 'max_checkpoint_num': 1 + }, + { + 'type': 'EvaluationHook', + 'by_epoch': False, + 'interval': EVAL_INTERVAL + }, + { + 'type': 'BestCkptSaverHook', + 'metric_key': 'acc', + 'save_best': True, + 'rule': 'max', + 'max_checkpoint_num': 1 + }, + { + 'type': 'TextLoggerHook', + 'by_epoch': True, # Whether EpochBasedTrainer is used + 'interval': 5 + }, + { + 'type': 'TensorboardHook', + 'by_epoch': False, + 'interval': 5 + } + ] + }, + 'evaluation': 
{ + 'dataloader': { + 'batch_size_per_gpu': BATCH_SIZE, + 'workers_per_gpu': 1, + 'shuffle': False, + 'drop_last': False, + 'pin_memory': True + }, + 'metrics': [{ + 'type': 'my_metric', + 'vocab_size': tokenizer.vocab_size + }] + } +}) + +# ### Finetuning + + +def cfg_modify_fn(cfg: Config) -> Config: + cfg.update(CONFIG) + return cfg + + +trainer = EpochBasedTrainer( + model=model, + cfg_file=cfg_file, + data_collator=data_collate_fn, + train_dataset=train_dataset, + eval_dataset=val_dataset, + remove_unused_data=True, + seed=42, + device='cpu', # No placement for model, leave the model to `device_map` + cfg_modify_fn=cfg_modify_fn, +) + +trainer.train() + +# ### Visualization +tb_dir = os.path.join(WORK_DIR, 'tensorboard_output') +plot_image(tb_dir, ['loss'], 0.9) diff --git a/examples/pytorch/llm_agent/_common.py b/examples/pytorch/llm_agent/_common.py index dd0cd7d4..04097b50 100644 --- a/examples/pytorch/llm_agent/_common.py +++ b/examples/pytorch/llm_agent/_common.py @@ -111,7 +111,7 @@ def select_device(device_ids: List[int]) -> Device: [str(d) for d in device_ids]) assert torch.cuda.is_available( ) and torch.cuda.device_count() >= len(device_ids) - log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. "cuda:1,7,8" + log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 'cuda:1,7,8' device = 'cuda:0' logger.info(log_s) return torch.device(device) @@ -221,7 +221,7 @@ def print_examples(examples: Dict[str, Any], tokenizer) -> None: print(f'[INPUT_IDS] {tokenizer.decode(input_ids)}') print() print( - f'[LABLES] {tokenizer.decode([l if l != -100 else 0 for l in labels])}' + f'[LABLES] {tokenizer.decode([lb if lb != -100 else 0 for lb in labels])}' ) @@ -334,8 +334,7 @@ def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, load_model: bool = True): if model_dir is None: model_id = 'ZhipuAI/chatglm2-6b' - model_revision = 'v1.0.3' - model_dir = snapshot_download(model_id, model_revision) + model_dir = snapshot_download(model_id, None) # config = read_config(model_dir) config['model'] = ConfigDict({'type': 'chatglm2-6b'}) @@ -355,7 +354,7 @@ def make_dataset( Dict[str, Any]] ) -> MyDataset: """ - split: Literal["train", "validation"] + split: Literal['train', 'validation'] """ dataset = MsDataset.load( 'modelscope/ms_hackathon_23_agent_train_dev', split=split) diff --git a/examples/pytorch/llm_agent/baichuan_infer.ipynb b/examples/pytorch/llm_agent/baichuan_infer.ipynb index 77719fc1..03f8f46b 100644 --- a/examples/pytorch/llm_agent/baichuan_infer.ipynb +++ b/examples/pytorch/llm_agent/baichuan_infer.ipynb @@ -16,15 +16,6 @@ "### 配置实验环境" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install transformers" - ] - }, { "cell_type": "code", "execution_count": 1, @@ -62,7 +53,7 @@ "source": [ "from _common import *\n", "from transformers import TextStreamer\n", - "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "device_ids = [0, 1]\n", "logger.info(device_ids)\n", "select_device(device_ids)" ] @@ -152,8 +143,8 @@ } ], "source": [ - "CKPT_FAPTH = \"/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/output_best/pytorch_model.bin\"\n", - "LORA_TARGET_MODULES = [\"W_pack\"]\n", + "CKPT_FAPTH = '/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/output_best/pytorch_model.bin'\n", + "LORA_TARGET_MODULES = ['W_pack']\n", "\n", "model, tokenizer = get_baichuan_model_tokenizer()\n", "if tokenizer.pad_token_id is None:\n", @@ -225,7 +216,7 @@ " 
lora_alpha=LORA_ALPHA,\n", " lora_dropout=LORA_DROPOUT_P,\n", " pretrained_weights=CKPT_FAPTH)\n", - "logger.info(f\"lora_config: {lora_config}\")\n", + "logger.info(f'lora_config: {lora_config}')\n", "Swift.prepare_model(model, lora_config)" ] }, @@ -289,8 +280,8 @@ } ], "source": [ - "test_dataset = make_dataset(\"validation\", lambda system, user, assistant:\n", - " {\"system\": system, \"user\": user, \"assistant\": assistant})" + "test_dataset = make_dataset('validation', lambda system, user, assistant:\n", + " {'system': system, 'user': user, 'assistant': assistant})" ] }, { @@ -451,20 +442,21 @@ "source": [ "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n", "for d in test_dataset[:5]:\n", - " system = d[\"system\"]\n", - " user = d[\"user\"]\n", - " assistant = d[\"assistant\"]\n", - " input_ids = tokenize_function(system, user, None, tokenizer)[\"input_ids\"]\n", - " print(f\"[TEST]{tokenizer.decode(input_ids)}\", end=\"\")\n", + " system = d['system']\n", + " user = d['user']\n", + " assistant = d['assistant']\n", + " input_ids = tokenize_function(system, user, None, tokenizer)['input_ids']\n", + " print(f'[TEST]{tokenizer.decode(input_ids)}', end='')\n", " input_ids = torch.tensor(input_ids)[None].cuda()\n", " attention_mask = torch.ones_like(input_ids)\n", " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", " attention_mask=attention_mask,\n", - " streamer=streamer, pad_token_id=tokenizer.pad_token_id)\n", + " streamer=streamer, pad_token_id=tokenizer.pad_token_id, \n", + " temperature=0.7, top_k=50, do_sample=True)\n", " print()\n", - " print(f\"[LABELS]{assistant}\")\n", - " print(\"-----------------------------------------------------------------------------------\")\n", - " # input(\"next[ENTER]\")" + " print(f'[LABELS]{assistant}')\n", + " print('-----------------------------------------------------------------------------------')\n", + " # input('next[ENTER]')" ] } ], @@ -484,7 +476,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.11" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/examples/pytorch/llm_agent/baichuan_sft.ipynb b/examples/pytorch/llm_agent/baichuan_sft.ipynb index 5e656a24..cb732612 100644 --- a/examples/pytorch/llm_agent/baichuan_sft.ipynb +++ b/examples/pytorch/llm_agent/baichuan_sft.ipynb @@ -36,10 +36,12 @@ "# !pip install modelscope -U\n", "# !pip install numpy pandas matplotlib scikit-learn\n", "# !pip install transformers datasets\n", - "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n", "# !pip install tqdm\n", "# !pip install tensorboard\n", "# !pip install torchmetrics\n", + "# !pip install sentencepiece\n", + "# !pip install accelerate\n", "#\n", "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" ] @@ -73,7 +75,7 @@ ], "source": [ "from _common import *\n", - "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "device_ids = [0, 1, 2, 3]\n", "logger.info(device_ids)\n", "select_device(device_ids)\n", "_ = seed_everything(42)" @@ -130,9 +132,9 @@ } ], "source": [ - "model_id = \"baichuan-inc/baichuan-7B\"\n", - "WORK_DIR = \"runs/baichuan\"\n", - "LORA_TARGET_MODULES = [\"W_pack\"]\n", + "model_id = 'baichuan-inc/baichuan-7B'\n", + "WORK_DIR = 'runs/baichuan'\n", + "LORA_TARGET_MODULES = ['W_pack']\n", "#\n", "model_dir = get_model_dir(model_id, 
None)\n", "model, tokenizer = get_baichuan_model_tokenizer(model_dir)\n", @@ -144,8 +146,8 @@ "if tokenizer.pad_token_id is None:\n", " tokenizer.pad_token_id = tokenizer.eos_token_id\n", "#\n", - "logger.info(f\"bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, \"\n", - " f\"pad_token_id: {tokenizer.pad_token_id}\")" + "logger.info(f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, '\n", + " f'pad_token_id: {tokenizer.pad_token_id}')" ] }, { @@ -237,13 +239,13 @@ " rank=LORA_RANK,\n", " lora_alpha=LORA_ALPHA,\n", " lora_dropout=LORA_DROPOUT_P)\n", - "logger.info(f\"lora_config: {lora_config}\")\n", + "logger.info(f'lora_config: {lora_config}')\n", "Swift.prepare_model(model, lora_config)\n", "#\n", "show_freeze_layers(model)\n", "print_model_info(model)\n", "_p = list(model.parameters())[100]\n", - "logger.info(f\"device: {_p.device}, dtype: {_p.dtype}\")\n", + "logger.info(f'device: {_p.device}, dtype: {_p.dtype}')\n", "model.bfloat16()" ] }, @@ -308,8 +310,8 @@ ], "source": [ "tokenize_function = partial(tokenize_function, tokenizer=tokenizer)\n", - "train_dataset = make_dataset(\"train\", tokenize_function)\n", - "val_dataset = make_dataset(\"validation\", tokenize_function)\n", + "train_dataset = make_dataset('train', tokenize_function)\n", + "val_dataset = make_dataset('validation', tokenize_function)\n", "# Data analysis\n", "stat_dataset(train_dataset)\n", "stat_dataset(val_dataset)\n", @@ -339,7 +341,7 @@ } ], "source": [ - "cfg_file = os.path.join(model_dir, \"configuration.json\")\n", + "cfg_file = os.path.join(model_dir, 'configuration.json')\n", "#\n", "BATCH_SIZE = 1\n", "MAX_EPOCHS = 1\n", @@ -347,62 +349,62 @@ "WORK_DIR = get_work_dir(WORK_DIR)\n", "EVAL_INTERVAL = 200\n", "CONFIG = Config({\n", - " \"train\": {\n", - " \"dataloader\": {\n", - " \"batch_size_per_gpu\": BATCH_SIZE,\n", - " \"workers_per_gpu\": 1,\n", - " \"shuffle\": True,\n", - " \"drop_last\": True,\n", - " \"pin_memory\": True\n", + " 'train': {\n", + " 'dataloader': {\n", + " 'batch_size_per_gpu': BATCH_SIZE,\n", + " 'workers_per_gpu': 1,\n", + " 'shuffle': True,\n", + " 'drop_last': True,\n", + " 'pin_memory': True\n", " },\n", - " \"max_epochs\": MAX_EPOCHS,\n", - " \"work_dir\": WORK_DIR,\n", - " \"optimizer\": {\n", - " \"type\": \"AdamW\",\n", - " \"lr\": 1e-4,\n", - " \"weight_decay\": 0.01,\n", - " \"options\": {\n", - " \"cumulative_iters\": 16, \"grad_clip\": {\n", - " \"norm_type\": 2,\n", - " \"max_norm\": 2.0\n", + " 'max_epochs': MAX_EPOCHS,\n", + " 'work_dir': WORK_DIR,\n", + " 'optimizer': {\n", + " 'type': 'AdamW',\n", + " 'lr': 1e-4,\n", + " 'weight_decay': 0.01,\n", + " 'options': {\n", + " 'cumulative_iters': 16, 'grad_clip': {\n", + " 'norm_type': 2,\n", + " 'max_norm': 2.0\n", " }\n", " }\n", " },\n", - " \"lr_scheduler\": {\n", - " \"type\": \"CosineAnnealingLR\",\n", - " \"T_max\": T_max,\n", - " \"eta_min\": 1e-5,\n", - " \"options\": {\n", - " \"by_epoch\": False,\n", - " \"warmup\": {\n", + " 'lr_scheduler': {\n", + " 'type': 'CosineAnnealingLR',\n", + " 'T_max': T_max,\n", + " 'eta_min': 1e-5,\n", + " 'options': {\n", + " 'by_epoch': False,\n", + " 'warmup': {\n", " 'type': 'LinearWarmup',\n", " 'warmup_ratio': 0.1,\n", - " \"warmup_iters\": 200\n", + " 'warmup_iters': 200\n", " }\n", " }\n", " },\n", - " \"hooks\": [\n", - " {\"type\": \"CheckpointHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL, \"max_checkpoint_num\": 1},\n", - " {\"type\": \"EvaluationHook\", \"by_epoch\": False, \"interval\": 
EVAL_INTERVAL},\n", - " {\"type\": \"BestCkptSaverHook\",\n", - " \"metric_key\": \"acc\",\n", - " \"save_best\": True, \"rule\": \"max\", \"max_checkpoint_num\": 1},\n", - " {\"type\": \"TextLoggerHook\",\n", - " \"by_epoch\": True, # Whether EpochBasedTrainer is used\n", - " \"interval\": 5},\n", - " {\"type\": \"TensorboardHook\", \"by_epoch\": False, \"interval\": 5}\n", + " 'hooks': [\n", + " {'type': 'CheckpointHook', 'by_epoch': False, 'interval': EVAL_INTERVAL, 'max_checkpoint_num': 1},\n", + " {'type': 'EvaluationHook', 'by_epoch': False, 'interval': EVAL_INTERVAL},\n", + " {'type': 'BestCkptSaverHook',\n", + " 'metric_key': 'acc',\n", + " 'save_best': True, 'rule': 'max', 'max_checkpoint_num': 1},\n", + " {'type': 'TextLoggerHook',\n", + " 'by_epoch': True, # Whether EpochBasedTrainer is used\n", + " 'interval': 5},\n", + " {'type': 'TensorboardHook', 'by_epoch': False, 'interval': 5}\n", " ]\n", " },\n", - " \"evaluation\": {\n", - " \"dataloader\": {\n", - " \"batch_size_per_gpu\": BATCH_SIZE,\n", - " \"workers_per_gpu\": 1,\n", - " \"shuffle\": False,\n", - " \"drop_last\": False,\n", - " \"pin_memory\": True\n", + " 'evaluation': {\n", + " 'dataloader': {\n", + " 'batch_size_per_gpu': BATCH_SIZE,\n", + " 'workers_per_gpu': 1,\n", + " 'shuffle': False,\n", + " 'drop_last': False,\n", + " 'pin_memory': True\n", " },\n", - " \"metrics\": [\n", - " {\"type\": \"my_metric\", \"vocab_size\": tokenizer.vocab_size}\n", + " 'metrics': [\n", + " {'type': 'my_metric', 'vocab_size': tokenizer.vocab_size}\n", " ]\n", " }\n", "})" @@ -1778,16 +1780,16 @@ } ], "source": [ - "tb_dir = os.path.join(WORK_DIR, \"tensorboard_output\")\n", + "tb_dir = os.path.join(WORK_DIR, 'tensorboard_output')\n", "fname = os.listdir(tb_dir)[0]\n", "tb_path = os.path.join(tb_dir, fname)\n", "#\n", "data = read_tensorboard_file(tb_path)\n", "print(data.keys())\n", - "_ = plot_image(data, \"loss\", 0.9)\n", - "_ = plot_image(data, \"lr\", 0)\n", - "_ = plot_image(data, \"evaluation/acc\", 0)\n", - "_ = plot_image(data, \"evaluation/loss\", 0)" + "_ = plot_image(data, 'loss', 0.9)\n", + "_ = plot_image(data, 'lr', 0)\n", + "_ = plot_image(data, 'evaluation/acc', 0)\n", + "_ = plot_image(data, 'evaluation/loss', 0)" ] }, { diff --git a/examples/pytorch/llm_agent/chatglm2_infer.ipynb b/examples/pytorch/llm_agent/chatglm2_infer.ipynb index 29388858..237d27c8 100644 --- a/examples/pytorch/llm_agent/chatglm2_infer.ipynb +++ b/examples/pytorch/llm_agent/chatglm2_infer.ipynb @@ -17,15 +17,6 @@ "The following code is copied from baichuan_infer.ipynb" ] }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install transformers" - ] - }, { "cell_type": "code", "execution_count": 2, @@ -63,7 +54,7 @@ "source": [ "from _common import *\n", "from transformers import TextStreamer\n", - "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "device_ids = [0, 1]\n", "logger.info(device_ids)\n", "select_device(device_ids)" ] @@ -149,8 +140,8 @@ } ], "source": [ - "CKPT_FAPTH = \"/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/output_best/pytorch_model.bin\"\n", - "LORA_TARGET_MODULES = [\"query_key_value\"]\n", + "CKPT_FAPTH = '/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/output_best/pytorch_model.bin'\n", + "LORA_TARGET_MODULES = ['query_key_value']\n", "\n", "model, tokenizer = get_chatglm2_model_tokenizer()\n", "if tokenizer.eos_token_id is None:\n", @@ -230,7 +221,7 @@ " lora_alpha=LORA_ALPHA,\n", " 
lora_dropout=LORA_DROPOUT_P,\n", " pretrained_weights=CKPT_FAPTH)\n", - "logger.info(f\"lora_config: {lora_config}\")\n", + "logger.info(f'lora_config: {lora_config}')\n", "Swift.prepare_model(model, lora_config)" ] }, @@ -295,8 +286,8 @@ } ], "source": [ - "test_dataset = make_dataset(\"validation\", lambda system, user, assistant:\n", - " {\"system\": system, \"user\": user, \"assistant\": assistant})" + "test_dataset = make_dataset('validation', lambda system, user, assistant:\n", + " {'system': system, 'user': user, 'assistant': assistant})" ] }, { @@ -484,20 +475,21 @@ "source": [ "streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n", "for d in test_dataset[:5]:\n", - " system = d[\"system\"]\n", - " user = d[\"user\"]\n", - " assistant = d[\"assistant\"]\n", - " input_ids = tokenize_function(system, user, None, tokenizer)[\"input_ids\"]\n", - " print(f\"[TEST]{tokenizer.decode(input_ids)}\", end=\"\")\n", + " system = d['system']\n", + " user = d['user']\n", + " assistant = d['assistant']\n", + " input_ids = tokenize_function(system, user, None, tokenizer)['input_ids']\n", + " print(f'[TEST]{tokenizer.decode(input_ids)}', end='')\n", " input_ids = torch.tensor(input_ids)[None].cuda()\n", " attention_mask = torch.ones_like(input_ids)\n", " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", " attention_mask=attention_mask,\n", - " streamer=streamer, pad_token_id=tokenizer.pad_token_id)\n", + " streamer=streamer, pad_token_id=tokenizer.pad_token_id, \n", + " temperature=0.7, top_k=50, do_sample=True)\n", " print()\n", - " print(f\"[LABELS]{assistant}\")\n", - " print(\"-----------------------------------------------------------------------------------\")\n", - " # input(\"next[ENTER]\")" + " print(f'[LABELS]{assistant}')\n", + " print('-----------------------------------------------------------------------------------')\n", + " # input('next[ENTER]')" ] } ], @@ -517,7 +509,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.11" + "version": "3.10.12" }, "orig_nbformat": 4 }, diff --git a/examples/pytorch/llm_agent/chatglm2_sft.ipynb b/examples/pytorch/llm_agent/chatglm2_sft.ipynb index 1f9306f1..70d9b8a1 100644 --- a/examples/pytorch/llm_agent/chatglm2_sft.ipynb +++ b/examples/pytorch/llm_agent/chatglm2_sft.ipynb @@ -43,10 +43,12 @@ "# !pip install modelscope -U\n", "# !pip install numpy pandas matplotlib scikit-learn\n", "# !pip install transformers datasets\n", - "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n", "# !pip install tqdm\n", "# !pip install tensorboard\n", "# !pip install torchmetrics\n", + "# !pip install sentencepiece\n", + "# !pip install accelerate\n", "#\n", "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" ] @@ -78,7 +80,7 @@ ], "source": [ "from _common import *\n", - "device_ids = list(range(min(4, torch.cuda.device_count())))\n", + "device_ids = [0, 1, 2, 3]\n", "logger.info(device_ids)\n", "select_device(device_ids)\n", "_ = seed_everything(42)" @@ -134,12 +136,11 @@ } ], "source": [ - "model_id = \"ZhipuAI/chatglm2-6b\"\n", - "model_revision = \"v1.0.3\"\n", - "WORK_DIR = \"runs/chatglm2\"\n", - "LORA_TARGET_MODULES = [\"query_key_value\"]\n", + "model_id = 'ZhipuAI/chatglm2-6b'\n", + "WORK_DIR = 'runs/chatglm2'\n", + "LORA_TARGET_MODULES = ['query_key_value']\n", "#\n", - "model_dir = 
get_model_dir(model_id, model_revision)\n", + "model_dir = get_model_dir(model_id, None)\n", "model, tokenizer = get_chatglm2_model_tokenizer(model_dir)\n", "# chatglm2 does not support gradient_checkpointing\n", "GRADIENT_CHECKPOINTING = False\n", @@ -152,8 +153,8 @@ "if tokenizer.bos_token_id is None:\n", " tokenizer.bos_token_id = 1\n", "#\n", - "logger.info(f\"bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, \"\n", - " f\"pad_token_id: {tokenizer.pad_token_id}\")" + "logger.info(f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, '\n", + " f'pad_token_id: {tokenizer.pad_token_id}')" ] }, { @@ -251,13 +252,13 @@ " rank=LORA_RANK,\n", " lora_alpha=LORA_ALPHA,\n", " lora_dropout=LORA_DROPOUT_P)\n", - "logger.info(f\"lora_config: {lora_config}\")\n", + "logger.info(f'lora_config: {lora_config}')\n", "Swift.prepare_model(model, lora_config)\n", "#\n", "show_freeze_layers(model)\n", "print_model_info(model)\n", "_p = list(model.parameters())[100]\n", - "logger.info(f\"device: {_p.device}, dtype: {_p.dtype}\")\n", + "logger.info(f'device: {_p.device}, dtype: {_p.dtype}')\n", "model.bfloat16()" ] }, @@ -399,8 +400,8 @@ ], "source": [ "tokenize_function = partial(tokenize_function, tokenizer=tokenizer)\n", - "train_dataset = make_dataset(\"train\", tokenize_function)\n", - "val_dataset = make_dataset(\"validation\", tokenize_function)\n", + "train_dataset = make_dataset('train', tokenize_function)\n", + "val_dataset = make_dataset('validation', tokenize_function)\n", "# Data analysis\n", "stat_dataset(train_dataset)\n", "stat_dataset(val_dataset)\n", @@ -431,7 +432,7 @@ } ], "source": [ - "cfg_file = os.path.join(model_dir, \"configuration.json\")\n", + "cfg_file = os.path.join(model_dir, 'configuration.json')\n", "#\n", "BATCH_SIZE = 1\n", "MAX_EPOCHS = 1\n", @@ -439,62 +440,62 @@ "WORK_DIR = get_work_dir(WORK_DIR)\n", "EVAL_INTERVAL = 200\n", "CONFIG = Config({\n", - " \"train\": {\n", - " \"dataloader\": {\n", - " \"batch_size_per_gpu\": BATCH_SIZE,\n", - " \"workers_per_gpu\": 1,\n", - " \"shuffle\": True,\n", - " \"drop_last\": True,\n", - " \"pin_memory\": True\n", + " 'train': {\n", + " 'dataloader': {\n", + " 'batch_size_per_gpu': BATCH_SIZE,\n", + " 'workers_per_gpu': 1,\n", + " 'shuffle': True,\n", + " 'drop_last': True,\n", + " 'pin_memory': True\n", " },\n", - " \"max_epochs\": MAX_EPOCHS,\n", - " \"work_dir\": WORK_DIR,\n", - " \"optimizer\": {\n", - " \"type\": \"AdamW\",\n", - " \"lr\": 1e-4,\n", - " \"weight_decay\": 0.01,\n", - " \"options\": {\n", - " \"cumulative_iters\": 16, \"grad_clip\": {\n", - " \"norm_type\": 2,\n", - " \"max_norm\": 2.0\n", + " 'max_epochs': MAX_EPOCHS,\n", + " 'work_dir': WORK_DIR,\n", + " 'optimizer': {\n", + " 'type': 'AdamW',\n", + " 'lr': 1e-4,\n", + " 'weight_decay': 0.01,\n", + " 'options': {\n", + " 'cumulative_iters': 16, 'grad_clip': {\n", + " 'norm_type': 2,\n", + " 'max_norm': 2.0\n", " }\n", " }\n", " },\n", - " \"lr_scheduler\": {\n", - " \"type\": \"CosineAnnealingLR\",\n", - " \"T_max\": T_max,\n", - " \"eta_min\": 1e-5,\n", - " \"options\": {\n", - " \"by_epoch\": False,\n", - " \"warmup\": {\n", + " 'lr_scheduler': {\n", + " 'type': 'CosineAnnealingLR',\n", + " 'T_max': T_max,\n", + " 'eta_min': 1e-5,\n", + " 'options': {\n", + " 'by_epoch': False,\n", + " 'warmup': {\n", " 'type': 'LinearWarmup',\n", " 'warmup_ratio': 0.1,\n", - " \"warmup_iters\": 200\n", + " 'warmup_iters': 200\n", " }\n", " }\n", " },\n", - " \"hooks\": [\n", - " {\"type\": \"CheckpointHook\", 
\"by_epoch\": False, \"interval\": EVAL_INTERVAL, \"max_checkpoint_num\": 1},\n", - " {\"type\": \"EvaluationHook\", \"by_epoch\": False, \"interval\": EVAL_INTERVAL},\n", - " {\"type\": \"BestCkptSaverHook\",\n", - " \"metric_key\": \"acc\",\n", - " \"save_best\": True, \"rule\": \"max\", \"max_checkpoint_num\": 1},\n", - " {\"type\": \"TextLoggerHook\",\n", - " \"by_epoch\": True, # Whether EpochBasedTrainer is used\n", - " \"interval\": 5},\n", - " {\"type\": \"TensorboardHook\", \"by_epoch\": False, \"interval\": 5}\n", + " 'hooks': [\n", + " {'type': 'CheckpointHook', 'by_epoch': False, 'interval': EVAL_INTERVAL, 'max_checkpoint_num': 1},\n", + " {'type': 'EvaluationHook', 'by_epoch': False, 'interval': EVAL_INTERVAL},\n", + " {'type': 'BestCkptSaverHook',\n", + " 'metric_key': 'acc',\n", + " 'save_best': True, 'rule': 'max', 'max_checkpoint_num': 1},\n", + " {'type': 'TextLoggerHook',\n", + " 'by_epoch': True, # Whether EpochBasedTrainer is used\n", + " 'interval': 5},\n", + " {'type': 'TensorboardHook', 'by_epoch': False, 'interval': 5}\n", " ]\n", " },\n", - " \"evaluation\": {\n", - " \"dataloader\": {\n", - " \"batch_size_per_gpu\": BATCH_SIZE,\n", - " \"workers_per_gpu\": 1,\n", - " \"shuffle\": False,\n", - " \"drop_last\": False,\n", - " \"pin_memory\": True\n", + " 'evaluation': {\n", + " 'dataloader': {\n", + " 'batch_size_per_gpu': BATCH_SIZE,\n", + " 'workers_per_gpu': 1,\n", + " 'shuffle': False,\n", + " 'drop_last': False,\n", + " 'pin_memory': True\n", " },\n", - " \"metrics\": [\n", - " {\"type\": \"my_metric\", \"vocab_size\": tokenizer.vocab_size}\n", + " 'metrics': [\n", + " {'type': 'my_metric', 'vocab_size': tokenizer.vocab_size}\n", " ]\n", " }\n", "})" @@ -1884,16 +1885,16 @@ } ], "source": [ - "tb_dir = os.path.join(WORK_DIR, \"tensorboard_output\")\n", + "tb_dir = os.path.join(WORK_DIR, 'tensorboard_output')\n", "fname = os.listdir(tb_dir)[0]\n", "tb_path = os.path.join(tb_dir, fname)\n", "#\n", "data = read_tensorboard_file(tb_path)\n", "print(data.keys())\n", - "_ = plot_image(data, \"loss\", 0.9)\n", - "_ = plot_image(data, \"lr\", 0)\n", - "_ = plot_image(data, \"evaluation/acc\", 0)\n", - "_ = plot_image(data, \"evaluation/loss\", 0)" + "_ = plot_image(data, 'loss', 0.9)\n", + "_ = plot_image(data, 'lr', 0)\n", + "_ = plot_image(data, 'evaluation/acc', 0)\n", + "_ = plot_image(data, 'evaluation/loss', 0)" ] }, { diff --git a/setup.cfg b/setup.cfg index bfee5eec..80d07c5a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -21,7 +21,7 @@ ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids [flake8] max-line-length = 120 select = B,C,E,F,P,T4,W,B9 -ignore = F401,F405,F821,W503,E251 +ignore = F401,F403,F405,F821,W503,E251 exclude = docs/src,*.pyi,.git [darglint] From de32a8f3e69c0ff88a2ccd43e6758ccffc9ebe02 Mon Sep 17 00:00:00 2001 From: "xingjun.wang" Date: Tue, 11 Jul 2023 17:48:22 +0800 Subject: [PATCH 27/87] pre commit --- modelscope/msdatasets/download/dataset_builder.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/modelscope/msdatasets/download/dataset_builder.py b/modelscope/msdatasets/download/dataset_builder.py index ad5ebbcf..0c5c4154 100644 --- a/modelscope/msdatasets/download/dataset_builder.py +++ b/modelscope/msdatasets/download/dataset_builder.py @@ -223,18 +223,21 @@ class CsvDatasetBuilder(csv.Csv): if field_name.endswith(':FILE'): transform_fields.append(field_name) - base_extracted_dir: Union[str, list] = self.split_path_dict.get(split_name, '') + base_extracted_dir: Union[str, list] = 
self.split_path_dict.get( + split_name, '') for field_name in transform_fields: - if isinstance(base_extracted_dir, list) and len(base_extracted_dir) > 0: + if isinstance(base_extracted_dir, + list) and len(base_extracted_dir) > 0: if df.shape[0] != len(base_extracted_dir): logger.error( f"Number of lines in meta-csv file for split '{split_name}' ({df.shape[0]}) " - f"does not match number of data-files({len(base_extracted_dir)})!" + f'does not match number of data-files({len(base_extracted_dir)})!' ) else: df[field_name] = base_extracted_dir elif isinstance(base_extracted_dir, str) and base_extracted_dir: - df[field_name] = df[field_name].apply(lambda x: os.path.join(base_extracted_dir, x)) + df[field_name] = df[field_name].apply( + lambda x: os.path.join(base_extracted_dir, x)) else: logger.warning(f'Nothing to do for field {field_name}') From 574b4568ffc41d709d27f5882977ab6ff3fe85bd Mon Sep 17 00:00:00 2001 From: gg Date: Tue, 11 Jul 2023 18:36:54 +0800 Subject: [PATCH 28/87] flake8 --- .../finetune_speech_recognition.py | 3 +- .../custom_datasets/audio/asr_dataset.py | 35 +++++++++++-------- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index 2fee3a2e..1716d8a0 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -27,8 +27,7 @@ if __name__ == '__main__': from funasr.utils.modelscope_param import modelscope_args params = modelscope_args( - model= - 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' + model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' ) params.output_dir = './checkpoint' # 模型保存路径 params.data_path = 'speech_asr_aishell1_trainsets' # 数据路径,可以为modelscope中已上传数据,也可以是本地数据 diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index 73a40813..9e64bcb3 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -15,8 +15,8 @@ class ASRDataset(MsDataset): @classmethod def load_core(cls, data_dir, data_set): - wav_file = os.path.join(data_dir, data_set, 'wav.scp') - text_file = os.path.join(data_dir, data_set, 'text') + wav_file = os.path.join(data_dir, data_set, "wav.scp") + text_file = os.path.join(data_dir, data_set, "text") with open(wav_file) as f: wav_lines = f.readlines() with open(text_file) as f: @@ -24,28 +24,33 @@ class ASRDataset(MsDataset): data_list = [] for wav_line, text_line in zip(wav_lines, text_lines): item = {} - item['Audio:FILE'] = wav_line.strip().split()[-1] - item['Text:LABEL'] = ' '.join(text_line.strip().split()[1:]) + item["Audio:FILE"] = wav_line.strip().split()[-1] + item["Text:LABEL"] = " ".join(text_line.strip().split()[1:]) data_list.append(item) return data_list @classmethod - def load(cls, - dataset_name, - namespace='speech_asr', - train_set='train', - dev_set='validation', - download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS): + def load( + cls, + dataset_name, + namespace="speech_asr", + train_set="train", + dev_set="validation", + download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS, + ): if os.path.exists(dataset_name): data_dir = dataset_name ds_dict = {} - ds_dict['train'] = cls.load_core(data_dir, train_set) - 
ds_dict['validation'] = cls.load_core(data_dir, dev_set) - ds_dict['raw_data_dir'] = data_dir + ds_dict["train"] = cls.load_core(data_dir, train_set) + ds_dict["validation"] = cls.load_core(data_dir, dev_set) + ds_dict["raw_data_dir"] = data_dir return ds_dict else: from modelscope.msdatasets import MsDataset + ds_dict = MsDataset.load( - dataset_name=dataset_name, namespace=namespace, download_mode=download_mode) + dataset_name=dataset_name, + namespace=namespace, + download_mode=download_mode, + ) return ds_dict - From 1caa45422c9d169694d62c1c9bbe7f5b93e64e08 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Tue, 11 Jul 2023 20:46:32 +0800 Subject: [PATCH 29/87] custom diffusion --- .../finetune_stable_diffusion_custom.py | 162 ++++ .../custom/run_train_custom.sh | 17 + modelscope/metainfo.py | 1 + .../multi_modal/custom_diffusion/__init__.py | 2 + .../custom_diffusion_trainer.py | 800 ++++++++++++++++++ .../trainers/test_custom_diffusion_trainer.py | 98 +++ 6 files changed, 1080 insertions(+) create mode 100644 examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py create mode 100644 examples/pytorch/stable_diffusion/custom/run_train_custom.sh create mode 100644 modelscope/trainers/multi_modal/custom_diffusion/__init__.py create mode 100644 modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py create mode 100644 tests/trainers/test_custom_diffusion_trainer.py diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py new file mode 100644 index 00000000..00e3e97a --- /dev/null +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -0,0 +1,162 @@ +import os +from dataclasses import dataclass, field + +import cv2 + +from modelscope.metainfo import Trainers +from modelscope.msdatasets import MsDataset +from modelscope.pipelines import pipeline +from modelscope.trainers import EpochBasedTrainer, build_trainer +from modelscope.trainers.training_args import TrainingArgs +from modelscope.utils.constant import DownloadMode, Tasks + + +# Load configuration file and dataset +@dataclass(init=False) +class StableDiffusionCustomArguments(TrainingArgs): + class_prompt: str = field( + default=None, + metadata={ + 'help': + 'The prompt to specify images in the same class as provided instance images.', + }) + + instance_prompt: str = field( + default=None, + metadata={ + 'help': 'The prompt with identifier specifying the instance.', + }) + + modifier_token: str = field( + default=None, + metadata={ + 'help': 'A token to use as a modifier for the concept.', + }) + + num_class_images: int = field( + default=200, + metadata={ + 'help': + 'Minimal class images for prior preservation loss. 
If there are not enough images already present in class_data_dir, additional images will be sampled with class_prompt.', + }) + + train_batch_size: int = field( + default=4, + metadata={ + 'help': 'Batch size (per device) for the training dataloader.', + }) + + sample_batch_size: int = field( + default=4, + metadata={ + 'help': 'Batch size (per device) for sampling images.', + }) + + initializer_token: str = field( + default='ktn+pll+ucd', + metadata={ + 'help': 'A token to use as initializer word.', + }) + + class_data_dir: str = field( + default='/tmp/class_data', + metadata={ + 'help': 'A folder containing the training data of class images.', + }) + + resolution: int = field( + default=512, + metadata={ + 'help': + 'The resolution for input images, all the images in the train/validation dataset will be resized to this', + }) + + prior_loss_weight: float = field( + default=1.0, + metadata={ + 'help': 'The weight of prior preservation loss.', + }) + + freeze_model: str = field( + default='crossattn_kv', + metadata={ + 'help': + 'crossattn to enable fine-tuning of all params in the cross attention.', + }) + + instance_data_name: str = field( + default='buptwq/custom-stable-diffusion-cat', + metadata={ + 'help': 'The instance data local dir or online ID.', + }) + + +training_args = StableDiffusionCustomArguments( + task='text-to-image-synthesis').parse_cli() +config, args = training_args.to_config() + +if os.path.exists(args.train_dataset_name): + # Load local dataset + train_dataset = MsDataset.load(args.train_dataset_name) + validation_dataset = MsDataset.load(args.train_dataset_name) +else: + # Load online dataset + train_dataset = MsDataset.load( + args.train_dataset_name, + split='train', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + validation_dataset = MsDataset.load( + args.train_dataset_name, + split='validation', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + + +def cfg_modify_fn(cfg): + if args.use_model_config: + cfg.merge_from_dict(config) + else: + cfg = config + cfg.train.lr_scheduler = { + 'type': 'LambdaLR', + 'lr_lambda': lambda _: 1, + 'last_epoch': -1 + } + return cfg + + +kwargs = dict( + model=training_args.model, + model_revision=args.model_revision, + class_prompt=args.class_prompt, + instance_prompt=args.instance_prompt, + modifier_token=args.modifier_token, + num_class_images=args.num_class_images, + train_batch_size=args.train_batch_size, + sample_batch_size=args.sample_batch_size, + initializer_token=args.initializer_token, + class_data_dir=args.class_data_dir, + resolution=args.resolution, + prior_loss_weight=args.prior_loss_weight, + freeze_model=args.freeze_model, + instance_data_name=args.instance_data_name, + work_dir=training_args.work_dir, + train_dataset=train_dataset, + eval_dataset=validation_dataset, + cfg_modify_fn=cfg_modify_fn) + +# build trainer and training +trainer = build_trainer(name=Trainers.custom_diffusion, default_args=kwargs) +trainer.train() + +# pipeline after training and save result +pipe = pipeline( + task=Tasks.text_to_image_synthesis, + model=training_args.model, + custom_dir=training_args.work_dir + '/output', + modifier_token='', + model_revision=args.model_revision) + +output = pipe({'text': args.prompt}) +# visualize the result on ipynb and save it +output +cv2.imwrite('./custom_result.png', output['output_imgs'][0]) diff --git a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh new file mode 100644 index 00000000..ca3cf22e --- /dev/null +++ 
b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh @@ -0,0 +1,17 @@ +PYTHONPATH=. torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py \ + --model 'AI-ModelScope/stable-diffusion-v1-5' \ + --model_revision 'v1.0.9' \ + --class_prompt "dog" \ + --instance_prompt="photo of a dog" \ + --work_dir './tmp/custom_diffusion' \ + --class_data_dir './tmp/class_data' \ + --train_dataset_name 'buptwq/lora-stable-diffusion-finetune-dog' \ + --max_epochs 2 \ + --modifier_token "" \ + --num_class_images=200 \ + --save_ckpt_strategy 'by_epoch' \ + --logging_interval 1 \ + --train.dataloader.workers_per_gpu 0 \ + --evaluation.dataloader.workers_per_gpu 0 \ + --train.optimizer.lr 1e-5 \ + --use_model_config true diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index 2b5bca26..f88a1ab0 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -901,6 +901,7 @@ class MultiModalTrainers(object): stable_diffusion = 'stable-diffusion' lora_diffusion = 'lora-diffusion' dreambooth_diffusion = 'dreambooth-diffusion' + custom_diffusion = 'custom-diffusion' class AudioTrainers(object): diff --git a/modelscope/trainers/multi_modal/custom_diffusion/__init__.py b/modelscope/trainers/multi_modal/custom_diffusion/__init__.py new file mode 100644 index 00000000..66747553 --- /dev/null +++ b/modelscope/trainers/multi_modal/custom_diffusion/__init__.py @@ -0,0 +1,2 @@ +# Copyright © Alibaba, Inc. and its affiliates. +from .custom_diffusion_trainer import CustomDiffusionTrainer diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py new file mode 100644 index 00000000..e438a187 --- /dev/null +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -0,0 +1,800 @@ +# Copyright 2022-2023 The Alibaba Fundamental Vision Team Authors. All rights reserved. +import hashlib +import itertools +import os +import shutil +import warnings +from collections.abc import Mapping +from pathlib import Path +from typing import Union + +import torch +import torch.nn.functional as F +from diffusers import DiffusionPipeline +from diffusers.loaders import AttnProcsLayers +from diffusers.models.attention_processor import CustomDiffusionAttnProcessor +from PIL import Image +from PIL.ImageOps import exif_transpose +from torch.utils.data import Dataset +from torchvision import transforms +from tqdm.auto import tqdm + +from modelscope.metainfo import Trainers +from modelscope.msdatasets import MsDataset +from modelscope.outputs import ModelOutputBase, OutputKeys +from modelscope.trainers.builder import TRAINERS +from modelscope.trainers.hooks.checkpoint.checkpoint_hook import CheckpointHook +from modelscope.trainers.hooks.checkpoint.checkpoint_processor import \ + CheckpointProcessor +from modelscope.trainers.optimizer.builder import build_optimizer +from modelscope.trainers.trainer import EpochBasedTrainer +from modelscope.utils.config import ConfigDict +from modelscope.utils.constant import ModeKeys +from modelscope.utils.file_utils import func_receive_dict_inputs +from modelscope.utils.torch_utils import is_dist + + +class CustomCheckpointProcessor(CheckpointProcessor): + + def __init__(self, modifier_token, modifier_token_id): + """Checkpoint processor for custom diffusion. + + Args: + modifier_token: The token to use as a modifier for the concept. + modifier_token_id: The modifier token id for the concept. 
+ """ + self.modifier_token = modifier_token + self.modifier_token_id = modifier_token_id + + def save_checkpoints(self, + trainer, + checkpoint_path_prefix, + output_dir, + meta=None): + """Save the state dict for custom diffusion model. + """ + trainer.model.unet = trainer.model.unet.to(torch.float32) + unet.save_attn_procs(output_dir) + learned_embeds = text_encoder.get_input_embeddings().weight + for x, y in zip(self.modifier_token_id, self.modifier_token): + learned_embeds_dict = {} + learned_embeds_dict[y] = learned_embeds[x] + torch.save(learned_embeds_dict, f'{output_dir}/{y}.bin') + + +class CustomDiffusionDataset(Dataset): + """ + A dataset to prepare the instance and class images with the prompts for fine-tuning the model. + It pre-processes the images and the tokenizes prompts. + """ + + def __init__( + self, + concepts_list, + tokenizer, + size=512, + mask_size=64, + center_crop=False, + with_prior_preservation=False, + num_class_images=200, + hflip=False, + aug=True, + ): + self.size = size + self.mask_size = mask_size + self.center_crop = center_crop + self.tokenizer = tokenizer + self.interpolation = Image.BILINEAR + self.aug = aug + + self.instance_images_path = [] + self.class_images_path = [] + self.with_prior_preservation = with_prior_preservation + for concept in concepts_list: + inst_img_path = [ + (x, concept['instance_prompt']) + for x in Path(concept['instance_data_dir']).iterdir() + if x.is_file() + ] + self.instance_images_path.extend(inst_img_path) + + if with_prior_preservation: + class_data_root = Path(concept['class_data_dir']) + if os.path.isdir(class_data_root): + class_images_path = list(class_data_root.iterdir()) + class_prompt = [ + concept['class_prompt'] + for _ in range(len(class_images_path)) + ] + else: + with open(class_data_root, 'r') as f: + class_images_path = f.read().splitlines() + with open(concept['class_prompt'], 'r') as f: + class_prompt = f.read().splitlines() + + class_img_path = [ + (x, y) for (x, y) in zip(class_images_path, class_prompt) + ] + self.class_images_path.extend( + class_img_path[:num_class_images]) + + random.shuffle(self.instance_images_path) + self.num_instance_images = len(self.instance_images_path) + self.num_class_images = len(self.class_images_path) + self._length = max(self.num_class_images, self.num_instance_images) + self.flip = transforms.RandomHorizontalFlip(0.5 * hflip) + + self.image_transforms = transforms.Compose([ + self.flip, + transforms.Resize( + size, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(size) + if center_crop else transforms.RandomCrop(size), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ]) + + def __len__(self): + return self._length + + def preprocess(self, image, scale, resample): + outer, inner = self.size, scale + factor = self.size // self.mask_size + if scale > self.size: + outer, inner = scale, self.size + top, left = np.random.randint(0, outer - inner + 1), np.random.randint( + 0, outer - inner + 1) + image = image.resize((scale, scale), resample=resample) + image = np.array(image).astype(np.uint8) + image = (image / 127.5 - 1.0).astype(np.float32) + instance_image = np.zeros((self.size, self.size, 3), dtype=np.float32) + mask = np.zeros((self.size // factor, self.size // factor)) + if scale > self.size: + instance_image = image[top:top + inner, left:left + inner, :] + mask = np.ones((self.size // factor, self.size // factor)) + else: + instance_image[top:top + inner, left:left + inner, :] = image + mask[top // factor + 1:(top + 
scale) // factor - 1, + left // factor + 1:(left + scale) // factor - 1] = 1.0 + return instance_image, mask + + def __getitem__(self, index): + example = {} + instance_image, instance_prompt = self.instance_images_path[ + index % self.num_instance_images] + instance_image = Image.open(instance_image) + if not instance_image.mode == 'RGB': + instance_image = instance_image.convert('RGB') + instance_image = self.flip(instance_image) + + # apply resize augmentation and create a valid image region mask + random_scale = self.size + if self.aug: + random_scale = ( + np.random.randint(self.size // 3, self.size + + 1) if np.random.uniform() < 0.66 else + np.random.randint(int(1.2 * self.size), int(1.4 * self.size))) + instance_image, mask = self.preprocess(instance_image, random_scale, + self.interpolation) + + if random_scale < 0.6 * self.size: + instance_prompt = np.random.choice(['a far away ', 'very small ' + ]) + instance_prompt + elif random_scale > self.size: + instance_prompt = np.random.choice(['zoomed in ', 'close up ' + ]) + instance_prompt + + example['instance_images'] = torch.from_numpy(instance_image).permute( + 2, 0, 1) + example['mask'] = torch.from_numpy(mask) + example['instance_prompt_ids'] = self.tokenizer( + instance_prompt, + truncation=True, + padding='max_length', + max_length=self.tokenizer.model_max_length, + return_tensors='pt', + ).input_ids + + if self.with_prior_preservation: + class_image, class_prompt = self.class_images_path[ + index % self.num_class_images] + class_image = Image.open(class_image) + if not class_image.mode == 'RGB': + class_image = class_image.convert('RGB') + example['class_images'] = self.image_transforms(class_image) + example['class_mask'] = torch.ones_like(example['mask']) + example['class_prompt_ids'] = self.tokenizer( + class_prompt, + truncation=True, + padding='max_length', + max_length=self.tokenizer.model_max_length, + return_tensors='pt', + ).input_ids + + return example + + +class ClassDataset(Dataset): + + def __init__( + self, + tokenizer, + class_data_root=None, + class_prompt=None, + class_num_images=None, + size=512, + center_crop=False, + ): + """A dataset to prepare class images with the prompts for fine-tuning the model. + It pre-processes the images and the tokenizes prompts. + + Args: + tokenizer: The tokenizer to use for tokenization. + class_data_root: The saved class data path. + class_prompt: The prompt to use for class images. + class_num_images: The number of class images to use. + size: The size to resize the images. + center_crop: Whether to do center crop or random crop. + + """ + self.size = size + self.center_crop = center_crop + self.tokenizer = tokenizer + + if class_data_root is not None: + self.class_data_root = Path(class_data_root) + self.class_data_root.mkdir(parents=True, exist_ok=True) + self.class_images_path = list(self.class_data_root.iterdir()) + if class_num_images is not None: + self.num_class_images = min( + len(self.class_images_path), class_num_images) + else: + self.num_class_images = len(self.class_images_path) + self.class_prompt = class_prompt + else: + raise ValueError( + f"Class {self.class_data_root} class data root doesn't exists." 
+ ) + + self.image_transforms = transforms.Compose([ + transforms.Resize( + size, interpolation=transforms.InterpolationMode.BILINEAR), + transforms.CenterCrop(size) + if center_crop else transforms.RandomCrop(size), + transforms.ToTensor(), + transforms.Normalize([0.5], [0.5]), + ]) + + def __len__(self): + return self.num_class_images + + def __getitem__(self, index): + example = {} + + if self.class_data_root: + class_image = Image.open( + self.class_images_path[index % self.num_class_images]) + class_image = exif_transpose(class_image) + + if not class_image.mode == 'RGB': + class_image = class_image.convert('RGB') + example['pixel_values'] = self.image_transforms(class_image) + + class_text_inputs = self.tokenizer( + self.class_prompt, + max_length=self.tokenizer.model_max_length, + truncation=True, + padding='max_length', + return_tensors='pt') + input_ids = torch.squeeze(class_text_inputs.input_ids) + example['input_ids'] = input_ids + + return example + + +class PromptDataset(Dataset): + + def __init__(self, prompt, num_samples): + """Dataset to prepare the prompts to generate class images. + + Args: + prompt: Class prompt. + num_samples: The number sample for class images. + + """ + self.prompt = prompt + self.num_samples = num_samples + + def __len__(self): + return self.num_samples + + def __getitem__(self, index): + example = {} + example['prompt'] = self.prompt + example['index'] = index + return example + + +@TRAINERS.register_module(module_name=Trainers.custom_diffusion) +class CustomDiffusionTrainer(EpochBasedTrainer): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + """Custom diffusion trainers for fine-tuning stable diffusion. + + Args: + with_prior_preservation: a boolean indicating whether to enable prior loss. + instance_prompt: a string specifying the instance prompt. + class_prompt: a string specifying the class prompt. + class_data_dir: the path to the class data directory. + num_class_images: the number of class images to generate. + prior_loss_weight: the weight of the prior loss. 
+ + """ + self.with_prior_preservation = kwargs.pop('with_prior_preservation', + True) + self.instance_prompt = kwargs.pop('instance_prompt', + 'a photo of sks dog') + self.class_prompt = kwargs.pop('class_prompt', 'dog') + self.real_prior = kwargs.pop('real_prior', False) + self.class_data_dir = kwargs.pop('class_data_dir', '/tmp/class_data') + self.num_class_images = kwargs.pop('num_class_images', 200) + self.resolution = kwargs.pop('resolution', 512) + self.prior_loss_weight = kwargs.pop('prior_loss_weight', 1.0) + self.modifier_token = kwargs.pop('modifier_token', '') + self.initializer_token = kwargs.pop('initializer_token', 'ktn+pll+ucd') + self.freeze_model = kwargs.pop('freeze_model', 'crossattn_kv') + self.sample_batch_size = kwargs.pop('sample_batch_size', 4) + self.train_batch_size = kwargs.pop('train_batch_size', 2) + instance_data_name = kwargs.pop( + 'instance_data_name', 'buptwq/lora-stable-diffusion-finetune-dog') + + # Extract downloaded image folder + if os.path.isdir(instance_data_name): + self.instance_data_dir = instance_data_name + else: + ds = MsDataset.load(instance_data_name) + self.instance_data_dir = os.path.dirname( + next(iter(ds))['Target:FILE']) + + self.concepts_list = [{ + 'instance_prompt': self.instance_prompt, + 'class_prompt': self.class_prompt, + 'instance_data_dir': self.instance_data_dir, + 'class_data_dir': self.class_data_dir, + }] + + # Adding a modifier token which is optimized + self.modifier_token_id = [] + initializer_token_id = [] + if self.modifier_token is not None: + self.modifier_token = self.modifier_token.split('+') + self.initializer_token = self.initializer_token.split('+') + if len(self.modifier_token) > len(self.initializer_token): + raise ValueError( + 'You must specify + separated initializer token for each modifier token.' + ) + for modifier_token, initializer_token in zip( + self.modifier_token, + self.initializer_token[:len(self.modifier_token)]): + # Add the placeholder token in tokenizer + num_added_tokens = self.model.tokenizer.add_tokens( + modifier_token) + if num_added_tokens == 0: + raise ValueError( + f'The tokenizer already contains the token {modifier_token}. Please pass a different' + ' `modifier_token` that is not already in the tokenizer.' 
+ ) + + # Convert the initializer_token, placeholder_token to ids + token_ids = self.model.tokenizer.encode( + [initializer_token], add_special_tokens=False) + # Check if initializer_token is a single token or a sequence of tokens + if len(token_ids) > 1: + raise ValueError( + 'The initializer token must be a single token.') + + initializer_token_id.append(token_ids[0]) + self.modifier_token_id.append( + self.model.tokenizer.convert_tokens_to_ids(modifier_token)) + + # Resize the token embeddings as we are adding new special tokens to the tokenizer + self.model.text_encoder.resize_token_embeddings( + len(self.model.tokenizer)) + + # Resize the token embeddings as we are adding new special tokens to the tokenizer + self.model.text_encoder.resize_token_embeddings( + len(self.model.tokenizer)) + + # Initialise the newly added placeholder token with the embeddings of the initializer token + token_embeds = self.model.text_encoder.get_input_embeddings( + ).weight.data + for x, y in zip(self.modifier_token_id, initializer_token_id): + token_embeds[x] = token_embeds[y] + + # Freeze all parameters except for the token embeddings in text encoder + params_to_freeze = itertools.chain( + self.model.text_encoder.text_model.encoder.parameters(), + self.model.text_encoder.text_model.final_layer_norm.parameters(), + self.model.text_encoder.text_model.embeddings.position_embedding. + parameters(), + ) + freeze_params(params_to_freeze) + + # Save checkpoint and configurate files + ckpt_hook = list( + filter(lambda hook: isinstance(hook, CheckpointHook), + self.hooks))[0] + ckpt_hook.set_processor( + CustomCheckpointProcessor(modifier_token_id, modifier_token)) + + # Add new Custom Diffusion weights to the attention layers + attention_class = CustomDiffusionAttnProcessor + # Only train key, value projection layers if freeze_model = 'crossattn_kv' else train all params. 
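# (With the default 'crossattn_kv', only the newly added to_k/to_v custom-diffusion projections receive gradients;
# any other value sets train_q_out=True below, which also trains the to_q and to_out projections of each
# cross-attention block at a higher memory cost.)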
+ train_q_out = False if self.freeze_model == 'crossattn_kv' else True + custom_diffusion_attn_procs = {} + + st = self.model.unet.state_dict() + for name, _ in self.model.unet.attn_processors.items(): + cross_attention_dim = None if name.endswith( + 'attn1.processor' + ) else self.model.unet.config.cross_attention_dim + if name.startswith('mid_block'): + hidden_size = unet.config.block_out_channels[-1] + elif name.startswith('up_blocks'): + block_id = int(name[len('up_blocks.')]) + hidden_size = list(reversed( + unet.config.block_out_channels))[block_id] + elif name.startswith('down_blocks'): + block_id = int(name[len('down_blocks.')]) + hidden_size = unet.config.block_out_channels[block_id] + layer_name = name.split('.processor')[0] + weights = { + 'to_k_custom_diffusion.weight': + st[layer_name + '.to_k.weight'], + 'to_v_custom_diffusion.weight': + st[layer_name + '.to_v.weight'], + } + if train_q_out: + weights['to_q_custom_diffusion.weight'] = st[layer_name + + '.to_q.weight'] + weights['to_out_custom_diffusion.0.weight'] = st[ + layer_name + '.to_out.0.weight'] + weights['to_out_custom_diffusion.0.bias'] = st[ + layer_name + '.to_out.0.bias'] + if cross_attention_dim is not None: + custom_diffusion_attn_procs[name] = attention_class( + train_kv=True, + train_q_out=train_q_out, + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ).to(self.model.unet.device) + custom_diffusion_attn_procs[name].load_state_dict(weights) + else: + custom_diffusion_attn_procs[name] = attention_class( + train_kv=False, + train_q_out=False, + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ) + del st + self.model.unet.set_attn_processor(custom_diffusion_attn_procs) + self.custom_diffusion_layers = AttnProcsLayers( + self.model.unet.attn_processors) + + # Check for conflicts and conflicts + if self.with_prior_preservation: + if self.class_data_dir is None: + raise ValueError( + 'You must specify a data directory for class images.') + if self.class_prompt is None: + raise ValueError('You must specify prompt for class images.') + else: + if self.class_data_dir is not None: + warnings.warn( + 'You need not use --class_data_dir without --with_prior_preservation.' + ) + if self.class_prompt is not None: + warnings.warn( + 'You need not use --class_prompt without --with_prior_preservation.' + ) + + # Generate class images if prior preservation is enabled. 
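# generate_image() below lazily fills class_data_dir: it loads a DiffusionPipeline from self.model_dir,
# samples self.class_prompt in batches of sample_batch_size, and saves images named '<index>-<sha1>.jpg'
# until num_class_images exist; these serve as the regularization targets for the prior-preservation loss.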
+ if self.with_prior_preservation: + generate_image() + + # Dataset and DataLoaders creation: + train_dataset = CustomDiffusionDataset( + concepts_list=self.concepts_list, + tokenizer=self.model.tokenizer, + with_prior_preservation=self.with_prior_preservation, + size=self.resolution, + mask_size=self.model.vae.encode( + torch.randn(1, 3, self.resolution, + self.resolution).to(dtype=torch.float32).to( + self.device)).latent_dist.sample().size()[-1], + center_crop=self.center_crop, + num_class_images=self.num_class_images, + hflip=False, + aug=True, + ) + train_dataloader = torch.utils.data.DataLoader( + train_dataset, + batch_size=self.train_batch_size, + shuffle=True, + collate_fn=lambda examples: collate_fn(examples), + num_workers=2, + ) + self.iter_train_dataloader = itertools.cycle(train_dataloader) + + def freeze_params(self, params): + for param in params: + param.requires_grad = False + + def collate_fn(self, examples): + input_ids = [example['instance_prompt_ids'] for example in examples] + pixel_values = [example['instance_images'] for example in examples] + mask = [example['mask'] for example in examples] + # Concat class and instance examples for prior preservation. + # We do this to avoid doing two forward passes. + if self.with_prior_preservation: + input_ids += [example['class_prompt_ids'] for example in examples] + pixel_values += [example['class_images'] for example in examples] + mask += [example['class_mask'] for example in examples] + + input_ids = torch.cat(input_ids, dim=0) + pixel_values = torch.stack(pixel_values) + mask = torch.stack(mask) + pixel_values = pixel_values.to( + memory_format=torch.contiguous_format).float() + mask = mask.to(memory_format=torch.contiguous_format).float() + + batch = { + 'input_ids': input_ids, + 'pixel_values': pixel_values, + 'mask': mask.unsqueeze(1) + } + return batch + + def generate_image(self): + """ Generate class images if prior preservation is enabled. 
+ """ + for i, concept in enumerate(self.concepts_list): + class_images_dir = Path(concept['class_data_dir']) + if not class_images_dir.exists(): + class_images_dir.mkdir(parents=True, exist_ok=True) + + cur_class_images = len(list(class_images_dir.iterdir())) + + if cur_class_images < self.num_class_images: + pipeline = DiffusionPipeline.from_pretrained( + self.model_dir, + torch_dtype=torch.float32, + safety_checker=None, + revision=None, + ) + pipeline.set_progress_bar_config(disable=True) + + num_new_images = self.num_class_images - cur_class_images + logger.info( + f'Number of class images to sample: {num_new_images}.') + + sample_dataset = PromptDataset(self.class_prompt, + num_new_images) + sample_dataloader = torch.utils.data.DataLoader( + sample_dataset, batch_size=self.sample_batch_size) + + pipeline.to(self.device) + + for example in tqdm( + sample_dataloader, + desc='Generating class images', + # disable=not accelerator.is_local_main_process, + ): + images = pipeline(example['prompt']).images + + for i, image in enumerate(images): + hash_image = hashlib.sha1(image.tobytes()).hexdigest() + image_filename = ( + class_images_dir / + f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" + ) + image.save(image_filename) + + del pipeline + if torch.cuda.is_available(): + torch.cuda.empty_cache() + + def build_optimizer(self, cfg: ConfigDict, default_args: dict = None): + try: + return build_optimizer( + itertools.chain( + self.model.text_encoder.get_input_embeddings().parameters( + ), self.custom_diffusion_layers.parameters()), + cfg=cfg, + default_args=default_args) + except KeyError as e: + self.logger.error( + f'Build optimizer error, the optimizer {cfg} is a torch native component, ' + f'please check if your torch with version: {torch.__version__} matches the config.' + ) + raise e + + def train_step(self, model, inputs): + """ Perform a training step on a batch of inputs. + + Subclass and override to inject custom behavior. + + Args: + model (`TorchModel`): The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + + Return: + `torch.Tensor`: The tensor with training loss on this batch. 
+ """ + model.train() + self._mode = ModeKeys.TRAIN + + batch = next(self.iter_train_dataloader) + # Convert images to latent space + latents = self.model.vae.encode(batch['pixel_values'].to( + dtype=torch.float32)).latent_dist.sample() + latents = latents * self.model.vae.config.scaling_factor + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + bsz = latents.shape[0] + # Sample a random timestep for each image + timesteps = torch.randint( + 0, + noise_scheduler.config.num_train_timesteps, (bsz, ), + device=latents.device) + timesteps = timesteps.long() + + # Add noise to the latents according to the noise magnitude at each timestep + # (this is the forward diffusion process) + noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + + # Get the text embedding for conditioning + encoder_hidden_states = self.model.text_encoder(batch['input_ids'])[0] + + # Predict the noise residual + model_pred = self.model.unet(noisy_latents, timesteps, + encoder_hidden_states).sample + + # Get the target for loss depending on the prediction type + if noise_scheduler.config.prediction_type == 'epsilon': + target = noise + elif noise_scheduler.config.prediction_type == 'v_prediction': + target = noise_scheduler.get_velocity(latents, noise, timesteps) + else: + raise ValueError( + f'Unknown prediction type {noise_scheduler.config.prediction_type}' + ) + + if self.with_prior_preservation: + # Chunk the noise and model_pred into two parts and compute the loss on each part separately. + model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) + target, target_prior = torch.chunk(target, 2, dim=0) + mask = torch.chunk(batch['mask'], 2, dim=0)[0] + # Compute instance loss + loss = F.mse_loss( + model_pred.float(), target.float(), reduction='none') + loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() + + # Compute prior loss + prior_loss = F.mse_loss( + model_pred_prior.float(), + target_prior.float(), + reduction='mean') + + # Add the prior loss to the instance loss. 
+ loss = loss + self.prior_loss_weight * prior_loss + else: + mask = batch['mask'] + loss = F.mse_loss( + model_pred.float(), target.float(), reduction='none') + loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() + + # train_outputs = {} + self.train_outputs[OutputKeys.LOSS] = loss + + # Zero out the gradients for all token embeddings except the newly added + # embeddings for the concept, as we only want to optimize the concept embeddings + if self.modifier_token is not None: + grads_text_encoder = self.model.text_encoder.get_input_embeddings( + ).weight.grad + # Get the index for tokens that we want to zero the grads for + index_grads_to_zero = torch.arange(len( + self.model.tokenizer)) != modifier_token_id[0] + for i in range(len(modifier_token_id[1:])): + index_grads_to_zero = index_grads_to_zero & ( + torch.arange(len(self.model.tokenizer)) != + modifier_token_id[i]) + grads_text_encoder.data[ + index_grads_to_zero, :] = grads_text_encoder.data[ + index_grads_to_zero, :].fill_(0) + + # if self.with_prior_preservation: + # # Convert to latent space + # batch = next(self.iter_class_dataloader) + # target_prior = batch['pixel_values'].to(self.device) + # input_ids = batch['input_ids'].to(self.device) + # with torch.no_grad(): + # latents = self.model.vae.encode( + # target_prior.to(dtype=torch.float32)).latent_dist.sample() + # latents = latents * self.model.vae.config.scaling_factor + + # # Sample noise that we'll add to the latents + # noise = torch.randn_like(latents) + # bsz = latents.shape[0] + # # Sample a random timestep for each image + # timesteps = torch.randint( + # 0, + # self.model.noise_scheduler.num_train_timesteps, (bsz, ), + # device=latents.device) + # timesteps = timesteps.long() + + # # Add noise to the latents according to the noise magnitude at each timestep + # # (this is the forward diffusion process) + # noisy_latents = self.model.noise_scheduler.add_noise( + # latents, noise, timesteps) + + # # Get the text embedding for conditioning + # with torch.no_grad(): + # encoder_hidden_states = self.model.text_encoder(input_ids)[0] + + # # Get the target for loss depending on the prediction type + # if self.model.noise_scheduler.config.prediction_type == 'epsilon': + # target_prior = noise + # elif self.model.noise_scheduler.config.prediction_type == 'v_prediction': + # target_prior = self.model.noise_scheduler.get_velocity( + # latents, noise, timesteps) + # else: + # raise ValueError( + # f'Unknown prediction type {self.model.noise_scheduler.config.prediction_type}' + # ) + + # # Predict the noise residual and compute loss + # model_pred_prior = self.model.unet(noisy_latents, timesteps, + # encoder_hidden_states).sample + + # # Compute prior loss + # prior_loss = F.mse_loss( + # model_pred_prior.float(), + # target_prior.float(), + # reduction='mean') + # # Add the prior loss to the instance loss. 
+ # train_outputs[ + # OutputKeys.LOSS] += self.prior_loss_weight * prior_loss + + # if isinstance(train_outputs, ModelOutputBase): + # train_outputs = train_outputs.to_dict() + # if not isinstance(train_outputs, dict): + # raise TypeError('"model.forward()" must return a dict') + + # # add model output info to log + # if 'log_vars' not in train_outputs: + # default_keys_pattern = ['loss'] + # match_keys = set([]) + # for key_p in default_keys_pattern: + # match_keys.update( + # [key for key in train_outputs.keys() if key_p in key]) + + # log_vars = {} + # for key in match_keys: + # value = train_outputs.get(key, None) + # if value is not None: + # if is_dist(): + # value = value.data.clone().to('cuda') + # dist.all_reduce(value.div_(dist.get_world_size())) + # log_vars.update({key: value.item()}) + # self.log_buffer.update(log_vars) + # else: + # self.log_buffer.update(train_outputs['log_vars']) + + # self.train_outputs = train_outputs diff --git a/tests/trainers/test_custom_diffusion_trainer.py b/tests/trainers/test_custom_diffusion_trainer.py new file mode 100644 index 00000000..84bdbcf0 --- /dev/null +++ b/tests/trainers/test_custom_diffusion_trainer.py @@ -0,0 +1,98 @@ +# Copyright 2022-2023 The Alibaba Fundamental Vision Team Authors. All rights reserved. +import os +import shutil +import tempfile +import unittest + +import cv2 + +from modelscope.metainfo import Trainers +from modelscope.msdatasets import MsDataset +from modelscope.pipelines import pipeline +from modelscope.trainers import build_trainer +from modelscope.utils.constant import DownloadMode +from modelscope.utils.test_utils import test_level + + +class TestCustomDiffusionTrainer(unittest.TestCase): + + def setUp(self): + print(('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + self.train_dataset = MsDataset.load( + 'buptwq/lora-stable-diffusion-finetune', + split='train', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + self.eval_dataset = MsDataset.load( + 'buptwq/lora-stable-diffusion-finetune', + split='validation', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + + self.max_epochs = 5 + + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + shutil.rmtree(self.tmp_dir) + super().tearDown() + + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + def test_custom_diffusion_train(self): + model_id = 'AI-ModelScope/stable-diffusion-v1-5' + model_revision = 'v1.0.8' + prompt = 'a dog.' 
+ + def cfg_modify_fn(cfg): + cfg.train.max_epochs = self.max_epochs + cfg.train.lr_scheduler = { + 'type': 'LambdaLR', + 'lr_lambda': lambda _: 1, + 'last_epoch': -1 + } + cfg.train.optimizer.lr = 5e-6 + return cfg + + kwargs = dict( + model=model_id, + model_revision=model_revision, + work_dir=self.tmp_dir, + train_dataset=self.train_dataset, + eval_dataset=self.eval_dataset, + cfg_modify_fn=cfg_modify_fn) + + trainer = build_trainer( + name=Trainers.custom_diffusion, default_args=kwargs) + trainer.train() + result = trainer.evaluate() + print(f'Custom-diffusion train output: {result}.') + + results_files = os.listdir(self.tmp_dir) + self.assertIn(f'{trainer.timestamp}.log.json', results_files) + + pipe = pipeline( + task=Tasks.text_to_image_synthesis, model=f'{self.tmp_dir}/output') + output = pipe({'text': prompt}) + cv2.imwrite('./custom_result.png', output['output_imgs'][0]) + + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + def test_dreambooth_diffusion_eval(self): + model_id = 'AI-ModelScope/stable-diffusion-v1-5' + model_revision = 'v1.0.8' + + kwargs = dict( + model=model_id, + model_revision=model_revision, + work_dir=self.tmp_dir, + train_dataset=None, + eval_dataset=self.eval_dataset) + + trainer = build_trainer( + name=Trainers.dreambooth_diffusion, default_args=kwargs) + result = trainer.evaluate() + print(f'Custom-diffusion eval output: {result}.') + + +if __name__ == '__main__': + unittest.main() From 49c6d8bcf6c091df6d7c92fc90db2440a8b4703f Mon Sep 17 00:00:00 2001 From: gg Date: Wed, 12 Jul 2023 09:43:24 +0800 Subject: [PATCH 30/87] pre-commit --- .../finetune_speech_recognition.py | 14 ++++++++----- .../custom_datasets/audio/asr_dataset.py | 20 +++++++++---------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py index 1716d8a0..47af0b90 100644 --- a/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py +++ b/examples/pytorch/auto_speech_recognition/finetune_speech_recognition.py @@ -1,16 +1,19 @@ import os -from modelscope.msdatasets.dataset_cls.custom_datasets import ASRDataset -from modelscope.utils.constant import DownloadMode -from modelscope.trainers import build_trainer from modelscope.metainfo import Trainers +from modelscope.msdatasets.dataset_cls.custom_datasets import ASRDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import DownloadMode def modelscope_finetune(params): if not os.path.exists(params.output_dir): os.makedirs(params.output_dir, exist_ok=True) # dataset split ["train", "validation"] - ds_dict = ASRDataset.load(params.data_path, namespace='speech_asr', download_mode=params.download_mode) + ds_dict = ASRDataset.load( + params.data_path, + namespace='speech_asr', + download_mode=params.download_mode) kwargs = dict( model=params.model, data_dir=ds_dict, @@ -27,7 +30,8 @@ if __name__ == '__main__': from funasr.utils.modelscope_param import modelscope_args params = modelscope_args( - model='damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' + model= + 'damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' ) params.output_dir = './checkpoint' # path to save the model checkpoints params.data_path = 'speech_asr_aishell1_trainsets' # data path: either a dataset already uploaded to ModelScope or a local directory diff --git a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py 
b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py index 9e64bcb3..749e6171 100644 --- a/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py +++ b/modelscope/msdatasets/dataset_cls/custom_datasets/audio/asr_dataset.py @@ -15,8 +15,8 @@ class ASRDataset(MsDataset): @classmethod def load_core(cls, data_dir, data_set): - wav_file = os.path.join(data_dir, data_set, "wav.scp") - text_file = os.path.join(data_dir, data_set, "text") + wav_file = os.path.join(data_dir, data_set, 'wav.scp') + text_file = os.path.join(data_dir, data_set, 'text') with open(wav_file) as f: wav_lines = f.readlines() with open(text_file) as f: @@ -24,8 +24,8 @@ class ASRDataset(MsDataset): data_list = [] for wav_line, text_line in zip(wav_lines, text_lines): item = {} - item["Audio:FILE"] = wav_line.strip().split()[-1] - item["Text:LABEL"] = " ".join(text_line.strip().split()[1:]) + item['Audio:FILE'] = wav_line.strip().split()[-1] + item['Text:LABEL'] = ' '.join(text_line.strip().split()[1:]) data_list.append(item) return data_list @@ -33,17 +33,17 @@ class ASRDataset(MsDataset): def load( cls, dataset_name, - namespace="speech_asr", - train_set="train", - dev_set="validation", + namespace='speech_asr', + train_set='train', + dev_set='validation', download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS, ): if os.path.exists(dataset_name): data_dir = dataset_name ds_dict = {} - ds_dict["train"] = cls.load_core(data_dir, train_set) - ds_dict["validation"] = cls.load_core(data_dir, dev_set) - ds_dict["raw_data_dir"] = data_dir + ds_dict['train'] = cls.load_core(data_dir, train_set) + ds_dict['validation'] = cls.load_core(data_dir, dev_set) + ds_dict['raw_data_dir'] = data_dir return ds_dict else: from modelscope.msdatasets import MsDataset From ba2b5e0a57158b1e1d1c384b5a153a0f7a4e6d2b Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 09:51:46 +0800 Subject: [PATCH 31/87] custom diffusion --- .../custom_diffusion_trainer.py | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index e438a187..12b72421 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -2,12 +2,14 @@ import hashlib import itertools import os +import random import shutil import warnings from collections.abc import Mapping from pathlib import Path from typing import Union +import numpy as np import torch import torch.nn.functional as F from diffusers import DiffusionPipeline @@ -329,7 +331,12 @@ class CustomDiffusionTrainer(EpochBasedTrainer): class_data_dir: the path to the class data directory. num_class_images: the number of class images to generate. prior_loss_weight: the weight of the prior loss. 
- + modifier_token: the token(s) to use as a modifier for the concept, '+'-separated for multiple concepts. + initializer_token: the token(s) whose embeddings initialize the modifier token(s), '+'-separated. + freeze_model: 'crossattn_kv' trains only the key/value cross-attention projections; any other value trains all cross-attention parameters. + sample_batch_size: batch size (per device) for sampling class images. + train_batch_size: batch size (per device) for the training dataloader. + center_crop: whether to center crop (rather than randomly crop) the input images. """ self.with_prior_preservation = kwargs.pop('with_prior_preservation', True) self.instance_prompt = kwargs.pop('instance_prompt', 'a photo of sks dog') @@ -346,6 +353,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.freeze_model = kwargs.pop('freeze_model', 'crossattn_kv') self.sample_batch_size = kwargs.pop('sample_batch_size', 4) self.train_batch_size = kwargs.pop('train_batch_size', 2) + self.center_crop = kwargs.pop('center_crop', False) instance_data_name = kwargs.pop( 'instance_data_name', 'buptwq/lora-stable-diffusion-finetune-dog') @@ -354,8 +362,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.instance_data_dir = instance_data_name else: ds = MsDataset.load(instance_data_name) - self.instance_data_dir = os.path.dirname( - next(iter(ds))['Target:FILE']) + # print("--------next(iter(ds)): ", next(iter(ds))) + self.instance_data_dir = next(iter(ds))[-1] + '/data' self.concepts_list = [{ 'instance_prompt': self.instance_prompt, 'class_prompt': self.class_prompt, 'instance_data_dir': self.instance_data_dir, 'class_data_dir': self.class_data_dir, }] @@ -419,14 +427,14 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.model.text_encoder.text_model.embeddings.position_embedding. parameters(), ) - freeze_params(params_to_freeze) + self.freeze_params(params_to_freeze) # Save checkpoint and configurate files ckpt_hook = list( filter(lambda hook: isinstance(hook, CheckpointHook), self.hooks))[0] ckpt_hook.set_processor( - CustomCheckpointProcessor(modifier_token_id, modifier_token)) + CustomCheckpointProcessor(self.modifier_token_id, modifier_token)) # Add new Custom Diffusion weights to the attention layers attention_class = CustomDiffusionAttnProcessor @@ -440,14 +448,16 @@ class CustomDiffusionTrainer(EpochBasedTrainer): 'attn1.processor' ) else self.model.unet.config.cross_attention_dim if name.startswith('mid_block'): - hidden_size = unet.config.block_out_channels[-1] + hidden_size = self.model.unet.config.block_out_channels[-1] elif name.startswith('up_blocks'): block_id = int(name[len('up_blocks.')]) - hidden_size = list(reversed( - unet.config.block_out_channels))[block_id] + hidden_size = list( + reversed( + self.model.unet.config.block_out_channels))[block_id] elif name.startswith('down_blocks'): block_id = int(name[len('down_blocks.')]) - hidden_size = unet.config.block_out_channels[block_id] + hidden_size = self.model.unet.config.block_out_channels[ + block_id] layer_name = name.split('.processor')[0] weights = { 'to_k_custom_diffusion.weight': @@ -501,7 +511,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): # Generate class images if prior preservation is enabled. 
if self.with_prior_preservation: - generate_image() + self.generate_image() # Dataset and DataLoaders creation: train_dataset = CustomDiffusionDataset( @@ -522,7 +532,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): train_dataset, batch_size=self.train_batch_size, shuffle=True, - collate_fn=lambda examples: collate_fn(examples), + collate_fn=lambda examples: self.collate_fn(examples), num_workers=2, ) self.iter_train_dataloader = itertools.cycle(train_dataloader) @@ -576,8 +586,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): pipeline.set_progress_bar_config(disable=True) num_new_images = self.num_class_images - cur_class_images - logger.info( - f'Number of class images to sample: {num_new_images}.') sample_dataset = PromptDataset(self.class_prompt, num_new_images) @@ -710,11 +718,11 @@ class CustomDiffusionTrainer(EpochBasedTrainer): ).weight.grad # Get the index for tokens that we want to zero the grads for index_grads_to_zero = torch.arange(len( - self.model.tokenizer)) != modifier_token_id[0] + self.model.tokenizer)) != self.modifier_token_id[0] for i in range(len(modifier_token_id[1:])): index_grads_to_zero = index_grads_to_zero & ( torch.arange(len(self.model.tokenizer)) != - modifier_token_id[i]) + self.modifier_token_id[i]) grads_text_encoder.data[ index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, :].fill_(0) From 0ab981a021dd931b3abc60f15742dab0daac5b08 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 10:04:20 +0800 Subject: [PATCH 32/87] custom diffusion --- .../custom_diffusion_trainer.py | 68 ++----------------- 1 file changed, 4 insertions(+), 64 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 12b72421..334e54e5 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -362,7 +362,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.instance_data_dir = instance_data_name else: ds = MsDataset.load(instance_data_name) - # print("--------next(iter(ds)): ", next(iter(ds))) self.instance_data_dir = next(iter(ds))[-1] + '/data' self.concepts_list = [{ @@ -545,8 +544,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): input_ids = [example['instance_prompt_ids'] for example in examples] pixel_values = [example['instance_images'] for example in examples] mask = [example['mask'] for example in examples] - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. + # Concat class and instance examples which avoid doing two forward passes. 
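# Instance examples fill the first half of the concatenated batch and class examples the second half,
# which is what lets train_step split the predictions back apart with torch.chunk(..., 2, dim=0) when
# computing the prior-preservation loss.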
if self.with_prior_preservation: input_ids += [example['class_prompt_ids'] for example in examples] pixel_values += [example['class_images'] for example in examples] @@ -560,9 +558,9 @@ class CustomDiffusionTrainer(EpochBasedTrainer): mask = mask.to(memory_format=torch.contiguous_format).float() batch = { - 'input_ids': input_ids, - 'pixel_values': pixel_values, - 'mask': mask.unsqueeze(1) + 'input_ids': input_ids.to(self.device), + 'pixel_values': pixel_values.to(self.device), + 'mask': mask.unsqueeze(1).to(self.device) } return batch @@ -727,64 +725,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, :].fill_(0) - # if self.with_prior_preservation: - # # Convert to latent space - # batch = next(self.iter_class_dataloader) - # target_prior = batch['pixel_values'].to(self.device) - # input_ids = batch['input_ids'].to(self.device) - # with torch.no_grad(): - # latents = self.model.vae.encode( - # target_prior.to(dtype=torch.float32)).latent_dist.sample() - # latents = latents * self.model.vae.config.scaling_factor - - # # Sample noise that we'll add to the latents - # noise = torch.randn_like(latents) - # bsz = latents.shape[0] - # # Sample a random timestep for each image - # timesteps = torch.randint( - # 0, - # self.model.noise_scheduler.num_train_timesteps, (bsz, ), - # device=latents.device) - # timesteps = timesteps.long() - - # # Add noise to the latents according to the noise magnitude at each timestep - # # (this is the forward diffusion process) - # noisy_latents = self.model.noise_scheduler.add_noise( - # latents, noise, timesteps) - - # # Get the text embedding for conditioning - # with torch.no_grad(): - # encoder_hidden_states = self.model.text_encoder(input_ids)[0] - - # # Get the target for loss depending on the prediction type - # if self.model.noise_scheduler.config.prediction_type == 'epsilon': - # target_prior = noise - # elif self.model.noise_scheduler.config.prediction_type == 'v_prediction': - # target_prior = self.model.noise_scheduler.get_velocity( - # latents, noise, timesteps) - # else: - # raise ValueError( - # f'Unknown prediction type {self.model.noise_scheduler.config.prediction_type}' - # ) - - # # Predict the noise residual and compute loss - # model_pred_prior = self.model.unet(noisy_latents, timesteps, - # encoder_hidden_states).sample - - # # Compute prior loss - # prior_loss = F.mse_loss( - # model_pred_prior.float(), - # target_prior.float(), - # reduction='mean') - # # Add the prior loss to the instance loss. 
- # train_outputs[ - # OutputKeys.LOSS] += self.prior_loss_weight * prior_loss - - # if isinstance(train_outputs, ModelOutputBase): - # train_outputs = train_outputs.to_dict() - # if not isinstance(train_outputs, dict): - # raise TypeError('"model.forward()" must return a dict') - # # add model output info to log # if 'log_vars' not in train_outputs: # default_keys_pattern = ['loss'] From 568bd9dac72eed8bde67268ca69ee76f4901dfb3 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 10:09:37 +0800 Subject: [PATCH 33/87] custom diffusion --- .../stable_diffusion_pipeline.py | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py index f09d459d..61a233fa 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py @@ -25,12 +25,27 @@ from modelscope.utils.constant import Tasks module_name=Pipelines.diffusers_stable_diffusion) class StableDiffusionPipeline(DiffusersPipeline): - def __init__(self, model: str, lora_dir: str = None, **kwargs): + def __init__(self, + model: str, + lora_dir: str = None, + custom_dir: str = None, + modifier_token: str = None, + **kwargs): """ use `model` to create a stable diffusion pipeline Args: model: model id on modelscope hub or local model dir. + lora_dir: lora weight dir for unet. + custom_dir: custom diffusion weight dir for unet. + modifier_token: token to use as a modifier for the concept of custom diffusion. """ + # check custom diffusion input value + if custom_dir is None and modifier_token is not None: + raise ValueError( + 'custom_dir is None but modifier_token is not None') + elif custom_dir is not None and modifier_token is None: + raise ValueError( + 'modifier_token is None but custom_dir is not None') self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # load pipeline @@ -38,10 +53,18 @@ class StableDiffusionPipeline(DiffusersPipeline): self.pipeline = DiffuserStableDiffusionPipeline.from_pretrained( model, torch_dtype=torch_type) self.pipeline = self.pipeline.to(self.device) + # load lora moudle to unet if lora_dir is not None: assert os.path.exists(lora_dir), f"{lora_dir} isn't exist" self.pipeline.unet.load_attn_procs(lora_dir) + # load custom diffusion to unet + if custom_dir is not None: + assert os.path.exists(custom_dir), f"{custom_dir} isn't exist" + self.pipeline.unet.load_attn_procs( + custom_dir, weight_name='pytorch_custom_diffusion_weights.bin') + self.pipeline.load_textual_inversion( + custom_dir, weight_name=f'{modifier_token}.bin') def preprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]: return inputs From 2493a7d0f48a4f5481ddeaf50711bf13a9dbd273 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 10:13:44 +0800 Subject: [PATCH 34/87] fix bugs --- .../dreambooth_diffusion/dreambooth_diffusion_trainer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py b/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py index 65623ed8..80989823 100644 --- a/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py +++ 
b/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py @@ -10,8 +10,6 @@ from typing import Union import torch import torch.nn.functional as F from diffusers import DiffusionPipeline -from diffusers.loaders import AttnProcsLayers -from diffusers.models.attention_processor import LoRAAttnProcessor from PIL import Image from PIL.ImageOps import exif_transpose from torch.utils.data import Dataset From 2fb3665c67d16ebb06a73c58706e95c6b33fe6df Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 10:14:00 +0800 Subject: [PATCH 35/87] custom diffusion --- .../custom_diffusion_trainer.py | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 334e54e5..38184d7d 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -706,8 +706,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): model_pred.float(), target.float(), reduction='none') loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() - # train_outputs = {} - self.train_outputs[OutputKeys.LOSS] = loss + train_outputs = {} + train_outputs[OutputKeys.LOSS] = loss # Zero out the gradients for all token embeddings except the newly added # embeddings for the concept, as we only want to optimize the concept embeddings @@ -725,24 +725,24 @@ class CustomDiffusionTrainer(EpochBasedTrainer): index_grads_to_zero, :] = grads_text_encoder.data[ index_grads_to_zero, :].fill_(0) - # # add model output info to log - # if 'log_vars' not in train_outputs: - # default_keys_pattern = ['loss'] - # match_keys = set([]) - # for key_p in default_keys_pattern: - # match_keys.update( - # [key for key in train_outputs.keys() if key_p in key]) + # add model output info to log + if 'log_vars' not in train_outputs: + default_keys_pattern = ['loss'] + match_keys = set([]) + for key_p in default_keys_pattern: + match_keys.update( + [key for key in train_outputs.keys() if key_p in key]) - # log_vars = {} - # for key in match_keys: - # value = train_outputs.get(key, None) - # if value is not None: - # if is_dist(): - # value = value.data.clone().to('cuda') - # dist.all_reduce(value.div_(dist.get_world_size())) - # log_vars.update({key: value.item()}) - # self.log_buffer.update(log_vars) - # else: - # self.log_buffer.update(train_outputs['log_vars']) + log_vars = {} + for key in match_keys: + value = train_outputs.get(key, None) + if value is not None: + if is_dist(): + value = value.data.clone().to('cuda') + dist.all_reduce(value.div_(dist.get_world_size())) + log_vars.update({key: value.item()}) + self.log_buffer.update(log_vars) + else: + self.log_buffer.update(train_outputs['log_vars']) - # self.train_outputs = train_outputs + self.train_outputs = train_outputs From 544f6c0410380679800e3d74d2c617f3fc643267 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Wed, 12 Jul 2023 14:44:09 +0800 Subject: [PATCH 36/87] Fea/chatglm6b v2 new version (#368) * upgrade code * add chatglm2 ptuning * pre-commit passed --- examples/pytorch/chatglm6b/finetune.py | 21 +-- .../run_train_chatglm2_ptuning_adv_v2.sh | 26 +++ .../models/nlp/chatglm2/configuration.py | 9 +- .../models/nlp/chatglm2/quantization.py | 2 - .../models/nlp/chatglm2/text_generation.py | 166 
++++++++++++++---- .../models/nlp/chatglm2/tokenization.py | 38 +++- 6 files changed, 199 insertions(+), 63 deletions(-) create mode 100644 examples/pytorch/chatglm6b/run_train_chatglm2_ptuning_adv_v2.sh diff --git a/examples/pytorch/chatglm6b/finetune.py b/examples/pytorch/chatglm6b/finetune.py index 6d04924c..f994d9a5 100644 --- a/examples/pytorch/chatglm6b/finetune.py +++ b/examples/pytorch/chatglm6b/finetune.py @@ -8,7 +8,6 @@ from text_generation_metric import TextGenerationMetric from transformers import DataCollatorForSeq2Seq from modelscope import snapshot_download -from modelscope.metainfo import Models from modelscope.models import Model from modelscope.msdatasets import MsDataset from modelscope.swift import Swift @@ -187,10 +186,8 @@ model_config['model'] = ConfigDict({ 'type': config['model']['type'], }) -if config['model']['type'] == 'chatglm6b': - model_config['model']['pre_seq_len'] = args.pre_seq_len - model_config['model']['prefix_projection'] = args.prefix_projection - +model_config['model']['pre_seq_len'] = args.pre_seq_len +model_config['model']['prefix_projection'] = args.prefix_projection tokenizer = ChatGLMTokenizer.from_pretrained(model_dir, trust_remote_code=True) device_map_kwargs = {} @@ -334,13 +331,10 @@ def preprocess_function_train(examples): pad_len = max_seq_length - len(input_ids) input_ids = input_ids + [tokenizer.pad_token_id] * pad_len - if config['model']['type'] == 'chatglm6b': - labels = labels + [tokenizer.pad_token_id] * pad_len - if args.ignore_pad_token_for_loss: - labels = [(lb if lb != tokenizer.pad_token_id else -100) - for lb in labels] - else: - labels = labels + [-100] * pad_len + labels = labels + [tokenizer.pad_token_id] * pad_len + if args.ignore_pad_token_for_loss: + labels = [(lb if lb != tokenizer.pad_token_id else -100) + for lb in labels] model_inputs['input_ids'].append(input_ids) model_inputs['labels'].append(labels) @@ -372,8 +366,7 @@ data_collator = DataCollatorForSeq2Seq( padding=False) model.gradient_checkpointing_enable() -if config['model']['type'] == 'chatglm6b': - model.enable_input_require_grads() +model.enable_input_require_grads() # import torch # model = torch.nn.DataParallel(model).cuda() diff --git a/examples/pytorch/chatglm6b/run_train_chatglm2_ptuning_adv_v2.sh b/examples/pytorch/chatglm6b/run_train_chatglm2_ptuning_adv_v2.sh new file mode 100644 index 00000000..582c464c --- /dev/null +++ b/examples/pytorch/chatglm6b/run_train_chatglm2_ptuning_adv_v2.sh @@ -0,0 +1,26 @@ +PRE_SEQ_LEN=128 +LR=2e-2 + +PYTHONPATH=. 
python examples/pytorch/chatglm6b/finetune.py \ + --train_dataset_name AdvertiseGen/train.json \ + --val_dataset_name AdvertiseGen/dev.json \ + --prompt_column content \ + --response_column summary \ + --model "ZhipuAI/chatglm2-6b" \ + --max_source_length 64 \ + --max_target_length 128 \ + --per_device_train_batch_size 16 \ + --per_device_eval_batch_size 1 \ + --train.optimizer.options.cumulative_iters 1 \ + --max_epochs 1 \ + --save_strategy 'by_step' \ + --save_interval 1000 \ + --lr $LR \ + --eval_strategy "by_step" \ + --eval_interval 1000 \ + --lr_strategy 'by_step' \ + --task 'chat' \ + --model.type 'chatglm2-6b' \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 \ + --work_dir ptuning_adv_target \ diff --git a/modelscope/models/nlp/chatglm2/configuration.py b/modelscope/models/nlp/chatglm2/configuration.py index b10db870..1583e886 100644 --- a/modelscope/models/nlp/chatglm2/configuration.py +++ b/modelscope/models/nlp/chatglm2/configuration.py @@ -1,12 +1,13 @@ """ ChatGLM model configuration """ -from transformers.configuration_utils import PretrainedConfig +from transformers import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) class ChatGLM2Config(PretrainedConfig): + model_type = 'chatglm' def __init__(self, num_layers=28, @@ -24,7 +25,6 @@ class ChatGLM2Config(PretrainedConfig): post_layer_norm=True, add_bias_linear=False, add_qkv_bias=False, - interleaved_qkv=False, bias_dropout_fusion=True, multi_query_attention=False, multi_query_group_num=1, @@ -32,8 +32,11 @@ class ChatGLM2Config(PretrainedConfig): attention_softmax_in_fp32=True, fp32_residual_connection=False, quantization_bit=0, + pre_seq_len=None, + prefix_projection=False, **kwargs): self.num_layers = num_layers + self.vocab_size = padded_vocab_size self.padded_vocab_size = padded_vocab_size self.hidden_size = hidden_size self.ffn_hidden_size = ffn_hidden_size @@ -55,4 +58,6 @@ class ChatGLM2Config(PretrainedConfig): self.attention_softmax_in_fp32 = attention_softmax_in_fp32 self.fp32_residual_connection = fp32_residual_connection self.quantization_bit = quantization_bit + self.pre_seq_len = pre_seq_len + self.prefix_projection = prefix_projection super().__init__(**kwargs) diff --git a/modelscope/models/nlp/chatglm2/quantization.py b/modelscope/models/nlp/chatglm2/quantization.py index 612c9e4b..116bc0ea 100644 --- a/modelscope/models/nlp/chatglm2/quantization.py +++ b/modelscope/models/nlp/chatglm2/quantization.py @@ -1,11 +1,9 @@ import base64 import bz2 import ctypes -from functools import partial from typing import List import torch -from torch.nn import Linear from torch.nn.parameter import Parameter from transformers.utils import logging diff --git a/modelscope/models/nlp/chatglm2/text_generation.py b/modelscope/models/nlp/chatglm2/text_generation.py index 7772f34e..aed855cb 100644 --- a/modelscope/models/nlp/chatglm2/text_generation.py +++ b/modelscope/models/nlp/chatglm2/text_generation.py @@ -2,10 +2,9 @@ import copy import math -import re import sys import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple import torch import torch.nn.functional as F @@ -22,10 +21,11 @@ from transformers.modeling_outputs import (BaseModelOutputWithPast, from transformers.modeling_utils import PreTrainedModel from transformers.utils import logging +from modelscope import Model, TorchModel from modelscope.metainfo import Models -from modelscope.models import MODELS, Model, TorchModel 
from modelscope.outputs import OutputKeys from modelscope.utils.constant import Tasks +from ... import MODELS from .configuration import ChatGLM2Config # flags required to enable jit fusion kernels @@ -61,17 +61,50 @@ class InvalidScoreLogitsProcessor(LogitsProcessor): return scores +class PrefixEncoder(torch.nn.Module): + """ + The torch.nn model to encode the prefix + Input shape: (batch-size, prefix-length) + Output shape: (batch-size, prefix-length, 2*layers*hidden) + """ + + def __init__(self, config: ChatGLM2Config): + super().__init__() + self.prefix_projection = config.prefix_projection + if self.prefix_projection: + # Use a two-layer MLP to encode the prefix + kv_size = config.num_layers * config.kv_channels * config.multi_query_group_num * 2 + self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size) + self.trans = torch.nn.Sequential( + torch.nn.Linear(kv_size, config.hidden_size), torch.nn.Tanh(), + torch.nn.Linear(config.hidden_size, kv_size)) + else: + self.embedding = torch.nn.Embedding( + config.pre_seq_len, config.num_layers * config.kv_channels + * config.multi_query_group_num * 2) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.trans(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values + + def split_tensor_along_last_dim( tensor: torch.Tensor, num_partitions: int, contiguous_split_chunks: bool = False, ) -> List[torch.Tensor]: """Split a tensor along its last dimension. + Arguments: tensor: input tensor. num_partitions: number of partitions to split the tensor contiguous_split_chunks: If True, make each chunk contiguous in memory. + Returns: A list of Tensors """ @@ -92,7 +125,7 @@ class RotaryEmbedding(nn.Module): def __init__(self, dim, original_impl=False, device=None, dtype=None): super().__init__() inv_freq = 1.0 / (10000**( - torch.arange(0, dim, 2, device=device, dtype=dtype) / dim)) + torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim)) self.register_buffer('inv_freq', inv_freq) self.dim = dim self.original_impl = original_impl @@ -104,6 +137,7 @@ class RotaryEmbedding(nn.Module): device: torch.device, base: int = 10000): """Enhanced Transformer with Rotary Position Embedding. + Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/ transformers/rope/__init__.py. MIT License: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license. @@ -325,6 +359,7 @@ class CoreAttention(torch.nn.Module): class SelfAttention(torch.nn.Module): """Parallel self-attention layer abstract class. + Self-attention layer takes input with size [s, b, h] and returns output of the same size. 
""" @@ -421,9 +456,9 @@ class SelfAttention(torch.nn.Module): self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)) else: - new_tensor_shape = mixed_x_layer.size()[:-1] + ( - self.num_attention_heads_per_partition, # noqa - 3 * self.hidden_size_per_attention_head) # noqa + new_tensor_shape = mixed_x_layer.size()[:-1] + \ + (self.num_attention_heads_per_partition, # noqa + 3 * self.hidden_size_per_attention_head) # noqa mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] @@ -436,11 +471,11 @@ class SelfAttention(torch.nn.Module): key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) # adjust key and value for inference + if kv_cache is not None: + cache_k, cache_v = kv_cache + key_layer = torch.cat((cache_k, key_layer), dim=0) + value_layer = torch.cat((cache_v, value_layer), dim=0) if use_cache: - if kv_cache is not None: - cache_k, cache_v = kv_cache - key_layer = torch.cat((cache_k, key_layer), dim=0) - value_layer = torch.cat((cache_v, value_layer), dim=0) kv_cache = (key_layer, value_layer) else: kv_cache = None @@ -487,6 +522,7 @@ def _config_to_kwargs(args): class MLP(torch.nn.Module): """MLP. + MLP will take the input with h hidden state, project it to 4*h hidden dimension, perform nonlinear transformation, and project the state back into h hidden dimension. @@ -530,6 +566,7 @@ class MLP(torch.nn.Module): class GLMBlock(torch.nn.Module): """A single transformer layer. + Transformer layer takes input with size [s, b, h] and returns an output of the same size. """ @@ -642,6 +679,8 @@ class GLMTransformer(torch.nn.Module): device=device, dtype=config.torch_dtype) + self.gradient_checkpointing = False + def _get_layer(self, layer_number): return self.layers[layer_number] @@ -657,6 +696,13 @@ class GLMTransformer(torch.nn.Module): if not kv_caches: kv_caches = [None for _ in range(self.num_layers)] presents = () if use_cache else None + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + all_self_attentions = None all_hidden_states = () if output_hidden_states else None for index in range(self.num_layers): @@ -664,13 +710,18 @@ class GLMTransformer(torch.nn.Module): all_hidden_states = all_hidden_states + (hidden_states, ) layer = self._get_layer(index) - - hidden_states, kv_cache = layer( - hidden_states, - attention_mask, - rotary_pos_emb, - kv_cache=kv_caches[index], - use_cache=use_cache) + if self.gradient_checkpointing and self.training: + layer_ret = torch.utils.checkpoint.checkpoint( + layer, hidden_states, attention_mask, rotary_pos_emb, + kv_caches[index], use_cache) + else: + layer_ret = layer( + hidden_states, + attention_mask, + rotary_pos_emb, + kv_cache=kv_caches[index], + use_cache=use_cache) + hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache, ) @@ -724,7 +775,7 @@ class ChatGLMPreTrainedModel(TorchModel, PreTrainedModel): dim=-1) # noqa if padding_mask is not None: full_attention_mask = full_attention_mask * padding_mask.unsqueeze( - 1) # noqa + 1) if not past_length and padding_mask is not None: full_attention_mask -= padding_mask.unsqueeze(-1) - 1 full_attention_mask = (full_attention_mask < 0.5).bool() @@ -739,7 +790,7 @@ class ChatGLMPreTrainedModel(TorchModel, PreTrainedModel): return position_ids def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, ChatGLMModel): + if isinstance(module, GLMTransformer): module.gradient_checkpointing = value @classmethod @@ -801,6 +852,9 @@ class ChatGLMModel(ChatGLMPreTrainedModel): if device is not None: init_kwargs['device'] = device self.embedding = init_method(Embedding, config, **init_kwargs) + self.num_layers = config.num_layers + self.multi_query_group_num = config.multi_query_group_num + self.kv_channels = config.kv_channels # Rotary positional embeddings self.seq_length = config.seq_length @@ -821,7 +875,30 @@ class ChatGLMModel(ChatGLMPreTrainedModel): bias=False, dtype=config.torch_dtype, **init_kwargs) - self.gradient_checkpointing = False + self.pre_seq_len = config.pre_seq_len + self.prefix_projection = config.prefix_projection + if self.pre_seq_len is not None: + for param in self.parameters(): + param.requires_grad = False + self.prefix_tokens = torch.arange(self.pre_seq_len).long() + self.prefix_encoder = PrefixEncoder(config) + self.dropout = torch.nn.Dropout(0.1) + + def get_input_embeddings(self): + return self.embedding.word_embeddings + + def get_prompt(self, batch_size, device, dtype=torch.half): + prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, + -1).to(device) + past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) + past_key_values = past_key_values.view(batch_size, self.pre_seq_len, + self.num_layers * 2, + self.multi_query_group_num, + self.kv_channels) + # seq_len, b, nh, hidden_size + past_key_values = self.dropout(past_key_values) + past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) + return past_key_values def forward( self, @@ -847,6 +924,21 @@ class ChatGLMModel(ChatGLMPreTrainedModel): if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) + if self.pre_seq_len is not None: + if past_key_values is None: + past_key_values = self.get_prompt( + batch_size=batch_size, + device=input_ids.device, + dtype=inputs_embeds.dtype) + if attention_mask is not None: + attention_mask = torch.cat( + [ + attention_mask.new_ones( # noqa + (batch_size, self.pre_seq_len)), + attention_mask # noqa + ], # noqa + dim=-1) # noqa + if full_attention_mask is 
None: if (attention_mask is not None and not attention_mask.all()) or (past_key_values @@ -923,7 +1015,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): attention_mask, # noqa attention_mask.new_ones( (attention_mask.shape[0], 1)) # noqa - ], + ], # noqa dim=-1) # noqa # update position ids @@ -1003,7 +1095,6 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=-100) - shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) @@ -1032,6 +1123,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. + Output shares the same memory storage as `past`. """ return tuple(( @@ -1048,11 +1140,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): tokenizer, query: str, history: List[Tuple[str, str]] = None): - prompt = '' - for i, (old_query, response) in enumerate(history): - prompt += '[Round {}]\n\n问:{}\n\n答:{}\n\n'.format( - i + 1, old_query, response) - prompt += '[Round {}]\n\n问:{}\n\n答:'.format(len(history) + 1, query) + prompt = tokenizer.build_prompt(query, history=history) inputs = tokenizer([prompt], return_tensors='pt') inputs = inputs.to(self.device) return inputs @@ -1080,7 +1168,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): tokenizer, query: str, history: List[Tuple[str, str]] = None, - max_length: int = 2048, + max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, @@ -1115,7 +1203,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): query: str, history: List[Tuple[str, str]] = None, past_key_values=None, - max_length: int = 2048, + max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8, @@ -1142,6 +1230,8 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): tokenizer, query, history=history) if past_key_values is not None: past_length = past_key_values[0][0].shape[0] + if self.transformer.pre_seq_len is not None: + past_length -= self.transformer.pre_seq_len inputs.position_ids += past_length attention_mask = inputs.attention_mask attention_mask = torch.cat( @@ -1157,12 +1247,13 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): outputs, past_key_values = outputs outputs = outputs.tolist()[0][len(inputs['input_ids'][0]):] response = tokenizer.decode(outputs) - response = self.process_response(response) - new_history = history + [(query, response)] - if return_past_key_values: - yield response, new_history, past_key_values - else: - yield response, new_history + if response and response[-1] != '�': + response = self.process_response(response) + new_history = history + [(query, response)] + if return_past_key_values: + yield response, new_history, past_key_values + else: + yield response, new_history @torch.no_grad() def stream_generate( @@ -1295,7 +1386,8 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): self.transformer.encoder, bits, empty_init=empty_init, - device=device) + device=device, + **kwargs) return self def chat(self, input: Dict, tokenizer) -> Dict: diff --git a/modelscope/models/nlp/chatglm2/tokenization.py b/modelscope/models/nlp/chatglm2/tokenization.py index 5036d881..7014dc9c 100644 
--- a/modelscope/models/nlp/chatglm2/tokenization.py +++ b/modelscope/models/nlp/chatglm2/tokenization.py @@ -1,13 +1,10 @@ -"""Tokenization classes for ChatGLM.""" import os from typing import Dict, List, Optional, Union from sentencepiece import SentencePieceProcessor -from transformers.tokenization_utils import PreTrainedTokenizer +from transformers import PreTrainedTokenizer from transformers.tokenization_utils_base import BatchEncoding, EncodedInput -from transformers.utils import PaddingStrategy, logging - -logger = logging.get_logger(__name__) +from transformers.utils import PaddingStrategy class SPTokenizer: @@ -21,7 +18,7 @@ class SPTokenizer: self.n_words: int = self.sp_model.vocab_size() self.bos_id: int = self.sp_model.bos_id() self.eos_id: int = self.sp_model.eos_id() - self.pad_id: int = self.sp_model.eos_id() + self.pad_id: int = self.sp_model.unk_id() assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() special_tokens = ['[MASK]', '[gMASK]', '[sMASK]', 'sop', 'eop'] @@ -62,7 +59,9 @@ class SPTokenizer: def convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" - if index in self.index_special_tokens: + if index in self.index_special_tokens or index in [ + self.eos_id, self.bos_id, self.pad_id + ] or index < 0: return '' return self.sp_model.IdToPiece(index) @@ -76,6 +75,7 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): super().__init__(padding_side=padding_side, **kwargs) self.name = 'GLMTokenizer' + self.vocab_file = vocab_file self.tokenizer = SPTokenizer(vocab_file) self.special_tokens = { '': self.tokenizer.bos_id, @@ -91,12 +91,16 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): @property def pad_token(self) -> str: - return '' + return '' @property def pad_token_id(self): return self.get_command('') + @property + def eos_token(self) -> str: + return '' + @property def eos_token_id(self): return self.get_command('') @@ -131,11 +135,13 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): def save_vocabulary(self, save_directory, filename_prefix=None): """ Save the vocabulary and special tokens file to a directory. + Args: save_directory (`str`): The directory in which to save the vocabulary. filename_prefix (`str`, *optional*): An optional prefix to add to the named of the saved files. + Returns: `Tuple(str)`: Paths to the files saved. """ @@ -157,6 +163,16 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): prefix_tokens = [self.get_command('[gMASK]'), self.get_command('sop')] return prefix_tokens + def build_prompt(self, query, history=None): + if history is None: + history = [] + prompt = '' + for i, (old_query, response) in enumerate(history): + prompt += '[Round {}]\n\n问:{}\n\n答:{}\n\n'.format( + i + 1, old_query, response) + prompt += '[Round {}]\n\n问:{}\n\n答:'.format(len(history) + 1, query) + return prompt + def build_inputs_with_special_tokens( self, token_ids_0: List[int], @@ -164,13 +180,16 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: + - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` + Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. + Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" @@ -192,16 +211,19 @@ class ChatGLM2Tokenizer(PreTrainedTokenizer): ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: + - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. From 0dc57f8decb40adc12c8e906f80fe5517f6d4624 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 14:56:14 +0800 Subject: [PATCH 37/87] custom diffusion --- .../custom/run_train_custom.sh | 2 +- .../custom_diffusion_trainer.py | 110 ++++++++++++------ 2 files changed, 75 insertions(+), 37 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh index ca3cf22e..3853bde3 100644 --- a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh +++ b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh @@ -6,7 +6,7 @@ PYTHONPATH=. torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_d --work_dir './tmp/custom_diffusion' \ --class_data_dir './tmp/class_data' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune-dog' \ - --max_epochs 2 \ + --max_epochs 1 \ --modifier_token "" \ --num_class_images=200 \ --save_ckpt_strategy 'by_epoch' \ diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 38184d7d..f4b7aec6 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -29,9 +29,10 @@ from modelscope.trainers.hooks.checkpoint.checkpoint_hook import CheckpointHook from modelscope.trainers.hooks.checkpoint.checkpoint_processor import \ CheckpointProcessor from modelscope.trainers.optimizer.builder import build_optimizer +from modelscope.utils.data_utils import to_device from modelscope.trainers.trainer import EpochBasedTrainer from modelscope.utils.config import ConfigDict -from modelscope.utils.constant import ModeKeys +from modelscope.utils.constant import ModeKeys, TrainerStages from modelscope.utils.file_utils import func_receive_dict_inputs from modelscope.utils.torch_utils import is_dist @@ -56,12 +57,13 @@ class CustomCheckpointProcessor(CheckpointProcessor): """Save the state dict for custom diffusion model. 
""" trainer.model.unet = trainer.model.unet.to(torch.float32) - unet.save_attn_procs(output_dir) - learned_embeds = text_encoder.get_input_embeddings().weight - for x, y in zip(self.modifier_token_id, self.modifier_token): + trainer.model.unet.save_attn_procs(output_dir) + + learned_embeds = trainer.model.text_encoder.get_input_embeddings().weight + for x, y in zip([self.modifier_token_id], self.modifier_token): learned_embeds_dict = {} learned_embeds_dict[y] = learned_embeds[x] - torch.save(learned_embeds_dict, f'{output_dir}/{y}.bin') + torch.save(learned_embeds_dict, f"{output_dir}/{y}.bin") class CustomDiffusionDataset(Dataset): @@ -337,6 +339,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): sample_batch_size: train_batch_size: center_crop: + """ self.with_prior_preservation = kwargs.pop('with_prior_preservation', True) @@ -433,7 +436,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): filter(lambda hook: isinstance(hook, CheckpointHook), self.hooks))[0] ckpt_hook.set_processor( - CustomCheckpointProcessor(self.modifier_token_id, modifier_token)) + CustomCheckpointProcessor(self.modifier_token, self.modifier_token_id)) # Add new Custom Diffusion weights to the attention layers attention_class = CustomDiffusionAttnProcessor @@ -558,9 +561,9 @@ class CustomDiffusionTrainer(EpochBasedTrainer): mask = mask.to(memory_format=torch.contiguous_format).float() batch = { - 'input_ids': input_ids.to(self.device), - 'pixel_values': pixel_values.to(self.device), - 'mask': mask.unsqueeze(1).to(self.device) + 'input_ids': input_ids, + 'pixel_values': pixel_values, + 'mask': mask.unsqueeze(1) } return batch @@ -625,6 +628,55 @@ class CustomDiffusionTrainer(EpochBasedTrainer): f'please check if your torch with version: {torch.__version__} matches the config.' ) raise e + + def train_loop(self, data_loader): + """ Training loop used by `EpochBasedTrainer.train()` + """ + self.invoke_hook(TrainerStages.before_run) + self.model.train() + for _ in range(self._epoch, self._max_epochs): + self.invoke_hook(TrainerStages.before_train_epoch) + for i, data_batch in enumerate(data_loader): + if i < self.inner_iter: + # inner_iter may be read out from the checkpoint file, so skip the trained iters in the epoch. + continue + data_batch = to_device(data_batch, self.device) + self.data_batch = data_batch + self._inner_iter = i + self.invoke_hook(TrainerStages.before_train_iter) + self.train_step(self.model, data_batch) + self.invoke_hook(TrainerStages.after_train_iter) + # Zero out the gradients for all token embeddings except the newly added + # embeddings for the concept to optimize the concept embeddings. + if self.modifier_token is not None: + grads_text_encoder = self.model.text_encoder.get_input_embeddings( + ).weight.grad + # Get the index for tokens that we want to zero the grads. + index_grads_to_zero = torch.arange(len( + self.model.tokenizer)) != self.modifier_token_id[0] + for i in range(len(self.modifier_token_id[1:])): + index_grads_to_zero = index_grads_to_zero & ( + torch.arange(len(self.model.tokenizer)) != + self.modifier_token_id[i]) + grads_text_encoder.data[ + index_grads_to_zero, :] = grads_text_encoder.data[ + index_grads_to_zero, :].fill_(0) + # Value changed after the hooks are invoked, do not move them above the invoke_hook code. 
+ del self.data_batch + self._iter += 1 + self._mode = ModeKeys.TRAIN + + if i + 1 >= self.iters_per_epoch: + break + + self.invoke_hook(TrainerStages.after_train_epoch) + # Value changed after the hooks are invoked, do not move them above the invoke_hook code. + self._inner_iter = 0 + self._epoch += 1 + if self._stop_training: + break + + self.invoke_hook(TrainerStages.after_run) def train_step(self, model, inputs): """ Perform a training step on a batch of inputs. @@ -642,13 +694,15 @@ class CustomDiffusionTrainer(EpochBasedTrainer): Return: `torch.Tensor`: The tensor with training loss on this batch. """ - model.train() + self.model.unet.train() + if self.modifier_token is not None: + self.model.text_encoder.train() self._mode = ModeKeys.TRAIN batch = next(self.iter_train_dataloader) # Convert images to latent space latents = self.model.vae.encode(batch['pixel_values'].to( - dtype=torch.float32)).latent_dist.sample() + dtype=torch.float32).to(self.device)).latent_dist.sample() latents = latents * self.model.vae.config.scaling_factor # Sample noise that we'll add to the latents @@ -657,36 +711,36 @@ class CustomDiffusionTrainer(EpochBasedTrainer): # Sample a random timestep for each image timesteps = torch.randint( 0, - noise_scheduler.config.num_train_timesteps, (bsz, ), + self.model.noise_scheduler.config.num_train_timesteps, (bsz, ), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) + noisy_latents = self.model.noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning - encoder_hidden_states = self.model.text_encoder(batch['input_ids'])[0] + encoder_hidden_states = self.model.text_encoder(batch['input_ids'].to(self.device))[0] # Predict the noise residual model_pred = self.model.unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == 'epsilon': + if self.model.noise_scheduler.config.prediction_type == 'epsilon': target = noise - elif noise_scheduler.config.prediction_type == 'v_prediction': - target = noise_scheduler.get_velocity(latents, noise, timesteps) + elif self.model.noise_scheduler.config.prediction_type == 'v_prediction': + target = self.model.noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError( - f'Unknown prediction type {noise_scheduler.config.prediction_type}' + f'Unknown prediction type {self.model.noise_scheduler.config.prediction_type}' ) if self.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) - mask = torch.chunk(batch['mask'], 2, dim=0)[0] + mask = torch.chunk(batch['mask'].to(self.device), 2, dim=0)[0] # Compute instance loss loss = F.mse_loss( model_pred.float(), target.float(), reduction='none') @@ -701,7 +755,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): # Add the prior loss to the instance loss. 
loss = loss + self.prior_loss_weight * prior_loss else: - mask = batch['mask'] + mask = batch['mask'].to(self.device) loss = F.mse_loss( model_pred.float(), target.float(), reduction='none') loss = ((loss * mask).sum([1, 2, 3]) / mask.sum([1, 2, 3])).mean() @@ -709,22 +763,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): train_outputs = {} train_outputs[OutputKeys.LOSS] = loss - # Zero out the gradients for all token embeddings except the newly added - # embeddings for the concept, as we only want to optimize the concept embeddings - if self.modifier_token is not None: - grads_text_encoder = self.model.text_encoder.get_input_embeddings( - ).weight.grad - # Get the index for tokens that we want to zero the grads for - index_grads_to_zero = torch.arange(len( - self.model.tokenizer)) != self.modifier_token_id[0] - for i in range(len(modifier_token_id[1:])): - index_grads_to_zero = index_grads_to_zero & ( - torch.arange(len(self.model.tokenizer)) != - self.modifier_token_id[i]) - grads_text_encoder.data[ - index_grads_to_zero, :] = grads_text_encoder.data[ - index_grads_to_zero, :].fill_(0) - # add model output info to log if 'log_vars' not in train_outputs: default_keys_pattern = ['loss'] From d6368b26178e59e8414ab19c0c775130f0925e0e Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 15:02:43 +0800 Subject: [PATCH 38/87] custom diffusion --- .../finetune_stable_diffusion_custom.py | 2 +- .../custom/run_train_custom.sh | 2 +- .../custom_diffusion_trainer.py | 25 +++++++++++-------- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py index 00e3e97a..1c79fe53 100644 --- a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -156,7 +156,7 @@ pipe = pipeline( modifier_token='', model_revision=args.model_revision) -output = pipe({'text': args.prompt}) +output = pipe({'text': args.instance_prompt}) # visualize the result on ipynb and save it output cv2.imwrite('./custom_result.png', output['output_imgs'][0]) diff --git a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh index 3853bde3..fe8de203 100644 --- a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh +++ b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh @@ -6,7 +6,7 @@ PYTHONPATH=. 
torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_d --work_dir './tmp/custom_diffusion' \ --class_data_dir './tmp/class_data' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune-dog' \ - --max_epochs 1 \ + --max_epochs 250 \ --modifier_token "" \ --num_class_images=200 \ --save_ckpt_strategy 'by_epoch' \ diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index f4b7aec6..63bdf338 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -29,10 +29,10 @@ from modelscope.trainers.hooks.checkpoint.checkpoint_hook import CheckpointHook from modelscope.trainers.hooks.checkpoint.checkpoint_processor import \ CheckpointProcessor from modelscope.trainers.optimizer.builder import build_optimizer -from modelscope.utils.data_utils import to_device from modelscope.trainers.trainer import EpochBasedTrainer from modelscope.utils.config import ConfigDict from modelscope.utils.constant import ModeKeys, TrainerStages +from modelscope.utils.data_utils import to_device from modelscope.utils.file_utils import func_receive_dict_inputs from modelscope.utils.torch_utils import is_dist @@ -59,11 +59,12 @@ class CustomCheckpointProcessor(CheckpointProcessor): trainer.model.unet = trainer.model.unet.to(torch.float32) trainer.model.unet.save_attn_procs(output_dir) - learned_embeds = trainer.model.text_encoder.get_input_embeddings().weight + learned_embeds = trainer.model.text_encoder.get_input_embeddings( + ).weight for x, y in zip([self.modifier_token_id], self.modifier_token): learned_embeds_dict = {} learned_embeds_dict[y] = learned_embeds[x] - torch.save(learned_embeds_dict, f"{output_dir}/{y}.bin") + torch.save(learned_embeds_dict, f'{output_dir}/{y}.bin') class CustomDiffusionDataset(Dataset): @@ -436,7 +437,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): filter(lambda hook: isinstance(hook, CheckpointHook), self.hooks))[0] ckpt_hook.set_processor( - CustomCheckpointProcessor(self.modifier_token, self.modifier_token_id)) + CustomCheckpointProcessor(self.modifier_token, + self.modifier_token_id)) # Add new Custom Diffusion weights to the attention layers attention_class = CustomDiffusionAttnProcessor @@ -628,7 +630,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): f'please check if your torch with version: {torch.__version__} matches the config.' ) raise e - + def train_loop(self, data_loader): """ Training loop used by `EpochBasedTrainer.train()` """ @@ -652,8 +654,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): grads_text_encoder = self.model.text_encoder.get_input_embeddings( ).weight.grad # Get the index for tokens that we want to zero the grads. 
- index_grads_to_zero = torch.arange(len( - self.model.tokenizer)) != self.modifier_token_id[0] + index_grads_to_zero = torch.arange( + len(self.model.tokenizer)) != self.modifier_token_id[0] for i in range(len(self.modifier_token_id[1:])): index_grads_to_zero = index_grads_to_zero & ( torch.arange(len(self.model.tokenizer)) != @@ -717,10 +719,12 @@ class CustomDiffusionTrainer(EpochBasedTrainer): # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) - noisy_latents = self.model.noise_scheduler.add_noise(latents, noise, timesteps) + noisy_latents = self.model.noise_scheduler.add_noise( + latents, noise, timesteps) # Get the text embedding for conditioning - encoder_hidden_states = self.model.text_encoder(batch['input_ids'].to(self.device))[0] + encoder_hidden_states = self.model.text_encoder(batch['input_ids'].to( + self.device))[0] # Predict the noise residual model_pred = self.model.unet(noisy_latents, timesteps, @@ -730,7 +734,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): if self.model.noise_scheduler.config.prediction_type == 'epsilon': target = noise elif self.model.noise_scheduler.config.prediction_type == 'v_prediction': - target = self.model.noise_scheduler.get_velocity(latents, noise, timesteps) + target = self.model.noise_scheduler.get_velocity( + latents, noise, timesteps) else: raise ValueError( f'Unknown prediction type {self.model.noise_scheduler.config.prediction_type}' From 07296a837a3174cf30148c55d2b4e0e06c91c960 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 15:19:09 +0800 Subject: [PATCH 39/87] fix bugs --- .../custom_diffusion_trainer.py | 97 +++---------------- 1 file changed, 14 insertions(+), 83 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 63bdf338..ad51e23c 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -68,11 +68,6 @@ class CustomCheckpointProcessor(CheckpointProcessor): class CustomDiffusionDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. - """ - def __init__( self, concepts_list, @@ -85,6 +80,20 @@ class CustomDiffusionDataset(Dataset): hflip=False, aug=True, ): + """A dataset to prepare the instance and class images with the prompts for fine-tuning the model. + It pre-processes the images and the tokenizes prompts. + + Args: + concepts_list: contain multiple concepts, instance_prompt, class_prompt, etc. + tokenizer: pretrained tokenizer. + size: the size of images. + mask_size: the mask size of images. + center_crop: execute center crop or not. + with_prior_preservation: flag to add prior preservation loss. + hflip: whether to flip horizontally. + aug: perform data augmentation. + + """ self.size = size self.mask_size = mask_size self.center_crop = center_crop @@ -219,84 +228,6 @@ class CustomDiffusionDataset(Dataset): return example -class ClassDataset(Dataset): - - def __init__( - self, - tokenizer, - class_data_root=None, - class_prompt=None, - class_num_images=None, - size=512, - center_crop=False, - ): - """A dataset to prepare class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- - Args: - tokenizer: The tokenizer to use for tokenization. - class_data_root: The saved class data path. - class_prompt: The prompt to use for class images. - class_num_images: The number of class images to use. - size: The size to resize the images. - center_crop: Whether to do center crop or random crop. - - """ - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - if class_num_images is not None: - self.num_class_images = min( - len(self.class_images_path), class_num_images) - else: - self.num_class_images = len(self.class_images_path) - self.class_prompt = class_prompt - else: - raise ValueError( - f"Class {self.class_data_root} class data root doesn't exists." - ) - - self.image_transforms = transforms.Compose([ - transforms.Resize( - size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) - if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ]) - - def __len__(self): - return self.num_class_images - - def __getitem__(self, index): - example = {} - - if self.class_data_root: - class_image = Image.open( - self.class_images_path[index % self.num_class_images]) - class_image = exif_transpose(class_image) - - if not class_image.mode == 'RGB': - class_image = class_image.convert('RGB') - example['pixel_values'] = self.image_transforms(class_image) - - class_text_inputs = self.tokenizer( - self.class_prompt, - max_length=self.tokenizer.model_max_length, - truncation=True, - padding='max_length', - return_tensors='pt') - input_ids = torch.squeeze(class_text_inputs.input_ids) - example['input_ids'] = input_ids - - return example - - class PromptDataset(Dataset): def __init__(self, prompt, num_samples): From 0e99c27d54f6da400f1452b11cc9f97dabc711cc Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 15:27:08 +0800 Subject: [PATCH 40/87] custom diffusion --- .../custom/finetune_stable_diffusion_custom.py | 2 +- .../custom_diffusion/custom_diffusion_trainer.py | 13 +++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py index 1c79fe53..debe6f49 100644 --- a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -85,7 +85,7 @@ class StableDiffusionCustomArguments(TrainingArgs): }) instance_data_name: str = field( - default='buptwq/custom-stable-diffusion-cat', + default='buptwq/lora-stable-diffusion-finetune-dog', metadata={ 'help': 'The instance data local dir or online ID.', }) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index ad51e23c..cc5e76e2 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -265,12 +265,13 @@ class CustomDiffusionTrainer(EpochBasedTrainer): class_data_dir: the path to the class data directory. num_class_images: the number of class images to generate. 
prior_loss_weight: the weight of the prior loss. - modifier_token: - initializer_token: - freeze_model: - sample_batch_size: - train_batch_size: - center_crop: + modifier_token: A token to use as a modifier for the concept. + initializer_token: A token to use as initializer word. + freeze_model: crossattn to enable fine-tuning of all params in the cross attention. + sample_batch_size: Batch size (per device) for sampling images. + train_batch_size: Batch size (per device) for the training dataloader. + center_crop: execute center crop or not. + instance_data_name: The instance data local dir or online ID. """ self.with_prior_preservation = kwargs.pop('with_prior_preservation', From bcf443c672ad580d36ffe822a0ba72e45b19b734 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 15:32:48 +0800 Subject: [PATCH 41/87] custom diffusion --- .../custom/finetune_stable_diffusion_custom.py | 3 +-- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py index debe6f49..35eb3792 100644 --- a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -36,8 +36,7 @@ class StableDiffusionCustomArguments(TrainingArgs): num_class_images: int = field( default=200, metadata={ - 'help': - 'Minimal class images for prior preservation loss. If there are not enough images already present in class_data_dir, additional images will be sampled with class_prompt.', + 'help': 'Minimal class images for prior preservation loss.', }) train_batch_size: int = field( diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index cc5e76e2..82f3a494 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -68,6 +68,7 @@ class CustomCheckpointProcessor(CheckpointProcessor): class CustomDiffusionDataset(Dataset): + def __init__( self, concepts_list, From 73fe5d5d653082516127eb07b3ebd5b40ca719bd Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 15:52:36 +0800 Subject: [PATCH 42/87] precommit --- .../custom_diffusion/custom_diffusion_trainer.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 82f3a494..4435645e 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -539,10 +539,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() - image_filename = ( - class_images_dir / - f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" - ) + save_index = example['index'][i] + cur_class_images + image_filename = class_images_dir / f'{save_index}-{hash_image}.jpg' image.save(image_filename) del pipeline @@ -593,9 +591,10 @@ class CustomDiffusionTrainer(EpochBasedTrainer): index_grads_to_zero = index_grads_to_zero & ( 
torch.arange(len(self.model.tokenizer)) != self.modifier_token_id[i]) + grads_data = grads_text_encoder.data[ + index_grads_to_zero, :].fill_(0) grads_text_encoder.data[ - index_grads_to_zero, :] = grads_text_encoder.data[ - index_grads_to_zero, :].fill_(0) + index_grads_to_zero, :] = grads_data # Value changed after the hooks are invoked, do not move them above the invoke_hook code. del self.data_batch self._iter += 1 From d18cf31dfac04431f9aa400d4ad996779c9af75c Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 16:00:18 +0800 Subject: [PATCH 43/87] custom diffusion --- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 4435645e..51289dd6 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -5,7 +5,6 @@ import os import random import shutil import warnings -from collections.abc import Mapping from pathlib import Path from typing import Union @@ -23,7 +22,7 @@ from tqdm.auto import tqdm from modelscope.metainfo import Trainers from modelscope.msdatasets import MsDataset -from modelscope.outputs import ModelOutputBase, OutputKeys +from modelscope.outputs import OutputKeys from modelscope.trainers.builder import TRAINERS from modelscope.trainers.hooks.checkpoint.checkpoint_hook import CheckpointHook from modelscope.trainers.hooks.checkpoint.checkpoint_processor import \ @@ -33,7 +32,6 @@ from modelscope.trainers.trainer import EpochBasedTrainer from modelscope.utils.config import ConfigDict from modelscope.utils.constant import ModeKeys, TrainerStages from modelscope.utils.data_utils import to_device -from modelscope.utils.file_utils import func_receive_dict_inputs from modelscope.utils.torch_utils import is_dist From c1aa12002915c9cbd3fcb46bd295cee5ebd22ada Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 17:15:10 +0800 Subject: [PATCH 44/87] custom diffusion --- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 51289dd6..a0a1861e 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -3,7 +3,6 @@ import hashlib import itertools import os import random -import shutil import warnings from pathlib import Path from typing import Union @@ -296,8 +295,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): if os.path.isdir(instance_data_name): self.instance_data_dir = instance_data_name else: - ds = MsDataset.load(instance_data_name) - self.instance_data_dir = next(iter(ds))[-1] + '/data' + ds = MsDataset.load(instance_data_name, split='train') + self.instance_data_dir = os.path.dirname(next(iter(ds))['Target:FILE']) self.concepts_list = [{ 'instance_prompt': self.instance_prompt, From cfa363d433dcecfbc9ba252346d287bc36bc1b73 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 17:35:57 +0800 Subject: [PATCH 45/87] custom diffusion --- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py 
| 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index a0a1861e..99c6cb88 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -585,9 +585,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): index_grads_to_zero = torch.arange( len(self.model.tokenizer)) != self.modifier_token_id[0] for i in range(len(self.modifier_token_id[1:])): - index_grads_to_zero = index_grads_to_zero & ( - torch.arange(len(self.model.tokenizer)) != - self.modifier_token_id[i]) + modifier_flag = torch.arange(len(self.model.tokenizer)) != self.modifier_token_id[i] + index_grads_to_zero = index_grads_to_zero & modifier_flag grads_data = grads_text_encoder.data[ index_grads_to_zero, :].fill_(0) grads_text_encoder.data[ From 34ab7173932d79118ebab65e2d90ab5fa319a836 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 12 Jul 2023 19:47:32 +0800 Subject: [PATCH 46/87] custom_diffusion --- .../custom_diffusion/custom_diffusion_trainer.py | 7 +++++-- tests/trainers/test_custom_diffusion_trainer.py | 10 +++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 99c6cb88..e4925af8 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -296,7 +296,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.instance_data_dir = instance_data_name else: ds = MsDataset.load(instance_data_name, split='train') - self.instance_data_dir = os.path.dirname(next(iter(ds))['Target:FILE']) + self.instance_data_dir = os.path.dirname( + next(iter(ds))['Target:FILE']) self.concepts_list = [{ 'instance_prompt': self.instance_prompt, @@ -585,7 +586,9 @@ class CustomDiffusionTrainer(EpochBasedTrainer): index_grads_to_zero = torch.arange( len(self.model.tokenizer)) != self.modifier_token_id[0] for i in range(len(self.modifier_token_id[1:])): - modifier_flag = torch.arange(len(self.model.tokenizer)) != self.modifier_token_id[i] + modifier_flag = torch.arange( + len(self.model.tokenizer) + ) != self.modifier_token_id[i] index_grads_to_zero = index_grads_to_zero & modifier_flag grads_data = grads_text_encoder.data[ index_grads_to_zero, :].fill_(0) diff --git a/tests/trainers/test_custom_diffusion_trainer.py b/tests/trainers/test_custom_diffusion_trainer.py index 84bdbcf0..6c647401 100644 --- a/tests/trainers/test_custom_diffusion_trainer.py +++ b/tests/trainers/test_custom_diffusion_trainer.py @@ -20,11 +20,11 @@ class TestCustomDiffusionTrainer(unittest.TestCase): print(('Testing %s.%s' % (type(self).__name__, self._testMethodName))) self.train_dataset = MsDataset.load( - 'buptwq/lora-stable-diffusion-finetune', + 'buptwq/lora-stable-diffusion-finetune-dog', split='train', download_mode=DownloadMode.FORCE_REDOWNLOAD) self.eval_dataset = MsDataset.load( - 'buptwq/lora-stable-diffusion-finetune', + 'buptwq/lora-stable-diffusion-finetune-dog', split='validation', download_mode=DownloadMode.FORCE_REDOWNLOAD) @@ -41,7 +41,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') 
def test_custom_diffusion_train(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' - model_revision = 'v1.0.8' + model_revision = 'v1.0.9' prompt = 'a dog.' def cfg_modify_fn(cfg): @@ -51,7 +51,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): 'lr_lambda': lambda _: 1, 'last_epoch': -1 } - cfg.train.optimizer.lr = 5e-6 + cfg.train.optimizer.lr = 1e-5 return cfg kwargs = dict( @@ -79,7 +79,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') def test_dreambooth_diffusion_eval(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' - model_revision = 'v1.0.8' + model_revision = 'v1.0.9' kwargs = dict( model=model_id, From 0b85979f2eb8931995781e0ffa91b092a4efc98d Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Fri, 14 Jul 2023 19:02:52 +0800 Subject: [PATCH 47/87] Update diffusers version to 0.18.0 (#377) * update diffusers to 0.18.0 * fix bugs --- requirements/multi-modal.txt | 2 +- tests/trainers/test_lora_diffusion_trainer.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements/multi-modal.txt b/requirements/multi-modal.txt index fe2d45a1..4cf4d19a 100644 --- a/requirements/multi-modal.txt +++ b/requirements/multi-modal.txt @@ -1,7 +1,7 @@ accelerate cloudpickle decord>=0.6.0 -diffusers==0.15.0 +diffusers==0.18.0 fairseq ftfy>=6.0.3 librosa==0.9.2 diff --git a/tests/trainers/test_lora_diffusion_trainer.py b/tests/trainers/test_lora_diffusion_trainer.py index a9b9e299..2ffef2db 100644 --- a/tests/trainers/test_lora_diffusion_trainer.py +++ b/tests/trainers/test_lora_diffusion_trainer.py @@ -35,7 +35,7 @@ class TestLoraDiffusionTrainer(unittest.TestCase): shutil.rmtree(self.tmp_dir) super().tearDown() - @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_lora_diffusion_train(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' @@ -67,7 +67,7 @@ class TestLoraDiffusionTrainer(unittest.TestCase): results_files = os.listdir(self.tmp_dir) self.assertIn(f'{trainer.timestamp}.log.json', results_files) - @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_lora_diffusion_eval(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' From 442bdc74a43f69dd058028a2e66d9020b6c9ac16 Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Fri, 14 Jul 2023 19:04:29 +0800 Subject: [PATCH 48/87] Use more compatible DiffusionPipeline (#383) --- .../stable_diffusion/stable_diffusion_pipeline.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py index 1b75656e..f55cc95f 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py @@ -6,8 +6,7 @@ import cv2 import numpy as np import torch import torchvision.transforms as transforms -from diffusers import \ - StableDiffusionPipeline as DiffuserStableDiffusionPipeline +from diffusers import DiffusionPipeline from PIL import Image from 
modelscope.metainfo import Pipelines @@ -35,7 +34,7 @@ class StableDiffusionPipeline(DiffusersPipeline): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' # load pipeline torch_type = torch.float16 if self.device == 'cuda' else torch.float32 - self.pipeline = DiffuserStableDiffusionPipeline.from_pretrained( + self.pipeline = DiffusionPipeline.from_pretrained( model, torch_dtype=torch_type) self.pipeline = self.pipeline.to(self.device) # load lora moudle to unet From c6189d68a0bc37451df7fc6ba0e3d53f9c8b38d4 Mon Sep 17 00:00:00 2001 From: Jintao Date: Sat, 15 Jul 2023 09:59:53 +0800 Subject: [PATCH 49/87] Fix/chatglm2 (#384) --- examples/pytorch/llm/_common.py | 121 ++++++++++-------- examples/pytorch/llm/baichuan_infer.py | 22 ++-- examples/pytorch/llm/baichuan_sft.py | 25 +--- examples/pytorch/llm/chatglm2_infer.py | 21 ++- examples/pytorch/llm/chatglm2_sft.py | 29 +---- examples/pytorch/llm_agent/_common.py | 117 +++++++++-------- .../pytorch/llm_agent/baichuan_infer.ipynb | 10 +- examples/pytorch/llm_agent/baichuan_sft.ipynb | 27 ++-- .../pytorch/llm_agent/chatglm2_infer.ipynb | 12 +- examples/pytorch/llm_agent/chatglm2_sft.ipynb | 34 ++--- .../models/nlp/chatglm2/text_generation.py | 1 + 11 files changed, 190 insertions(+), 229 deletions(-) diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py index 79a958ec..161c99bf 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/_common.py @@ -51,25 +51,15 @@ from modelscope.utils.config import Config, ConfigDict from modelscope.utils.registry import default_group # -TEST_SPLIT_P = 0.01 -SPLIT_SEED = 42 -MAX_LENGTH: Optional[int] = 2048 COLOR, COLOR_S = '#FFE2D9', '#FF7043' -PROMPT = """### 用户 -{instruction} -### AI助手 -""" +PROMPT = """Human: {instruction} +AI: """ logger = get_logger() # -def get_model_dir(model_id: str, model_revision: Optional[str] = None) -> str: - model_dir = snapshot_download(model_id, model_revision) - return model_dir - - def _get_version(work_dir: str) -> int: if os.path.isdir(work_dir): fnames = os.listdir(work_dir) @@ -96,28 +86,40 @@ def get_work_dir(work_dir: str) -> str: return work_dir -def select_device(device_ids: List[int]) -> Device: +def _format_device(device: Union[List[int], str]) -> Tuple[List[int], str]: + if isinstance(device, list): + device_ids = device + device_str = ','.join([str(d) for d in device]) + else: + device_ids = [int(d) for d in device.split(',') if d != '-1'] + device_str = device + device_str = device_str.replace(' ', '') + return device_ids, device_str + + +def select_device(device: Union[List[int], str]) -> Device: """Call this function before cuda is initialized. - Return: master device + device: e.g. []: 'cpu', [0], [0, 1, 2] + e.g. '-1': 'cpu', '0', '0,1,2' """ if torch.cuda.is_initialized(): logger.warning('CUDA has been initialized! Device selection fails!') return torch.device('cuda:0') # + device_ids, device_str = _format_device(device) + # + os.environ['CUDA_VISIBLE_DEVICES'] = device_str log_s = 'Using device: ' - if len(device_ids) == 0: # cpu - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' - device: str = 'cpu' - log_s += device + if len(device_ids) == 0: + master_device: str = 'cpu' + log_s += 'cpu' else: - os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( - [str(d) for d in device_ids]) assert torch.cuda.is_available( ) and torch.cuda.device_count() >= len(device_ids) - log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 
'cuda:1,7,8' - device = 'cuda:0' + master_device = 'cuda:0' + log_s += f'cuda:{device_str}' logger.info(log_s) - return torch.device(device) + return torch.device(master_device) def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: @@ -148,7 +150,9 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, return T_max -def tokenize_function(example: Dict[str, str], tokenizer) -> Dict[str, Any]: +def tokenize_function(example: Dict[str, str], + tokenizer, + max_length: Optional[int] = 2048) -> Dict[str, Any]: """Only applicable to baichuan and chatglm2. Other models need to be tested""" instruction = example['instruction'] input_: str = example['input'] @@ -159,12 +163,12 @@ def tokenize_function(example: Dict[str, str], tokenizer) -> Dict[str, Any]: else: instruction = instruction + input_ output = example['output'] - src_text = PROMPT.format(instruction=instruction, add_special_tokens=False) + src_text = PROMPT.format(instruction=instruction) src_input_ids: List[int] = tokenizer( src_text, return_attention_mask=False, add_special_tokens=True)['input_ids'] - # tokenizer.bos_token_id: Avoid `tgt_input_ids` being empty - tgt_input_ids = [tokenizer.bos_token_id] + # + tgt_input_ids = [] if output is not None: tgt_input_ids += tokenizer( output, return_attention_mask=False, @@ -175,10 +179,10 @@ def tokenize_function(example: Dict[str, str], tokenizer) -> Dict[str, Any]: labels = None input_ids = src_input_ids + tgt_input_ids # - if MAX_LENGTH is not None: - input_ids = input_ids[-MAX_LENGTH:] + if max_length is not None: + input_ids = input_ids[-max_length:] if labels is not None: - labels = labels[-MAX_LENGTH:] + labels = labels[-max_length:] # return {'input_ids': input_ids, 'labels': labels} @@ -200,8 +204,10 @@ def stat_dataset(dataset: HFDataset) -> None: def print_examples(examples: Dict[str, Any], tokenizer) -> None: input_ids, labels = examples['input_ids'], examples['labels'] - print(f'[INPUT_IDS] {tokenizer.decode(input_ids)}') + print(f'[INPUT_IDS] {input_ids}') + print(f'[INPUT] {tokenizer.decode(input_ids)}') print() + print(f'[LABLES_IDS] {labels}') print( f'[LABLES] {tokenizer.decode([lb if lb != -100 else 0 for lb in labels])}' ) @@ -283,16 +289,25 @@ class MyMetric(Metric): } def merge(self, other: 'MyMetric') -> None: - """This script does not support ddp""" + """This script does not support ddp. 
TODO""" raise NotImplementedError -def get_baichuan7B_model_tokenizer(model_dir: Optional[str] = None, - load_model: bool = True): - if model_dir is None: - model_id = 'baichuan-inc/baichuan-7B' - model_dir = get_model_dir(model_id, None) - # +def _add_special_token(tokenizer): + if tokenizer.eos_token_id is None: + tokenizer.eos_token_id = 2 + if tokenizer.bos_token_id is None: + tokenizer.bos_token_id = 1 + if tokenizer.pad_token_id is None: + tokenizer.pad_token_id = 0 + logger.info(f'bos_token_id: {tokenizer.bos_token_id}, ' + f'eos_token_id: {tokenizer.eos_token_id}, ' + f'pad_token_id: {tokenizer.pad_token_id}') + + +def get_baichuan7B_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): sys.path.insert(0, model_dir) from configuration_baichuan import BaiChuanConfig from tokenization_baichuan import BaiChuanTokenizer @@ -309,15 +324,14 @@ def get_baichuan7B_model_tokenizer(model_dir: Optional[str] = None, device_map='auto', torch_dtype=torch.float16) # + if add_special_token: + _add_special_token(tokenizer) return model, tokenizer -def get_baichuan13B_model_tokenizer(model_dir: Optional[str] = None, - load_model: bool = True): - if model_dir is None: - model_id = 'baichuan-inc/Baichuan-13B-Base' - model_dir = get_model_dir(model_id, 'v1.0.1') - # +def get_baichuan13B_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): sys.path.insert(0, model_dir) from configuration_baichuan import BaichuanConfig from tokenization_baichuan import BaichuanTokenizer @@ -334,15 +348,14 @@ def get_baichuan13B_model_tokenizer(model_dir: Optional[str] = None, device_map='auto', torch_dtype=torch.float16) # + if add_special_token: + _add_special_token(tokenizer) return model, tokenizer -def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, - load_model: bool = True): - if model_dir is None: - model_id = 'ZhipuAI/chatglm2-6b' - model_dir = snapshot_download(model_id, None) - # +def get_chatglm2_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): config = read_config(model_dir) config['model'] = ConfigDict({'type': 'chatglm2-6b'}) tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) @@ -353,12 +366,16 @@ def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, cfg_dict=config, device_map='auto', torch_dtype=torch.float16) + if add_special_token: + _add_special_token(tokenizer) return model, tokenizer def get_alpaca_en_zh_dataset( tokenize_function, - only_val: bool = False) -> Tuple[HFDataset, HFDataset]: + only_val: bool = False, + test_split_p: float = 0.01, + split_seed: int = 42) -> Tuple[HFDataset, HFDataset]: """ split: Literal['train', 'validation', None] """ @@ -371,7 +388,7 @@ def get_alpaca_en_zh_dataset( dataset: HFDataset = concatenate_datasets([dataset_zh, dataset_en]) # # dataset = dataset.select(range(1000)) # for debug - dataset = dataset.train_test_split(TEST_SPLIT_P, seed=SPLIT_SEED) + dataset = dataset.train_test_split(test_split_p, seed=split_seed) if only_val: dataset = dataset['test'] if tokenize_function is not None: diff --git a/examples/pytorch/llm/baichuan_infer.py b/examples/pytorch/llm/baichuan_infer.py index f9a49c09..6e027347 100644 --- a/examples/pytorch/llm/baichuan_infer.py +++ b/examples/pytorch/llm/baichuan_infer.py @@ -3,24 +3,22 @@ from _common import * from transformers import TextStreamer device_ids = [0, 1] -logger.info(device_ids) select_device(device_ids) +# Note: You need to set the value of `CKPT_FPATH` +CKPT_FAPTH = 
'/path/to/your/iter_xxx.pth' # ### Loading Model and Tokenizer -# Note: You need to set the value of `CKPT_FPATH` BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] -CKPT_FAPTH = '/path/to/your/xxx.pth' -LORA_TARGET_MODULES = ['W_pack'] - if BAICHUAN_TYPE == '7B': - model, tokenizer = get_baichuan7B_model_tokenizer() + model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') + model, tokenizer = get_baichuan7B_model_tokenizer(model_dir) else: - model, tokenizer = get_baichuan13B_model_tokenizer() -if tokenizer.pad_token_id is None: - tokenizer.pad_token_id = tokenizer.eos_token_id + model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') + model, tokenizer = get_baichuan13B_model_tokenizer(model_dir) model.bfloat16() # Consistent with training # ### Preparing lora +LORA_TARGET_MODULES = ['W_pack'] LORA_RANK = 8 LORA_ALPHA = 32 LORA_DROPOUT_P = 0 # Arbitrary value @@ -38,7 +36,8 @@ _, test_dataset = get_alpaca_en_zh_dataset(None, True) # ### Inference streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) -for d in test_dataset[:5]: +mini_test_dataset = test_dataset.select(range(5)) +for d in mini_test_dataset: output = d['output'] d['output'] = None input_ids = tokenize_function(d, tokenizer)['input_ids'] @@ -50,9 +49,10 @@ for d in test_dataset[:5]: max_new_tokens=512, attention_mask=attention_mask, streamer=streamer, - pad_token_id=tokenizer.pad_token_id, + pad_token_id=tokenizer.eos_token_id, temperature=0.7, top_k=50, + top_p=0.7, do_sample=True) print() print(f'[LABELS]{output}') diff --git a/examples/pytorch/llm/baichuan_sft.py b/examples/pytorch/llm/baichuan_sft.py index 18f71d22..4addc8b5 100644 --- a/examples/pytorch/llm/baichuan_sft.py +++ b/examples/pytorch/llm/baichuan_sft.py @@ -3,35 +3,27 @@ pip install modelscope pip install numpy pandas matplotlib scikit-learn pip install transformers datasets -pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -pip install tqdm -pip install tensorboard -pip install torchmetrics -pip install sentencepiece -pip install accelerate +conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia +pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate pip install numpy -U # Resolve torchmetrics dependencies and update numpy """ from _common import * -device_ids = [0, 1, 2, 3] -logger.info(device_ids) +device_ids = [0, 1] select_device(device_ids) seed_everything(42) # ### Loading Model and Tokenizer BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] WORK_DIR = f'runs/baichuan_{BAICHUAN_TYPE}' -LORA_TARGET_MODULES = ['W_pack'] # if BAICHUAN_TYPE == '7B': - model_id = 'baichuan-inc/baichuan-7B' - model_dir = get_model_dir(model_id, None) + model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') model, tokenizer = get_baichuan7B_model_tokenizer(model_dir) else: - model_id = 'baichuan-inc/Baichuan-13B-Base' - model_dir = get_model_dir(model_id, 'v1.0.1') + model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') model, tokenizer = get_baichuan13B_model_tokenizer(model_dir) # GRADIENT_CHECKPOINTING = True @@ -46,14 +38,9 @@ if GRADIENT_CHECKPOINTING: model) model.gradient_checkpointing_enable() model.enable_input_require_grads() -if tokenizer.pad_token_id is None: - tokenizer.pad_token_id = tokenizer.eos_token_id -# -logger.info( - f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, ' - f'pad_token_id: {tokenizer.pad_token_id}') # ### Preparing lora 
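For reference, the `W_pack` module targeted below is Baichuan's fused query/key/value projection, and the rank/alpha/dropout constants that follow are the standard LoRA hyper-parameters. A minimal sketch of what this preparation step amounts to, written against the Hugging Face peft package purely for illustration (the scripts in this series use modelscope's own LoRA helper, so the names here are an assumption rather than this repo's API):

# Illustrative only: wrap the already-loaded Baichuan model with a LoRA adapter.
from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=8,                        # LORA_RANK
    lora_alpha=32,              # LORA_ALPHA
    lora_dropout=0.1,           # LORA_DROPOUT_P
    target_modules=['W_pack'],  # Baichuan's fused QKV projection
    task_type='CAUSAL_LM')
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the low-rank A/B matrices stay trainable

At rank 8 the adapter adds only a small fraction of the base model's parameters, which is why LoRA plus gradient checkpointing fits the 7B/13B models on a few GPUs.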
+LORA_TARGET_MODULES = ['W_pack'] LORA_RANK = 8 LORA_ALPHA = 32 LORA_DROPOUT_P = 0.1 diff --git a/examples/pytorch/llm/chatglm2_infer.py b/examples/pytorch/llm/chatglm2_infer.py index 741f9b18..c1a544cb 100644 --- a/examples/pytorch/llm/chatglm2_infer.py +++ b/examples/pytorch/llm/chatglm2_infer.py @@ -3,22 +3,17 @@ from _common import * from transformers import TextStreamer device_ids = [0, 1] -logger.info(device_ids) select_device(device_ids) +# Note: You need to set the value of `CKPT_FPATH` +CKPT_FAPTH = '/path/to/your/iter_xxx.pth' # ### Loading Model and Tokenizer -# Note: You need to set the value of `CKPT_FPATH` -CKPT_FAPTH = '/path/to/your/xxx.pth' -LORA_TARGET_MODULES = ['query_key_value'] - -model, tokenizer = get_chatglm2_model_tokenizer() -if tokenizer.eos_token_id is None: - tokenizer.eos_token_id = tokenizer.pad_token_id -if tokenizer.bos_token_id is None: - tokenizer.bos_token_id = 1 +model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') +model, tokenizer = get_chatglm2_model_tokenizer(model_dir) model.bfloat16() # Consistent with training # ### Preparing lora +LORA_TARGET_MODULES = ['query_key_value'] LORA_RANK = 8 LORA_ALPHA = 32 LORA_DROPOUT_P = 0 # Arbitrary value @@ -36,7 +31,8 @@ _, test_dataset = get_alpaca_en_zh_dataset(None, True) # ### Inference streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) -for d in test_dataset[:5]: +mini_test_dataset = test_dataset.select(range(5)) +for d in mini_test_dataset: output = d['output'] d['output'] = None input_ids = tokenize_function(d, tokenizer)['input_ids'] @@ -48,9 +44,10 @@ for d in test_dataset[:5]: max_new_tokens=512, attention_mask=attention_mask, streamer=streamer, - pad_token_id=tokenizer.pad_token_id, + pad_token_id=tokenizer.eos_token_id, temperature=0.7, top_k=50, + top_p=0.7, do_sample=True) print() print(f'[LABELS]{output}') diff --git a/examples/pytorch/llm/chatglm2_sft.py b/examples/pytorch/llm/chatglm2_sft.py index ecd497a2..4876025b 100644 --- a/examples/pytorch/llm/chatglm2_sft.py +++ b/examples/pytorch/llm/chatglm2_sft.py @@ -3,46 +3,31 @@ pip install modelscope pip install numpy pandas matplotlib scikit-learn pip install transformers datasets -pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 -pip install tqdm -pip install tensorboard -pip install torchmetrics -pip install sentencepiece -pip install accelerate +conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia +pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate pip install numpy -U # Resolve torchmetrics dependencies and update numpy """ from _common import * -device_ids = [0, 1, 2, 3] -logger.info(device_ids) +device_ids = [0, 1] select_device(device_ids) seed_everything(42) # ### Loading Model and Tokenizer -model_id = 'ZhipuAI/chatglm2-6b' WORK_DIR = 'runs/chatglm2' -LORA_TARGET_MODULES = ['query_key_value'] # -model_dir = get_model_dir(model_id, None) +model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') model, tokenizer = get_chatglm2_model_tokenizer(model_dir) -# chatglm2 does not support gradient_checkpointing -GRADIENT_CHECKPOINTING = False +# +GRADIENT_CHECKPOINTING = True if GRADIENT_CHECKPOINTING: model.gradient_checkpointing_enable() model.enable_input_require_grads() -logger.info(tokenizer.special_tokens) -if tokenizer.eos_token_id is None: - tokenizer.eos_token_id = tokenizer.pad_token_id -if tokenizer.bos_token_id is None: - tokenizer.bos_token_id = 1 -# -logger.info( - f'bos_token_id: 
{tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, ' - f'pad_token_id: {tokenizer.pad_token_id}') # ### Preparing lora +LORA_TARGET_MODULES = ['query_key_value'] LORA_RANK = 8 LORA_ALPHA = 32 LORA_DROPOUT_P = 0.1 diff --git a/examples/pytorch/llm_agent/_common.py b/examples/pytorch/llm_agent/_common.py index 04097b50..dd07ef31 100644 --- a/examples/pytorch/llm_agent/_common.py +++ b/examples/pytorch/llm_agent/_common.py @@ -49,11 +49,9 @@ from modelscope.utils.config import Config, ConfigDict from modelscope.utils.registry import default_group # -SYSTEM_TEXT = """{system}""" -USER_TEXT = """\n\n### 用户 -{user}""" -ASSISTANT_PROMPT = """\n\n### 助手 -""" +PROMPT = """System: {system} +Human: {user} +AI: """ MAX_LENGTH = 2048 TEST_MAX_LENGTH = MAX_LENGTH @@ -62,11 +60,6 @@ logger = get_logger() # -def get_model_dir(model_id: str, model_revision: Optional[str] = None) -> str: - model_dir = snapshot_download(model_id, model_revision) - return model_dir - - def _get_version(work_dir: str) -> int: if os.path.isdir(work_dir): fnames = os.listdir(work_dir) @@ -93,28 +86,40 @@ def get_work_dir(work_dir: str) -> str: return work_dir -def select_device(device_ids: List[int]) -> Device: +def _format_device(device: Union[List[int], str]) -> Tuple[List[int], str]: + if isinstance(device, list): + device_ids = device + device_str = ','.join([str(d) for d in device]) + else: + device_ids = [int(d) for d in device.split(',') if d != '-1'] + device_str = device + device_str = device_str.replace(' ', '') + return device_ids, device_str + + +def select_device(device: Union[List[int], str]) -> Device: """Call this function before cuda is initialized. - Return: master device + device: e.g. []: 'cpu', [0], [0, 1, 2] + e.g. '-1': 'cpu', '0', '0,1,2' """ if torch.cuda.is_initialized(): logger.warning('CUDA has been initialized! Device selection fails!') return torch.device('cuda:0') # + device_ids, device_str = _format_device(device) + # + os.environ['CUDA_VISIBLE_DEVICES'] = device_str log_s = 'Using device: ' - if len(device_ids) == 0: # cpu - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' - device: str = 'cpu' - log_s += device + if len(device_ids) == 0: + master_device: str = 'cpu' + log_s += 'cpu' else: - os.environ['CUDA_VISIBLE_DEVICES'] = ','.join( - [str(d) for d in device_ids]) assert torch.cuda.is_available( ) and torch.cuda.device_count() >= len(device_ids) - log_s += f"cuda:{','.join([str(d) for d in device_ids])}" # e.g. 'cuda:1,7,8' - device = 'cuda:0' + master_device = 'cuda:0' + log_s += f'cuda:{device_str}' logger.info(log_s) - return torch.device(device) + return torch.device(master_device) def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: @@ -148,37 +153,27 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, def tokenize_function(system: str, user: str, assistant: Optional[str], tokenizer) -> Dict[str, Any]: """Only applicable to baichuan and chatglm2. 
Other models need to be tested""" - system_text = SYSTEM_TEXT.format(system=system) - user_text = USER_TEXT.format(user=user) - system_text_ids: List[int] = tokenizer( - system_text, return_attention_mask=False, + src_text = PROMPT.format(system=system, user=user) + src_input_ids: List[int] = tokenizer( + src_text, return_attention_mask=False, add_special_tokens=True)['input_ids'] - user_text_ids: List[int] = tokenizer( - user_text, return_attention_mask=False, - add_special_tokens=False)['input_ids'] - assistant_p_input_ids: List[int] = tokenizer( - ASSISTANT_PROMPT, - return_attention_mask=False, - add_special_tokens=False)['input_ids'] - - # tokenizer.bos_token_id: Avoid `assistant` being empty - assistant_input_ids: List[int] = [tokenizer.bos_token_id] + # + tgt_input_ids: List[int] = [] if assistant is not None: - assistant_input_ids += tokenizer( + tgt_input_ids += tokenizer( assistant, return_attention_mask=False, add_special_tokens=False)['input_ids'] - assistant_input_ids += [tokenizer.eos_token_id] + tgt_input_ids += [tokenizer.eos_token_id] + labels = [-100] * len(src_input_ids) + tgt_input_ids + else: + labels = None + input_ids = src_input_ids + tgt_input_ids # - input_ids = system_text_ids + user_text_ids + assistant_p_input_ids + assistant_input_ids - if assistant is not None: # train, val + if assistant is not None: if len(input_ids) > MAX_LENGTH: return {} - len_mask = len(input_ids) - len(assistant_input_ids) - labels = [-100] * len_mask + assistant_input_ids - else: # test + else: input_ids = input_ids[-TEST_MAX_LENGTH:] - labels = None - # return {'input_ids': input_ids, 'labels': labels} @@ -305,12 +300,21 @@ class MyMetric(Metric): raise NotImplementedError -def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, - load_model: bool = True): - if model_dir is None: - model_id = 'baichuan-inc/baichuan-7B' - model_dir = get_model_dir(model_id, None) - # +def _add_special_token(tokenizer): + if tokenizer.eos_token_id is None: + tokenizer.eos_token_id = 2 + if tokenizer.bos_token_id is None: + tokenizer.bos_token_id = 1 + if tokenizer.pad_token_id is None: + tokenizer.pad_token_id = 0 + logger.info(f'bos_token_id: {tokenizer.bos_token_id}, ' + f'eos_token_id: {tokenizer.eos_token_id}, ' + f'pad_token_id: {tokenizer.pad_token_id}') + + +def get_baichuan7B_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): sys.path.insert(0, model_dir) from configuration_baichuan import BaiChuanConfig from tokenization_baichuan import BaiChuanTokenizer @@ -327,15 +331,14 @@ def get_baichuan_model_tokenizer(model_dir: Optional[str] = None, device_map='auto', torch_dtype=torch.float16) # + if add_special_token: + _add_special_token(tokenizer) return model, tokenizer -def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, - load_model: bool = True): - if model_dir is None: - model_id = 'ZhipuAI/chatglm2-6b' - model_dir = snapshot_download(model_id, None) - # +def get_chatglm2_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): config = read_config(model_dir) config['model'] = ConfigDict({'type': 'chatglm2-6b'}) tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) @@ -346,6 +349,8 @@ def get_chatglm2_model_tokenizer(model_dir: Optional[str] = None, cfg_dict=config, device_map='auto', torch_dtype=torch.float16) + if add_special_token: + _add_special_token(tokenizer) return model, tokenizer diff --git a/examples/pytorch/llm_agent/baichuan_infer.ipynb 
b/examples/pytorch/llm_agent/baichuan_infer.ipynb index 03f8f46b..7ef29951 100644 --- a/examples/pytorch/llm_agent/baichuan_infer.ipynb +++ b/examples/pytorch/llm_agent/baichuan_infer.ipynb @@ -54,7 +54,6 @@ "from _common import *\n", "from transformers import TextStreamer\n", "device_ids = [0, 1]\n", - "logger.info(device_ids)\n", "select_device(device_ids)" ] }, @@ -146,9 +145,8 @@ "CKPT_FAPTH = '/home/hackathon/my_git/agent/runs/baichuan/v10-20230702-172449/output_best/pytorch_model.bin'\n", "LORA_TARGET_MODULES = ['W_pack']\n", "\n", - "model, tokenizer = get_baichuan_model_tokenizer()\n", - "if tokenizer.pad_token_id is None:\n", - " tokenizer.pad_token_id = tokenizer.eos_token_id\n", + "model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5')\n", + "model, tokenizer = get_baichuan7B_model_tokenizer(model_dir)\n", "model.bfloat16() # Consistent with training" ] }, @@ -451,8 +449,8 @@ " attention_mask = torch.ones_like(input_ids)\n", " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", " attention_mask=attention_mask,\n", - " streamer=streamer, pad_token_id=tokenizer.pad_token_id, \n", - " temperature=0.7, top_k=50, do_sample=True)\n", + " streamer=streamer, pad_token_id=tokenizer.eos_token_id, \n", + " temperature=0.7, top_k=50, top_p=0.7, do_sample=True)\n", " print()\n", " print(f'[LABELS]{assistant}')\n", " print('-----------------------------------------------------------------------------------')\n", diff --git a/examples/pytorch/llm_agent/baichuan_sft.ipynb b/examples/pytorch/llm_agent/baichuan_sft.ipynb index cb732612..75a9240e 100644 --- a/examples/pytorch/llm_agent/baichuan_sft.ipynb +++ b/examples/pytorch/llm_agent/baichuan_sft.ipynb @@ -33,16 +33,12 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install modelscope -U\n", + "# !pip install modelscope\n", "# !pip install numpy pandas matplotlib scikit-learn\n", "# !pip install transformers datasets\n", - "# !pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n", - "# !pip install tqdm\n", - "# !pip install tensorboard\n", - "# !pip install torchmetrics\n", - "# !pip install sentencepiece\n", - "# !pip install accelerate\n", - "#\n", + "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate\n", + "\n", "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" ] }, @@ -75,8 +71,7 @@ ], "source": [ "from _common import *\n", - "device_ids = [0, 1, 2, 3]\n", - "logger.info(device_ids)\n", + "device_ids = [0, 1]\n", "select_device(device_ids)\n", "_ = seed_everything(42)" ] @@ -132,22 +127,16 @@ } ], "source": [ - "model_id = 'baichuan-inc/baichuan-7B'\n", "WORK_DIR = 'runs/baichuan'\n", "LORA_TARGET_MODULES = ['W_pack']\n", "#\n", - "model_dir = get_model_dir(model_id, None)\n", - "model, tokenizer = get_baichuan_model_tokenizer(model_dir)\n", + "model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5')\n", + "model, tokenizer = get_baichuan7B_model_tokenizer(model_dir)\n", "#\n", "GRADIENT_CHECKPOINTING = True\n", "if GRADIENT_CHECKPOINTING:\n", " model.gradient_checkpointing_enable()\n", - " model.enable_input_require_grads()\n", - "if tokenizer.pad_token_id is None:\n", - " tokenizer.pad_token_id = tokenizer.eos_token_id\n", - "#\n", - "logger.info(f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, '\n", - " f'pad_token_id: {tokenizer.pad_token_id}')" + " 
model.enable_input_require_grads()" ] }, { diff --git a/examples/pytorch/llm_agent/chatglm2_infer.ipynb b/examples/pytorch/llm_agent/chatglm2_infer.ipynb index 237d27c8..821da5e6 100644 --- a/examples/pytorch/llm_agent/chatglm2_infer.ipynb +++ b/examples/pytorch/llm_agent/chatglm2_infer.ipynb @@ -55,7 +55,6 @@ "from _common import *\n", "from transformers import TextStreamer\n", "device_ids = [0, 1]\n", - "logger.info(device_ids)\n", "select_device(device_ids)" ] }, @@ -143,11 +142,8 @@ "CKPT_FAPTH = '/home/hackathon/my_git/agent/runs/chatglm2/v1-20230702-203505/output_best/pytorch_model.bin'\n", "LORA_TARGET_MODULES = ['query_key_value']\n", "\n", - "model, tokenizer = get_chatglm2_model_tokenizer()\n", - "if tokenizer.eos_token_id is None:\n", - " tokenizer.eos_token_id = tokenizer.pad_token_id\n", - "if tokenizer.bos_token_id is None:\n", - " tokenizer.bos_token_id = 1\n", + "model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6')\n", + "model, tokenizer = get_chatglm2_model_tokenizer(model_dir)\n", "model.bfloat16() # Consistent with training" ] }, @@ -484,8 +480,8 @@ " attention_mask = torch.ones_like(input_ids)\n", " generate_ids = model.generate(input_ids=input_ids, max_new_tokens=512,\n", " attention_mask=attention_mask,\n", - " streamer=streamer, pad_token_id=tokenizer.pad_token_id, \n", - " temperature=0.7, top_k=50, do_sample=True)\n", + " streamer=streamer, pad_token_id=tokenizer.eos_token_id, \n", + " temperature=0.7, top_k=50, top_p=0.7, do_sample=True)\n", " print()\n", " print(f'[LABELS]{assistant}')\n", " print('-----------------------------------------------------------------------------------')\n", diff --git a/examples/pytorch/llm_agent/chatglm2_sft.ipynb b/examples/pytorch/llm_agent/chatglm2_sft.ipynb index 70d9b8a1..4810e4b9 100644 --- a/examples/pytorch/llm_agent/chatglm2_sft.ipynb +++ b/examples/pytorch/llm_agent/chatglm2_sft.ipynb @@ -40,22 +40,18 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install modelscope -U\n", + "# !pip install modelscope\n", "# !pip install numpy pandas matplotlib scikit-learn\n", "# !pip install transformers datasets\n", - "# !pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118\n", - "# !pip install tqdm\n", - "# !pip install tensorboard\n", - "# !pip install torchmetrics\n", - "# !pip install sentencepiece\n", - "# !pip install accelerate\n", - "#\n", + "# !conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n", + "# !pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate\n", + "\n", "# !pip install numpy -U # Resolve torchmetrics dependencies and update numpy" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [ { @@ -80,8 +76,7 @@ ], "source": [ "from _common import *\n", - "device_ids = [0, 1, 2, 3]\n", - "logger.info(device_ids)\n", + "device_ids = [0, 1]\n", "select_device(device_ids)\n", "_ = seed_everything(42)" ] @@ -136,25 +131,16 @@ } ], "source": [ - "model_id = 'ZhipuAI/chatglm2-6b'\n", "WORK_DIR = 'runs/chatglm2'\n", "LORA_TARGET_MODULES = ['query_key_value']\n", "#\n", - "model_dir = get_model_dir(model_id, None)\n", + "model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6')\n", "model, tokenizer = get_chatglm2_model_tokenizer(model_dir)\n", - "# chatglm2 does not support gradient_checkpointing\n", - "GRADIENT_CHECKPOINTING = False\n", + "#\n", + "GRADIENT_CHECKPOINTING = True\n", "if GRADIENT_CHECKPOINTING:\n", " 
model.gradient_checkpointing_enable()\n", - " model.enable_input_require_grads()\n", - "logger.info(tokenizer.special_tokens)\n", - "if tokenizer.eos_token_id is None:\n", - " tokenizer.eos_token_id = tokenizer.pad_token_id\n", - "if tokenizer.bos_token_id is None:\n", - " tokenizer.bos_token_id = 1\n", - "#\n", - "logger.info(f'bos_token_id: {tokenizer.bos_token_id}, eos_token_id: {tokenizer.eos_token_id}, '\n", - " f'pad_token_id: {tokenizer.pad_token_id}')" + " model.enable_input_require_grads()" ] }, { diff --git a/modelscope/models/nlp/chatglm2/text_generation.py b/modelscope/models/nlp/chatglm2/text_generation.py index aed855cb..082e16e7 100644 --- a/modelscope/models/nlp/chatglm2/text_generation.py +++ b/modelscope/models/nlp/chatglm2/text_generation.py @@ -1095,6 +1095,7 @@ class ChatGLM2ForConditionalGeneration(ChatGLMPreTrainedModel): shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=-100) + shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) From fc85b57a3e24d530969c77835b459a607c166dd1 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Sat, 15 Jul 2023 23:16:16 +0800 Subject: [PATCH 50/87] fix bugs of chinese stable diffusion pipeline lora_scale --- .../chinese_stable_diffusion_pipeline.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py index 539fd4ba..0f15e5b4 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/chinese_stable_diffusion_pipeline.py @@ -146,7 +146,8 @@ class _DiffuersChineseStableDiffusionPipeline(StableDiffusionPipeline): do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None): + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + lora_scale: Optional[float] = None): r""" Encodes the prompt into text encoder hidden states. @@ -169,7 +170,14 @@ class _DiffuersChineseStableDiffusionPipeline(StableDiffusionPipeline): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + lora_scale (`float`, *optional*): + A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. """ + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): From 12bc1603a92218dbada3a27c9404f3d33948a238 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Sun, 16 Jul 2023 08:45:20 +0800 Subject: [PATCH 51/87] Fix amp + device_map (#386) 1. Fix the amp + device_map bug in chatglm2 finetune code 2. Optional to save optimizer state 3. 
Fix the logger double print problem --- examples/pytorch/chatglm6b/finetune.py | 22 +++++++++++++++++-- .../hooks/checkpoint/checkpoint_hook.py | 9 ++++++-- .../hooks/checkpoint/checkpoint_processor.py | 15 ++++++++----- .../hooks/distributed/deepspeed_hook.py | 3 ++- .../hooks/distributed/megatron_hook.py | 7 +++--- .../dreambooth_diffusion_trainer.py | 3 ++- .../lora_diffusion/lora_diffusion_trainer.py | 3 ++- modelscope/utils/logger.py | 2 +- 8 files changed, 48 insertions(+), 16 deletions(-) diff --git a/examples/pytorch/chatglm6b/finetune.py b/examples/pytorch/chatglm6b/finetune.py index f994d9a5..feed76f2 100644 --- a/examples/pytorch/chatglm6b/finetune.py +++ b/examples/pytorch/chatglm6b/finetune.py @@ -142,6 +142,14 @@ class Chatglm6bArguments(TrainingArgs): metadata={'help': 'The lora alpha'}, ) + use_amp: int = field( + default=0, + metadata={ + 'help': + 'Whether to use amp(automatic mixed precision) to train the model.' + }, + ) + args = Chatglm6bArguments(eval_metrics='chatglm').parse_cli() print(args) @@ -159,6 +167,13 @@ def cfg_modify_fn(cfg): cfg.merge_from_dict(config) else: cfg = config + if args.use_amp: + if not getattr(cfg.train, 'hooks', None): + cfg.train.hooks = [] + cfg.train.hooks.append({ + 'type': 'TorchAMPOptimizerHook', + # Optional loss_scale parameter here. + }) if cfg.train.lr_scheduler.type == 'LinearLR': cfg.train.lr_scheduler['total_iters'] = \ int(len(train_dataset) / cfg.train.dataloader.batch_size_per_gpu) * cfg.train.max_epochs @@ -192,7 +207,7 @@ tokenizer = ChatGLMTokenizer.from_pretrained(model_dir, trust_remote_code=True) device_map_kwargs = {} device_kwargs = {} -if args.use_lora != 0: +if args.use_lora != 0 and torch.cuda.device_count() > 1: device_map_kwargs['device_map'] = 'auto' # No placement for model, leave the model to `device_map` device_kwargs['device'] = 'cpu' @@ -228,7 +243,10 @@ if args.use_lora != 0: rank=args.lora_rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout) - model = model.bfloat16() + if args.use_amp: + model = model.float() + else: + model = model.bfloat16() Swift.prepare_model(model, lora_config) prefix = args.source_prefix if args.source_prefix is not None else '' diff --git a/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py b/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py index 9bea8aaf..49d20278 100644 --- a/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py +++ b/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py @@ -50,6 +50,7 @@ class CheckpointHook(Hook): hub_revision (str): Which branch to push the model to, default is `master`. upload_strategy (str): The action adopted when the previous uploading is not done and the next one is coming, can be `cancel` or `wait`. + save_trainer_state (bool): Save the trainer state for continue training, default True. kwargs: by_epoch (bool): Same with `save_strategy`, but has a higher priority, legacy argument. output_sub_dir (str): The folder under the `save_dir` to save the output checkpoint for inference. 
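The new `save_trainer_state` flag is threaded through to `save_checkpoints` below; when it is False the optimizer and lr_scheduler state dicts are simply not written, leaving inference-only checkpoints. A rough usage sketch, assuming the hook is configured through `cfg.train.hooks` in the same way `TorchAMPOptimizerHook` is appended in the finetune script above (the surrounding config keys depend on the task's configuration.json):

# Sketch: request inference-only checkpoints via a config-modify function.
def cfg_modify_fn(cfg):
    if not getattr(cfg.train, 'hooks', None):
        cfg.train.hooks = []
    cfg.train.hooks.append({
        'type': 'CheckpointHook',
        'interval': 1,                 # checkpoint frequency (per save_strategy)
        'save_trainer_state': False    # skip optimizer / lr_scheduler state
    })
    return cfg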
@@ -75,6 +76,7 @@ class CheckpointHook(Hook): private_hub: Optional[bool] = True, hub_revision: Optional[str] = DEFAULT_REPOSITORY_REVISION, upload_strategy: Optional[str] = UploadStrategy.cancel, + save_trainer_state: Optional[bool] = True, **kwargs): self.interval = interval self.save_dir = save_dir @@ -97,6 +99,7 @@ class CheckpointHook(Hook): self.private_hub = private_hub self.hub_revision = hub_revision self.upload_strategy = upload_strategy + self.save_trainer_state = save_trainer_state self.tag = -1 self.is_model_id = None self.max_checkpoint_num = None @@ -219,7 +222,8 @@ class CheckpointHook(Hook): checkpoint_path_prefix = os.path.join(self.save_dir, prefix) meta = self._create_training_state(trainer) self.processor.save_checkpoints(trainer, checkpoint_path_prefix, - self.output_dir, meta) + self.output_dir, meta, + self.save_trainer_state) self.save_evaluate_results(trainer) self.history_checkpoints.append(checkpoint_path_prefix) self._remove_obsolete_checkpoints(trainer) @@ -399,7 +403,8 @@ class BestCkptSaverHook(CheckpointHook): self._best_ckpt_file = checkpoint_path_prefix meta = self._create_training_state(trainer) self.processor.save_checkpoints(trainer, checkpoint_path_prefix, - self.output_dir, meta) + self.output_dir, meta, + self.save_trainer_state) self.save_evaluate_results(trainer) self.history_checkpoints.add(checkpoint_path_prefix) self._remove_obsolete_checkpoints(trainer) diff --git a/modelscope/trainers/hooks/checkpoint/checkpoint_processor.py b/modelscope/trainers/hooks/checkpoint/checkpoint_processor.py index 4693968a..43a533c6 100644 --- a/modelscope/trainers/hooks/checkpoint/checkpoint_processor.py +++ b/modelscope/trainers/hooks/checkpoint/checkpoint_processor.py @@ -104,7 +104,8 @@ class CheckpointProcessor: trainer, checkpoint_path_prefix, output_dir, - meta=None): + meta=None, + save_optimizers=True): """Save the state dict for trainer and model. This is a strategic function which can be registered by other hook's function. @@ -115,13 +116,15 @@ class CheckpointProcessor: like: /tmp/test/epoch_0 output_dir(`str`): The output dir for inference. meta: (`dict`): The meta info needed to be saved into files. + save_optimizers: (`bool`): Do save the optimizers state """ model = trainer.unwrap_module(trainer.model) _model_file, _train_state_file = self._get_state_file_name( checkpoint_path_prefix) # Save pth file without model state_dict - self.save_trainer_state(trainer, model, _train_state_file, meta) + self.save_trainer_state(trainer, model, _train_state_file, meta, + save_optimizers) self.save_model_state(model, _model_file) self.link(model, _model_file, output_dir) @@ -175,7 +178,8 @@ class CheckpointProcessor: 'changing to copy the bin file, this may use more disk space.') shutil.copyfile(src_file, dest_file) - def save_trainer_state(self, trainer, model, train_state_file, meta): + def save_trainer_state(self, trainer, model, train_state_file, meta, + save_optimizers): """Save the trainer state, including optimizer/lr_scheduler's state dict, random states etc. Args: @@ -183,12 +187,13 @@ class CheckpointProcessor: model: The model instance. train_state_file: The target file name for saving trainer states. meta: Some extra meta info. + save_optimizers: Save optimizers state or not. 
""" save_checkpoint( model, train_state_file, - trainer.optimizer, - trainer.lr_scheduler, + trainer.optimizer if save_optimizers else None, + trainer.lr_scheduler if save_optimizers else None, meta=meta, with_model=False) diff --git a/modelscope/trainers/hooks/distributed/deepspeed_hook.py b/modelscope/trainers/hooks/distributed/deepspeed_hook.py index 28d5d79b..868912ba 100644 --- a/modelscope/trainers/hooks/distributed/deepspeed_hook.py +++ b/modelscope/trainers/hooks/distributed/deepspeed_hook.py @@ -156,7 +156,8 @@ class DeepspeedProcessor(CheckpointProcessor, LrSchedulerProcessor, trainer, checkpoint_path_prefix, output_dir, - meta=None): + meta=None, + save_optimizers=True): model = trainer.unwrap_module(trainer.model) _train_state_file = checkpoint_path_prefix + self.rank_name( ) + CheckpointProcessor.TRAINER_STATE_SUFFIX diff --git a/modelscope/trainers/hooks/distributed/megatron_hook.py b/modelscope/trainers/hooks/distributed/megatron_hook.py index 66c857df..302f3f36 100644 --- a/modelscope/trainers/hooks/distributed/megatron_hook.py +++ b/modelscope/trainers/hooks/distributed/megatron_hook.py @@ -57,7 +57,8 @@ class MpuProcessor(CheckpointProcessor): trainer, checkpoint_path_prefix, output_dir, - meta=None): + meta=None, + save_optimizers=True): model = trainer.unwrap_module(trainer.model) _train_state_file = checkpoint_path_prefix + self.rank_name( ) + CheckpointProcessor.TRAINER_STATE_SUFFIX @@ -65,8 +66,8 @@ class MpuProcessor(CheckpointProcessor): save_checkpoint( model, _train_state_file, - trainer.optimizer, - trainer.lr_scheduler, + trainer.optimizer if save_optimizers else None, + trainer.lr_scheduler if save_optimizers else None, meta=meta, with_model=False) diff --git a/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py b/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py index 65623ed8..3623e2b4 100644 --- a/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/dreambooth_diffusion/dreambooth_diffusion_trainer.py @@ -41,7 +41,8 @@ class DreamboothCheckpointProcessor(CheckpointProcessor): trainer, checkpoint_path_prefix, output_dir, - meta=None): + meta=None, + save_optimizers=True): """Save the state dict for dreambooth model. """ pipeline_args = {} diff --git a/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py b/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py index 40da164e..99351fef 100644 --- a/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py @@ -21,7 +21,8 @@ class LoraDiffusionCheckpointProcessor(CheckpointProcessor): trainer, checkpoint_path_prefix, output_dir, - meta=None): + meta=None, + save_optimizers=True): """Save the state dict for lora tune model. 
""" trainer.model.unet = trainer.model.unet.to(torch.float32) diff --git a/modelscope/utils/logger.py b/modelscope/utils/logger.py index 17923a6d..58d007c5 100644 --- a/modelscope/utils/logger.py +++ b/modelscope/utils/logger.py @@ -25,7 +25,7 @@ def get_logger(log_file: Optional[str] = None, logger_name = __name__.split('.')[0] logger = logging.getLogger(logger_name) - + logger.propagate = False if logger_name in init_loggers: add_file_handler_if_needed(logger, log_file, file_mode, log_level) return logger From 66795aa3ffbce29c232fd3f66a23e93b43170538 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 19 Jul 2023 09:41:21 +0800 Subject: [PATCH 52/87] change tests level --- tests/trainers/test_custom_diffusion_trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/trainers/test_custom_diffusion_trainer.py b/tests/trainers/test_custom_diffusion_trainer.py index 6c647401..025168d3 100644 --- a/tests/trainers/test_custom_diffusion_trainer.py +++ b/tests/trainers/test_custom_diffusion_trainer.py @@ -38,7 +38,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): shutil.rmtree(self.tmp_dir) super().tearDown() - @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_custom_diffusion_train(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' @@ -76,7 +76,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): output = pipe({'text': prompt}) cv2.imwrite('./custom_result.png', output['output_imgs'][0]) - @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') def test_dreambooth_diffusion_eval(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' From 3283a46c52ebf22f7bcc98334cf5731b9576fe48 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Wed, 19 Jul 2023 17:22:03 +0800 Subject: [PATCH 53/87] Add Llama2 from hf (#394) * update llama code * add llama2 * revert llama code * add llama2 registry * fix bug --- modelscope/metainfo.py | 1 + modelscope/models/nlp/__init__.py | 5 + modelscope/models/nlp/llama2/__init__.py | 29 + modelscope/models/nlp/llama2/backbone.py | 667 ++++++++++++++++++ modelscope/models/nlp/llama2/configuration.py | 161 +++++ .../models/nlp/llama2/text_generation.py | 182 +++++ modelscope/models/nlp/llama2/tokenization.py | 393 +++++++++++ .../models/nlp/llama2/tokenization_fast.py | 249 +++++++ 8 files changed, 1687 insertions(+) create mode 100644 modelscope/models/nlp/llama2/__init__.py create mode 100755 modelscope/models/nlp/llama2/backbone.py create mode 100644 modelscope/models/nlp/llama2/configuration.py create mode 100644 modelscope/models/nlp/llama2/text_generation.py create mode 100644 modelscope/models/nlp/llama2/tokenization.py create mode 100644 modelscope/models/nlp/llama2/tokenization_fast.py diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index 2b5bca26..3d9ba089 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -165,6 +165,7 @@ class Models(object): doc2bot = 'doc2bot' peer = 'peer' llama = 'llama' + llama2 = 'llama2' chatglm_6b = 'chatglm6b' chatglm2_6b = 'chatglm2-6b' diff --git a/modelscope/models/nlp/__init__.py b/modelscope/models/nlp/__init__.py index c99f04ec..5b37b754 100644 --- a/modelscope/models/nlp/__init__.py +++ b/modelscope/models/nlp/__init__.py @@ -75,6 +75,7 
@@ if TYPE_CHECKING: DocumentGroundedDialogRerankModel) from .xlm_roberta import XLMRobertaConfig, XLMRobertaModel from .llama import LlamaForTextGeneration, LlamaConfig, LlamaModel, LlamaTokenizer, LlamaTokenizerFast + from .llama2 import Llama2ForTextGeneration, Llama2Config, Llama2Model, Llama2Tokenizer, Llama2TokenizerFast else: _import_structure = { @@ -170,6 +171,10 @@ else: 'LlamaForTextGeneration', 'LlamaConfig', 'LlamaModel', 'LlamaTokenizer', 'LlamaTokenizerFast' ], + 'llama2': [ + 'Llama2ForTextGeneration', 'Llama2Config', 'Llama2Model', + 'Llama2Tokenizer', 'Llama2TokenizerFast' + ], } import sys diff --git a/modelscope/models/nlp/llama2/__init__.py b/modelscope/models/nlp/llama2/__init__.py new file mode 100644 index 00000000..12a295b6 --- /dev/null +++ b/modelscope/models/nlp/llama2/__init__.py @@ -0,0 +1,29 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from typing import TYPE_CHECKING + +from modelscope.utils.import_utils import LazyImportModule + +if TYPE_CHECKING: + from .configuration import Llama2Config + from .text_generation import Llama2ForTextGeneration + from .backbone import Llama2Model + from .tokenization import Llama2Tokenizer + from .tokenization_fast import Llama2TokenizerFast +else: + _import_structure = { + 'configuration': ['Llama2Config'], + 'text_generation': ['Llama2ForTextGeneration'], + 'backbone': ['Llama2Model'], + 'tokenization': ['Llama2Tokenizer'], + 'tokenization_fast': ['Llama2TokenizerFast'], + } + + import sys + + sys.modules[__name__] = LazyImportModule( + __name__, + globals()['__file__'], + _import_structure, + module_spec=__spec__, + extra_objects={}, + ) diff --git a/modelscope/models/nlp/llama2/backbone.py b/modelscope/models/nlp/llama2/backbone.py new file mode 100755 index 00000000..b8998e78 --- /dev/null +++ b/modelscope/models/nlp/llama2/backbone.py @@ -0,0 +1,667 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch LLaMA model.""" +from typing import List, Optional, Tuple, Union + +import math +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from transformers.activations import ACT2FN +from transformers.modeling_outputs import BaseModelOutputWithPast +from transformers.modeling_utils import PreTrainedModel + +from modelscope import TorchModel, Model +from modelscope.metainfo import Models +from modelscope.utils.constant import Tasks +from modelscope.utils.logger import get_logger +from .configuration import Llama2Config +from ... 
import MODELS + +logger = get_logger(__name__) + +_CONFIG_FOR_DOC = 'Llama2Config' + + +# This file is mainly copied from the llama code of transformers +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + LlamaRMSNorm is equivalent to T5LayerNorm + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return self.weight * hidden_states.to(input_dtype) + + +class LlamaRotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq) + + # Build here to make `torch.jit.trace` work. 
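        # Illustrative aside (not in the upstream file): _set_cos_sin_cache below
        # precomputes cos/sin tables of shape [1, 1, seq_len, head_dim] so that
        # attention can rotate q/k by position without recomputing the frequencies
        # on every forward pass. A minimal standalone sketch of the same rotation,
        # assuming head_dim=4 and base=10000 as above:
        #
        #   import torch
        #   head_dim, seq_len = 4, 8
        #   inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
        #   freqs = torch.einsum('i,j->ij', torch.arange(seq_len).float(), inv_freq)
        #   emb = torch.cat((freqs, freqs), dim=-1)       # [seq_len, head_dim]
        #   cos, sin = emb.cos(), emb.sin()
        #   q, pos = torch.randn(head_dim), 3
        #   rotated = torch.cat((-q[head_dim // 2:], q[:head_dim // 2]))  # rotate_half(q)
        #   q_embed = q * cos[pos] + rotated * sin[pos]   # what apply_rotary_pos_emb does per position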
+ self._set_cos_sin_cache( + seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() + ) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + + +class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with Dynamic NTK scaling. 
Credits to the Reddit users /u/bloc97 and /u/emozilla""" + + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ( + (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) + ) ** (self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq) + + t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. + cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] + sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] + cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +class LlamaMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.pretraining_tp = config.pretraining_tp + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + def forward(self, x): + if self.pretraining_tp > 1: + slice = self.intermediate_size // self.pretraining_tp + gate_proj_slices = self.gate_proj.weight.split(slice, dim=0) + up_proj_slices = self.up_proj.weight.split(slice, dim=0) + down_proj_slices = self.down_proj.weight.split(slice, dim=1) + + gate_proj = torch.cat([F.linear(x, gate_proj_slices[i]) for i in range(self.pretraining_tp)], dim=-1) + up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.pretraining_tp)], dim=-1) + + intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) + down_proj = [F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.pretraining_tp)] + down_proj = sum(down_proj) + else: + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + + return down_proj + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class LlamaAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: Llama2Config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.pretraining_tp = config.pretraining_tp + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." + ) + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + self._init_rope() + + def _init_rope(self): + if self.config.rope_scaling is None: + self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + if scaling_type == "linear": + self.rotary_emb = LlamaLinearScalingRotaryEmbedding( + self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor + ) + elif scaling_type == "dynamic": + self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding( + self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + if self.pretraining_tp > 1: + key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.pretraining_tp + query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.pretraining_tp, dim=0) + key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) + value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) + + query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.pretraining_tp)] + query_states = torch.cat(query_states, dim=-1) + + key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.pretraining_tp)] + key_states = 
torch.cat(key_states, dim=-1) + + value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.pretraining_tp)] + value_states = torch.cat(value_states, dim=-1) + + else: + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + kv_seq_len = key_states.shape[-2] + if past_key_value is not None: + kv_seq_len += past_key_value[0].shape[-2] + cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + + if past_key_value is not None: + # reuse k, v, self_attention + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + + past_key_value = (key_states, value_states) if use_cache else None + + # repeat k/v heads if n_kv_heads < n_heads + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): + raise ValueError( + f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights + attention_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + if self.pretraining_tp > 1: + attn_output = attn_output.split(self.hidden_size // self.pretraining_tp, dim=2) + o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.pretraining_tp, dim=1) + attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.pretraining_tp)]) + else: + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +class LlamaDecoderLayer(nn.Module): + def __init__(self, config: Llama2Config): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = LlamaAttention(config=config) + self.mlp = LlamaMLP(config) + self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: 
Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class LlamaPreTrainedModel(TorchModel, PreTrainedModel): + config_class = Llama2Config + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["LlamaDecoderLayer"] + _skip_keys_device_placement = "past_key_values" + + def __init__(self, config, **kwargs): + super().__init__(config.name_or_path, **kwargs) + super(Model, self).__init__(config) + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LlamaModel): + module.gradient_checkpointing = value + + @classmethod + def _instantiate(cls, **kwargs): + """Instantiate the model. + + Args: + kwargs: Input args. + model_dir: The model dir used to load the checkpoint and the label information. + num_labels: An optional arg to tell the model how many classes to initialize. + Method will call utils.parse_label_mapping if num_labels not supplied. + If num_labels is not found, the model will use the default setting (2 classes). 
+ + Returns: + The loaded model, which is initialized by transformers.PreTrainedModel.from_pretrained + """ + + model_dir = kwargs.pop('model_dir', None) + if model_dir is None: + config = Llama2Config(**kwargs) + model = cls(config) + else: + model = super(Model, cls).from_pretrained( + pretrained_model_name_or_path=model_dir, **kwargs) + model.model_dir = model_dir + return model + + +@MODELS.register_module(Tasks.backbone, module_name=Models.llama2) +class Llama2Model(LlamaPreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] + + Args: + config: Llama2Config + """ + + def __init__(self, config: Llama2Config): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You have to specify either 
decoder_input_ids or decoder_inputs_embeds") + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if position_ids is None: + device = input_ids.device if input_ids is not None else inputs_embeds.device + position_ids = torch.arange( + past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device + ) + position_ids = position_ids.unsqueeze(0).view(-1, seq_length) + else: + position_ids = position_ids.view(-1, seq_length).long() + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device + ) + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length + ) + + hidden_states = inputs_embeds + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + position_ids, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) diff --git a/modelscope/models/nlp/llama2/configuration.py b/modelscope/models/nlp/llama2/configuration.py new file mode 100644 index 00000000..f68e20e9 --- /dev/null +++ b/modelscope/models/nlp/llama2/configuration.py @@ -0,0 +1,161 @@ +# coding=utf-8 +# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. 
It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" LLaMA model configuration""" + +from transformers.configuration_utils import PretrainedConfig +from modelscope.utils.logger import get_logger + + +logger = get_logger(__name__) + +LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {} + + +class Llama2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the LLaMA-7B. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`LlamaModel`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + pretraining_tp (`int`, *optional*, defaults to `1`): + Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this + document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is + necessary to ensure exact reproducibility of the pretraining results. Please refer to [this + issue](https://github.com/pytorch/pytorch/issues/76232). + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. 
Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + rms_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the rms normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + tie_word_embeddings(`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports three scaling + strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format + is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update + `max_position_embeddings` to the expected new maximum. See the following thread for more information on how + these scaling strategies behave: + https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an + experimental feature, subject to breaking API changes in future versions. + """ + model_type = "llama" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=32000, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + rms_norm_eps=1e-6, + use_cache=True, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + pretraining_tp=1, + tie_word_embeddings=False, + rope_scaling=None, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.rms_norm_eps = rms_norm_eps + self.pretraining_tp = pretraining_tp + self.use_cache = use_cache + self.rope_scaling = rope_scaling + self._rope_scaling_validation() + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. 
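As an editorial aside, a minimal usage sketch of the configuration defined in this file may help; it assumes the module path introduced by this diff (`modelscope.models.nlp.llama2.configuration`) and shows the `num_key_value_heads` grouping and the `{'type': ..., 'factor': ...}` shape of `rope_scaling` that `_rope_scaling_validation` enforces.

```python
# Sketch only, not part of the patch: exercising Llama2Config as documented above.
from modelscope.models.nlp.llama2.configuration import Llama2Config

config = Llama2Config(
    num_key_value_heads=8,                           # 32 query heads share 8 K/V heads (GQA)
    rope_scaling={'type': 'linear', 'factor': 2.0},  # 'linear' or 'dynamic', factor is a float > 1
)
print(config.num_attention_heads // config.num_key_value_heads)  # 4 query heads per K/V head
print(config.max_position_embeddings)                            # 2048 by default

# An unknown scaling strategy is rejected by _rope_scaling_validation below.
try:
    Llama2Config(rope_scaling={'type': 'exponential', 'factor': 2.0})
except ValueError as err:
    print(err)
```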
+ """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " + f"got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}") diff --git a/modelscope/models/nlp/llama2/text_generation.py b/modelscope/models/nlp/llama2/text_generation.py new file mode 100644 index 00000000..9d43b185 --- /dev/null +++ b/modelscope/models/nlp/llama2/text_generation.py @@ -0,0 +1,182 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +from transformers.modeling_outputs import CausalLMOutputWithPast + +from modelscope.metainfo import Models +from modelscope.utils.constant import Tasks +from .backbone import LlamaPreTrainedModel, Llama2Model +from ... 
import MODELS + + +# This file is mainly copied from the llama code of transformers +@MODELS.register_module(Tasks.text_generation, module_name=Models.llama2) +class Llama2ForTextGeneration(LlamaPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.model = Llama2Model(config) + self.pretraining_tp = config.pretraining_tp + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
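A small, self-contained sketch of the `-100` masking convention described above; the shifted next-token loss in the forward pass below follows the same arithmetic (values here are invented).

```python
# Sketch only: prompt positions labelled -100 are ignored by the loss, as noted above.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 32000
logits = torch.randn(1, 5, vocab_size)            # (batch, seq_len, vocab)
labels = torch.tensor([[-100, -100, 42, 7, 2]])   # only the last three positions are supervised

# Shift so that tokens < n predict n, then flatten, mirroring the forward pass below.
shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)  # ignore_index defaults to -100
print(loss.item())
```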
+ + Returns: + + """ + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + if self.pretraining_tp > 1: + lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.pretraining_tp, dim=0) + logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.pretraining_tp)] + logits = torch.cat(logits, dim=-1) + else: + logits = self.lm_head(hidden_states) + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs + ): + if past_key_values: + input_ids = input_ids[:, -1:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -1].unsqueeze(-1) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "position_ids": position_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/modelscope/models/nlp/llama2/tokenization.py b/modelscope/models/nlp/llama2/tokenization.py new file mode 100644 index 00000000..faa14c71 --- /dev/null +++ b/modelscope/models/nlp/llama2/tokenization.py @@ -0,0 +1,393 @@ +# coding=utf-8 +# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. 
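For context on `prepare_inputs_for_generation` above: when only an `attention_mask` is passed, position ids are rebuilt from the mask so that left padding does not shift the rotary positions. A standalone sketch of that arithmetic (mask values invented):

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])     # one left-padded sequence
position_ids = attention_mask.long().cumsum(-1) - 1  # [-1, -1, 0, 1, 2]
position_ids.masked_fill_(attention_mask == 0, 1)    # padded slots get a dummy position
print(position_ids)                                  # tensor([[1, 1, 0, 1, 2]])
```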
+# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tokenization classes for LLaMA.""" +import os +from shutil import copyfile +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple + +import sentencepiece as spm + +from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer +from modelscope.utils.logger import get_logger + + +if TYPE_CHECKING: + from transformers.pipelines.conversational import Conversation + +logger = get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model", + }, + "tokenizer_file": { + "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json", + }, +} +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "hf-internal-testing/llama-tokenizer": 2048, +} +SPIECE_UNDERLINE = "▁" + +B_INST, E_INST = "[INST]", "[/INST]" +B_SYS, E_SYS = "<>\n", "\n<>\n\n" + +# fmt: off +DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your\ +answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\ +that your responses are socially unbiased and positive in nature. + +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not\ +correct. If you don't know the answer to a question, please don't share false information.""" +# fmt: on + + +class Llama2Tokenizer(PreTrainedTokenizer): + """ + Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is + no padding token in the original model. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + legacy (`bool`, *optional*, defaults to `True`): + Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622 + which includes fixes to properly handle tokens that appear after special tokens. 
A simple example: + + - `legacy=True`: + ```python + >>> from transformers import T5Tokenizer + + >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True) + >>> tokenizer.encode("Hello .") + [8774, 32099, 3, 5, 1] + ``` + - `legacy=False`: + ```python + >>> from transformers import T5Tokenizer + + >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False) + >>> tokenizer.encode("Hello .") # the extra space `[3]` is no longer here + [8774, 32099, 5, 1] + ``` + Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for + more details. + + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + unk_token="", + bos_token="", + eos_token="", + pad_token=None, + sp_model_kwargs: Optional[Dict[str, Any]] = None, + add_bos_token=True, + add_eos_token=False, + clean_up_tokenization_spaces=False, + legacy=True, + **kwargs, + ): + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + pad_token=pad_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + sp_model_kwargs=self.sp_model_kwargs, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + legacy=legacy, + **kwargs, + ) + if legacy: + logger.warning_once( + f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to" + " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" + ) + self.legacy = legacy + self.vocab_file = vocab_file + self.add_bos_token = add_bos_token + self.add_eos_token = add_eos_token + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(vocab_file) + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + state["sp_model_proto"] = self.sp_model.serialized_model_proto() + return state + + def __setstate__(self, d): + self.__dict__ = d + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.LoadFromSerializedProto(self.sp_model_proto) + + @property + def vocab_size(self): + """Returns vocab size""" + return self.sp_model.get_piece_size() + + def get_vocab(self): + """Returns vocab as a dict""" + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize + def tokenize(self, text, **kwargs) -> List[str]: + # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at + # the beginning of the text + if not self.legacy: + text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ") + return super().tokenize(text, **kwargs) + + # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize + def _tokenize(self, text): + """ + Returns a tokenized string. + + Since the sentencepiece internal model always adds a SPIECE_UNDERLINE, at the beginning of the provided text, + we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize` + function is called with specials tokens: the input is split on the special tokens, and each subsequence is + passed to `_tokenize`. Thus if a subsequence did not start with a `" "` or SPIECE_UNDERLINE, we have to remove + the extra `SPIECE_UNDERLINE` prepended. + """ + if not self.legacy: + is_first = text.startswith(SPIECE_UNDERLINE) + if is_first: + text = text[1:] + + tokens = self.sp_model.encode(text, out_type=str) + + if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE): + tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:] + return tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.sp_model.piece_to_id(token) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + token = self.sp_model.IdToPiece(index) + return token + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + current_sub_tokens = [] + out_string = "" + prev_is_special = False + for i, token in enumerate(tokens): + # make sure that special tokens are not decoded using sentencepiece model + if token in self.all_special_tokens: + if not prev_is_special and i != 0: + out_string += " " + out_string += self.sp_model.decode(current_sub_tokens) + token + prev_is_special = True + current_sub_tokens = [] + else: + current_sub_tokens.append(token) + prev_is_special = False + out_string += self.sp_model.decode(current_sub_tokens) + return out_string + + def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: + """ + Save the vocabulary and special tokens file to a directory. 
+ + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + bos_token_id = [1] if self.add_bos_token else [] + eos_token_id = [1] if self.add_eos_token else [] + + if token_ids_1 is None: + return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id + return ( + bos_token_id + + ([0] * len(token_ids_0)) + + eos_token_id + + bos_token_id + + ([0] * len(token_ids_1)) + + eos_token_id + ) + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + if token_ids_1 is None, only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of ids. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). 
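The bos/eos bookkeeping in `build_inputs_with_special_tokens` and `get_special_tokens_mask` above reduces to simple list arithmetic; a sketch with invented token ids and the default `add_bos_token=True`, `add_eos_token=False`:

```python
# Sketch only: mirrors the list concatenation above, no real vocabulary needed.
bos_ids, eos_ids = [1], []                    # add_bos_token=True, add_eos_token=False
token_ids_0, token_ids_1 = [100, 101, 102], [200, 201]

single = bos_ids + token_ids_0 + eos_ids
pair = single + bos_ids + token_ids_1 + eos_ids
print(single)   # [1, 100, 101, 102]
print(pair)     # [1, 100, 101, 102, 1, 200, 201]

# get_special_tokens_mask flags only the inserted special tokens:
mask = [1] * len(bos_ids) + [0] * len(token_ids_0) + [1] * len(eos_ids)
print(mask)     # [1, 0, 0, 0]
```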
+ """ + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = [0] * len(bos_token_id + token_ids_0 + eos_token_id) + + if token_ids_1 is not None: + output += [1] * len(bos_token_id + token_ids_1 + eos_token_id) + + return output + + def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]: + """Builds the input ids for a conversation. + This is the format used in the provided examples. System prompts should be manually added at the beginning of + the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used. + ``` + [INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer + [INST] Prompt [/INST] Answer + [INST] Prompt [/INST] + ``` + + If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following: + ```python + >>> from transformers import Conversation + + >>> Conversation( + ... "<>\n Only answer with emojis, and charades\n<>\n\nHow can I build a house in 10 septs?" + ... ) + ``` + Args: + conversation (`Conversation`): + Conversation to build input ids for. + Returns: + `List[int]`: + Input ids for the conversation. + """ + dialogue = list(conversation.iter_texts()) + if not all([is_user for is_user, msg in dialogue[::2]]) or not all( + [not is_user for is_user, msg in dialogue[1::2]] + ): + raise ValueError( + "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" + ) + + dialog_tokens: List[int] = [] + if len(conversation.past_user_inputs) > 0: + if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]: + conversation.past_user_inputs[0] = ( + B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0] + ) + elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]: + dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1]) + + dialog_tokens += sum( + [ + [self.bos_token_id] + + self.encode( + f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False + ) + + [self.eos_token_id] + for prompt, answer in zip(dialogue[::2], dialogue[1::2]) + ], + [], + ) + if not (dialogue[-1][0]): + raise ValueError(f"Last message must be from user, got {dialogue[-1]['role']}") + dialog_tokens += [self.bos_token_id] + self.encode( + f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False + ) + return dialog_tokens diff --git a/modelscope/models/nlp/llama2/tokenization_fast.py b/modelscope/models/nlp/llama2/tokenization_fast.py new file mode 100644 index 00000000..655da9dc --- /dev/null +++ b/modelscope/models/nlp/llama2/tokenization_fast.py @@ -0,0 +1,249 @@ +# coding=utf-8 +# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2020 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +from shutil import copyfile +from typing import TYPE_CHECKING, Optional, Tuple + +from tokenizers import processors + +from transformers.tokenization_utils_fast import PreTrainedTokenizerFast +from transformers.utils import is_sentencepiece_available, logging +from transformers.utils.versions import require_version + + +if TYPE_CHECKING: + from transformers.pipelines.conversational import Conversation + +require_version("tokenizers>=0.13.3") + +if is_sentencepiece_available(): + from .tokenization import Llama2Tokenizer +else: + Llama2Tokenizer = None + +logger = logging.get_logger(__name__) +VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"} + +B_INST, E_INST = "[INST]", "[/INST]" +B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" + +# fmt: off +DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your\ +answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure\ +that your responses are socially unbiased and positive in nature. + +If a question does not make any sense, or is not factually coherent, explain why instead of answering something not\ +correct. If you don't know the answer to a question, please don't share false information.""" +# fmt: on + + +class Llama2TokenizerFast(PreTrainedTokenizerFast): + """ + Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. + + This uses notably ByteFallback and no normalization. + + ``` + from transformers import LlamaTokenizerFast + + tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer") + tokenizer.encode("Hello this is a test") + >>> [1, 15043, 445, 338, 263, 1243] + ``` + + If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or + call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the + values of the first token and final token of an encoded sequence will not be correct). For more details, check out + the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation. + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that + contains the vocabulary necessary to instantiate a tokenizer. + tokenizer_file (`str`): + [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that + contains everything needed to load the tokenizer. + + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether to clean up spaces after decoding; cleanup consists of removing potential artifacts like extra + spaces. + + bos_token (`str`, *optional*, defaults to `"<s>"`): + The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token. + + eos_token (`str`, *optional*, defaults to `"</s>"`): + The end of sequence token. + + unk_token (`str`, *optional*, defaults to `"<unk>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead.
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = Llama2Tokenizer + padding_side = "left" + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + clean_up_tokenization_spaces=False, + unk_token="", + bos_token="", + eos_token="", + add_bos_token=True, + add_eos_token=False, + **kwargs, + ): + super().__init__( + vocab_file=vocab_file, + tokenizer_file=tokenizer_file, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + **kwargs, + ) + self._add_bos_token = add_bos_token + self._add_eos_token = add_eos_token + self.update_post_processor() + + self.vocab_file = vocab_file + self.can_save_slow_tokenizer = False if not self.vocab_file else True + + def update_post_processor(self): + """ + Updates the underlying post processor with the current `bos_token` and `eos_token`. + """ + bos = self.bos_token + bos_token_id = self.bos_token_id + + eos = self.eos_token + eos_token_id = self.eos_token_id + + single = f"{(bos+':0 ') * self.add_bos_token}$A:0{(' '+eos+':0') * self.add_eos_token}" + pair = f"{single}{(' '+bos+':1') * self.add_bos_token} $B:1{(' '+eos+':1') * self.add_eos_token}" + + special_tokens = [] + if self.add_bos_token: + special_tokens.append((bos, bos_token_id)) + if self.add_eos_token: + special_tokens.append((eos, eos_token_id)) + self._tokenizer.post_processor = processors.TemplateProcessing( + single=single, pair=pair, special_tokens=special_tokens + ) + + @property + def add_eos_token(self): + return self._add_eos_token + + @property + def add_bos_token(self): + return self._add_bos_token + + @add_eos_token.setter + def add_eos_token(self, value): + self._add_eos_token = value + self.update_post_processor() + + @add_bos_token.setter + def add_bos_token(self, value): + self._add_bos_token = value + self.update_post_processor() + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not self.can_save_slow_tokenizer: + raise ValueError( + "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " + "tokenizer." + ) + + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): + copyfile(self.vocab_file, out_vocab_file) + + return (out_vocab_file,) + + def _build_conversation_input_ids(self, conversation: "Conversation"): + """Builds the input ids for a conversation. + This is the format used in the provided examples. System prompts should be manually added at the beginning of + the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used. + ``` + [INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer + [INST] Prompt [/INST] Answer + [INST] Prompt [/INST] + ``` + + If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following: + ```python + >>> from transformers import Conversation + + >>> Conversation( + ... "<>\n Only answer with emojis, and charades\n<>\n\nHow can I build a house in 10 septs?" + ... ) + ``` + Args: + conversation (`Conversation`): + Conversation to build input ids for. + Returns: + `List[int]`: + Input ids for the conversation. 
+ """ + dialogue = list(conversation.iter_texts()) + if not all([is_user for is_user, msg in dialogue[::2]]) or not all( + [not is_user for is_user, msg in dialogue[1::2]] + ): + raise ValueError( + "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" + ) + + dialog_tokens = [] + if len(conversation.past_user_inputs) > 0: + if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]: + conversation.past_user_inputs[0] = ( + B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0] + ) + elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]: + dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1]) + + dialog_tokens += sum( + [ + [self.bos_token_id] + + self.encode( + f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False + ) + + [self.eos_token_id] + for prompt, answer in zip(dialogue[::2], dialogue[1::2]) + ], + [], + ) + if not (dialogue[-1][0]): + raise ValueError(f"Last message must be from user, got {dialogue[-1]['role']}") + dialog_tokens += [self.bos_token_id] + self.encode( + f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False + ) + return dialog_tokens From 2f7c669f33bf0491ca1992f5bba54d2704a349c0 Mon Sep 17 00:00:00 2001 From: Jintao Date: Wed, 19 Jul 2023 17:34:27 +0800 Subject: [PATCH 54/87] support llama2 (#393) * Unify sft and infer code into a single file * update llama2 sft infer --- examples/pytorch/llm/_common.py | 92 +++++----- examples/pytorch/llm/baichuan_infer.py | 62 ------- examples/pytorch/llm/baichuan_sft.py | 186 ------------------- examples/pytorch/llm/chatglm2_infer.py | 57 ------ examples/pytorch/llm/chatglm2_sft.py | 173 ------------------ examples/pytorch/llm/llm_infer.py | 122 +++++++++++++ examples/pytorch/llm/llm_sft.py | 237 +++++++++++++++++++++++++ examples/pytorch/llm/run_infer.sh | 5 + examples/pytorch/llm/run_sft.sh | 8 + 9 files changed, 418 insertions(+), 524 deletions(-) delete mode 100644 examples/pytorch/llm/baichuan_infer.py delete mode 100644 examples/pytorch/llm/baichuan_sft.py delete mode 100644 examples/pytorch/llm/chatglm2_infer.py delete mode 100644 examples/pytorch/llm/chatglm2_sft.py create mode 100644 examples/pytorch/llm/llm_infer.py create mode 100644 examples/pytorch/llm/llm_sft.py create mode 100644 examples/pytorch/llm/run_infer.sh create mode 100644 examples/pytorch/llm/run_sft.sh diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py index 161c99bf..ad82e086 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/_common.py @@ -5,6 +5,7 @@ import os import random import re import sys +from dataclasses import dataclass, field from functools import partial from typing import Any, Callable, Dict, List, Optional, Tuple, Union @@ -15,7 +16,7 @@ import numpy as np import torch import torch.nn as nn import torch.optim as optim -from datasets import Dataset as HFDataset +from datasets import Dataset as HfDataset from datasets import concatenate_datasets from matplotlib.axes import Axes from matplotlib.figure import Figure @@ -36,6 +37,8 @@ from torch.utils.data import Dataset from torchmetrics import Accuracy, MeanMetric # from tqdm import tqdm +from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer, + GenerationConfig, HfArgumentParser, TextStreamer) # from modelscope import (Model, MsDataset, get_logger, read_config, @@ -57,6 +60,7 @@ PROMPT = """Human: 
{instruction} AI: """ logger = get_logger() +os.environ['TOKENIZERS_PARALLELISM'] = 'true' # @@ -150,12 +154,12 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, return T_max -def tokenize_function(example: Dict[str, str], +def tokenize_function(example: Dict[str, Optional[str]], tokenizer, max_length: Optional[int] = 2048) -> Dict[str, Any]: """Only applicable to baichuan and chatglm2. Other models need to be tested""" - instruction = example['instruction'] - input_: str = example['input'] + instruction: str = example['instruction'] + input_ = example['input'] if input_ is not None and input_ != '': # instruction = instruction + '\n' if input_.startswith('输入:'): @@ -187,7 +191,7 @@ def tokenize_function(example: Dict[str, str], return {'input_ids': input_ids, 'labels': labels} -def stat_dataset(dataset: HFDataset) -> None: +def stat_dataset(dataset: HfDataset) -> None: """Statistical analysis was performed on the data set""" _token_len = [] for d in dataset: @@ -202,8 +206,8 @@ def stat_dataset(dataset: HFDataset) -> None: ) -def print_examples(examples: Dict[str, Any], tokenizer) -> None: - input_ids, labels = examples['input_ids'], examples['labels'] +def print_example(example: Dict[str, Any], tokenizer) -> None: + input_ids, labels = example['input_ids'], example['labels'] print(f'[INPUT_IDS] {input_ids}') print(f'[INPUT] {tokenizer.decode(input_ids)}') print() @@ -305,48 +309,24 @@ def _add_special_token(tokenizer): f'pad_token_id: {tokenizer.pad_token_id}') -def get_baichuan7B_model_tokenizer(model_dir: str, - load_model: bool = True, - add_special_token: bool = True): +def get_baichuan_model_tokenizer(model_dir: str, + load_model: bool = True, + add_special_token: bool = True): sys.path.insert(0, model_dir) - from configuration_baichuan import BaiChuanConfig - from tokenization_baichuan import BaiChuanTokenizer - from modeling_baichuan import BaiChuanForCausalLM - model_config = BaiChuanConfig.from_pretrained(model_dir) + model_config = AutoConfig.from_pretrained( + model_dir, trust_remote_code=True) model_config.torch_dtype = torch.float16 logger.info(f'model_config: {model_config}') - tokenizer = BaiChuanTokenizer.from_pretrained(model_dir) + tokenizer = AutoTokenizer.from_pretrained( + model_dir, trust_remote_code=True) model = None if load_model: - model = BaiChuanForCausalLM.from_pretrained( + model = AutoModelForCausalLM.from_pretrained( model_dir, config=model_config, device_map='auto', - torch_dtype=torch.float16) - # - if add_special_token: - _add_special_token(tokenizer) - return model, tokenizer - - -def get_baichuan13B_model_tokenizer(model_dir: str, - load_model: bool = True, - add_special_token: bool = True): - sys.path.insert(0, model_dir) - from configuration_baichuan import BaichuanConfig - from tokenization_baichuan import BaichuanTokenizer - from modeling_baichuan import BaichuanForCausalLM - model_config = BaichuanConfig.from_pretrained(model_dir) - model_config.torch_dtype = torch.float16 - logger.info(f'model_config: {model_config}') - tokenizer = BaichuanTokenizer.from_pretrained(model_dir) - model = None - if load_model: - model = BaichuanForCausalLM.from_pretrained( - model_dir, - config=model_config, - device_map='auto', - torch_dtype=torch.float16) + torch_dtype=torch.float16, + trust_remote_code=True) # if add_special_token: _add_special_token(tokenizer) @@ -371,23 +351,43 @@ def get_chatglm2_model_tokenizer(model_dir: str, return model, tokenizer +def get_llama2_model_tokenizer(model_dir: str, + load_model: bool = True, + 
add_special_token: bool = True): + config = AutoConfig.from_pretrained(model_dir) + tokenizer = AutoTokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = AutoModelForCausalLM.from_pretrained( + model_dir, + config=config, + device_map='auto', + torch_dtype=torch.float16, + ) + if add_special_token: + _add_special_token(tokenizer) + return model, tokenizer + + def get_alpaca_en_zh_dataset( tokenize_function, only_val: bool = False, test_split_p: float = 0.01, - split_seed: int = 42) -> Tuple[HFDataset, HFDataset]: + split_seed: int = 42, + data_sample: Optional[int] = None) -> Tuple[HfDataset, HfDataset]: """ split: Literal['train', 'validation', None] """ - dataset_en: HFDataset = MsDataset.load( + dataset_en: HfDataset = MsDataset.load( 'AI-ModelScope/alpaca-gpt4-data-en', split='train').to_hf_dataset() - dataset_zh: HFDataset = MsDataset.load( + dataset_zh: HfDataset = MsDataset.load( 'AI-ModelScope/alpaca-gpt4-data-zh', split='train').to_hf_dataset() dataset_en = dataset_en.remove_columns(['text']) - dataset: HFDataset = concatenate_datasets([dataset_zh, dataset_en]) + dataset: HfDataset = concatenate_datasets([dataset_zh, dataset_en]) # - # dataset = dataset.select(range(1000)) # for debug + if data_sample is not None: + dataset = dataset.select(range(data_sample)) dataset = dataset.train_test_split(test_split_p, seed=split_seed) if only_val: dataset = dataset['test'] diff --git a/examples/pytorch/llm/baichuan_infer.py b/examples/pytorch/llm/baichuan_infer.py deleted file mode 100644 index 6e027347..00000000 --- a/examples/pytorch/llm/baichuan_infer.py +++ /dev/null @@ -1,62 +0,0 @@ -# ### Setting up experimental environment. -from _common import * -from transformers import TextStreamer - -device_ids = [0, 1] -select_device(device_ids) -# Note: You need to set the value of `CKPT_FPATH` -CKPT_FAPTH = '/path/to/your/iter_xxx.pth' - -# ### Loading Model and Tokenizer -BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] -if BAICHUAN_TYPE == '7B': - model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') - model, tokenizer = get_baichuan7B_model_tokenizer(model_dir) -else: - model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') - model, tokenizer = get_baichuan13B_model_tokenizer(model_dir) -model.bfloat16() # Consistent with training - -# ### Preparing lora -LORA_TARGET_MODULES = ['W_pack'] -LORA_RANK = 8 -LORA_ALPHA = 32 -LORA_DROPOUT_P = 0 # Arbitrary value -lora_config = LoRAConfig( - replace_modules=LORA_TARGET_MODULES, - rank=LORA_RANK, - lora_alpha=LORA_ALPHA, - lora_dropout=LORA_DROPOUT_P, - pretrained_weights=CKPT_FAPTH) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) - -# ### Loading Dataset -_, test_dataset = get_alpaca_en_zh_dataset(None, True) - -# ### Inference -streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) -mini_test_dataset = test_dataset.select(range(5)) -for d in mini_test_dataset: - output = d['output'] - d['output'] = None - input_ids = tokenize_function(d, tokenizer)['input_ids'] - print(f'[TEST]{tokenizer.decode(input_ids)}', end='') - input_ids = torch.tensor(input_ids)[None].cuda() - attention_mask = torch.ones_like(input_ids) - generate_ids = model.generate( - input_ids=input_ids, - max_new_tokens=512, - attention_mask=attention_mask, - streamer=streamer, - pad_token_id=tokenizer.eos_token_id, - temperature=0.7, - top_k=50, - top_p=0.7, - do_sample=True) - print() - print(f'[LABELS]{output}') - print( - 
'-----------------------------------------------------------------------------------' - ) - # input('next[ENTER]') diff --git a/examples/pytorch/llm/baichuan_sft.py b/examples/pytorch/llm/baichuan_sft.py deleted file mode 100644 index 4addc8b5..00000000 --- a/examples/pytorch/llm/baichuan_sft.py +++ /dev/null @@ -1,186 +0,0 @@ -# ### Setting up experimental environment. -""" -pip install modelscope -pip install numpy pandas matplotlib scikit-learn -pip install transformers datasets -conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia -pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate - -pip install numpy -U # Resolve torchmetrics dependencies and update numpy -""" - -from _common import * - -device_ids = [0, 1] -select_device(device_ids) -seed_everything(42) - -# ### Loading Model and Tokenizer -BAICHUAN_TYPE = '13B' # Literal['7B', '13B'] -WORK_DIR = f'runs/baichuan_{BAICHUAN_TYPE}' -# -if BAICHUAN_TYPE == '7B': - model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') - model, tokenizer = get_baichuan7B_model_tokenizer(model_dir) -else: - model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') - model, tokenizer = get_baichuan13B_model_tokenizer(model_dir) -# -GRADIENT_CHECKPOINTING = True -if GRADIENT_CHECKPOINTING: - # baichuan13B does not implement the `get_input_embeddings` function - if BAICHUAN_TYPE == '13B': - - def get_input_embeddings(self): - return self.model.embed_tokens - - model.__class__.get_input_embeddings = get_input_embeddings.__get__( - model) - model.gradient_checkpointing_enable() - model.enable_input_require_grads() - -# ### Preparing lora -LORA_TARGET_MODULES = ['W_pack'] -LORA_RANK = 8 -LORA_ALPHA = 32 -LORA_DROPOUT_P = 0.1 -lora_config = LoRAConfig( - replace_modules=LORA_TARGET_MODULES, - rank=LORA_RANK, - lora_alpha=LORA_ALPHA, - lora_dropout=LORA_DROPOUT_P) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) -# -show_freeze_layers(model) -print_model_info(model) -_p = list(model.parameters())[100] -logger.info(f'device: {_p.device}, dtype: {_p.dtype}') -model.bfloat16() - -# ### Loading Dataset -tokenize_function = partial(tokenize_function, tokenizer=tokenizer) -train_dataset, val_dataset = get_alpaca_en_zh_dataset(tokenize_function) -# Data analysis -stat_dataset(train_dataset) -stat_dataset(val_dataset) -data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) -print_examples(train_dataset[0], tokenizer) - -# ### Setting Config -cfg_file = os.path.join(model_dir, 'configuration.json') -# -BATCH_SIZE = 1 -MAX_EPOCHS = 1 -T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True) -WORK_DIR = get_work_dir(WORK_DIR) -EVAL_INTERVAL = 500 -CONFIG = Config({ - 'train': { - 'dataloader': { - 'batch_size_per_gpu': BATCH_SIZE, - 'workers_per_gpu': 1, - 'shuffle': True, - 'drop_last': True, - 'pin_memory': True - }, - 'max_epochs': - MAX_EPOCHS, - 'work_dir': - WORK_DIR, - 'optimizer': { - 'type': 'AdamW', - 'lr': 1e-4, - 'weight_decay': 0.01, - 'options': { - 'cumulative_iters': 16, - 'grad_clip': { - 'norm_type': 2, - 'max_norm': 2.0 - } - } - }, - 'lr_scheduler': { - 'type': 'CosineAnnealingLR', - 'T_max': T_max, - 'eta_min': 1e-5, - 'options': { - 'by_epoch': False, - 'warmup': { - 'type': 'LinearWarmup', - 'warmup_ratio': 0.1, - 'warmup_iters': 200 - } - } - }, - 'hooks': [ - { - 'type': 'CheckpointHook', - 'by_epoch': False, - 'interval': EVAL_INTERVAL, - 'max_checkpoint_num': 1 - }, - { - 'type': 'EvaluationHook', - 
'by_epoch': False, - 'interval': EVAL_INTERVAL - }, - { - 'type': 'BestCkptSaverHook', - 'metric_key': 'acc', - 'save_best': True, - 'rule': 'max', - 'max_checkpoint_num': 1 - }, - { - 'type': 'TextLoggerHook', - 'by_epoch': True, # Whether EpochBasedTrainer is used - 'interval': 5 - }, - { - 'type': 'TensorboardHook', - 'by_epoch': False, - 'interval': 5 - } - ] - }, - 'evaluation': { - 'dataloader': { - 'batch_size_per_gpu': BATCH_SIZE, - 'workers_per_gpu': 1, - 'shuffle': False, - 'drop_last': False, - 'pin_memory': True - }, - 'metrics': [{ - 'type': 'my_metric', - 'vocab_size': tokenizer.vocab_size - }] - } -}) - -# ### Finetuning - - -def cfg_modify_fn(cfg: Config) -> Config: - cfg.update(CONFIG) - return cfg - - -trainer = EpochBasedTrainer( - model=model, - cfg_file=cfg_file, - data_collator=data_collate_fn, - train_dataset=train_dataset, - eval_dataset=val_dataset, - remove_unused_data=True, - seed=42, - device='cpu', # No placement for model, leave the model to `device_map` - cfg_modify_fn=cfg_modify_fn, -) - -trainer.train() - -# ### Visualization -tb_dir = os.path.join(WORK_DIR, 'tensorboard_output') -plot_image(tb_dir, ['loss'], 0.9) diff --git a/examples/pytorch/llm/chatglm2_infer.py b/examples/pytorch/llm/chatglm2_infer.py deleted file mode 100644 index c1a544cb..00000000 --- a/examples/pytorch/llm/chatglm2_infer.py +++ /dev/null @@ -1,57 +0,0 @@ -# ### Setting up experimental environment. -from _common import * -from transformers import TextStreamer - -device_ids = [0, 1] -select_device(device_ids) -# Note: You need to set the value of `CKPT_FPATH` -CKPT_FAPTH = '/path/to/your/iter_xxx.pth' - -# ### Loading Model and Tokenizer -model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') -model, tokenizer = get_chatglm2_model_tokenizer(model_dir) -model.bfloat16() # Consistent with training - -# ### Preparing lora -LORA_TARGET_MODULES = ['query_key_value'] -LORA_RANK = 8 -LORA_ALPHA = 32 -LORA_DROPOUT_P = 0 # Arbitrary value -lora_config = LoRAConfig( - replace_modules=LORA_TARGET_MODULES, - rank=LORA_RANK, - lora_alpha=LORA_ALPHA, - lora_dropout=LORA_DROPOUT_P, - pretrained_weights=CKPT_FAPTH) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) - -# ### Loading Dataset -_, test_dataset = get_alpaca_en_zh_dataset(None, True) - -# ### Inference -streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) -mini_test_dataset = test_dataset.select(range(5)) -for d in mini_test_dataset: - output = d['output'] - d['output'] = None - input_ids = tokenize_function(d, tokenizer)['input_ids'] - print(f'[TEST]{tokenizer.decode(input_ids)}', end='') - input_ids = torch.tensor(input_ids)[None].cuda() - attention_mask = torch.ones_like(input_ids) - generate_ids = model.generate( - input_ids=input_ids, - max_new_tokens=512, - attention_mask=attention_mask, - streamer=streamer, - pad_token_id=tokenizer.eos_token_id, - temperature=0.7, - top_k=50, - top_p=0.7, - do_sample=True) - print() - print(f'[LABELS]{output}') - print( - '-----------------------------------------------------------------------------------' - ) - # input('next[ENTER]') diff --git a/examples/pytorch/llm/chatglm2_sft.py b/examples/pytorch/llm/chatglm2_sft.py deleted file mode 100644 index 4876025b..00000000 --- a/examples/pytorch/llm/chatglm2_sft.py +++ /dev/null @@ -1,173 +0,0 @@ -# ### Setting up experimental environment. 
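Both deleted SFT scripts (the baichuan one above and the chatglm2 one starting here) trained with batch_size_per_gpu=1, cumulative_iters=16 and a max gradient norm of 2.0, i.e. an effective batch size of 16 per GPU. A rough sketch of what those optimizer options amount to in plain PyTorch follows (illustrative stand-in model and data, not how EpochBasedTrainer implements it):

import torch

model = torch.nn.Linear(16, 2)                     # stand-in for the LoRA-wrapped LLM
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.01)
loss_fn = torch.nn.CrossEntropyLoss()
accum_steps = 16                                   # 'cumulative_iters'

for step in range(64):
    x = torch.randn(1, 16)                         # batch_size_per_gpu = 1
    y = torch.randint(0, 2, (1,))
    loss = loss_fn(model(x), y) / accum_steps      # scale so gradients average over the window
    loss.backward()
    if (step + 1) % accum_steps == 0:              # one optimizer step per 16 micro-batches
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=2.0)
        optimizer.step()
        optimizer.zero_grad()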
-""" -pip install modelscope -pip install numpy pandas matplotlib scikit-learn -pip install transformers datasets -conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia -pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer accelerate - -pip install numpy -U # Resolve torchmetrics dependencies and update numpy -""" - -from _common import * - -device_ids = [0, 1] -select_device(device_ids) -seed_everything(42) - -# ### Loading Model and Tokenizer -WORK_DIR = 'runs/chatglm2' -# -model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') -model, tokenizer = get_chatglm2_model_tokenizer(model_dir) -# -GRADIENT_CHECKPOINTING = True -if GRADIENT_CHECKPOINTING: - model.gradient_checkpointing_enable() - model.enable_input_require_grads() - -# ### Preparing lora -LORA_TARGET_MODULES = ['query_key_value'] -LORA_RANK = 8 -LORA_ALPHA = 32 -LORA_DROPOUT_P = 0.1 -lora_config = LoRAConfig( - replace_modules=LORA_TARGET_MODULES, - rank=LORA_RANK, - lora_alpha=LORA_ALPHA, - lora_dropout=LORA_DROPOUT_P) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) -# -show_freeze_layers(model) -print_model_info(model) -_p = list(model.parameters())[100] -logger.info(f'device: {_p.device}, dtype: {_p.dtype}') -model.bfloat16() - -# ### Loading Dataset -tokenize_function = partial(tokenize_function, tokenizer=tokenizer) -train_dataset, val_dataset = get_alpaca_en_zh_dataset(tokenize_function) -# Data analysis -stat_dataset(train_dataset) -stat_dataset(val_dataset) -data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) -print_examples(train_dataset[0], tokenizer) - -# ### Setting Config -cfg_file = os.path.join(model_dir, 'configuration.json') -# -BATCH_SIZE = 1 -MAX_EPOCHS = 1 -T_max = get_T_max(len(train_dataset), BATCH_SIZE, MAX_EPOCHS, True) -WORK_DIR = get_work_dir(WORK_DIR) -EVAL_INTERVAL = 500 -CONFIG = Config({ - 'train': { - 'dataloader': { - 'batch_size_per_gpu': BATCH_SIZE, - 'workers_per_gpu': 1, - 'shuffle': True, - 'drop_last': True, - 'pin_memory': True - }, - 'max_epochs': - MAX_EPOCHS, - 'work_dir': - WORK_DIR, - 'optimizer': { - 'type': 'AdamW', - 'lr': 1e-4, - 'weight_decay': 0.01, - 'options': { - 'cumulative_iters': 16, - 'grad_clip': { - 'norm_type': 2, - 'max_norm': 2.0 - } - } - }, - 'lr_scheduler': { - 'type': 'CosineAnnealingLR', - 'T_max': T_max, - 'eta_min': 1e-5, - 'options': { - 'by_epoch': False, - 'warmup': { - 'type': 'LinearWarmup', - 'warmup_ratio': 0.1, - 'warmup_iters': 200 - } - } - }, - 'hooks': [ - { - 'type': 'CheckpointHook', - 'by_epoch': False, - 'interval': EVAL_INTERVAL, - 'max_checkpoint_num': 1 - }, - { - 'type': 'EvaluationHook', - 'by_epoch': False, - 'interval': EVAL_INTERVAL - }, - { - 'type': 'BestCkptSaverHook', - 'metric_key': 'acc', - 'save_best': True, - 'rule': 'max', - 'max_checkpoint_num': 1 - }, - { - 'type': 'TextLoggerHook', - 'by_epoch': True, # Whether EpochBasedTrainer is used - 'interval': 5 - }, - { - 'type': 'TensorboardHook', - 'by_epoch': False, - 'interval': 5 - } - ] - }, - 'evaluation': { - 'dataloader': { - 'batch_size_per_gpu': BATCH_SIZE, - 'workers_per_gpu': 1, - 'shuffle': False, - 'drop_last': False, - 'pin_memory': True - }, - 'metrics': [{ - 'type': 'my_metric', - 'vocab_size': tokenizer.vocab_size - }] - } -}) - -# ### Finetuning - - -def cfg_modify_fn(cfg: Config) -> Config: - cfg.update(CONFIG) - return cfg - - -trainer = EpochBasedTrainer( - model=model, - cfg_file=cfg_file, - data_collator=data_collate_fn, - train_dataset=train_dataset, - 
eval_dataset=val_dataset, - remove_unused_data=True, - seed=42, - device='cpu', # No placement for model, leave the model to `device_map` - cfg_modify_fn=cfg_modify_fn, -) - -trainer.train() - -# ### Visualization -tb_dir = os.path.join(WORK_DIR, 'tensorboard_output') -plot_image(tb_dir, ['loss'], 0.9) diff --git a/examples/pytorch/llm/llm_infer.py b/examples/pytorch/llm/llm_infer.py new file mode 100644 index 00000000..cac59bee --- /dev/null +++ b/examples/pytorch/llm/llm_infer.py @@ -0,0 +1,122 @@ +# ### Setting up experimental environment. +from _common import * + + +@dataclass +class Arguments: + device: str = '0' # e.g. '-1'; '0'; '0,1' + model_type: str = field( + default='baichuan-7b', + metadata={ + 'choices': + ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] + }) + ckpt_fpath: str = '' # e.g. '/path/to/your/iter_xxx.pth' + eval_human: bool = False # False: eval test_dataset + data_sample: Optional[int] = None + # + lora_target_modules: Optional[List[str]] = None + lora_rank: int = 8 + lora_alpha: int = 32 + lora_dropout_p: float = 0.1 + # + max_new_tokens: int = 512 + temperature: float = 0.9 + top_k: int = 50 + top_p: float = 0.9 + + def __post_init__(self): + if self.lora_target_modules is None: + if self.model_type in {'baichuan-7b', 'baichuan-13b'}: + self.lora_target_modules = ['W_pack'] + elif self.model_type == 'chatglm2': + self.lora_target_modules = ['query_key_value'] + elif self.model_type == 'llama2-7b': + self.lora_target_modules = ['q_proj', 'k_proj', 'v_proj'] + else: + raise ValueError(f'model_type: {self.model_type}') + # + if not os.path.isfile(self.ckpt_fpath): + raise ValueError('Please enter a valid fpath') + + +def parse_args() -> Arguments: + args, = HfArgumentParser([Arguments]).parse_args_into_dataclasses() + return args + + +args = parse_args() +logger.info(args) +select_device(args.device) + +# ### Loading Model and Tokenizer +if args.model_type == 'baichuan-7b': + model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') + model, tokenizer = get_baichuan_model_tokenizer(model_dir) +elif args.model_type == 'baichuan-13b': + model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') + model, tokenizer = get_baichuan_model_tokenizer(model_dir) +elif args.model_type == 'chatglm2': + model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') + model, tokenizer = get_chatglm2_model_tokenizer(model_dir) +elif args.model_type == 'llama2-7b': + model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.0') + model, tokenizer = get_llama2_model_tokenizer(model_dir) +else: + raise ValueError(f'model_type: {args.model_type}') + +# ### Preparing lora +lora_config = LoRAConfig( + replace_modules=args.lora_target_modules, + rank=args.lora_rank, + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout_p, + pretrained_weights=args.ckpt_fpath) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) +model.bfloat16() # Consistent with training + +# ### Inference +streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) +generation_config = GenerationConfig( + max_new_tokens=args.max_new_tokens, + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + do_sample=True, + pad_token_id=tokenizer.eos_token_id) +logger.info(generation_config) + + +def inference(data: Dict[str, Optional[str]]) -> str: + input_ids = tokenize_function(data, tokenizer)['input_ids'] + print(f'[TEST]{tokenizer.decode(input_ids)}', end='') + input_ids = 
torch.tensor(input_ids)[None].cuda() + attention_mask = torch.ones_like(input_ids) + generate_ids = model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + streamer=streamer, + generation_config=generation_config) + output_text = tokenizer.decode(generate_ids[0]) + return output_text + + +if args.eval_human: + while True: + instruction = input('<<< ') + data = {'instruction': instruction, 'input': None, 'output': None} + inference(data) + print('-' * 80) +else: + _, test_dataset = get_alpaca_en_zh_dataset( + None, True, split_seed=42, data_sample=None) + mini_test_dataset = test_dataset.select(range(10)) + for data in mini_test_dataset: + output = data['output'] + data['output'] = None + inference(data) + print() + print(f'[LABELS]{output}') + print('-' * 80) + # input('next[ENTER]') diff --git a/examples/pytorch/llm/llm_sft.py b/examples/pytorch/llm/llm_sft.py new file mode 100644 index 00000000..5e835625 --- /dev/null +++ b/examples/pytorch/llm/llm_sft.py @@ -0,0 +1,237 @@ +# ### Setting up experimental environment. +""" +pip install modelscope +pip install numpy pandas matplotlib scikit-learn +pip install transformers datasets +conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia +pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer +pip install accelerate transformers_stream_generator + +pip install numpy -U # Resolve torchmetrics dependencies and update numpy +""" + +from _common import * + + +@dataclass +class Arguments: + device: str = '0,1' # e.g. '-1'; '0'; '0,1' + seed: int = 42 + model_type: str = field( + default='baichuan-7b', + metadata={ + 'choices': + ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] + }) + data_sample: Optional[int] = None + # + lora_target_modules: Optional[List[str]] = None + lora_rank: int = 8 + lora_alpha: int = 32 + lora_dropout_p: float = 0.1 + # + gradient_checkpoint: bool = True + batch_size: int = 1 + max_epochs: int = 1 + eval_interval: int = 500 + learning_rate: float = 1e-4 + weight_decay: float = 0.01 + n_accumulate_grad: int = 16 + grad_clip_norm: float = 1. 
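Both new entry points, llm_infer.py and llm_sft.py, share this CLI pattern: a plain dataclass whose model-specific defaults (here the LoRA target modules) are filled in by __post_init__, then parsed with transformers' HfArgumentParser. A stripped-down, self-contained sketch of the pattern, with a reduced field set chosen only for illustration:

from dataclasses import dataclass, field
from typing import List, Optional

from transformers import HfArgumentParser


@dataclass
class MiniArguments:
    model_type: str = field(
        default='baichuan-7b',
        metadata={'choices': ['baichuan-7b', 'chatglm2']})
    lora_target_modules: Optional[List[str]] = None

    def __post_init__(self):
        # pick per-model defaults when none are given on the command line
        if self.lora_target_modules is None:
            if 'baichuan' in self.model_type:
                self.lora_target_modules = ['W_pack']
            else:
                self.lora_target_modules = ['query_key_value']


mini_args, = HfArgumentParser([MiniArguments]).parse_args_into_dataclasses()
print(mini_args)  # e.g. run as: python foo.py --model_type chatglm2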
+ warmup_iters: int = 200 + last_max_checkpoint_num: int = 1 + best_max_checkpoint_num: int = 1 + # + logging_interval: int = 5 + tb_interval: int = 5 + + def __post_init__(self): + if self.lora_target_modules is None: + if self.model_type in {'baichuan-7b', 'baichuan-13b'}: + self.lora_target_modules = ['W_pack'] + elif self.model_type == 'chatglm2': + self.lora_target_modules = ['query_key_value'] + elif self.model_type == 'llama2-7b': + self.lora_target_modules = ['q_proj', 'k_proj', 'v_proj'] + else: + raise ValueError(f'model_type: {self.model_type}') + + +def parse_args() -> Arguments: + args, = HfArgumentParser([Arguments]).parse_args_into_dataclasses() + return args + + +args = parse_args() +logger.info(args) +select_device(args.device) +seed_everything(args.seed) + +# ### Loading Model and Tokenizer +if args.model_type == 'baichuan-7b': + model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') + model, tokenizer = get_baichuan_model_tokenizer(model_dir) +elif args.model_type == 'baichuan-13b': + model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') + model, tokenizer = get_baichuan_model_tokenizer(model_dir) +elif args.model_type == 'chatglm2': + model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') + model, tokenizer = get_chatglm2_model_tokenizer(model_dir) +elif args.model_type == 'llama2-7b': + model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.0') + model, tokenizer = get_llama2_model_tokenizer(model_dir) +else: + raise ValueError(f'model_type: {args.model_type}') + +# +if args.gradient_checkpoint: + # baichuan13B does not implement the `get_input_embeddings` function + if args.model_type == 'baichuan-13b': + + def get_input_embeddings(self): + return self.model.embed_tokens + + model.__class__.get_input_embeddings = get_input_embeddings.__get__( + model) + model.gradient_checkpointing_enable() + model.enable_input_require_grads() + +# ### Preparing lora +lora_config = LoRAConfig( + replace_modules=args.lora_target_modules, + rank=args.lora_rank, + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout_p) +logger.info(f'lora_config: {lora_config}') +Swift.prepare_model(model, lora_config) +# +show_freeze_layers(model) +print_model_info(model) +_p: Parameter = list(model.parameters())[100] +logger.info(f'device: {_p.device}, dtype: {_p.dtype}') +model.bfloat16() + +# ### Loading Dataset +tokenize_function = partial(tokenize_function, tokenizer=tokenizer) +train_dataset, val_dataset = get_alpaca_en_zh_dataset( + tokenize_function, split_seed=42, data_sample=args.data_sample) +# Data analysis +stat_dataset(train_dataset) +stat_dataset(val_dataset) +data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) +print_example(train_dataset[0], tokenizer) + +# ### Setting Config +cfg_file = os.path.join(model_dir, 'configuration.json') +# +T_max = get_T_max(len(train_dataset), args.batch_size, args.max_epochs, True) +work_dir = get_work_dir(f'runs/{args.model_type}') +config = Config({ + 'train': { + 'dataloader': { + 'batch_size_per_gpu': args.batch_size, + 'workers_per_gpu': 1, + 'shuffle': True, + 'drop_last': True, + 'pin_memory': True + }, + 'max_epochs': + args.max_epochs, + 'work_dir': + work_dir, + 'optimizer': { + 'type': 'AdamW', + 'lr': args.learning_rate, + 'weight_decay': args.weight_decay, + 'options': { + 'cumulative_iters': args.n_accumulate_grad, + 'grad_clip': { + 'norm_type': 2, + 'max_norm': args.grad_clip_norm + } + } + }, + 'lr_scheduler': { + 'type': 'CosineAnnealingLR', + 'T_max': T_max, + 
'eta_min': 0, + 'options': { + 'by_epoch': False, + 'warmup': { + 'type': 'LinearWarmup', + 'warmup_ratio': 0.1, + 'warmup_iters': args.warmup_iters + } + } + }, + 'hooks': [ + { + 'type': 'CheckpointHook', + 'by_epoch': False, + 'interval': args.eval_interval, + 'max_checkpoint_num': args.last_max_checkpoint_num + }, + { + 'type': 'EvaluationHook', + 'by_epoch': False, + 'interval': args.eval_interval + }, + { + 'type': 'BestCkptSaverHook', + 'metric_key': 'loss', + 'save_best': True, + 'rule': 'min', + 'max_checkpoint_num': args.best_max_checkpoint_num + }, + { + 'type': 'TextLoggerHook', + 'by_epoch': True, # Whether EpochBasedTrainer is used + 'interval': args.logging_interval + }, + { + 'type': 'TensorboardHook', + 'by_epoch': False, + 'interval': args.tb_interval + } + ] + }, + 'evaluation': { + 'dataloader': { + 'batch_size_per_gpu': args.batch_size, + 'workers_per_gpu': 1, + 'shuffle': False, + 'drop_last': False, + 'pin_memory': True + }, + 'metrics': [{ + 'type': 'my_metric', + 'vocab_size': tokenizer.vocab_size + }] + } +}) + +# ### Finetuning + + +def cfg_modify_fn(cfg: Config) -> Config: + cfg.update(config) + return cfg + + +trainer = EpochBasedTrainer( + model=model, + cfg_file=cfg_file, + data_collator=data_collate_fn, + train_dataset=train_dataset, + eval_dataset=val_dataset, + remove_unused_data=True, + seed=42, + device='cpu', # No placement for model, leave the model to `device_map` + cfg_modify_fn=cfg_modify_fn, +) + +trainer.train() + +# ### Visualization +tb_dir = os.path.join(work_dir, 'tensorboard_output') +plot_image(tb_dir, ['loss'], 0.9) diff --git a/examples/pytorch/llm/run_infer.sh b/examples/pytorch/llm/run_infer.sh new file mode 100644 index 00000000..5bb008a8 --- /dev/null +++ b/examples/pytorch/llm/run_infer.sh @@ -0,0 +1,5 @@ +python llm_infer.py \ + --device 0 \ + --model_type llama2-7b \ + --ckpt_fpath "runs/llama2-7b/vx_xxx/output_best/pytorch_model.bin" \ + --eval_human true diff --git a/examples/pytorch/llm/run_sft.sh b/examples/pytorch/llm/run_sft.sh new file mode 100644 index 00000000..98ae2460 --- /dev/null +++ b/examples/pytorch/llm/run_sft.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +DATE=$(date +"%Y%m%d-%H%M%S") +nohup python llm_sft.py \ + --device 0 \ + --model_type llama2-7b \ + --data_sample 25000 \ +&> train_$DATE.out & From 78b3e74fd8352e40ccd0c1b61c3c4adaaf55f32c Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Wed, 19 Jul 2023 20:59:58 +0800 Subject: [PATCH 55/87] pre-commit passed (#395) --- modelscope/models/nlp/llama2/backbone.py | 376 ++++++++++++------ modelscope/models/nlp/llama2/configuration.py | 32 +- .../models/nlp/llama2/text_generation.py | 58 +-- modelscope/models/nlp/llama2/tokenization.py | 163 ++++---- .../models/nlp/llama2/tokenization_fast.py | 86 ++-- 5 files changed, 435 insertions(+), 280 deletions(-) diff --git a/modelscope/models/nlp/llama2/backbone.py b/modelscope/models/nlp/llama2/backbone.py index b8998e78..c0983478 100755 --- a/modelscope/models/nlp/llama2/backbone.py +++ b/modelscope/models/nlp/llama2/backbone.py @@ -18,9 +18,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
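The backbone.py hunks below come from the pre-commit pass (import ordering, quoting, line wrapping); the logic is unchanged. For orientation, the additive causal mask that _make_causal_mask builds looks like this for a short sequence, shown here as a standalone sketch rather than the modelscope helper itself:

import torch

# 0 on and below the diagonal (positions the token may attend to),
# a very large negative value above it (future positions); the mask is
# later added to the attention scores before softmax.
tgt_len, dtype = 4, torch.float32
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
cond = torch.arange(tgt_len)
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
print(mask)  # row i keeps columns 0..i and masks columns i+1..tgt_len-1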
""" PyTorch LLaMA model.""" +import math from typing import List, Optional, Tuple, Union -import math import torch import torch.nn.functional as F import torch.utils.checkpoint @@ -29,12 +29,12 @@ from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.modeling_utils import PreTrainedModel -from modelscope import TorchModel, Model +from modelscope import Model, TorchModel from modelscope.metainfo import Models from modelscope.utils.constant import Tasks from modelscope.utils.logger import get_logger -from .configuration import Llama2Config from ... import MODELS +from .configuration import Llama2Config logger = get_logger(__name__) @@ -43,39 +43,51 @@ _CONFIG_FOR_DOC = 'Llama2Config' # This file is mainly copied from the llama code of transformers # Copied from transformers.models.bart.modeling_bart._make_causal_mask -def _make_causal_mask( - input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 -): +def _make_causal_mask(input_ids_shape: torch.Size, + dtype: torch.dtype, + device: torch.device, + past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape - mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) + mask = torch.full((tgt_len, tgt_len), + torch.finfo(dtype).min, + device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: - mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) - return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + _tmp_value = torch.zeros( + tgt_len, past_key_values_length, dtype=dtype, device=device) + mask = torch.cat([_tmp_value, mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, + tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): +def _expand_mask(mask: torch.Tensor, + dtype: torch.dtype, + tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, + src_len).to(dtype) inverted_mask = 1.0 - expanded_mask - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + return inverted_mask.masked_fill( + inverted_mask.to(torch.bool), + torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm @@ -88,39 +100,55 @@ class LlamaRMSNorm(nn.Module): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) - hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + hidden_states = hidden_states * torch.rsqrt(variance + + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class LlamaRotaryEmbedding(torch.nn.Module): - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): + + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base - inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) - self.register_buffer("inv_freq", inv_freq) + inv_freq = 1.0 / (self.base**(torch.arange( + 0, self.dim, 2).float().to(device) / self.dim)) # noqa + self.register_buffer('inv_freq', inv_freq) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( - seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() - ) + seq_len=max_position_embeddings, + device=self.inv_freq.device, + dtype=torch.get_default_dtype()) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = torch.arange( + self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) - freqs = torch.einsum("i,j->ij", t, self.inv_freq) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer( + 'cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer( + 'sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: - self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) + self._set_cos_sin_cache( + seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), @@ -131,26 +159,43 @@ class LlamaRotaryEmbedding(torch.nn.Module): class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with linear scaling. 
Credits to the Reddit user /u/kaiokendev""" - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = torch.arange( + self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) t = t / self.scaling_factor - freqs = torch.einsum("i,j->ij", t, self.inv_freq) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer( + 'cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer( + 'sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla""" - def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0): self.scaling_factor = scaling_factor super().__init__(dim, max_position_embeddings, base, device) @@ -159,24 +204,33 @@ class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): if seq_len > self.max_position_embeddings: base = self.base * ( - (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1) - ) ** (self.dim / (self.dim - 2)) - inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) - self.register_buffer("inv_freq", inv_freq) + (self.scaling_factor * seq_len / self.max_position_embeddings) + - (self.scaling_factor - 1))**( + self.dim / (self.dim - 2)) + inv_freq = 1.0 / (base**(torch.arange( + 0, self.dim, 2).float().to(device) / self.dim)) # noqa + self.register_buffer('inv_freq', inv_freq) - t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) + t = torch.arange( + self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) - freqs = torch.einsum("i,j->ij", t, self.inv_freq) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) - self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False) - self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False) + self.register_buffer( + 'cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer( + 'sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) def rotate_half(x): """Rotates half the hidden dims of the input.""" - x1 = x[..., : x.shape[-1] // 2] - x2 = x[..., x.shape[-1] // 2 :] + x1 = x[..., :x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) @@ -192,14 +246,18 @@ def 
apply_rotary_pos_emb(q, k, cos, sin, position_ids): class LlamaMLP(nn.Module): + def __init__(self, config): super().__init__() self.pretraining_tp = config.pretraining_tp self.hidden_size = config.hidden_size self.intermediate_size = config.intermediate_size - self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) - self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) - self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.gate_proj = nn.Linear( + self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear( + self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear( + self.intermediate_size, self.hidden_size, bias=False) self.act_fn = ACT2FN[config.hidden_act] def forward(self, x): @@ -209,14 +267,27 @@ class LlamaMLP(nn.Module): up_proj_slices = self.up_proj.weight.split(slice, dim=0) down_proj_slices = self.down_proj.weight.split(slice, dim=1) - gate_proj = torch.cat([F.linear(x, gate_proj_slices[i]) for i in range(self.pretraining_tp)], dim=-1) - up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.pretraining_tp)], dim=-1) + gate_proj = torch.cat([ + F.linear(x, gate_proj_slices[i]) + for i in range(self.pretraining_tp) + ], + dim=-1) # noqa + up_proj = torch.cat([ + F.linear(x, up_proj_slices[i]) + for i in range(self.pretraining_tp) + ], + dim=-1) # noqa - intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2) - down_proj = [F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.pretraining_tp)] + intermediate_states = (self.act_fn(gate_proj) * up_proj).split( + slice, dim=2) + down_proj = [ + F.linear(intermediate_states[i], down_proj_slices[i]) + for i in range(self.pretraining_tp) + ] down_proj = sum(down_proj) else: - down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + down_proj = self.down_proj( + self.act_fn(self.gate_proj(x)) * self.up_proj(x)) return down_proj @@ -229,8 +300,12 @@ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states - hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) - return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + hidden_states = hidden_states[:, :, + None, :, :].expand(batch, + num_key_value_heads, + n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, + head_dim) class LlamaAttention(nn.Module): @@ -249,34 +324,46 @@ class LlamaAttention(nn.Module): if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( - f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" - f" and `num_heads`: {self.num_heads})." 
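repeat_kv above is what makes grouped-query attention work: each of the K/V heads in a (batch, num_key_value_heads, seq_len, head_dim) tensor is duplicated n_rep times so it lines up with the full set of query heads. A small numeric check of that expand/reshape trick, which behaves like torch.repeat_interleave on the head axis (toy sizes, for illustration only):

import torch

batch, n_kv_heads, n_rep, slen, head_dim = 2, 4, 2, 5, 8   # 8 query heads, 4 kv heads
kv = torch.randn(batch, n_kv_heads, slen, head_dim)
out = kv[:, :, None, :, :].expand(batch, n_kv_heads, n_rep, slen, head_dim)
out = out.reshape(batch, n_kv_heads * n_rep, slen, head_dim)
assert torch.equal(out, torch.repeat_interleave(kv, n_rep, dim=1))
print(out.shape)  # torch.Size([2, 8, 5, 8])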
- ) - self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) - self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) - self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) - self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}' + f' and `num_heads`: {self.num_heads}).') + self.q_proj = nn.Linear( + self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = nn.Linear( + self.hidden_size, + self.num_key_value_heads * self.head_dim, + bias=False) + self.v_proj = nn.Linear( + self.hidden_size, + self.num_key_value_heads * self.head_dim, + bias=False) + self.o_proj = nn.Linear( + self.num_heads * self.head_dim, self.hidden_size, bias=False) self._init_rope() def _init_rope(self): if self.config.rope_scaling is None: - self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) + self.rotary_emb = LlamaRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings) else: - scaling_type = self.config.rope_scaling["type"] - scaling_factor = self.config.rope_scaling["factor"] - if scaling_type == "linear": + scaling_type = self.config.rope_scaling['type'] + scaling_factor = self.config.rope_scaling['factor'] + if scaling_type == 'linear': self.rotary_emb = LlamaLinearScalingRotaryEmbedding( - self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor - ) - elif scaling_type == "dynamic": + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor) + elif scaling_type == 'dynamic': self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding( - self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor - ) + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + scaling_factor=scaling_factor) else: - raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + raise ValueError(f'Unknown RoPE scaling type {scaling_type}') def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + return tensor.view(bsz, seq_len, self.num_heads, + self.head_dim).transpose(1, 2).contiguous() def forward( self, @@ -286,22 +373,34 @@ class LlamaAttention(nn.Module): past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], + Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() if self.pretraining_tp > 1: - key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.pretraining_tp - query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.pretraining_tp, dim=0) + key_value_slicing = (self.num_key_value_heads + * self.head_dim) // self.pretraining_tp + query_slices = self.q_proj.weight.split( + (self.num_heads * self.head_dim) // self.pretraining_tp, dim=0) key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) - query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.pretraining_tp)] + query_states = [ + F.linear(hidden_states, 
query_slices[i]) + for i in range(self.pretraining_tp) + ] query_states = torch.cat(query_states, dim=-1) - key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.pretraining_tp)] + key_states = [ + F.linear(hidden_states, key_slices[i]) + for i in range(self.pretraining_tp) + ] key_states = torch.cat(key_states, dim=-1) - value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.pretraining_tp)] + value_states = [ + F.linear(hidden_states, value_slices[i]) + for i in range(self.pretraining_tp) + ] value_states = torch.cat(value_states, dim=-1) else: @@ -309,15 +408,19 @@ class LlamaAttention(nn.Module): key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) - query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) - key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) - value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + query_states = query_states.view(bsz, q_len, self.num_heads, + self.head_dim).transpose(1, 2) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, + self.head_dim).transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, + self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) - query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) + query_states, key_states = apply_rotary_pos_emb( + query_states, key_states, cos, sin, position_ids) if past_key_value is not None: # reuse k, v, self_attention @@ -330,38 +433,43 @@ class LlamaAttention(nn.Module): key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) - attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + attn_weights = torch.matmul(query_states, key_states.transpose( + 2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( - f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" - f" {attn_weights.size()}" - ) + f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is' + f' {attn_weights.size()}') if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" + f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}' ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) + f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is' + f' {attn_output.size()}') attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, 
q_len, self.hidden_size) if self.pretraining_tp > 1: - attn_output = attn_output.split(self.hidden_size // self.pretraining_tp, dim=2) - o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.pretraining_tp, dim=1) - attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.pretraining_tp)]) + attn_output = attn_output.split( + self.hidden_size // self.pretraining_tp, dim=2) + o_proj_slices = self.o_proj.weight.split( + self.hidden_size // self.pretraining_tp, dim=1) + attn_output = sum([ + F.linear(attn_output[i], o_proj_slices[i]) + for i in range(self.pretraining_tp) + ]) else: attn_output = self.o_proj(attn_output) @@ -372,13 +480,16 @@ class LlamaAttention(nn.Module): class LlamaDecoderLayer(nn.Module): + def __init__(self, config: Llama2Config): super().__init__() self.hidden_size = config.hidden_size self.self_attn = LlamaAttention(config=config) self.mlp = LlamaMLP(config) - self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) + self.input_layernorm = LlamaRMSNorm( + config.hidden_size, eps=config.rms_norm_eps) + self.post_attention_layernorm = LlamaRMSNorm( + config.hidden_size, eps=config.rms_norm_eps) def forward( self, @@ -388,7 +499,8 @@ class LlamaDecoderLayer(nn.Module): past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, + torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` @@ -424,23 +536,23 @@ class LlamaDecoderLayer(nn.Module): hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states - outputs = (hidden_states,) + outputs = (hidden_states, ) if output_attentions: - outputs += (self_attn_weights,) + outputs += (self_attn_weights, ) if use_cache: - outputs += (present_key_value,) + outputs += (present_key_value, ) return outputs class LlamaPreTrainedModel(TorchModel, PreTrainedModel): config_class = Llama2Config - base_model_prefix = "model" + base_model_prefix = 'model' supports_gradient_checkpointing = True - _no_split_modules = ["LlamaDecoderLayer"] - _skip_keys_device_placement = "past_key_values" + _no_split_modules = ['LlamaDecoderLayer'] + _skip_keys_device_placement = 'past_key_values' def __init__(self, config, **kwargs): super().__init__(config.name_or_path, **kwargs) @@ -501,8 +613,11 @@ class Llama2Model(LlamaPreTrainedModel): self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size - self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) - self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, + self.padding_idx) + self.layers = nn.ModuleList([ + LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers) + ]) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False @@ -516,7 +631,8 @@ class Llama2Model(LlamaPreTrainedModel): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask - def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + 
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, + inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None @@ -530,12 +646,12 @@ class Llama2Model(LlamaPreTrainedModel): if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( - inputs_embeds.device - ) + expanded_attn_mask = _expand_mask( + attention_mask, inputs_embeds.dtype, + tgt_len=input_shape[-1]).to(inputs_embeds.device) combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask - ) + expanded_attn_mask if combined_attention_mask is None else + expanded_attn_mask + combined_attention_mask) return combined_attention_mask @@ -553,21 +669,25 @@ class Llama2Model(LlamaPreTrainedModel): ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + raise ValueError( + 'You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time' + ) elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + raise ValueError( + 'You have to specify either decoder_input_ids or decoder_inputs_embeds' + ) seq_length_with_past = seq_length past_key_values_length = 0 @@ -579,8 +699,10 @@ class Llama2Model(LlamaPreTrainedModel): if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( - past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device - ) + past_key_values_length, + seq_length + past_key_values_length, + dtype=torch.long, + device=device) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() @@ -589,19 +711,19 @@ class Llama2Model(LlamaPreTrainedModel): inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: - attention_mask = torch.ones( - (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device - ) + attention_mask = torch.ones((batch_size, seq_length_with_past), + dtype=torch.bool, + device=inputs_embeds.device) attention_mask = self._prepare_decoder_attention_mask( - attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length - ) + attention_mask, (batch_size, seq_length), inputs_embeds, + past_key_values_length) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`..." + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' ) use_cache = False @@ -612,13 +734,15 @@ class Llama2Model(LlamaPreTrainedModel): for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: - all_hidden_states += (hidden_states,) + all_hidden_states += (hidden_states, ) - past_key_value = past_key_values[idx] if past_key_values is not None else None + past_key_value = past_key_values[ + idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): + def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) @@ -645,20 +769,24 @@ class Llama2Model(LlamaPreTrainedModel): hidden_states = layer_outputs[0] if use_cache: - next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) + next_decoder_cache += ( + layer_outputs[2 if output_attentions else 1], ) if output_attentions: - all_self_attns += (layer_outputs[1],) + all_self_attns += (layer_outputs[1], ) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: - all_hidden_states += (hidden_states,) + all_hidden_states += (hidden_states, ) next_cache = next_decoder_cache if use_cache else None if not return_dict: - return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return tuple( + v for v in + [hidden_states, next_cache, all_hidden_states, all_self_attns] + if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, diff --git a/modelscope/models/nlp/llama2/configuration.py b/modelscope/models/nlp/llama2/configuration.py index f68e20e9..b95f9ddd 100644 --- a/modelscope/models/nlp/llama2/configuration.py +++ b/modelscope/models/nlp/llama2/configuration.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright (c) Alibaba, Inc. and its affiliates. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # @@ -21,8 +20,8 @@ """ LLaMA model configuration""" from transformers.configuration_utils import PretrainedConfig -from modelscope.utils.logger import get_logger +from modelscope.utils.logger import get_logger logger = get_logger(__name__) @@ -87,8 +86,8 @@ class Llama2Config(PretrainedConfig): https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an experimental feature, subject to breaking API changes in future versions. 
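The `dynamic` option described here corresponds to LlamaDynamicNTKScalingRotaryEmbedding above: once the requested sequence length exceeds max_position_embeddings, the RoPE base is enlarged before the inverse frequencies are rebuilt, stretching the low frequencies over the longer context. A worked example of that base adjustment, with values chosen purely for illustration:

base, dim = 10000.0, 128
max_position_embeddings, scaling_factor = 2048, 2.0
seq_len = 4096                                   # longer than the training context

if seq_len > max_position_embeddings:
    base = base * ((scaling_factor * seq_len / max_position_embeddings)
                   - (scaling_factor - 1)) ** (dim / (dim - 2))
inv_freq = [1.0 / base ** (i / dim) for i in range(0, dim, 2)]
print(f'adjusted base: {base:.1f}, num frequencies: {len(inv_freq)}')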
""" - model_type = "llama" - keys_to_ignore_at_inference = ["past_key_values"] + model_type = 'llama' + keys_to_ignore_at_inference = ['past_key_values'] def __init__( self, @@ -98,7 +97,7 @@ class Llama2Config(PretrainedConfig): num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, - hidden_act="silu", + hidden_act='silu', max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, @@ -146,16 +145,21 @@ class Llama2Config(PretrainedConfig): if self.rope_scaling is None: return - if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + if not isinstance(self.rope_scaling, + dict) or len(self.rope_scaling) != 2: raise ValueError( - "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " - f"got {self.rope_scaling}" - ) - rope_scaling_type = self.rope_scaling.get("type", None) - rope_scaling_factor = self.rope_scaling.get("factor", None) - if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' + f'got {self.rope_scaling}') + rope_scaling_type = self.rope_scaling.get('type', None) + rope_scaling_factor = self.rope_scaling.get('factor', None) + if rope_scaling_type is None or rope_scaling_type not in [ + 'linear', 'dynamic' + ]: raise ValueError( f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) - if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: - raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}") + if rope_scaling_factor is None or not isinstance( + rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError( + f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" + ) diff --git a/modelscope/models/nlp/llama2/text_generation.py b/modelscope/models/nlp/llama2/text_generation.py index 9d43b185..5fe01cbe 100644 --- a/modelscope/models/nlp/llama2/text_generation.py +++ b/modelscope/models/nlp/llama2/text_generation.py @@ -28,21 +28,22 @@ from transformers.modeling_outputs import CausalLMOutputWithPast from modelscope.metainfo import Models from modelscope.utils.constant import Tasks -from .backbone import LlamaPreTrainedModel, Llama2Model from ... 
import MODELS +from .backbone import Llama2Model, LlamaPreTrainedModel # This file is mainly copied from the llama code of transformers @MODELS.register_module(Tasks.text_generation, module_name=Models.llama2) class Llama2ForTextGeneration(LlamaPreTrainedModel): - _tied_weights_keys = ["lm_head.weight"] + _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = Llama2Model(config) self.pretraining_tp = config.pretraining_tp self.vocab_size = config.vocab_size - self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.lm_head = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() @@ -91,8 +92,8 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) @@ -110,8 +111,12 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): hidden_states = outputs[0] if self.pretraining_tp > 1: - lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.pretraining_tp, dim=0) - logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.pretraining_tp)] + lm_head_slices = self.lm_head.weight.split( + self.vocab_size // self.pretraining_tp, dim=0) + logits = [ + F.linear(hidden_states, lm_head_slices[i]) + for i in range(self.pretraining_tp) + ] logits = torch.cat(logits, dim=-1) else: logits = self.lm_head(hidden_states) @@ -131,8 +136,8 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): loss = loss_fct(shift_logits, shift_labels) if not return_dict: - output = (logits,) + outputs[1:] - return (loss,) + output if loss is not None else output + output = (logits, ) + outputs[1:] + return (loss, ) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, @@ -142,13 +147,16 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): attentions=outputs.attentions, ) - def prepare_inputs_for_generation( - self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs - ): + def prepare_inputs_for_generation(self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + **kwargs): if past_key_values: input_ids = input_ids[:, -1:] - position_ids = kwargs.get("position_ids", None) + position_ids = kwargs.get('position_ids', None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 @@ -158,25 +166,23 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: - model_inputs = {"inputs_embeds": inputs_embeds} + model_inputs = {'inputs_embeds': inputs_embeds} else: - model_inputs = {"input_ids": input_ids} + model_inputs = {'input_ids': input_ids} - model_inputs.update( - { - "position_ids": position_ids, - "past_key_values": past_key_values, - "use_cache": kwargs.get("use_cache"), - "attention_mask": attention_mask, - } - ) + 
model_inputs.update({ + 'position_ids': position_ids, + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + }) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: - reordered_past += ( - tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), - ) + reordered_past += (tuple( + past_state.index_select(0, beam_idx.to(past_state.device)) + for past_state in layer_past), ) return reordered_past diff --git a/modelscope/models/nlp/llama2/tokenization.py b/modelscope/models/nlp/llama2/tokenization.py index faa14c71..d57c6017 100644 --- a/modelscope/models/nlp/llama2/tokenization.py +++ b/modelscope/models/nlp/llama2/tokenization.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright (c) Alibaba, Inc. and its affiliates. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # @@ -18,40 +17,40 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Tokenization classes for LLaMA.""" import os from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm - from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer -from modelscope.utils.logger import get_logger +from modelscope.utils.logger import get_logger if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation logger = get_logger(__name__) -VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"} +VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model'} PRETRAINED_VOCAB_FILES_MAP = { - "vocab_file": { - "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model", + 'vocab_file': { + 'hf-internal-testing/llama-tokenizer': + 'https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer.model', }, - "tokenizer_file": { - "hf-internal-testing/llama-tokenizer": "https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json", + 'tokenizer_file': { + 'hf-internal-testing/llama-tokenizer': + 'https://huggingface.co/hf-internal-testing/llama-tokenizer/resolve/main/tokenizer_config.json', }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { - "hf-internal-testing/llama-tokenizer": 2048, + 'hf-internal-testing/llama-tokenizer': 2048, } -SPIECE_UNDERLINE = "▁" +SPIECE_UNDERLINE = '▁' -B_INST, E_INST = "[INST]", "[/INST]" -B_SYS, E_SYS = "<>\n", "\n<>\n\n" +B_INST, E_INST = '[INST]', '[/INST]' +B_SYS, E_SYS = '<>\n', '\n<>\n\n' # fmt: off DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. 
Your\ @@ -99,14 +98,14 @@ class Llama2Tokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - model_input_names = ["input_ids", "attention_mask"] + model_input_names = ['input_ids', 'attention_mask'] def __init__( self, vocab_file, - unk_token="<unk>", - bos_token="<s>", - eos_token="</s>", + unk_token='<unk>', + bos_token='<s>', + eos_token='</s>', pad_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, add_bos_token=True, @@ -116,10 +115,18 @@ class Llama2Tokenizer(PreTrainedTokenizer): **kwargs, ): self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs - bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token - eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token - unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token - pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + bos_token = AddedToken( + bos_token, lstrip=False, rstrip=False) if isinstance( + bos_token, str) else bos_token + eos_token = AddedToken( + eos_token, lstrip=False, rstrip=False) if isinstance( + eos_token, str) else eos_token + unk_token = AddedToken( + unk_token, lstrip=False, rstrip=False) if isinstance( + unk_token, str) else unk_token + pad_token = AddedToken( + pad_token, lstrip=False, rstrip=False) if isinstance( + pad_token, str) else pad_token super().__init__( bos_token=bos_token, eos_token=eos_token, @@ -134,8 +141,10 @@ class Llama2Tokenizer(PreTrainedTokenizer): ) if legacy: logger.warning_once( - f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to" - " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" + f'You are using the legacy behaviour of the {self.__class__}. ' + f'This means that tokens that come after special ' + f'tokens will not be properly handled. 
We recommend you to' + ' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' ) self.legacy = legacy self.vocab_file = vocab_file @@ -146,8 +155,8 @@ class Llama2Tokenizer(PreTrainedTokenizer): def __getstate__(self): state = self.__dict__.copy() - state["sp_model"] = None - state["sp_model_proto"] = self.sp_model.serialized_model_proto() + state['sp_model'] = None + state['sp_model_proto'] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): @@ -162,7 +171,10 @@ class Llama2Tokenizer(PreTrainedTokenizer): def get_vocab(self): """Returns vocab as a dict""" - vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab = { + self.convert_ids_to_tokens(i): i + for i in range(self.vocab_size) + } vocab.update(self.added_tokens_encoder) return vocab @@ -171,7 +183,7 @@ class Llama2Tokenizer(PreTrainedTokenizer): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: - text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ") + text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, ' ') return super().tokenize(text, **kwargs) # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize @@ -192,8 +204,10 @@ class Llama2Tokenizer(PreTrainedTokenizer): tokens = self.sp_model.encode(text, out_type=str) - if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE): - tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:] + if not self.legacy and not is_first and not text.startswith( + ' ') and tokens[0].startswith(SPIECE_UNDERLINE): + tokens = ([tokens[0][1:]] + if len(tokens[0]) > 1 else []) + tokens[1:] return tokens def _convert_token_to_id(self, token): @@ -208,13 +222,13 @@ class Llama2Tokenizer(PreTrainedTokenizer): def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] - out_string = "" + out_string = '' prev_is_special = False for i, token in enumerate(tokens): # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special and i != 0: - out_string += " " + out_string += ' ' out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] @@ -224,7 +238,9 @@ class Llama2Tokenizer(PreTrainedTokenizer): out_string += self.sp_model.decode(current_sub_tokens) return out_string - def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]: + def save_vocabulary(self, + save_directory, + filename_prefix: Optional[str] = None) -> Tuple[str]: """ Save the vocabulary and special tokens file to a directory. @@ -236,20 +252,22 @@ class Llama2Tokenizer(PreTrainedTokenizer): `Tuple(str)`: Paths to the files saved. 
""" if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") + logger.error( + f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) + save_directory, (filename_prefix + '-' if filename_prefix else '') + + VOCAB_FILES_NAMES['vocab_file']) - if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + if os.path.abspath(self.vocab_file) != os.path.abspath( + out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): - with open(out_vocab_file, "wb") as fi: + with open(out_vocab_file, 'wb') as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) - return (out_vocab_file,) + return (out_vocab_file, ) def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): bos_token_id = [self.bos_token_id] if self.add_bos_token else [] @@ -263,8 +281,10 @@ class Llama2Tokenizer(PreTrainedTokenizer): return output def get_special_tokens_mask( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False - ) -> List[int]: + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None, + already_has_special_tokens: bool = False) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. @@ -282,26 +302,23 @@ class Llama2Tokenizer(PreTrainedTokenizer): """ if already_has_special_tokens: return super().get_special_tokens_mask( - token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True - ) + token_ids_0=token_ids_0, + token_ids_1=token_ids_1, + already_has_special_tokens=True) bos_token_id = [1] if self.add_bos_token else [] eos_token_id = [1] if self.add_eos_token else [] if token_ids_1 is None: return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id - return ( - bos_token_id - + ([0] * len(token_ids_0)) - + eos_token_id - + bos_token_id - + ([0] * len(token_ids_1)) - + eos_token_id - ) + return bos_token_id + ( + [0] * len(token_ids_0)) + eos_token_id + bos_token_id + ( + [0] * len(token_ids_1)) + eos_token_id # noqa def create_token_type_ids_from_sequences( - self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None - ) -> List[int]: + self, + token_ids_0: List[int], + token_ids_1: Optional[List[int]] = None) -> List[int]: """ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT sequence pair mask has the following format: @@ -332,7 +349,8 @@ class Llama2Tokenizer(PreTrainedTokenizer): return output - def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]: + def _build_conversation_input_ids( + self, conversation: 'Conversation') -> List[int]: """Builds the input ids for a conversation. This is the format used in the provided examples. System prompts should be manually added at the beginning of the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used. 
@@ -359,35 +377,34 @@ class Llama2Tokenizer(PreTrainedTokenizer): """ dialogue = list(conversation.iter_texts()) if not all([is_user for is_user, msg in dialogue[::2]]) or not all( - [not is_user for is_user, msg in dialogue[1::2]] - ): + [not is_user for is_user, msg in dialogue[1::2]]): # noqa raise ValueError( - "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" - ) + "The model only supports 'user' and 'assistant' roles, " + 'starting with user and alternating (u/a/u/a/u...)') dialog_tokens: List[int] = [] if len(conversation.past_user_inputs) > 0: - if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]: + if not conversation.past_user_inputs[0].startswith( + B_SYS) or E_SYS not in conversation.past_user_inputs[0]: conversation.past_user_inputs[0] = ( - B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0] - ) - elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]: - dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1]) + B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + + conversation.past_user_inputs[0]) + elif not dialogue[0][1].startswith( + B_SYS) or E_SYS not in dialogue[0][1]: + dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + + E_SYS + dialogue[0][1]) dialog_tokens += sum( - [ - [self.bos_token_id] - + self.encode( - f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False - ) - + [self.eos_token_id] - for prompt, answer in zip(dialogue[::2], dialogue[1::2]) - ], + [[self.bos_token_id] + self.encode( + f'{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ', + add_special_tokens=False) + [self.eos_token_id] + for prompt, answer in zip(dialogue[::2], dialogue[1::2])], [], ) if not (dialogue[-1][0]): - raise ValueError(f"Last message must be from user, got {dialogue[-1]['role']}") + raise ValueError( + f"Last message must be from user, got {dialogue[-1]['role']}") dialog_tokens += [self.bos_token_id] + self.encode( - f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False - ) + f'{B_INST} {(dialogue[-1][1]).strip()} {E_INST}', + add_special_tokens=False) return dialog_tokens diff --git a/modelscope/models/nlp/llama2/tokenization_fast.py b/modelscope/models/nlp/llama2/tokenization_fast.py index 655da9dc..6cfae2ff 100644 --- a/modelscope/models/nlp/llama2/tokenization_fast.py +++ b/modelscope/models/nlp/llama2/tokenization_fast.py @@ -1,4 +1,3 @@ -# coding=utf-8 # Copyright (c) Alibaba, Inc. and its affiliates. # Copyright 2020 The HuggingFace Inc. team. 
# @@ -18,16 +17,14 @@ from shutil import copyfile from typing import TYPE_CHECKING, Optional, Tuple from tokenizers import processors - from transformers.tokenization_utils_fast import PreTrainedTokenizerFast from transformers.utils import is_sentencepiece_available, logging from transformers.utils.versions import require_version - if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation -require_version("tokenizers>=0.13.3") +require_version('tokenizers>=0.13.3') if is_sentencepiece_available(): from .tokenization import Llama2Tokenizer @@ -35,10 +32,13 @@ else: Llama2Tokenizer = None logger = logging.get_logger(__name__) -VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"} +VOCAB_FILES_NAMES = { + 'vocab_file': 'tokenizer.model', + 'tokenizer_file': 'tokenizer.json' +} -B_INST, E_INST = "[INST]", "[/INST]" -B_SYS, E_SYS = "<>\n", "\n<>\n\n" +B_INST, E_INST = '[INST]', '[/INST]' +B_SYS, E_SYS = '<>\n', '\n<>\n\n' # fmt: off DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your\ @@ -98,17 +98,17 @@ class Llama2TokenizerFast(PreTrainedTokenizerFast): vocab_files_names = VOCAB_FILES_NAMES slow_tokenizer_class = Llama2Tokenizer - padding_side = "left" - model_input_names = ["input_ids", "attention_mask"] + padding_side = 'left' + model_input_names = ['input_ids', 'attention_mask'] def __init__( self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, - unk_token="", - bos_token="", - eos_token="", + unk_token='', + bos_token='', + eos_token='', add_bos_token=True, add_eos_token=False, **kwargs, @@ -148,8 +148,7 @@ class Llama2TokenizerFast(PreTrainedTokenizerFast): if self.add_eos_token: special_tokens.append((eos, eos_token_id)) self._tokenizer.post_processor = processors.TemplateProcessing( - single=single, pair=pair, special_tokens=special_tokens - ) + single=single, pair=pair, special_tokens=special_tokens) @property def add_eos_token(self): @@ -169,26 +168,28 @@ class Llama2TokenizerFast(PreTrainedTokenizerFast): self._add_bos_token = value self.update_post_processor() - def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + def save_vocabulary(self, + save_directory: str, + filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( - "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " - "tokenizer." - ) + 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' + 'tokenizer.') if not os.path.isdir(save_directory): - logger.error(f"Vocabulary path ({save_directory}) should be a directory") + logger.error( + f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join( - save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] - ) + save_directory, (filename_prefix + '-' if filename_prefix else '') + + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) - return (out_vocab_file,) + return (out_vocab_file, ) - def _build_conversation_input_ids(self, conversation: "Conversation"): + def _build_conversation_input_ids(self, conversation: 'Conversation'): """Builds the input ids for a conversation. This is the format used in the provided examples. 
System prompts should be manually added at the beginning of the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used. @@ -215,35 +216,34 @@ class Llama2TokenizerFast(PreTrainedTokenizerFast): """ dialogue = list(conversation.iter_texts()) if not all([is_user for is_user, msg in dialogue[::2]]) or not all( - [not is_user for is_user, msg in dialogue[1::2]] - ): + [not is_user for is_user, msg in dialogue[1::2]]): # noqa raise ValueError( - "The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)" - ) + "The model only supports 'user' and 'assistant' roles, " + 'starting with user and alternating (u/a/u/a/u...)') dialog_tokens = [] if len(conversation.past_user_inputs) > 0: - if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]: + if not conversation.past_user_inputs[0].startswith( + B_SYS) or E_SYS not in conversation.past_user_inputs[0]: conversation.past_user_inputs[0] = ( - B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0] - ) - elif not dialogue[0][1].startswith(B_SYS) or E_SYS not in dialogue[0][1]: - dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + dialogue[0][1]) + B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + + conversation.past_user_inputs[0]) + elif not dialogue[0][1].startswith( + B_SYS) or E_SYS not in dialogue[0][1]: + dialogue[0] = (dialogue[0][0], B_SYS + DEFAULT_SYSTEM_PROMPT + + E_SYS + dialogue[0][1]) dialog_tokens += sum( - [ - [self.bos_token_id] - + self.encode( - f"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ", add_special_tokens=False - ) - + [self.eos_token_id] - for prompt, answer in zip(dialogue[::2], dialogue[1::2]) - ], + [[self.bos_token_id] + self.encode( + f'{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} ', + add_special_tokens=False) + [self.eos_token_id] + for prompt, answer in zip(dialogue[::2], dialogue[1::2])], [], ) if not (dialogue[-1][0]): - raise ValueError(f"Last message must be from user, got {dialogue[-1]['role']}") + raise ValueError( + f"Last message must be from user, got {dialogue[-1]['role']}") dialog_tokens += [self.bos_token_id] + self.encode( - f"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}", add_special_tokens=False - ) + f'{B_INST} {(dialogue[-1][1]).strip()} {E_INST}', + add_special_tokens=False) return dialog_tokens From 19ea082fec5e3b0c36d7289476e6c48285cf7ae6 Mon Sep 17 00:00:00 2001 From: "LingFeng.Chen.Cn" Date: Thu, 20 Jul 2023 09:28:55 +0800 Subject: [PATCH 56/87] Update speaker_verification_pipeline.py (#364) * Update speaker_verification_pipeline.py * fix log too long when input tuple(bytes,bytes) * Update speaker_verification_pipeline.py * fix when bytes print too much * Update speaker_verification_pipeline forward log * pre-commit linter format --------- Co-authored-by: wenmeng.zwm --- .../pipelines/audio/speaker_diarization_pipeline.py | 7 ++++++- .../pipelines/audio/speaker_verification_pipeline.py | 9 +++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/modelscope/pipelines/audio/speaker_diarization_pipeline.py b/modelscope/pipelines/audio/speaker_diarization_pipeline.py index a20cfcad..24ebb130 100644 --- a/modelscope/pipelines/audio/speaker_diarization_pipeline.py +++ b/modelscope/pipelines/audio/speaker_diarization_pipeline.py @@ -232,7 +232,12 @@ class SpeakerDiarizationPipeline(Pipeline): def forward(self, audio_in: Union[tuple, str, Any] = None) -> list: """Decoding """ - logger.info('Speaker 
Diarization Processing: {0} ...'.format(audio_in)) + # log file_path/url or tuple (str, str) + if isinstance(audio_in, str) or \ + (isinstance(audio_in, tuple) and all(isinstance(item, str) for item in audio_in)): + logger.info(f'Speaker Verification Processing: {audio_in} ...') + else: + logger.info(f'Speaker Verification Processing: {str(audio_in)[:100]} ...') data_cmd, raw_inputs = None, None if isinstance(audio_in, tuple) or isinstance(audio_in, list): diff --git a/modelscope/pipelines/audio/speaker_verification_pipeline.py b/modelscope/pipelines/audio/speaker_verification_pipeline.py index c10f6a95..c23058be 100644 --- a/modelscope/pipelines/audio/speaker_verification_pipeline.py +++ b/modelscope/pipelines/audio/speaker_verification_pipeline.py @@ -180,8 +180,13 @@ class SpeakerVerificationPipeline(Pipeline): def forward(self, audio_in: Union[tuple, str, Any] = None) -> list: """Decoding """ - logger.info( - 'Speaker Verification Processing: {0} ...'.format(audio_in)) + # log file_path/url or tuple (str, str) + if isinstance(audio_in, str) or \ + (isinstance(audio_in, tuple) and all(isinstance(item, str) for item in audio_in)): + logger.info(f'Speaker Verification Processing: {audio_in} ...') + else: + logger.info( + f'Speaker Verification Processing: {str(audio_in)[:100]} ...') data_cmd, raw_inputs = None, None if isinstance(audio_in, tuple) or isinstance(audio_in, list): From 9c4d9cdb68d37c8af5e6017217dc51371f4332d5 Mon Sep 17 00:00:00 2001 From: "LingFeng.Chen.Cn" Date: Fri, 21 Jul 2023 15:34:19 +0800 Subject: [PATCH 57/87] fix file not found cause TypeError (#385) * fix file not found cause TypeError * fix linter test * fix linter test * format code --------- Co-authored-by: Krasus Co-authored-by: wenmeng.zwm --- .../models/cv/video_stabilization/utils/WarpUtils.py | 5 +++-- modelscope/models/nlp/gpt_moe/moe/sharded_moe.py | 6 +++--- modelscope/pipelines/audio/asr_inference_pipeline.py | 7 ++++++- modelscope/pipelines/audio/speaker_diarization_pipeline.py | 3 ++- 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/modelscope/models/cv/video_stabilization/utils/WarpUtils.py b/modelscope/models/cv/video_stabilization/utils/WarpUtils.py index e8f713cd..eefd8697 100644 --- a/modelscope/models/cv/video_stabilization/utils/WarpUtils.py +++ b/modelscope/models/cv/video_stabilization/utils/WarpUtils.py @@ -46,8 +46,9 @@ def mesh_warp_frame(frame, x_motion, y_motion, cap_width, cap_height): homo, origin_kp.contiguous().view(2, -1).permute(1, 0)).permute(1, 0) - projection.append(projected_kp.contiguous().view( - *origin_kp.shape).permute(1, 2, 0)) # 2, H, W --> H, W, 2 + projection.append( + projected_kp.contiguous().view(*origin_kp.shape).permute( + 1, 2, 0)) # 2, H, W --> H, W, 2 projection = torch.stack(projection, 0) projection[:, :, :, 0] = projection[:, :, :, 0] / cfg.MODEL.WIDTH * 2. - 1. 
diff --git a/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py b/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py index f8eed00f..ff12e13a 100644 --- a/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py +++ b/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py @@ -430,9 +430,9 @@ class MOELayer(Base): self.use_expert_residual_network = use_expert_residual_network if self.use_expert_residual_network: - self.expert_network = nn.Sequential(*([ - ExpertResidualLayer(self.gate.model_dim) for _ in range(6) - ])) # noqa + self.expert_network = nn.Sequential( + *([ExpertResidualLayer(self.gate.model_dim) + for _ in range(6)])) # noqa self.use_tutel = use_tutel and TUTEL_INSTALLED diff --git a/modelscope/pipelines/audio/asr_inference_pipeline.py b/modelscope/pipelines/audio/asr_inference_pipeline.py index 823964e5..2379274c 100644 --- a/modelscope/pipelines/audio/asr_inference_pipeline.py +++ b/modelscope/pipelines/audio/asr_inference_pipeline.py @@ -210,7 +210,12 @@ class AutomaticSpeechRecognitionPipeline(Pipeline): if isinstance(audio_in, str): # for funasr code, generate wav.scp from url or local path - self.audio_in, self.raw_inputs = generate_scp_from_url(audio_in) + if audio_in.startswith('http') or os.path.isfile(audio_in): + self.audio_in, self.raw_inputs = generate_scp_from_url( + audio_in) + else: + raise FileNotFoundError( + f'file {audio_in} NOT FOUND, please CHECK!') elif isinstance(audio_in, bytes): self.audio_in = audio_in self.raw_inputs = None diff --git a/modelscope/pipelines/audio/speaker_diarization_pipeline.py b/modelscope/pipelines/audio/speaker_diarization_pipeline.py index 24ebb130..dfb808d0 100644 --- a/modelscope/pipelines/audio/speaker_diarization_pipeline.py +++ b/modelscope/pipelines/audio/speaker_diarization_pipeline.py @@ -237,7 +237,8 @@ class SpeakerDiarizationPipeline(Pipeline): (isinstance(audio_in, tuple) and all(isinstance(item, str) for item in audio_in)): logger.info(f'Speaker Verification Processing: {audio_in} ...') else: - logger.info(f'Speaker Verification Processing: {str(audio_in)[:100]} ...') + logger.info( + f'Speaker Verification Processing: {str(audio_in)[:100]} ...') data_cmd, raw_inputs = None, None if isinstance(audio_in, tuple) or isinstance(audio_in, list): From 96a528202138f07f99329ccfa5ff03f07d913107 Mon Sep 17 00:00:00 2001 From: "xingjun.wang" Date: Fri, 21 Jul 2023 23:09:24 +0800 Subject: [PATCH 58/87] check format --- examples/pytorch/llm/_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py index ad82e086..77952f3c 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/_common.py @@ -445,7 +445,7 @@ def plot_image(tb_dir: str, fname = os.listdir(tb_dir)[0] tb_path = os.path.join(tb_dir, fname) data = read_tensorboard_file(tb_path) - # + for k in data.keys(): _data = data[k] steps = [d['step'] for d in _data] From 5a49093422d45711af0fa602309057e5819ac01d Mon Sep 17 00:00:00 2001 From: "xingjun.wang" Date: Fri, 21 Jul 2023 23:37:17 +0800 Subject: [PATCH 59/87] fix yapf --- modelscope/models/cv/video_stabilization/utils/WarpUtils.py | 5 ++--- modelscope/models/nlp/gpt_moe/moe/sharded_moe.py | 6 +++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/modelscope/models/cv/video_stabilization/utils/WarpUtils.py b/modelscope/models/cv/video_stabilization/utils/WarpUtils.py index eefd8697..e8f713cd 100644 --- a/modelscope/models/cv/video_stabilization/utils/WarpUtils.py +++ b/modelscope/models/cv/video_stabilization/utils/WarpUtils.py 
@@ -46,9 +46,8 @@ def mesh_warp_frame(frame, x_motion, y_motion, cap_width, cap_height): homo, origin_kp.contiguous().view(2, -1).permute(1, 0)).permute(1, 0) - projection.append( - projected_kp.contiguous().view(*origin_kp.shape).permute( - 1, 2, 0)) # 2, H, W --> H, W, 2 + projection.append(projected_kp.contiguous().view( + *origin_kp.shape).permute(1, 2, 0)) # 2, H, W --> H, W, 2 projection = torch.stack(projection, 0) projection[:, :, :, 0] = projection[:, :, :, 0] / cfg.MODEL.WIDTH * 2. - 1. diff --git a/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py b/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py index ff12e13a..f8eed00f 100644 --- a/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py +++ b/modelscope/models/nlp/gpt_moe/moe/sharded_moe.py @@ -430,9 +430,9 @@ class MOELayer(Base): self.use_expert_residual_network = use_expert_residual_network if self.use_expert_residual_network: - self.expert_network = nn.Sequential( - *([ExpertResidualLayer(self.gate.model_dim) - for _ in range(6)])) # noqa + self.expert_network = nn.Sequential(*([ + ExpertResidualLayer(self.gate.model_dim) for _ in range(6) + ])) # noqa self.use_tutel = use_tutel and TUTEL_INSTALLED From f77237b0495c0e725a4e099d4b28850da8deca1b Mon Sep 17 00:00:00 2001 From: mushenL <125954878+mushenL@users.noreply.github.com> Date: Sat, 22 Jul 2023 21:53:04 +0800 Subject: [PATCH 60/87] add llama2 pipeline (#399) * Modify the parameter passing of the text_generation_pipeline class * add llama2 pipeline * add llama pipeline v1.1 * add llama pipeline v1.2 * add llama pipeline v1.3 * add llama pipeline v1.0.4 --- modelscope/metainfo.py | 1 + .../nlp/llama2_text_generation_pipeline.py | 99 +++++++++++++++++++ .../test_llama2_text_generation_pipeline.py | 47 +++++++++ 3 files changed, 147 insertions(+) create mode 100644 modelscope/pipelines/nlp/llama2_text_generation_pipeline.py create mode 100644 tests/pipelines/test_llama2_text_generation_pipeline.py diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index 3d9ba089..ab22b9ba 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -523,6 +523,7 @@ class Pipelines(object): soonet_video_temporal_grounding = 'soonet-video-temporal-grounding' efficient_diffusion_tuning = 'efficient-diffusion-tuning' multimodal_dialogue = 'multimodal-dialogue' + llama2_text_generation_pipeline = 'llama2-text-generation-pipeline' # science tasks protein_structure = 'unifold-protein-structure' diff --git a/modelscope/pipelines/nlp/llama2_text_generation_pipeline.py b/modelscope/pipelines/nlp/llama2_text_generation_pipeline.py new file mode 100644 index 00000000..3a9d3d44 --- /dev/null +++ b/modelscope/pipelines/nlp/llama2_text_generation_pipeline.py @@ -0,0 +1,99 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+# Copyright (c) 2022 Zhipu.AI +from typing import Any, Dict, Union + +import torch + +from modelscope import Model, snapshot_download +from modelscope.metainfo import Pipelines, Preprocessors +from modelscope.models.nlp.llama2 import Llama2Tokenizer +from modelscope.pipelines.base import Pipeline +from modelscope.pipelines.builder import PIPELINES +from modelscope.pipelines.nlp.text_generation_pipeline import \ + TextGenerationPipeline +from modelscope.preprocessors import Preprocessor +from modelscope.utils.constant import Fields, Tasks + + +@PIPELINES.register_module( + Tasks.text_generation, + module_name=Pipelines.llama2_text_generation_pipeline) +class Llama2TaskPipeline(TextGenerationPipeline): + + def __init__(self, + model: Union[Model, str], + preprocessor: Preprocessor = None, + config_file: str = None, + device: str = 'gpu', + auto_collate=True, + **kwargs): + """Use `model` and `preprocessor` to create a generation pipeline for prediction. + + Args: + model (str or Model): Supply either a local model dir which supports the text generation task, + or a model id from the model hub, or a torch model instance. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. + kwargs (dict, `optional`): + Extra kwargs passed into the preprocessor's constructor. + Examples: + >>> from modelscope.utils.constant import Tasks + >>> import torch + >>> from modelscope.pipelines import pipeline + >>> from modelscope import snapshot_download, Model + >>> model_dir = snapshot_download("modelscope/Llama-2-13b-chat-ms", + >>> ignore_file_pattern = [r'\\w+\\.safetensors']) + >>> pipe = pipeline(task=Tasks.text_generation, model=model_dir, device_map='auto', + >>> torch_dtype=torch.float16) + >>> inputs="咖啡的作用是什么?" + >>> result = pipe(inputs, max_length=200, do_sample=True, top_p=0.85, + >>> temperature=1.0, repetition_penalty=1., eos_token_id=2, bos_token_id=1, pad_token_id=0) + >>> print(result['text']) + + To view other examples please check tests/pipelines/test_llama2_text_generation_pipeline.py. 
+ """ + self.model = Model.from_pretrained( + model, device_map='auto', torch_dtype=torch.float16) + self.tokenizer = Llama2Tokenizer.from_pretrained(model) + super().__init__(model=self.model, **kwargs) + + def preprocess(self, inputs, **preprocess_params) -> Dict[str, Any]: + return inputs + + def _sanitize_parameters(self, **pipeline_parameters): + return {}, pipeline_parameters, {} + + def forward(self, + inputs, + max_length=50, + do_sample=True, + top_p=0.85, + temperature=1.0, + repetition_penalty=1., + eos_token_id=2, + bos_token_id=1, + pad_token_id=0, + **forward_params) -> Dict[str, Any]: + output = {} + inputs = self.tokenizer(inputs, return_tensors='pt') + generate_ids = self.model.generate( + inputs.input_ids.to('cuda'), + max_length=max_length, + do_sample=do_sample, + top_p=top_p, + temperature=temperature, + repetition_penalty=repetition_penalty, + eos_token_id=eos_token_id, + bos_token_id=bos_token_id, + pad_token_id=pad_token_id, + **forward_params) + out = self.tokenizer.batch_decode( + generate_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False)[0] + output['text'] = out + return output + + # format the outputs from pipeline + def postprocess(self, input, **kwargs) -> Dict[str, Any]: + return input diff --git a/tests/pipelines/test_llama2_text_generation_pipeline.py b/tests/pipelines/test_llama2_text_generation_pipeline.py new file mode 100644 index 00000000..2a532257 --- /dev/null +++ b/tests/pipelines/test_llama2_text_generation_pipeline.py @@ -0,0 +1,47 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +import unittest + +import torch + +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.utils.test_utils import test_level + + +class Llama2TextGenerationPipelineTest(unittest.TestCase): + + def setUp(self) -> None: + self.llama2_model_id_7B_chat_ms = 'modelscope/Llama-2-7b-chat-ms' + self.llama2_input_chat_ch = '天空为什么是蓝色的?' + + def run_pipeline_with_model_id(self, + model_id, + input, + init_kwargs={}, + run_kwargs={}): + pipeline_ins = pipeline( + task=Tasks.text_generation, model=model_id, **init_kwargs) + pipeline_ins._model_prepare = True + result = pipeline_ins(input, **run_kwargs) + print(result['text']) + + # 7B_ms_chat + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_llama2_7B_chat_ms_with_model_name_with_chat_ch_with_args(self): + self.run_pipeline_with_model_id( + self.llama2_model_id_7B_chat_ms, + self.llama2_input_chat_ch, + init_kwargs={ + 'device_map': 'auto', + 'torch_dtype': torch.float16 + }, + run_kwargs={ + 'max_length': 200, + 'do_sample': True, + 'top_p': 0.85 + }) + + +if __name__ == '__main__': + unittest.main() From 9dedac31fd671f228e6c091128a861762da9ac85 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 24 Jul 2023 12:14:16 +0900 Subject: [PATCH 61/87] Add Japanese README (#405) --- README.md | 3 +- README_ja.md | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++ README_zh.md | 3 +- 3 files changed, 304 insertions(+), 2 deletions(-) create mode 100644 README_ja.md diff --git a/README.md b/README.md index 4a4ce792..d3d92865 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,8 @@

English | - 中文 + 中文 | + 日本語

diff --git a/README_ja.md b/README_ja.md new file mode 100644 index 00000000..073b0c48 --- /dev/null +++ b/README_ja.md @@ -0,0 +1,300 @@ + +

+
+ +
+

+ +

+ +[![PyPI](https://img.shields.io/pypi/v/modelscope)](https://pypi.org/project/modelscope/) + +[![license](https://img.shields.io/github/license/modelscope/modelscope.svg)](https://github.com/modelscope/modelscope/blob/master/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/modelscope/modelscope.svg)](https://github.com/modelscope/modelscope/issues) +[![GitHub pull-requests](https://img.shields.io/github/issues-pr/modelscope/modelscope.svg)](https://GitHub.com/modelscope/modelscope/pull/) +[![GitHub latest commit](https://badgen.net/github/last-commit/modelscope/modelscope)](https://GitHub.com/modelscope/modelscope/commit/) +[![Leaderboard](https://img.shields.io/badge/ModelScope-Check%20Your%20Contribution-orange)](https://opensource.alibaba.com/contribution_leaderboard/details?projectValue=modelscope) + + + + +

+

+ English | + 中文 | + 日本語 +

+

+ + +
+ +# はじめに + +[ModelScope](https://www.modelscope.cn) は、"Model-as-a-Service"(MaaS) の概念に基づいて構築されています。AI コミュニティから最も先進的な機械学習モデルを集め、実世界のアプリケーションで AI モデルを活用するプロセスを合理化することを目指しています。このリポジトリでオープンソース化されている中核となる ModelScope ライブラリは、開発者がモデルの推論、トレーニング、評価を実行するためのインターフェースと実装を提供します。 + + +特に、API 抽象化の豊富なレイヤーにより、ModelScope ライブラリは、CV、NLP、音声、マルチモダリティ、科学計算などのドメインにまたがる最先端のモデルを探索するための統一された体験を提供します。様々な分野のモデル貢献者は、レイヤー化された API を通じてモデルを ModelScope エコシステムに統合することができ、モデルへの容易で統一されたアクセスを可能にします。一旦統合されると、モデルの推論、微調整、および評価は、わずか数行のコードで行うことができます。一方、モデルアプリケーションの様々なコンポーネントを必要に応じてカスタマイズできるように、柔軟性も提供されています。 + +ModelScope ライブラリは、様々なモデルの実装を保持するだけでなく、ModelScope のバックエンドサービス、特に Model-Hub と Dataset-Hub との必要な相互作用も可能にします。このような相互作用により、エンティティの検索、バージョン管理、キャッシュ管理など、様々なエンティティ(モデルやデータセット)の管理をアンダーザフードでシームレスに実行することができます。 + +# モデルとオンラインアクセシビリティ + +[ModelScope](https://www.modelscope.cn) では、NLP、CV、オーディオ、マルチモダリティ、科学のための AI などの分野の最新開発を網羅した、何百ものモデルが一般公開されています(700 以上、カウント中)。これらのモデルの多くは、特定の分野における SOTA を代表するものであり、ModelScope でオープンソースとしてデビューしました。ユーザーは、ModelScope([modelscope.cn](http://www.modelscope.cn)) にアクセスし、数回クリックするだけで、オンライン体験を通じて、これらのモデルがどのように機能するかを直接体験することができます。また、[ModelScope](https://www.modelscope.cn) をワンクリックするだけで、クラウド上のすぐに使える CPU/GPU 開発環境に支えられた ModelScope ノートブックを通じて、すぐに開発者体験が可能です。 + + +

+
+ +
+

+ +代表的な例をいくつか挙げると: + +NLP: + +* [nlp_gpt3_text-generation_2.7B](https://modelscope.cn/models/damo/nlp_gpt3_text-generation_2.7B) + +* [ChatYuan-large](https://modelscope.cn/models/ClueAI/ChatYuan-large) + +* [mengzi-t5-base](https://modelscope.cn/models/langboat/mengzi-t5-base) + +* [nlp_csanmt_translation_en2zh](https://modelscope.cn/models/damo/nlp_csanmt_translation_en2zh) + +* [nlp_raner_named-entity-recognition_chinese-base-news](https://modelscope.cn/models/damo/nlp_raner_named-entity-recognition_chinese-base-news) + +* [nlp_structbert_word-segmentation_chinese-base](https://modelscope.cn/models/damo/nlp_structbert_word-segmentation_chinese-base) + +* [Erlangshen-RoBERTa-330M-Sentiment](https://modelscope.cn/models/fengshenbang/Erlangshen-RoBERTa-330M-Sentiment) + +* [nlp_convai_text2sql_pretrain_cn](https://modelscope.cn/models/damo/nlp_convai_text2sql_pretrain_cn) + +マルチモーダル: + +* [multi-modal_clip-vit-base-patch16_zh](https://modelscope.cn/models/damo/multi-modal_clip-vit-base-patch16_zh) + +* [ofa_pretrain_base_zh](https://modelscope.cn/models/damo/ofa_pretrain_base_zh) + +* [Taiyi-Stable-Diffusion-1B-Chinese-v0.1](https://modelscope.cn/models/fengshenbang/Taiyi-Stable-Diffusion-1B-Chinese-v0.1) + +* [mplug_visual-question-answering_coco_large_en](https://modelscope.cn/models/damo/mplug_visual-question-answering_coco_large_en) + +CV: + +* [cv_controlnet_controllable-image-generation_nine-annotators](https://modelscope.cn/models/dienstag/cv_controlnet_controllable-image-generation_nine-annotators/summary) + +* [cv_tinynas_object-detection_damoyolo](https://modelscope.cn/models/damo/cv_tinynas_object-detection_damoyolo) + +* [cv_unet_person-image-cartoon_compound-models](https://modelscope.cn/models/damo/cv_unet_person-image-cartoon_compound-models) + +* [cv_convnextTiny_ocr-recognition-general_damo](https://modelscope.cn/models/damo/cv_convnextTiny_ocr-recognition-general_damo) + +* [cv_resnet18_human-detection](https://modelscope.cn/models/damo/cv_resnet18_human-detection) + +* [cv_resnet50_face-detection_retinaface](https://modelscope.cn/models/damo/cv_resnet50_face-detection_retinaface) + +* [cv_unet_image-matting](https://modelscope.cn/models/damo/cv_unet_image-matting) + +* [cv_F3Net_product-segmentation](https://modelscope.cn/models/damo/cv_F3Net_product-segmentation) + +* [cv_resnest101_general_recognition](https://modelscope.cn/models/damo/cv_resnest101_general_recognition) + + +音声: + +* [speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch](https://modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch) + +* [speech_sambert-hifigan_tts_zh-cn_16k](https://modelscope.cn/models/damo/speech_sambert-hifigan_tts_zh-cn_16k) + +* [speech_charctc_kws_phone-xiaoyun](https://modelscope.cn/models/damo/speech_charctc_kws_phone-xiaoyun) + +* [u2pp_conformer-asr-cn-16k-online](https://modelscope.cn/models/wenet/u2pp_conformer-asr-cn-16k-online) + +* [speech_fsmn_vad_zh-cn-16k-common-pytorch](https://modelscope.cn/models/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/summary) + +* [punc_ct-transformer_zh-cn-common-vocab272727-pytorch](https://modelscope.cn/models/damo/punc_ct-transformer_zh-cn-common-vocab272727-pytorch/summary) + +* [speech_frcrn_ans_cirm_16k](https://modelscope.cn/models/damo/speech_frcrn_ans_cirm_16k) + +* [speech_dfsmn_aec_psm_16k](https://modelscope.cn/models/damo/speech_dfsmn_aec_psm_16k) + + + +科学用 AI: + +* [uni-fold-monomer](https://modelscope.cn/models/DPTech/uni-fold-monomer/summary) + +* 
[uni-fold-multimer](https://modelscope.cn/models/DPTech/uni-fold-multimer/summary) + +**注:** ModelScope のほとんどのモデルは公開されており、アカウント登録なしで modelscope のウェブサイト([www.modelscope.cn](www.modelscope.cn))からダウンロードすることができます。modelscope のライブラリや git が提供する api を使用してモデルをダウンロードするには、[モデルのダウンロード](https://modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E4%B8%8B%E8%BD%BD)の説明を参照してください。 + +# クイックツアー + +様々なタスクに対して、`pipeline` による推論、`Trainer` による微調整と評価のための統一されたインターフェースを提供します。 + +入力の種類(画像、テキスト、音声、動画...)を問わず、推論パイプラインはわずか数行のコードで実装することができます。: + +```python +>>> from modelscope.pipelines import pipeline +>>> word_segmentation = pipeline('word-segmentation',model='damo/nlp_structbert_word-segmentation_chinese-base') +>>> word_segmentation('今天天气不错,适合出去游玩') +{'output': '今天 天气 不错 , 适合 出去 游玩'} +``` + +画像があれば、ポートレート・マット(別名、背景除去)は次のコード・スニペットで実現できます: + +![image](data/resource/portrait_input.png) + +```python +>>> import cv2 +>>> from modelscope.pipelines import pipeline + +>>> portrait_matting = pipeline('portrait-matting') +>>> result = portrait_matting('https://modelscope.oss-cn-beijing.aliyuncs.com/test/images/image_matting.png') +>>> cv2.imwrite('result.png', result['output_img']) +``` + +背景を除去した出力画像は次のようになります: +![image](data/resource/portrait_output.png) + + +ファインチューニングと評価も、トレーニングデータセットとトレーナーをセットアップする数行のコードで行うことができ、モデルのトレーニングと評価の重い作業は `traner.train()` と `trainer.evaluate()` インターフェースの実装に +カプセル化されています。 + +例えば、gpt3 の基本モデル(1.3B)を中国語詩のデータセットでファインチューニングすることで、中国語詩の生成に使用できるモデルを得ることができる。 + +```python +>>> from modelscope.metainfo import Trainers +>>> from modelscope.msdatasets import MsDataset +>>> from modelscope.trainers import build_trainer + +>>> train_dataset = MsDataset.load('chinese-poetry-collection', split='train'). remap_columns({'text1': 'src_txt'}) +>>> eval_dataset = MsDataset.load('chinese-poetry-collection', split='test').remap_columns({'text1': 'src_txt'}) +>>> max_epochs = 10 +>>> tmp_dir = './gpt3_poetry' + +>>> kwargs = dict( + model='damo/nlp_gpt3_text-generation_1.3B', + train_dataset=train_dataset, + eval_dataset=eval_dataset, + max_epochs=max_epochs, + work_dir=tmp_dir) + +>>> trainer = build_trainer(name=Trainers.gpt3_trainer, default_args=kwargs) +>>> trainer.train() +``` + +# ModelScope ライブラリを使用する理由 + +1. 統一された簡潔なユーザーインターフェースは、異なるタスクや異なるモデル用に抽象化されている。モデルの推論とトレーニングは、それぞれわずか 3 行と 10 行のコードで実装できる。ModelScope コミュニティで異なる分野のモデルを探索するのに便利です。ModelScope に統合されたモデルはすべてすぐに使用できるため、教育現場でも産業現場でも、AI を簡単に使い始めることができます。 + +2. ModelScope は、モデル中心の開発とアプリケーション体験を提供します。モデルのトレーニング、推論、エクスポート、デプロイメントのサポートを合理化し、ユーザーが ModelScope エコシステムに基づいて独自の MLO を構築することを容易にします。 + +3. モデルの推論とトレーニングのプロセスでは、モジュール設計が導入され、豊富な機能モジュールの実装が提供され、ユーザーが独自のモデルの推論、トレーニング、その他のプロセスをカスタマイズするのに便利です。 + +4. 
分散モデル学習、特に大規模モデルに対しては、データ並列、モデル並列、ハイブリッド並列など、豊富な学習ストラテジーサポートを提供する。 + +# インストール + +## Docker + +ModelScope ライブラリは現在、PyTorch、TensorFlow、ONNX を含む、モデルの学習と推論のための一般的なディープラーニングフレームワークをサポートしています。すべてのリリースは、Python 3.7+、Pytorch 1.8+、Tensorflow1.15、または Tensorflow2.0+ でテストされ、実行されます。 + +ModelScope のすべてのモデルをすぐに使えるようにするため、すべてのリリースで公式の docker イメージが提供されています。開発者はこの docker イメージをベースに、環境のインストールや設定をすべて省略して直接使用することができます。現在、CPU イメージと GPU イメージの最新バージョンは以下から入手できます: + +CPU docker イメージ +```shell +# py37 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-py38-torch1.11.0-tf1.15.5-1.6.1 +``` + +GPU docker イメージ +```shell +# py37 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py37-torch1.11.0-tf1.15.5-1.6.1 + +# py38 +registry.cn-hangzhou.aliyuncs.com/modelscope-repo/modelscope:ubuntu20.04-cuda11.3.0-py38-torch1.11.0-tf1.15.5-1.6.1 +``` + +## ローカル Python 環境のセットアップ + +pip と conda を使って、ModelScope のローカル環境を構築することもできます。 ローカルの Python 環境を構築するには [anaconda](https://docs.anaconda.com/anaconda/install/) をお勧めします: + +```shell +conda create -n modelscope python=3.7 +conda activate modelscope +``` + +PyTorch または TensorFlow は、それぞれのモデルの要件に応じて個別にインストールすることができます。 +* pytorch のインストール [doc](https://pytorch.org/get-started/locally/) +* Tensorflow のインストール [doc](https://www.tensorflow.org/install/pip) + +必要な機械学習フレームワークをインストールした後、以下のように modelscope ライブラリをインストールします: + +モデル/データセットのダウンロードを試したり、modelscope フレームワークで遊びたいだけなら、modelscope のコア・コンポーネントをインストールすることができます: +```shell +pip install modelscope +``` + +マルチモーダルモデルを使いたい場合: +```shell +pip install modelscope[multi-modal] +``` + +nlp モデルを使いたい場合: +```shell +pip install modelscope[nlp] -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +``` + +CV モデルを使いたい場合: +```shell +pip install modelscope[cv] -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +``` + +オーディオモデルを使用したい場合: +```shell +pip install modelscope[audio] -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +``` + +科学モデルを使いたい場合: +```shell +pip install modelscope[science] -f https://modelscope.oss-cn-beijing.aliyuncs.com/releases/repo.html +``` + +`備考`: +1. 現在、一部のオーディオタスクモデルは python3.7、tensorflow1.15.4 の Linux 環境のみに対応しています。他のほとんどのモデルは Windows と Mac(x86) にインストールして使うことができます。 + +2. オーディオ分野では、wav ファイルの処理にサードパーティ製のライブラリ SoundFile を使用している機種がある。Linux では、SoundFile の libsndfile([doc link](https://github.com/bastibe/python-soundfile#installation)) を手動でインストールする必要があります。Windows や MacOS では、ユーザーが操作しなくても自動的にインストールされる。例えば、Ubuntu の場合、以下のコマンドでインストールできます: + ```shell + sudo apt-get update + sudo apt-get install libsndfile1 + ``` + +3. 
コンピュータビジョンのモデルによっては mmcv-full が必要です。mmcv [インストールガイド](https://github.com/open-mmlab/mmcv#installation)を参照してください。最小限のインストールは以下の通りです: + + ```shell + pip uninstall mmcv # mmcv をインストールしている場合は、アンインストールしてください + pip install -U openmim + mim install mmcv-full + ``` + + + +# 詳細 + +私たちは、以下のような追加書類を提供します: +* [より詳細なインストールガイド](https://modelscope.cn/docs/%E7%8E%AF%E5%A2%83%E5%AE%89%E8%A3%85) +* [タスクの紹介](https://modelscope.cn/docs/%E4%BB%BB%E5%8A%A1%E7%9A%84%E4%BB%8B%E7%BB%8D) +* [モデル推論にパイプラインを使う](https://modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E6%8E%A8%E7%90%86Pipeline) +* [ファインチューニング例](https://modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E8%AE%AD%E7%BB%83Train) +* [データの前処理](https://modelscope.cn/docs/%E6%95%B0%E6%8D%AE%E7%9A%84%E9%A2%84%E5%A4%84%E7%90%86) +* [評価](https://modelscope.cn/docs/%E6%A8%A1%E5%9E%8B%E7%9A%84%E8%AF%84%E4%BC%B0) +* [ModelScope に自分のモデルを投稿する](https://modelscope.cn/docs/ModelScope%E6%A8%A1%E5%9E%8B%E6%8E%A5%E5%85%A5%E6%B5%81%E7%A8%8B%E6%A6%82%E8%A7%88) + +# ライセンス + +このプロジェクトのライセンスは [Apache License (Version 2.0)](https://github.com/modelscope/modelscope/blob/master/LICENSE) です。 diff --git a/README_zh.md b/README_zh.md index f5401f33..7cac99fb 100644 --- a/README_zh.md +++ b/README_zh.md @@ -21,7 +21,8 @@

English | - 中文 + 中文 | + 日本語

From f805d86aedd80ae9443ef3bfd53941b7d5b635a6 Mon Sep 17 00:00:00 2001 From: lylalala Date: Mon, 24 Jul 2023 15:38:01 +0800 Subject: [PATCH 62/87] llama2 support chat (#404) * support chat * update llama2 chat testcase * add gen kwargs and devices * update unittest and support max_length in multi-turn dialogue --- .../models/nlp/llama2/text_generation.py | 82 ++++++++++++++++++- tests/models/test_llama2.py | 59 +++++++++++++ 2 files changed, 140 insertions(+), 1 deletion(-) create mode 100644 tests/models/test_llama2.py diff --git a/modelscope/models/nlp/llama2/text_generation.py b/modelscope/models/nlp/llama2/text_generation.py index 5fe01cbe..71ccaffe 100644 --- a/modelscope/models/nlp/llama2/text_generation.py +++ b/modelscope/models/nlp/llama2/text_generation.py @@ -17,7 +17,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn.functional as F @@ -27,11 +27,48 @@ from torch.nn import CrossEntropyLoss from transformers.modeling_outputs import CausalLMOutputWithPast from modelscope.metainfo import Models +from modelscope.outputs import OutputKeys from modelscope.utils.constant import Tasks from ... import MODELS from .backbone import Llama2Model, LlamaPreTrainedModel +def get_chat_prompt(system: str, text: str, history: List[Tuple[str, str]], + max_length: int, tokenizer): + system_prompt = f'[INST] <>\n{system}\n<>\n\n' + system_ids = tokenizer(system_prompt, return_tensors='pt').input_ids + + text_prompt = f'{text.strip()} [/INST]' + text_ids = tokenizer(text_prompt, return_tensors='pt').input_ids + + prompt_length = system_ids.shape[-1] + text_ids.shape[-1] + if prompt_length > max_length: + raise RuntimeError( + f'prepend prompt length {prompt_length} is bigger than max_length {max_length}' + ) + + history_prompt = '' + history_ids_list = [] + # traverse history in reverse order + for user, bot in history[::-1]: + assert isinstance(user, str) + assert isinstance(bot, str) + round_prompt = f'{user.strip()} [/INST] {bot.strip()} [INST] ' + round_ids = tokenizer(round_prompt, return_tensors='pt').input_ids + if prompt_length + round_ids.shape[-1] > max_length: + # excess history should not be appended to the prompt + break + else: + history_prompt = round_prompt + history_prompt + history_ids_list = [round_ids] + history_ids_list + prompt_length += round_ids.shape[-1] + + prompt_list = [system_prompt, history_prompt, text_prompt] + prompt_ids_list = [system_ids] + history_ids_list + [text_ids] + + return ''.join(prompt_list), torch.cat(prompt_ids_list, dim=1) + + # This file is mainly copied from the llama code of transformers @MODELS.register_module(Tasks.text_generation, module_name=Models.llama2) class Llama2ForTextGeneration(LlamaPreTrainedModel): @@ -186,3 +223,46 @@ class Llama2ForTextGeneration(LlamaPreTrainedModel): past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past + + def chat(self, input: Dict, tokenizer) -> Dict: + import copy + gen_kwargs = copy.copy(input) + if 'text' not in input: + text: str = '' + else: + text: str = input['text'] + gen_kwargs.pop('text') + + if 'system' not in input: + system: str = '' + else: + system: str = input['system'] + gen_kwargs.pop('system') + + if 'history' not in input: + history = [] + else: + history: List[Tuple] = 
copy.copy(input['history']) + gen_kwargs.pop('history') + + if 'max_length' not in gen_kwargs: + gen_kwargs['max_length'] = 4096 + + prompt, prompt_ids = get_chat_prompt( + system=system, + text=text, + history=history, + max_length=gen_kwargs['max_length'], + tokenizer=tokenizer) + input_ids = prompt_ids.to(self.device) + generate_ids = self.generate(input_ids, **gen_kwargs) + # remove input tokens + generate_ids = generate_ids[:, input_ids.shape[1]:] + response = tokenizer.batch_decode( + generate_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False)[0] + response = response.strip() + history.append((text, response)) + + return {OutputKeys.RESPONSE: response, OutputKeys.HISTORY: history} diff --git a/tests/models/test_llama2.py b/tests/models/test_llama2.py new file mode 100644 index 00000000..f31d2cad --- /dev/null +++ b/tests/models/test_llama2.py @@ -0,0 +1,59 @@ +import unittest + +import torch + +from modelscope import Model, snapshot_download +from modelscope.models.nlp.llama2 import Llama2Tokenizer +from modelscope.utils.test_utils import test_level + + +class Llama2Test(unittest.TestCase): + + def setUp(self) -> None: + self.model_name = 'modelscope/Llama-2-7b-chat-ms' + self.system = 'you are a helpful assistant!' + self.text_first_round = 'hello' + self.text_second_round = 'do you know peking university?' + self.text_third_round = 'where is it?' + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_chat(self): + model_dir = snapshot_download( + self.model_name, ignore_file_pattern=[r'\w+\.safetensors']) + model = Model.from_pretrained( + model_dir, device_map='auto', torch_dtype=torch.float16) + tokenizer = Llama2Tokenizer.from_pretrained(model_dir) + + inputs = { + 'text': self.text_first_round, + 'history': [], + 'system': self.system + } + result = model.chat(input=inputs, tokenizer=tokenizer) + self.assertIsInstance(result['history'], list) + self.assertEqual(len(result['history']), 1) + self.assertEqual(result['history'][0][0], self.text_first_round) + + inputs = { + 'text': self.text_second_round, + 'history': result['history'], + 'system': self.system + } + result = model.chat(input=inputs, tokenizer=tokenizer) + self.assertIsInstance(result['history'], list) + self.assertEqual(len(result['history']), 2) + self.assertEqual(result['history'][1][0], self.text_second_round) + + inputs = { + 'text': self.text_third_round, + 'history': result['history'], + 'system': self.system + } + result = model.chat(input=inputs, tokenizer=tokenizer) + self.assertIsInstance(result['history'], list) + self.assertEqual(len(result['history']), 3) + self.assertEqual(result['history'][2][0], self.text_third_round) + + +if __name__ == '__main__': + unittest.main() From ba4b9fc43f4c41e591254655c3e7028dada238f2 Mon Sep 17 00:00:00 2001 From: Jintao Date: Mon, 24 Jul 2023 15:52:09 +0800 Subject: [PATCH 63/87] Added full parameter sft to llm (#402) * Optimized code * update parse_args * fix get_logger bug * update parse_args * Added full parameter fine-tuning * Add support_bf16 warning * Modify the code format and fix bugs --- examples/pytorch/chatglm6b/chatglm_trainer.py | 2 +- examples/pytorch/llm/_common.py | 145 ++++--- examples/pytorch/llm/llm_infer.py | 149 ++++--- examples/pytorch/llm/llm_sft.py | 364 ++++++++++-------- modelscope/exporters/base.py | 2 +- .../cv/cartoon_translation_exporter.py | 2 +- .../nlp/csanmt_for_translation_exporter.py | 2 +- .../models/nlp/chatglm/configuration.py | 5 +- 
modelscope/models/nlp/chatglm/quantization.py | 5 +- .../models/nlp/chatglm/text_generation.py | 5 +- modelscope/models/nlp/chatglm/tokenization.py | 6 +- .../models/nlp/chatglm2/configuration.py | 5 +- .../models/nlp/chatglm2/quantization.py | 5 +- .../models/nlp/chatglm2/text_generation.py | 4 +- modelscope/models/nlp/fid_plug/backbone.py | 5 +- modelscope/models/nlp/llama/backbone.py | 2 +- modelscope/models/nlp/llama/tokenization.py | 2 +- .../models/nlp/llama/tokenization_fast.py | 2 +- modelscope/models/nlp/llama2/backbone.py | 4 +- modelscope/models/nlp/llama2/configuration.py | 2 +- modelscope/models/nlp/llama2/tokenization.py | 2 +- .../models/nlp/llama2/tokenization_fast.py | 6 +- modelscope/models/nlp/peer/backbone.py | 2 +- modelscope/models/nlp/peer/configuration.py | 2 +- .../hooks/checkpoint/checkpoint_hook.py | 5 +- modelscope/utils/data_collators.py | 2 +- 26 files changed, 409 insertions(+), 328 deletions(-) diff --git a/examples/pytorch/chatglm6b/chatglm_trainer.py b/examples/pytorch/chatglm6b/chatglm_trainer.py index efa4dfce..84167713 100644 --- a/examples/pytorch/chatglm6b/chatglm_trainer.py +++ b/examples/pytorch/chatglm6b/chatglm_trainer.py @@ -6,7 +6,7 @@ from transformers.deepspeed import is_deepspeed_zero3_enabled from modelscope import EpochBasedTrainer, get_logger -logger = get_logger(__name__) +logger = get_logger() class Seq2SeqTrainer(EpochBasedTrainer): diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py index 77952f3c..86531b0e 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/_common.py @@ -1,4 +1,3 @@ -import ast import datetime as dt import math import os @@ -7,12 +6,12 @@ import re import sys from dataclasses import dataclass, field from functools import partial +from types import MethodType from typing import Any, Callable, Dict, List, Optional, Tuple, Union import json import matplotlib.pyplot as plt import numpy as np -# import torch import torch.nn as nn import torch.optim as optim @@ -33,35 +32,35 @@ from torch.optim import Optimizer from torch.optim import lr_scheduler as lrs from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import Dataset -# from torchmetrics import Accuracy, MeanMetric -# from tqdm import tqdm from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer, GenerationConfig, HfArgumentParser, TextStreamer) -# from modelscope import (Model, MsDataset, get_logger, read_config, snapshot_download) from modelscope.metrics.base import Metric from modelscope.metrics.builder import METRICS -from modelscope.models.nlp.chatglm2 import ChatGLM2Tokenizer -from modelscope.msdatasets.dataset_cls.custom_datasets import \ - TorchCustomDataset +from modelscope.models.nlp.chatglm2 import ChatGLM2Config, ChatGLM2Tokenizer +from modelscope.models.nlp.llama2 import Llama2Config, Llama2Tokenizer from modelscope.swift import LoRAConfig, Swift from modelscope.trainers import EpochBasedTrainer from modelscope.utils.config import Config, ConfigDict from modelscope.utils.registry import default_group -# COLOR, COLOR_S = '#FFE2D9', '#FF7043' -PROMPT = """Human: {instruction} -AI: """ +PROMPT = """Here's a conversation between a human and an AI assistant. \ +The AI assistant provides detailed, friendly answers for the human. 
+ +### Human: +{instruction} + +### AI: +""" logger = get_logger() os.environ['TOKENIZERS_PARALLELISM'] = 'true' -# def _get_version(work_dir: str) -> int: @@ -84,7 +83,7 @@ def get_work_dir(work_dir: str) -> str: work_dir = os.path.abspath(work_dir) version = _get_version(work_dir) time = dt.datetime.now().strftime('%Y%m%d-%H%M%S') - # + work_dir = os.path.join(work_dir, f'v{version}-{time}') logger.info(f'work_dir: {work_dir}') return work_dir @@ -109,9 +108,8 @@ def select_device(device: Union[List[int], str]) -> Device: if torch.cuda.is_initialized(): logger.warning('CUDA has been initialized! Device selection fails!') return torch.device('cuda:0') - # + device_ids, device_str = _format_device(device) - # os.environ['CUDA_VISIBLE_DEVICES'] = device_str log_s = 'Using device: ' if len(device_ids) == 0: @@ -157,11 +155,9 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, def tokenize_function(example: Dict[str, Optional[str]], tokenizer, max_length: Optional[int] = 2048) -> Dict[str, Any]: - """Only applicable to baichuan and chatglm2. Other models need to be tested""" instruction: str = example['instruction'] input_ = example['input'] if input_ is not None and input_ != '': - # instruction = instruction + '\n' if input_.startswith('输入:'): instruction = instruction + input_[3:] else: @@ -171,7 +167,7 @@ def tokenize_function(example: Dict[str, Optional[str]], src_input_ids: List[int] = tokenizer( src_text, return_attention_mask=False, add_special_tokens=True)['input_ids'] - # + tgt_input_ids = [] if output is not None: tgt_input_ids += tokenizer( @@ -182,17 +178,17 @@ def tokenize_function(example: Dict[str, Optional[str]], else: labels = None input_ids = src_input_ids + tgt_input_ids - # + if max_length is not None: input_ids = input_ids[-max_length:] if labels is not None: labels = labels[-max_length:] - # + return {'input_ids': input_ids, 'labels': labels} def stat_dataset(dataset: HfDataset) -> None: - """Statistical analysis was performed on the data set""" + """Statistical analysis was performed on the dataset""" _token_len = [] for d in dataset: _token_len.append(len(d['input_ids'])) @@ -224,7 +220,7 @@ def data_collate_fn(batch: List[Dict[str, Any]], tokenizer) -> Dict[str, Any]: torch.ones(len(input_ids[i]), dtype=torch.int64) for i in range(len(input_ids)) ] - # + input_ids = pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) attention_mask = pad_sequence( @@ -240,11 +236,11 @@ def data_collate_fn(batch: List[Dict[str, Any]], tokenizer) -> Dict[str, Any]: def print_model_info(model: Module, name: Optional[str] = None) -> None: if name is None: name = model.__class__.__name__ - # + n_params = sum(p.numel() for p in model.parameters()) n_grads = sum(p.numel() for p in model.parameters() if p.requires_grad) n_buffers = sum(p.numel() for p in model.buffers()) - # + n_params /= 1e6 n_grads /= 1e6 n_buffers /= 1e6 @@ -276,7 +272,7 @@ class MyMetric(Metric): def add(self, outputs: Dict[str, Any], inputs: Dict[str, Any]) -> None: loss: Tensor = outputs.loss self.loss.update(loss) - # + labels: Tensor = inputs['labels'] labels = labels[:, 1:] labels_mask = labels != -100 @@ -311,11 +307,11 @@ def _add_special_token(tokenizer): def get_baichuan_model_tokenizer(model_dir: str, load_model: bool = True, - add_special_token: bool = True): - sys.path.insert(0, model_dir) + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): model_config = AutoConfig.from_pretrained( model_dir, trust_remote_code=True) - model_config.torch_dtype 
= torch.float16 + model_config.torch_dtype = torch_dtype logger.info(f'model_config: {model_config}') tokenizer = AutoTokenizer.from_pretrained( model_dir, trust_remote_code=True) @@ -325,9 +321,9 @@ def get_baichuan_model_tokenizer(model_dir: str, model_dir, config=model_config, device_map='auto', - torch_dtype=torch.float16, + torch_dtype=torch_dtype, trust_remote_code=True) - # + if add_special_token: _add_special_token(tokenizer) return model, tokenizer @@ -335,17 +331,22 @@ def get_baichuan_model_tokenizer(model_dir: str, def get_chatglm2_model_tokenizer(model_dir: str, load_model: bool = True, - add_special_token: bool = True): + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): config = read_config(model_dir) - config['model'] = ConfigDict({'type': 'chatglm2-6b'}) + logger.info(config) + model_config = ChatGLM2Config.from_pretrained(model_dir) + model_config.torch_dtype = torch_dtype + logger.info(model_config) tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) model = None if load_model: model = Model.from_pretrained( model_dir, cfg_dict=config, + config=model_config, device_map='auto', - torch_dtype=torch.float16) + torch_dtype=torch_dtype) if add_special_token: _add_special_token(tokenizer) return model, tokenizer @@ -353,39 +354,68 @@ def get_chatglm2_model_tokenizer(model_dir: str, def get_llama2_model_tokenizer(model_dir: str, load_model: bool = True, - add_special_token: bool = True): - config = AutoConfig.from_pretrained(model_dir) - tokenizer = AutoTokenizer.from_pretrained(model_dir) + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): + config = read_config(model_dir) + logger.info(config) + model_config = Llama2Config.from_pretrained(model_dir) + model_config.torch_dtype = torch_dtype + logger.info(model_config) + tokenizer = Llama2Tokenizer.from_pretrained(model_dir) model = None if load_model: - model = AutoModelForCausalLM.from_pretrained( + model = Model.from_pretrained( model_dir, - config=config, + cfg_dict=config, + config=model_config, device_map='auto', - torch_dtype=torch.float16, - ) + torch_dtype=torch_dtype) if add_special_token: _add_special_token(tokenizer) return model, tokenizer +def get_model_tokenizer(model_type: str, + load_model: bool = True, + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): + # ### Loading Model and Tokenizer + if model_type == 'baichuan-7b': + model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.7') + model, tokenizer = get_baichuan_model_tokenizer( + model_dir, load_model, add_special_token, torch_dtype) + elif model_type == 'baichuan-13b': + model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', + 'v1.0.3') + model, tokenizer = get_baichuan_model_tokenizer( + model_dir, load_model, add_special_token, torch_dtype) + elif model_type == 'chatglm2': + model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') + model, tokenizer = get_chatglm2_model_tokenizer( + model_dir, load_model, add_special_token, torch_dtype) + elif model_type == 'llama2-7b': + model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.2') + model, tokenizer = get_llama2_model_tokenizer(model_dir, load_model, + add_special_token, + torch_dtype) + else: + raise ValueError(f'model_type: {model_type}') + return model, tokenizer, model_dir + + def get_alpaca_en_zh_dataset( tokenize_function, only_val: bool = False, test_split_p: float = 0.01, split_seed: int = 42, data_sample: Optional[int] = None) -> Tuple[HfDataset, HfDataset]: - """ - split: Literal['train', 
'validation', None] - """ - dataset_en: HfDataset = MsDataset.load( 'AI-ModelScope/alpaca-gpt4-data-en', split='train').to_hf_dataset() dataset_zh: HfDataset = MsDataset.load( 'AI-ModelScope/alpaca-gpt4-data-zh', split='train').to_hf_dataset() dataset_en = dataset_en.remove_columns(['text']) dataset: HfDataset = concatenate_datasets([dataset_zh, dataset_en]) - # + if data_sample is not None: dataset = dataset.select(range(data_sample)) dataset = dataset.train_test_split(test_split_p, seed=split_seed) @@ -394,7 +424,7 @@ def get_alpaca_en_zh_dataset( if tokenize_function is not None: dataset = dataset.map(tokenize_function) dataset = dataset.remove_columns(['instruction', 'input', 'output']) - # + if only_val: return None, dataset else: @@ -428,7 +458,7 @@ def tensorboard_smoothing(values: List[float], for i in range(len(values)): x = x * smooth + values[i] # Exponential decay res.append(x / norm_factor) - # + norm_factor *= smooth norm_factor += 1 return res @@ -441,7 +471,7 @@ def plot_image(tb_dir: str, dpi: int = 100) -> None: image_dir = os.path.join(os.path.dirname(tb_dir), 'images') os.makedirs(image_dir, exist_ok=True) - # + fname = os.listdir(tb_dir)[0] tb_path = os.path.join(tb_dir, fname) data = read_tensorboard_file(tb_path) @@ -464,3 +494,22 @@ def plot_image(tb_dir: str, ax.plot(steps, values, color=COLOR_S) fpath = os.path.join(image_dir, k.replace('/', '_')) plt.savefig(fpath, dpi=dpi, bbox_inches='tight') + + +def inference(data: Dict[str, Optional[str]], + model, + tokenizer, + streamer: Optional[TextStreamer] = None, + generation_config: Optional[GenerationConfig] = None, + tag: str = '[INFERENCE]') -> str: + input_ids = tokenize_function(data, tokenizer)['input_ids'] + print(f'{tag}{tokenizer.decode(input_ids)}', end='') + input_ids = torch.tensor(input_ids)[None].cuda() + attention_mask = torch.ones_like(input_ids) + generate_ids = model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + streamer=streamer, + generation_config=generation_config) + output_text = tokenizer.decode(generate_ids[0]) + return output_text diff --git a/examples/pytorch/llm/llm_infer.py b/examples/pytorch/llm/llm_infer.py index cac59bee..a83dd6fe 100644 --- a/examples/pytorch/llm/llm_infer.py +++ b/examples/pytorch/llm/llm_infer.py @@ -3,7 +3,7 @@ from _common import * @dataclass -class Arguments: +class InferArguments: device: str = '0' # e.g. '-1'; '0'; '0,1' model_type: str = field( default='baichuan-7b', @@ -11,15 +11,17 @@ class Arguments: 'choices': ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] }) - ckpt_fpath: str = '' # e.g. 
'/path/to/your/iter_xxx.pth' + sft_type: str = field( + default='lora', metadata={'choices': ['lora', 'full']}) + ckpt_fpath: str = '/path/to/your/iter_xxx.pth' eval_human: bool = False # False: eval test_dataset data_sample: Optional[int] = None - # + # sft_type: lora lora_target_modules: Optional[List[str]] = None lora_rank: int = 8 lora_alpha: int = 32 lora_dropout_p: float = 0.1 - # + max_new_tokens: int = 512 temperature: float = 0.9 top_k: int = 50 @@ -35,88 +37,79 @@ class Arguments: self.lora_target_modules = ['q_proj', 'k_proj', 'v_proj'] else: raise ValueError(f'model_type: {self.model_type}') - # + if not os.path.isfile(self.ckpt_fpath): - raise ValueError('Please enter a valid fpath') + raise ValueError(f'Please enter a valid fpath: {self.ckpt_fpath}') -def parse_args() -> Arguments: - args, = HfArgumentParser([Arguments]).parse_args_into_dataclasses() +def parse_args() -> InferArguments: + # return_remaining_strings=True for notebook compatibility + args, remaining_args = HfArgumentParser([ + InferArguments + ]).parse_args_into_dataclasses(return_remaining_strings=True) + logger.info(f'args: {args}') + if len(remaining_args) > 0: + logger.warning(f'remaining_args: {remaining_args}') return args -args = parse_args() -logger.info(args) -select_device(args.device) +def llm_infer(args: InferArguments) -> None: + select_device(args.device) + # ### Loading Model and Tokenizer + support_bf16 = torch.cuda.is_bf16_supported() + if not support_bf16: + logger.warning(f'support_bf16: {support_bf16}') + model, tokenizer, _ = get_model_tokenizer( + args.model_type, torch_dtype=torch.bfloat16) -# ### Loading Model and Tokenizer -if args.model_type == 'baichuan-7b': - model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') - model, tokenizer = get_baichuan_model_tokenizer(model_dir) -elif args.model_type == 'baichuan-13b': - model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') - model, tokenizer = get_baichuan_model_tokenizer(model_dir) -elif args.model_type == 'chatglm2': - model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') - model, tokenizer = get_chatglm2_model_tokenizer(model_dir) -elif args.model_type == 'llama2-7b': - model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.0') - model, tokenizer = get_llama2_model_tokenizer(model_dir) -else: - raise ValueError(f'model_type: {args.model_type}') + # ### Preparing lora + if args.sft_type == 'lora': + lora_config = LoRAConfig( + replace_modules=args.lora_target_modules, + rank=args.lora_rank, + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout_p, + pretrained_weights=args.ckpt_fpath) + logger.info(f'lora_config: {lora_config}') + Swift.prepare_model(model, lora_config) + elif args.sft_type == 'full': + state_dict = torch.load(args.ckpt_fpath, map_location='cpu') + model.load_state_dict(state_dict) + else: + raise ValueError(f'args.sft_type: {args.sft_type}') -# ### Preparing lora -lora_config = LoRAConfig( - replace_modules=args.lora_target_modules, - rank=args.lora_rank, - lora_alpha=args.lora_alpha, - lora_dropout=args.lora_dropout_p, - pretrained_weights=args.ckpt_fpath) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) -model.bfloat16() # Consistent with training + # ### Inference + streamer = TextStreamer( + tokenizer, skip_prompt=True, skip_special_tokens=True) + generation_config = GenerationConfig( + max_new_tokens=args.max_new_tokens, + temperature=args.temperature, + top_k=args.top_k, + top_p=args.top_p, + do_sample=True, + 
pad_token_id=tokenizer.eos_token_id) + logger.info(f'generation_config: {generation_config}') -# ### Inference -streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) -generation_config = GenerationConfig( - max_new_tokens=args.max_new_tokens, - temperature=args.temperature, - top_k=args.top_k, - top_p=args.top_p, - do_sample=True, - pad_token_id=tokenizer.eos_token_id) -logger.info(generation_config) + if args.eval_human: + while True: + instruction = input('<<< ') + data = {'instruction': instruction, 'input': None, 'output': None} + inference(data, model, tokenizer, streamer, generation_config) + print('-' * 80) + else: + _, test_dataset = get_alpaca_en_zh_dataset( + None, True, split_seed=42, data_sample=args.data_sample) + mini_test_dataset = test_dataset.select(range(10)) + for data in mini_test_dataset: + output = data['output'] + data['output'] = None + inference(data, model, tokenizer, streamer, generation_config) + print() + print(f'[LABELS]{output}') + print('-' * 80) + # input('next[ENTER]') -def inference(data: Dict[str, Optional[str]]) -> str: - input_ids = tokenize_function(data, tokenizer)['input_ids'] - print(f'[TEST]{tokenizer.decode(input_ids)}', end='') - input_ids = torch.tensor(input_ids)[None].cuda() - attention_mask = torch.ones_like(input_ids) - generate_ids = model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - streamer=streamer, - generation_config=generation_config) - output_text = tokenizer.decode(generate_ids[0]) - return output_text - - -if args.eval_human: - while True: - instruction = input('<<< ') - data = {'instruction': instruction, 'input': None, 'output': None} - inference(data) - print('-' * 80) -else: - _, test_dataset = get_alpaca_en_zh_dataset( - None, True, split_seed=42, data_sample=None) - mini_test_dataset = test_dataset.select(range(10)) - for data in mini_test_dataset: - output = data['output'] - data['output'] = None - inference(data) - print() - print(f'[LABELS]{output}') - print('-' * 80) - # input('next[ENTER]') +if __name__ == '__main__': + args = parse_args() + llm_infer(args) diff --git a/examples/pytorch/llm/llm_sft.py b/examples/pytorch/llm/llm_sft.py index 5e835625..eb830abf 100644 --- a/examples/pytorch/llm/llm_sft.py +++ b/examples/pytorch/llm/llm_sft.py @@ -1,20 +1,25 @@ # ### Setting up experimental environment. """ -pip install modelscope pip install numpy pandas matplotlib scikit-learn pip install transformers datasets conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer pip install accelerate transformers_stream_generator -pip install numpy -U # Resolve torchmetrics dependencies and update numpy +# Install the latest version of modelscope from source +git clone https://github.com/modelscope/modelscope.git +cd modelscope +pip install . + +# Resolve torchmetrics dependencies and update numpy +pip install numpy -U """ from _common import * @dataclass -class Arguments: +class SftArguments: device: str = '0,1' # e.g. 
'-1'; '0'; '0,1' seed: int = 42 model_type: str = field( @@ -23,29 +28,52 @@ class Arguments: 'choices': ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] }) + # baichuan-7b: 'lora': 16G; 'full': 80G + sft_type: str = field( + default='lora', metadata={'choices': ['lora', 'full']}) data_sample: Optional[int] = None - # + lora_target_modules: Optional[List[str]] = None lora_rank: int = 8 lora_alpha: int = 32 lora_dropout_p: float = 0.1 - # + gradient_checkpoint: bool = True batch_size: int = 1 max_epochs: int = 1 - eval_interval: int = 500 - learning_rate: float = 1e-4 + learning_rate: Optional[float] = None weight_decay: float = 0.01 n_accumulate_grad: int = 16 grad_clip_norm: float = 1. warmup_iters: int = 200 + + save_trainer_state: Optional[bool] = None + eval_interval: int = 500 + last_save_interval: Optional[int] = None last_max_checkpoint_num: int = 1 best_max_checkpoint_num: int = 1 - # logging_interval: int = 5 tb_interval: int = 5 def __post_init__(self): + if self.sft_type == 'lora': + if self.learning_rate is None: + self.learning_rate = 1e-4 + if self.save_trainer_state is None: + self.save_trainer_state = True + if self.last_save_interval is None: + self.last_save_interval = self.eval_interval + elif self.sft_type == 'full': + if self.learning_rate is None: + self.learning_rate = 1e-5 + if self.save_trainer_state is None: + self.save_trainer_state = False # save disk space + if self.last_save_interval is None: + # Saving the model takes a long time + self.last_save_interval = self.eval_interval * 4 + else: + raise ValueError(f'sft_type: {self.sft_type}') + if self.lora_target_modules is None: if self.model_type in {'baichuan-7b', 'baichuan-13b'}: self.lora_target_modules = ['W_pack'] @@ -57,181 +85,179 @@ class Arguments: raise ValueError(f'model_type: {self.model_type}') -def parse_args() -> Arguments: - args, = HfArgumentParser([Arguments]).parse_args_into_dataclasses() +def parse_args() -> SftArguments: + # return_remaining_strings=True for notebook compatibility + args, remaining_args = HfArgumentParser([ + SftArguments + ]).parse_args_into_dataclasses(return_remaining_strings=True) + logger.info(f'args: {args}') + if len(remaining_args) > 0: + logger.warning(f'remaining_args: {remaining_args}') return args -args = parse_args() -logger.info(args) -select_device(args.device) -seed_everything(args.seed) +def llm_sft(args: SftArguments) -> None: + select_device(args.device) + seed_everything(args.seed) -# ### Loading Model and Tokenizer -if args.model_type == 'baichuan-7b': - model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.5') - model, tokenizer = get_baichuan_model_tokenizer(model_dir) -elif args.model_type == 'baichuan-13b': - model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', 'v1.0.2') - model, tokenizer = get_baichuan_model_tokenizer(model_dir) -elif args.model_type == 'chatglm2': - model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') - model, tokenizer = get_chatglm2_model_tokenizer(model_dir) -elif args.model_type == 'llama2-7b': - model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.0') - model, tokenizer = get_llama2_model_tokenizer(model_dir) -else: - raise ValueError(f'model_type: {args.model_type}') + # ### Loading Model and Tokenizer + support_bf16 = torch.cuda.is_bf16_supported() + if not support_bf16: + logger.warning(f'support_bf16: {support_bf16}') + model, tokenizer, model_dir = get_model_tokenizer( + args.model_type, torch_dtype=torch.bfloat16) -# -if args.gradient_checkpoint: - # baichuan13B does not 
implement the `get_input_embeddings` function - if args.model_type == 'baichuan-13b': + if args.gradient_checkpoint: + # baichuan-13b does not implement the `get_input_embeddings` function + if args.model_type == 'baichuan-13b': + model.__class__.get_input_embeddings = MethodType( + lambda self: self.model.embed_tokens, model) + model.gradient_checkpointing_enable() + model.enable_input_require_grads() - def get_input_embeddings(self): - return self.model.embed_tokens + # ### Preparing lora + if args.sft_type == 'lora': + lora_config = LoRAConfig( + replace_modules=args.lora_target_modules, + rank=args.lora_rank, + lora_alpha=args.lora_alpha, + lora_dropout=args.lora_dropout_p) + logger.info(f'lora_config: {lora_config}') + Swift.prepare_model(model, lora_config) - model.__class__.get_input_embeddings = get_input_embeddings.__get__( - model) - model.gradient_checkpointing_enable() - model.enable_input_require_grads() + show_freeze_layers(model) + print_model_info(model) + # check the device and dtype of the model + _p: Parameter = list(model.parameters())[-1] + logger.info(f'device: {_p.device}, dtype: {_p.dtype}') -# ### Preparing lora -lora_config = LoRAConfig( - replace_modules=args.lora_target_modules, - rank=args.lora_rank, - lora_alpha=args.lora_alpha, - lora_dropout=args.lora_dropout_p) -logger.info(f'lora_config: {lora_config}') -Swift.prepare_model(model, lora_config) -# -show_freeze_layers(model) -print_model_info(model) -_p: Parameter = list(model.parameters())[100] -logger.info(f'device: {_p.device}, dtype: {_p.dtype}') -model.bfloat16() + # ### Loading Dataset + tokenize_func = partial(tokenize_function, tokenizer=tokenizer) + train_dataset, val_dataset = get_alpaca_en_zh_dataset( + tokenize_func, split_seed=42, data_sample=args.data_sample) + # Data analysis + stat_dataset(train_dataset) + stat_dataset(val_dataset) + data_collator = partial(data_collate_fn, tokenizer=tokenizer) + print_example(train_dataset[0], tokenizer) -# ### Loading Dataset -tokenize_function = partial(tokenize_function, tokenizer=tokenizer) -train_dataset, val_dataset = get_alpaca_en_zh_dataset( - tokenize_function, split_seed=42, data_sample=args.data_sample) -# Data analysis -stat_dataset(train_dataset) -stat_dataset(val_dataset) -data_collate_fn = partial(data_collate_fn, tokenizer=tokenizer) -print_example(train_dataset[0], tokenizer) + # ### Setting Config + cfg_file = os.path.join(model_dir, 'configuration.json') -# ### Setting Config -cfg_file = os.path.join(model_dir, 'configuration.json') -# -T_max = get_T_max(len(train_dataset), args.batch_size, args.max_epochs, True) -work_dir = get_work_dir(f'runs/{args.model_type}') -config = Config({ - 'train': { - 'dataloader': { - 'batch_size_per_gpu': args.batch_size, - 'workers_per_gpu': 1, - 'shuffle': True, - 'drop_last': True, - 'pin_memory': True - }, - 'max_epochs': - args.max_epochs, - 'work_dir': - work_dir, - 'optimizer': { - 'type': 'AdamW', - 'lr': args.learning_rate, - 'weight_decay': args.weight_decay, - 'options': { - 'cumulative_iters': args.n_accumulate_grad, - 'grad_clip': { - 'norm_type': 2, - 'max_norm': args.grad_clip_norm + T_max = get_T_max( + len(train_dataset), args.batch_size, args.max_epochs, True) + work_dir = get_work_dir(f'runs/{args.model_type}') + config = Config({ + 'train': { + 'dataloader': { + 'batch_size_per_gpu': args.batch_size, + 'workers_per_gpu': 1, + 'shuffle': True, + 'drop_last': True, + 'pin_memory': True + }, + 'max_epochs': + args.max_epochs, + 'work_dir': + work_dir, + 'optimizer': { + 'type': 'AdamW', + 
'lr': args.learning_rate, + 'weight_decay': args.weight_decay, + 'options': { + 'cumulative_iters': args.n_accumulate_grad, + 'grad_clip': { + 'norm_type': 2, + 'max_norm': args.grad_clip_norm + } } - } - }, - 'lr_scheduler': { - 'type': 'CosineAnnealingLR', - 'T_max': T_max, - 'eta_min': 0, - 'options': { - 'by_epoch': False, - 'warmup': { - 'type': 'LinearWarmup', - 'warmup_ratio': 0.1, - 'warmup_iters': args.warmup_iters + }, + 'lr_scheduler': { + 'type': 'CosineAnnealingLR', + 'T_max': T_max, + 'eta_min': args.learning_rate * 0.1, + 'options': { + 'by_epoch': False, + 'warmup': { + 'type': 'LinearWarmup', + 'warmup_ratio': 0.1, + 'warmup_iters': args.warmup_iters + } } - } + }, + 'hooks': [ + { + 'type': 'CheckpointHook', + 'by_epoch': False, + 'interval': args.last_save_interval, + 'max_checkpoint_num': args.last_max_checkpoint_num, + 'save_trainer_state': args.save_trainer_state + }, + { + 'type': 'EvaluationHook', + 'by_epoch': False, + 'interval': args.eval_interval + }, + { + 'type': 'BestCkptSaverHook', + 'metric_key': 'loss', + 'save_best': True, + 'rule': 'min', + 'max_checkpoint_num': args.best_max_checkpoint_num, + 'save_trainer_state': args.save_trainer_state + }, + { + 'type': 'TextLoggerHook', + 'by_epoch': True, # Whether EpochBasedTrainer is used + 'interval': args.logging_interval + }, + { + 'type': 'TensorboardHook', + 'by_epoch': False, + 'interval': args.tb_interval + } + ] }, - 'hooks': [ - { - 'type': 'CheckpointHook', - 'by_epoch': False, - 'interval': args.eval_interval, - 'max_checkpoint_num': args.last_max_checkpoint_num + 'evaluation': { + 'dataloader': { + 'batch_size_per_gpu': args.batch_size, + 'workers_per_gpu': 1, + 'shuffle': False, + 'drop_last': False, + 'pin_memory': True }, - { - 'type': 'EvaluationHook', - 'by_epoch': False, - 'interval': args.eval_interval - }, - { - 'type': 'BestCkptSaverHook', - 'metric_key': 'loss', - 'save_best': True, - 'rule': 'min', - 'max_checkpoint_num': args.best_max_checkpoint_num - }, - { - 'type': 'TextLoggerHook', - 'by_epoch': True, # Whether EpochBasedTrainer is used - 'interval': args.logging_interval - }, - { - 'type': 'TensorboardHook', - 'by_epoch': False, - 'interval': args.tb_interval - } - ] - }, - 'evaluation': { - 'dataloader': { - 'batch_size_per_gpu': args.batch_size, - 'workers_per_gpu': 1, - 'shuffle': False, - 'drop_last': False, - 'pin_memory': True - }, - 'metrics': [{ - 'type': 'my_metric', - 'vocab_size': tokenizer.vocab_size - }] - } -}) + 'metrics': [{ + 'type': 'my_metric', + 'vocab_size': tokenizer.vocab_size + }] + } + }) -# ### Finetuning + # ### Finetuning + + def cfg_modify_fn(cfg: Config) -> Config: + cfg.update(config) + return cfg + + trainer = EpochBasedTrainer( + model=model, + cfg_file=cfg_file, + data_collator=data_collator, + train_dataset=train_dataset, + eval_dataset=val_dataset, + remove_unused_data=True, + seed=42, + device='cpu', # No placement for model, leave the model to `device_map` + cfg_modify_fn=cfg_modify_fn, + ) + + trainer.train() + + # ### Visualization + tb_dir = os.path.join(work_dir, 'tensorboard_output') + plot_image(tb_dir, ['loss'], 0.9) -def cfg_modify_fn(cfg: Config) -> Config: - cfg.update(config) - return cfg - - -trainer = EpochBasedTrainer( - model=model, - cfg_file=cfg_file, - data_collator=data_collate_fn, - train_dataset=train_dataset, - eval_dataset=val_dataset, - remove_unused_data=True, - seed=42, - device='cpu', # No placement for model, leave the model to `device_map` - cfg_modify_fn=cfg_modify_fn, -) - -trainer.train() - -# ### Visualization 
-tb_dir = os.path.join(work_dir, 'tensorboard_output') -plot_image(tb_dir, ['loss'], 0.9) +if __name__ == '__main__': + args = parse_args() + llm_sft(args) diff --git a/modelscope/exporters/base.py b/modelscope/exporters/base.py index d105afd2..1bfed176 100644 --- a/modelscope/exporters/base.py +++ b/modelscope/exporters/base.py @@ -9,7 +9,7 @@ from modelscope.utils.constant import ModelFile from modelscope.utils.logger import get_logger from .builder import build_exporter -logger = get_logger(__name__) +logger = get_logger() class Exporter(ABC): diff --git a/modelscope/exporters/cv/cartoon_translation_exporter.py b/modelscope/exporters/cv/cartoon_translation_exporter.py index 79b859cb..0cfd746f 100644 --- a/modelscope/exporters/cv/cartoon_translation_exporter.py +++ b/modelscope/exporters/cv/cartoon_translation_exporter.py @@ -9,7 +9,7 @@ from modelscope.exporters.tf_model_exporter import TfModelExporter from modelscope.models.cv.cartoon import CartoonModel from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() if version.parse(tf.__version__) < version.parse('2'): pass diff --git a/modelscope/exporters/nlp/csanmt_for_translation_exporter.py b/modelscope/exporters/nlp/csanmt_for_translation_exporter.py index 6b69595d..65b55b43 100644 --- a/modelscope/exporters/nlp/csanmt_for_translation_exporter.py +++ b/modelscope/exporters/nlp/csanmt_for_translation_exporter.py @@ -13,7 +13,7 @@ from modelscope.utils.constant import Tasks from modelscope.utils.logger import get_logger from modelscope.utils.test_utils import compare_arguments_nested -logger = get_logger(__name__) +logger = get_logger() if tf.__version__ >= '2.0': tf = tf.compat.v1 diff --git a/modelscope/models/nlp/chatglm/configuration.py b/modelscope/models/nlp/chatglm/configuration.py index 18fdca0f..5ecf3484 100644 --- a/modelscope/models/nlp/chatglm/configuration.py +++ b/modelscope/models/nlp/chatglm/configuration.py @@ -1,9 +1,10 @@ """ ChatGLM model configuration """ from transformers.configuration_utils import PretrainedConfig -from transformers.utils import logging -logger = logging.get_logger(__name__) +from modelscope.utils import logger as logging + +logger = logging.get_logger() class ChatGLMConfig(PretrainedConfig): diff --git a/modelscope/models/nlp/chatglm/quantization.py b/modelscope/models/nlp/chatglm/quantization.py index 9994d9c4..4e568c71 100644 --- a/modelscope/models/nlp/chatglm/quantization.py +++ b/modelscope/models/nlp/chatglm/quantization.py @@ -6,9 +6,10 @@ from typing import List import torch from torch.nn import Linear from torch.nn.parameter import Parameter -from transformers.utils import logging -logger = logging.get_logger(__name__) +from modelscope.utils import logger as logging + +logger = logging.get_logger() try: from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up diff --git a/modelscope/models/nlp/chatglm/text_generation.py b/modelscope/models/nlp/chatglm/text_generation.py index 64b82862..95ea33db 100644 --- a/modelscope/models/nlp/chatglm/text_generation.py +++ b/modelscope/models/nlp/chatglm/text_generation.py @@ -24,11 +24,12 @@ from transformers.modeling_outputs import ( from transformers.modeling_utils import PreTrainedModel from transformers.utils import (add_code_sample_docstrings, add_start_docstrings, - add_start_docstrings_to_model_forward, logging) + add_start_docstrings_to_model_forward) from modelscope.metainfo import Models from modelscope.models import MODELS, Model, TorchModel from modelscope.outputs import 
OutputKeys +from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from .configuration import ChatGLMConfig from .tokenization import ChatGLMTokenizer @@ -41,7 +42,7 @@ if sys.platform != 'darwin': torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) -logger = logging.get_logger(__name__) +logger = logging.get_logger() _CHECKPOINT_FOR_DOC = 'THUDM/ChatGLM-6B' _CONFIG_FOR_DOC = 'ChatGLM6BConfig' diff --git a/modelscope/models/nlp/chatglm/tokenization.py b/modelscope/models/nlp/chatglm/tokenization.py index 77bcde55..f5f8cd0c 100644 --- a/modelscope/models/nlp/chatglm/tokenization.py +++ b/modelscope/models/nlp/chatglm/tokenization.py @@ -6,9 +6,11 @@ import numpy as np import sentencepiece as spm from transformers.tokenization_utils import PreTrainedTokenizer from transformers.tokenization_utils_base import BatchEncoding, EncodedInput -from transformers.utils import PaddingStrategy, logging +from transformers.utils import PaddingStrategy -logger = logging.get_logger(__name__) +from modelscope.utils import logger as logging + +logger = logging.get_logger() PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { 'THUDM/chatglm-6b': 2048, diff --git a/modelscope/models/nlp/chatglm2/configuration.py b/modelscope/models/nlp/chatglm2/configuration.py index 1583e886..ab40de0e 100644 --- a/modelscope/models/nlp/chatglm2/configuration.py +++ b/modelscope/models/nlp/chatglm2/configuration.py @@ -1,9 +1,10 @@ """ ChatGLM model configuration """ from transformers import PretrainedConfig -from transformers.utils import logging -logger = logging.get_logger(__name__) +from modelscope.utils import logger as logging + +logger = logging.get_logger() class ChatGLM2Config(PretrainedConfig): diff --git a/modelscope/models/nlp/chatglm2/quantization.py b/modelscope/models/nlp/chatglm2/quantization.py index 116bc0ea..a1e8b8f2 100644 --- a/modelscope/models/nlp/chatglm2/quantization.py +++ b/modelscope/models/nlp/chatglm2/quantization.py @@ -5,9 +5,10 @@ from typing import List import torch from torch.nn.parameter import Parameter -from transformers.utils import logging -logger = logging.get_logger(__name__) +from modelscope.utils import logger as logging + +logger = logging.get_logger() try: from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up diff --git a/modelscope/models/nlp/chatglm2/text_generation.py b/modelscope/models/nlp/chatglm2/text_generation.py index 082e16e7..1052b875 100644 --- a/modelscope/models/nlp/chatglm2/text_generation.py +++ b/modelscope/models/nlp/chatglm2/text_generation.py @@ -19,11 +19,11 @@ from transformers.generation.utils import (GenerationConfig, from transformers.modeling_outputs import (BaseModelOutputWithPast, CausalLMOutputWithPast) from transformers.modeling_utils import PreTrainedModel -from transformers.utils import logging from modelscope import Model, TorchModel from modelscope.metainfo import Models from modelscope.outputs import OutputKeys +from modelscope.utils import logger as logging from modelscope.utils.constant import Tasks from ... 
import MODELS from .configuration import ChatGLM2Config @@ -36,7 +36,7 @@ if sys.platform != 'darwin': torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) -logger = logging.get_logger(__name__) +logger = logging.get_logger() _CHECKPOINT_FOR_DOC = 'THUDM/ChatGLM2-6B' _CONFIG_FOR_DOC = 'ChatGLM6BConfig' diff --git a/modelscope/models/nlp/fid_plug/backbone.py b/modelscope/models/nlp/fid_plug/backbone.py index 70c45633..5dcddcc1 100644 --- a/modelscope/models/nlp/fid_plug/backbone.py +++ b/modelscope/models/nlp/fid_plug/backbone.py @@ -26,10 +26,11 @@ import torch.nn.functional as F from torch import Tensor, nn from torch.nn.init import xavier_uniform_ from transformers import (BertConfig, BertModel, BertTokenizer, RobertaConfig, - RobertaModel, RobertaTokenizer, logging) + RobertaModel, RobertaTokenizer) from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel +from modelscope.utils import logger as logging from .configuration import PlugConfig CONFIG_NAME = 'config.json' @@ -729,7 +730,7 @@ class PlugForConditionalGeneration(PlugPreTrainedModel): def __init__(self, config, checkpoint=None, dataset: str = 'default'): super().__init__(config) - self.logger = logging.get_logger(__name__) + self.logger = logging.get_logger() self.config = config if config.encoder == 'roberta': tokenizer = RobertaTokenizer.from_pretrained( diff --git a/modelscope/models/nlp/llama/backbone.py b/modelscope/models/nlp/llama/backbone.py index 120581a9..16be099f 100755 --- a/modelscope/models/nlp/llama/backbone.py +++ b/modelscope/models/nlp/llama/backbone.py @@ -35,7 +35,7 @@ from modelscope.utils.constant import Tasks from modelscope.utils.logger import get_logger from .configuration import LlamaConfig -logger = get_logger(__name__) +logger = get_logger() _CONFIG_FOR_DOC = 'LlamaConfig' diff --git a/modelscope/models/nlp/llama/tokenization.py b/modelscope/models/nlp/llama/tokenization.py index b3d24dd9..cd423683 100644 --- a/modelscope/models/nlp/llama/tokenization.py +++ b/modelscope/models/nlp/llama/tokenization.py @@ -29,7 +29,7 @@ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer from modelscope.utils.logger import get_logger # This file is mainly copied from the llama code of transformers -logger = get_logger(__name__) +logger = get_logger() VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model'} diff --git a/modelscope/models/nlp/llama/tokenization_fast.py b/modelscope/models/nlp/llama/tokenization_fast.py index 7aa0ac1b..13696b59 100644 --- a/modelscope/models/nlp/llama/tokenization_fast.py +++ b/modelscope/models/nlp/llama/tokenization_fast.py @@ -31,7 +31,7 @@ if is_sentencepiece_available(): else: LlamaTokenizer = None -logger = get_logger(__name__) +logger = get_logger() VOCAB_FILES_NAMES = { 'vocab_file': 'tokenizer.model', 'tokenizer_file': 'tokenizer.json' diff --git a/modelscope/models/nlp/llama2/backbone.py b/modelscope/models/nlp/llama2/backbone.py index c0983478..ee0d742b 100755 --- a/modelscope/models/nlp/llama2/backbone.py +++ b/modelscope/models/nlp/llama2/backbone.py @@ -36,7 +36,7 @@ from modelscope.utils.logger import get_logger from ... 
import MODELS from .configuration import Llama2Config -logger = get_logger(__name__) +logger = get_logger() _CONFIG_FOR_DOC = 'Llama2Config' @@ -570,7 +570,7 @@ class LlamaPreTrainedModel(TorchModel, PreTrainedModel): module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, LlamaModel): + if isinstance(module, Llama2Model): module.gradient_checkpointing = value @classmethod diff --git a/modelscope/models/nlp/llama2/configuration.py b/modelscope/models/nlp/llama2/configuration.py index b95f9ddd..c9f38fe4 100644 --- a/modelscope/models/nlp/llama2/configuration.py +++ b/modelscope/models/nlp/llama2/configuration.py @@ -23,7 +23,7 @@ from transformers.configuration_utils import PretrainedConfig from modelscope.utils.logger import get_logger -logger = get_logger(__name__) +logger = get_logger() LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {} diff --git a/modelscope/models/nlp/llama2/tokenization.py b/modelscope/models/nlp/llama2/tokenization.py index d57c6017..bb276621 100644 --- a/modelscope/models/nlp/llama2/tokenization.py +++ b/modelscope/models/nlp/llama2/tokenization.py @@ -30,7 +30,7 @@ from modelscope.utils.logger import get_logger if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation -logger = get_logger(__name__) +logger = get_logger() VOCAB_FILES_NAMES = {'vocab_file': 'tokenizer.model'} diff --git a/modelscope/models/nlp/llama2/tokenization_fast.py b/modelscope/models/nlp/llama2/tokenization_fast.py index 6cfae2ff..13862955 100644 --- a/modelscope/models/nlp/llama2/tokenization_fast.py +++ b/modelscope/models/nlp/llama2/tokenization_fast.py @@ -18,9 +18,11 @@ from typing import TYPE_CHECKING, Optional, Tuple from tokenizers import processors from transformers.tokenization_utils_fast import PreTrainedTokenizerFast -from transformers.utils import is_sentencepiece_available, logging +from transformers.utils import is_sentencepiece_available from transformers.utils.versions import require_version +from modelscope.utils import logger as logging + if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation @@ -31,7 +33,7 @@ if is_sentencepiece_available(): else: Llama2Tokenizer = None -logger = logging.get_logger(__name__) +logger = logging.get_logger() VOCAB_FILES_NAMES = { 'vocab_file': 'tokenizer.model', 'tokenizer_file': 'tokenizer.json' diff --git a/modelscope/models/nlp/peer/backbone.py b/modelscope/models/nlp/peer/backbone.py index 2dca8dda..4bf376cd 100644 --- a/modelscope/models/nlp/peer/backbone.py +++ b/modelscope/models/nlp/peer/backbone.py @@ -36,7 +36,7 @@ from modelscope.utils.nlp.utils import parse_labels_in_order from .configuration import PeerConfig from .sas_utils import SequenceSideInfo -logger = logging.get_logger(__name__) +logger = logging.get_logger() PEER_PRETRAINED_MODEL_ARCHIVE_LIST = [ 'google/peer-small-generator', diff --git a/modelscope/models/nlp/peer/configuration.py b/modelscope/models/nlp/peer/configuration.py index da8b0a74..794b89f7 100644 --- a/modelscope/models/nlp/peer/configuration.py +++ b/modelscope/models/nlp/peer/configuration.py @@ -20,7 +20,7 @@ from transformers.configuration_utils import PretrainedConfig from modelscope.utils import logger as logging -logger = logging.get_logger(__name__) +logger = logging.get_logger() class PeerConfig(PretrainedConfig): diff --git a/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py b/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py index 49d20278..86ca61dd 100644 --- 
a/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py +++ b/modelscope/trainers/hooks/checkpoint/checkpoint_hook.py @@ -76,7 +76,7 @@ class CheckpointHook(Hook): private_hub: Optional[bool] = True, hub_revision: Optional[str] = DEFAULT_REPOSITORY_REVISION, upload_strategy: Optional[str] = UploadStrategy.cancel, - save_trainer_state: Optional[bool] = True, + save_trainer_state: bool = True, **kwargs): self.interval = interval self.save_dir = save_dir @@ -302,6 +302,7 @@ class BestCkptSaverHook(CheckpointHook): max_checkpoint_num (int): The max number of checkpoint files, default None which means never delete anything. If the number exceeding the limit, checkpoints with worse metric will be deleted, which is judged by the `rule` and `metric_key` arguments. + save_trainer_state (bool): Save the trainer state for continue training, default True. The `BestCkptSaverHook` class accepts `output_sub_dir` and `output_dir` argument as its super class do. If neither of them are passed, the default value is `{save_dir}/output_best`. @@ -320,6 +321,7 @@ class BestCkptSaverHook(CheckpointHook): save_file_name: Optional[str] = None, restore_best: Optional[bool] = False, max_checkpoint_num: Optional[int] = 1, + save_trainer_state: bool = True, **kwargs): assert rule in ['max', 'min'], 'Only support "max" or "min" rule now.' output_kwargs = {} @@ -329,6 +331,7 @@ class BestCkptSaverHook(CheckpointHook): kwargs.pop('save_strategy', None) super().__init__( max_checkpoint_num=max_checkpoint_num, + save_trainer_state=save_trainer_state, **kwargs, **output_kwargs, ) diff --git a/modelscope/utils/data_collators.py b/modelscope/utils/data_collators.py index 044b1993..0981c836 100644 --- a/modelscope/utils/data_collators.py +++ b/modelscope/utils/data_collators.py @@ -7,7 +7,7 @@ from typing import Any, List, Optional, Tuple from .logger import get_logger -logger = get_logger(__name__) +logger = get_logger() class RemoveColumnsCollator: From 6fb340e7f88634d7b731464e29c794ddd5d15c67 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 19:17:49 +0800 Subject: [PATCH 64/87] add lora_rank for lora stable diffusion --- .../lora/finetune_stable_diffusion_lora.py | 7 +++++++ .../pytorch/stable_diffusion/lora/run_train_lora.sh | 1 + .../lora_diffusion/lora_diffusion_trainer.py | 11 ++++++++++- 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py index 8ad3a28b..b878b5f8 100644 --- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py +++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py @@ -18,6 +18,12 @@ class StableDiffusionLoraArguments(TrainingArgs): default='dog', metadata={ 'help': 'The pipeline prompt.', }) + + lora_rank: int = field( + default=4, metadata={ + 'help': 'The rank size of lora intermediate linear.', + } + ) training_args = StableDiffusionLoraArguments( @@ -59,6 +65,7 @@ kwargs = dict( work_dir=training_args.work_dir, train_dataset=train_dataset, eval_dataset=validation_dataset, + lora_rank=args.lora_rank, cfg_modify_fn=cfg_modify_fn) # build trainer and training diff --git a/examples/pytorch/stable_diffusion/lora/run_train_lora.sh b/examples/pytorch/stable_diffusion/lora/run_train_lora.sh index 876a2475..570c36af 100644 --- a/examples/pytorch/stable_diffusion/lora/run_train_lora.sh +++ b/examples/pytorch/stable_diffusion/lora/run_train_lora.sh @@ -5,6 +5,7 @@ PYTHONPATH=. 
torchrun examples/pytorch/stable_diffusion/lora/finetune_stable_dif --work_dir './tmp/lora_diffusion' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune' \ --max_epochs 100 \ + --lora_rank 8 \ --save_ckpt_strategy 'by_epoch' \ --logging_interval 1 \ --train.dataloader.workers_per_gpu 0 \ diff --git a/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py b/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py index 99351fef..7c6644bd 100644 --- a/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/lora_diffusion/lora_diffusion_trainer.py @@ -34,6 +34,14 @@ class LoraDiffusionTrainer(EpochBasedTrainer): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + """Lora trainers for fine-tuning stable diffusion + + Args: + lora_rank: The rank size of lora intermediate linear. + + """ + lora_rank = kwargs.pop('lora_rank', 4) + # set lora save checkpoint processor ckpt_hook = list( filter(lambda hook: isinstance(hook, CheckpointHook), @@ -59,7 +67,8 @@ class LoraDiffusionTrainer(EpochBasedTrainer): lora_attn_procs[name] = LoRAAttnProcessor( hidden_size=hidden_size, - cross_attention_dim=cross_attention_dim) + cross_attention_dim=cross_attention_dim, + rank=lora_rank) self.model.unet.set_attn_processor(lora_attn_procs) From eb24e23d1993225a1f2781c3bdc9d7401c0edce5 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 19:24:52 +0800 Subject: [PATCH 65/87] add lora_rank for lora stable diffusion --- examples/pytorch/stable_diffusion/lora/run_train_lora.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/stable_diffusion/lora/run_train_lora.sh b/examples/pytorch/stable_diffusion/lora/run_train_lora.sh index 570c36af..bf62f833 100644 --- a/examples/pytorch/stable_diffusion/lora/run_train_lora.sh +++ b/examples/pytorch/stable_diffusion/lora/run_train_lora.sh @@ -5,7 +5,7 @@ PYTHONPATH=. 
torchrun examples/pytorch/stable_diffusion/lora/finetune_stable_dif --work_dir './tmp/lora_diffusion' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune' \ --max_epochs 100 \ - --lora_rank 8 \ + --lora_rank 4 \ --save_ckpt_strategy 'by_epoch' \ --logging_interval 1 \ --train.dataloader.workers_per_gpu 0 \ From bc93e2dc96febadc6f285af1c5642c19871e1cea Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 19:32:04 +0800 Subject: [PATCH 66/87] add lora_rank for lora stable diffusion --- .../stable_diffusion/lora/finetune_stable_diffusion_lora.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py index b878b5f8..e15aa444 100644 --- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py +++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py @@ -22,8 +22,7 @@ class StableDiffusionLoraArguments(TrainingArgs): lora_rank: int = field( default=4, metadata={ 'help': 'The rank size of lora intermediate linear.', - } - ) + }) training_args = StableDiffusionLoraArguments( From 426f55d57b8932aeb41aa1941f2172475542e8ca Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 19:43:20 +0800 Subject: [PATCH 67/87] add lora_rank for lora stable diffusion --- .../stable_diffusion/lora/finetune_stable_diffusion_lora.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py index e15aa444..325cc7ae 100644 --- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py +++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py @@ -18,7 +18,7 @@ class StableDiffusionLoraArguments(TrainingArgs): default='dog', metadata={ 'help': 'The pipeline prompt.', }) - + lora_rank: int = field( default=4, metadata={ 'help': 'The rank size of lora intermediate linear.', From 8e00d85317ca3f7a44085ce04b551829c916633c Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 19:46:22 +0800 Subject: [PATCH 68/87] fix bugs --- .../custom_diffusion_trainer.py | 19 +++++++++++++------ .../trainers/test_custom_diffusion_trainer.py | 4 ++-- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index e4925af8..3e6450d7 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -269,6 +269,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): sample_batch_size: Batch size (per device) for sampling images. train_batch_size: Batch size (per device) for the training dataloader. center_crop: execute center crop or not. + concepts_list: Path to json containing multiple concepts, will overwrite parameters like instance_prompt etc. instance_data_name: The instance data local dir or online ID.
""" @@ -288,6 +289,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.sample_batch_size = kwargs.pop('sample_batch_size', 4) self.train_batch_size = kwargs.pop('train_batch_size', 2) self.center_crop = kwargs.pop('center_crop', False) + self.concepts_list = kwargs.pop('concepts_list', None) instance_data_name = kwargs.pop( 'instance_data_name', 'buptwq/lora-stable-diffusion-finetune-dog') @@ -299,12 +301,17 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.instance_data_dir = os.path.dirname( next(iter(ds))['Target:FILE']) - self.concepts_list = [{ - 'instance_prompt': self.instance_prompt, - 'class_prompt': self.class_prompt, - 'instance_data_dir': self.instance_data_dir, - 'class_data_dir': self.class_data_dir, - }] + if self.concepts_list is None: + self.concepts_list = [{ + 'instance_prompt': self.instance_prompt, + 'class_prompt': self.class_prompt, + 'instance_data_dir': self.instance_data_dir, + 'class_data_dir': self.class_data_dir, + }] + else: + with open(self.concepts_list, "r") as f: + self.concepts_list = json.load(f) + print("--------self.concepts_list: ", self.concepts_list) # Adding a modifier token which is optimized self.modifier_token_id = [] diff --git a/tests/trainers/test_custom_diffusion_trainer.py b/tests/trainers/test_custom_diffusion_trainer.py index 025168d3..6c647401 100644 --- a/tests/trainers/test_custom_diffusion_trainer.py +++ b/tests/trainers/test_custom_diffusion_trainer.py @@ -38,7 +38,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): shutil.rmtree(self.tmp_dir) super().tearDown() - @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') def test_custom_diffusion_train(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' @@ -76,7 +76,7 @@ class TestCustomDiffusionTrainer(unittest.TestCase): output = pipe({'text': prompt}) cv2.imwrite('./custom_result.png', output['output_imgs'][0]) - @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') def test_dreambooth_diffusion_eval(self): model_id = 'AI-ModelScope/stable-diffusion-v1-5' model_revision = 'v1.0.9' From 64203e89ee6688ec22810041006fa2a601ee696f Mon Sep 17 00:00:00 2001 From: wenmeng zhou Date: Mon, 24 Jul 2023 20:53:27 +0800 Subject: [PATCH 69/87] Compatibility for huggingface transformers (#391) --- modelscope/__init__.py | 7 ++ modelscope/models/base/base_model.py | 7 ++ modelscope/utils/hf_util.py | 178 +++++++++++++++++++++++++++ modelscope/utils/plugins.py | 65 +++++++++- tests/models/test_model_base.py | 41 ++++++ tests/utils/test_hf_util.py | 55 +++++++++ tests/utils/test_plugin.py | 4 + 7 files changed, 351 insertions(+), 6 deletions(-) create mode 100644 modelscope/utils/hf_util.py create mode 100644 tests/models/test_model_base.py create mode 100644 tests/utils/test_hf_util.py diff --git a/modelscope/__init__.py b/modelscope/__init__.py index f7553958..bf95cb81 100644 --- a/modelscope/__init__.py +++ b/modelscope/__init__.py @@ -26,6 +26,9 @@ if TYPE_CHECKING: from .pipelines import Pipeline, pipeline from .utils.hub import read_config, create_model_if_not_exist from .utils.logger import get_logger + from .utils.hf_util import AutoConfig, GenerationConfig + from .utils.hf_util import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM + from .utils.hf_util import AutoTokenizer from .msdatasets import MsDataset else: @@ -65,6 +68,10 @@ 
'pipelines': ['Pipeline', 'pipeline'], 'utils.hub': ['read_config', 'create_model_if_not_exist'], 'utils.logger': ['get_logger'], + 'utils.hf_util': [ + 'AutoConfig', 'GenerationConfig', 'AutoModel', + 'AutoModelForCausalLM', 'AutoModelForSeq2SeqLM', 'AutoTokenizer' + ], 'msdatasets': ['MsDataset'] } diff --git a/modelscope/models/base/base_model.py b/modelscope/models/base/base_model.py index 0edb740e..02f50483 100644 --- a/modelscope/models/base/base_model.py +++ b/modelscope/models/base/base_model.py @@ -88,6 +88,8 @@ class Model(ABC): equal to the model saved. For example, load a `backbone` into a `text-classification` model. Other kwargs will be directly fed into the `model` key, to replace the default configs. + use_hf(bool): If set True, will use AutoModel in hf to initialize the model to keep compatibility + with huggingface transformers. Returns: A model instance. @@ -116,6 +118,11 @@ class Model(ABC): local_model_dir = snapshot_download( model_name_or_path, revision, user_agent=invoked_by) logger.info(f'initialize model from {local_model_dir}') + + if kwargs.pop('use_hf', False): + from modelscope import AutoModel + return AutoModel.from_pretrained(local_model_dir) + if cfg_dict is not None: cfg = cfg_dict else: diff --git a/modelscope/utils/hf_util.py b/modelscope/utils/hf_util.py new file mode 100644 index 00000000..64465092 --- /dev/null +++ b/modelscope/utils/hf_util.py @@ -0,0 +1,178 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +import os +import sys + +from transformers import AutoConfig as AutoConfigHF +from transformers import AutoModel as AutoModelHF +from transformers import AutoModelForCausalLM as AutoModelForCausalLMHF +from transformers import AutoModelForSeq2SeqLM as AutoModelForSeq2SeqLMHF +from transformers import AutoTokenizer as AutoTokenizerHF +from transformers import GenerationConfig as GenerationConfigHF +from transformers import PreTrainedModel, PreTrainedTokenizerBase + +from modelscope import snapshot_download +from modelscope.utils.constant import Invoke + + +def user_agent(invoked_by=None): + if invoked_by is None: + invoked_by = Invoke.PRETRAINED + uagent = '%s/%s' % (Invoke.KEY, invoked_by) + return uagent + + +def patch_tokenizer_base(): + """ Monkey patch PreTrainedTokenizerBase.from_pretrained to adapt to modelscope hub. + """ + ori_from_pretrained = PreTrainedTokenizerBase.from_pretrained.__func__ + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.bin', r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern) + else: + model_dir = pretrained_model_name_or_path + return ori_from_pretrained(cls, model_dir, *model_args, **kwargs) + + PreTrainedTokenizerBase.from_pretrained = from_pretrained + + +def patch_model_base(): + """ Monkey patch PreTrainedModel.from_pretrained to adapt to modelscope hub. 
+ """ + ori_from_pretrained = PreTrainedModel.from_pretrained.__func__ + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern) + else: + model_dir = pretrained_model_name_or_path + return ori_from_pretrained(cls, model_dir, *model_args, **kwargs) + + PreTrainedModel.from_pretrained = from_pretrained + + +patch_tokenizer_base() +patch_model_base() + + +class AutoModel(AutoModelHF): + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern, + user_agent=user_agent()) + else: + model_dir = pretrained_model_name_or_path + + return super().from_pretrained(model_dir, *model_args, **kwargs) + + +class AutoModelForCausalLM(AutoModelForCausalLMHF): + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern, + user_agent=user_agent()) + else: + model_dir = pretrained_model_name_or_path + + return super().from_pretrained(model_dir, *model_args, **kwargs) + + +class AutoModelForSeq2SeqLM(AutoModelForSeq2SeqLMHF): + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern, + user_agent=user_agent()) + else: + model_dir = pretrained_model_name_or_path + + return super().from_pretrained(model_dir, *model_args, **kwargs) + + +class AutoTokenizer(AutoTokenizerHF): + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.bin', r'\w+\.safetensors'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern) + else: + model_dir = pretrained_model_name_or_path + return super().from_pretrained(model_dir, *model_args, **kwargs) + + +class AutoConfig(AutoConfigHF): + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.bin', r'\w+\.py'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern) + else: + model_dir = pretrained_model_name_or_path + return super().from_pretrained(model_dir, *model_args, **kwargs) + + +class GenerationConfig(GenerationConfigHF): + + @classmethod + def 
from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + ignore_file_pattern = [r'\w+\.bin', r'\w+\.py'] + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern) + else: + model_dir = pretrained_model_name_or_path + return super().from_pretrained(model_dir, *model_args, **kwargs) diff --git a/modelscope/utils/plugins.py b/modelscope/utils/plugins.py index e997f676..2c510dd2 100644 --- a/modelscope/utils/plugins.py +++ b/modelscope/utils/plugins.py @@ -3,6 +3,7 @@ # Part of the implementation is borrowed from wimglenn/johnnydep import copy +import filecmp import importlib import os import pkgutil @@ -28,6 +29,9 @@ logger = get_logger() storage = LocalStorage() MODELSCOPE_FILE_DIR = get_default_cache_dir() +MODELSCOPE_DYNAMIC_MODULE = 'modelscope_modules' +BASE_MODULE_DIR = os.path.join(MODELSCOPE_FILE_DIR, MODELSCOPE_DYNAMIC_MODULE) + PLUGINS_FILENAME = '.modelscope_plugins' OFFICIAL_PLUGINS = [ { @@ -322,6 +326,41 @@ def import_module_from_file(module_name, file_path): return module +def create_module_from_files(file_list, file_prefix, module_name): + """ + Create a python module from a list of files by copying them to the destination directory. + + Args: + file_list (List[str]): List of relative file paths to be copied. + file_prefix (str): Path prefix for each file in file_list. + module_name (str): Name of the module. + + Returns: + None + """ + + def create_empty_file(file_path): + with open(file_path, 'w') as _: + pass + + dest_dir = os.path.join(BASE_MODULE_DIR, module_name) + for file_path in file_list: + file_dir = os.path.dirname(file_path) + target_dir = os.path.join(dest_dir, file_dir) + os.makedirs(target_dir, exist_ok=True) + init_file = os.path.join(target_dir, '__init__.py') + if not os.path.exists(init_file): + create_empty_file(init_file) + + target_file = os.path.join(target_dir, file_path) + src_file = os.path.join(file_prefix, file_path) + if not os.path.exists(target_file) or not filecmp.cmp( + src_file, target_file): + shutil.copyfile(src_file, target_file) + + importlib.invalidate_caches() + + def import_module_from_model_dir(model_dir): """ import all the necessary module from a model dir @@ -340,12 +379,26 @@ def import_module_from_model_dir(model_dir): # install the requirements firstly install_requirements_by_files(requirements) - # then import the modules - import sys - sys.path.insert(0, model_dir) - for file in file_dirs: - module_name = Path(file).stem - import_module_from_file(module_name, file) + if BASE_MODULE_DIR not in sys.path: + sys.path.append(BASE_MODULE_DIR) + + module_name = Path(model_dir).stem + + # in order to keep forward compatibility, we add module path to + # sys.path so that submodule can be imported directly as before + MODULE_PATH = os.path.join(BASE_MODULE_DIR, module_name) + if MODULE_PATH not in sys.path: + sys.path.append(MODULE_PATH) + + relative_file_dirs = [ + file.replace(model_dir.rstrip(os.sep) + os.sep, '') + for file in file_dirs + ] + create_module_from_files(relative_file_dirs, model_dir, module_name) + for file in relative_file_dirs: + submodule = module_name + '.' 
+ file.replace(os.sep, '.').replace( + '.py', '') + importlib.import_module(submodule) def install_requirements_by_names(plugins: List[str]): diff --git a/tests/models/test_model_base.py b/tests/models/test_model_base.py new file mode 100644 index 00000000..9d353ec5 --- /dev/null +++ b/tests/models/test_model_base.py @@ -0,0 +1,41 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +import os +import shutil +import tempfile +import unittest + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from modelscope.models.base import Model + + +class BaseTest(unittest.TestCase): + + def setUp(self): + print(('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + shutil.rmtree(self.tmp_dir) + super().tearDown() + + def test_from_pretrained(self): + model = Model.from_pretrained( + 'baichuan-inc/baichuan-7B', revision='v1.0.5') + self.assertIsNotNone(model) + + def test_from_pretrained_hf(self): + model = Model.from_pretrained( + 'damo/nlp_structbert_sentence-similarity_chinese-tiny', + use_hf=True) + self.assertIsNotNone(model) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/utils/test_hf_util.py b/tests/utils/test_hf_util.py new file mode 100644 index 00000000..7c10cca6 --- /dev/null +++ b/tests/utils/test_hf_util.py @@ -0,0 +1,55 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +import unittest + +from transformers import LlamaForCausalLM, LlamaTokenizer + +from modelscope import (AutoConfig, AutoModel, AutoModelForCausalLM, + AutoTokenizer, GenerationConfig) + + +class HFUtilTest(unittest.TestCase): + + def setUp(self): + pass + + def tearDown(self): + pass + + def test_auto_tokenizer(self): + tokenizer = AutoTokenizer.from_pretrained( + 'baichuan-inc/Baichuan-13B-Chat', + trust_remote_code=True, + revision='v1.0.3') + self.assertEqual(tokenizer.vocab_size, 64000) + self.assertEqual(tokenizer.model_max_length, 4096) + self.assertFalse(tokenizer.is_fast) + + def test_auto_model(self): + model = AutoModelForCausalLM.from_pretrained( + 'baichuan-inc/baichuan-7B', trust_remote_code=True) + self.assertTrue(model is not None) + + def test_auto_config(self): + config = AutoConfig.from_pretrained( + 'baichuan-inc/Baichuan-13B-Chat', + trust_remote_code=True, + revision='v1.0.3') + self.assertEqual(config.model_type, 'baichuan') + gen_config = GenerationConfig.from_pretrained( + 'baichuan-inc/Baichuan-13B-Chat', + trust_remote_code=True, + revision='v1.0.3') + self.assertEqual(gen_config.assistant_token_id, 196) + + def test_transformer_patch(self): + tokenizer = LlamaTokenizer.from_pretrained( + 'skyline2006/llama-7b', revision='v1.0.1') + self.assertIsNotNone(tokenizer) + model = LlamaForCausalLM.from_pretrained( + 'skyline2006/llama-7b', revision='v1.0.1') + self.assertIsNotNone(model) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/utils/test_plugin.py b/tests/utils/test_plugin.py index 447ce1c9..6bbba197 100644 --- a/tests/utils/test_plugin.py +++ b/tests/utils/test_plugin.py @@ -124,3 +124,7 @@ class PluginTest(unittest.TestCase): result = self.plugins_manager.list_plugins(show_all=True) self.assertEqual(len(result.items()), len(OFFICIAL_PLUGINS)) + + +if __name__ == '__main__': + unittest.main() From 8e157cfa1500d34f386d29a63e8d330c5d229a64 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Mon, 24 Jul 2023 22:10:52 +0800 Subject: [PATCH 
70/87] precommit --- .../stable_diffusion/lora/finetune_stable_diffusion_lora.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py index 325cc7ae..6001af48 100644 --- a/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py +++ b/examples/pytorch/stable_diffusion/lora/finetune_stable_diffusion_lora.py @@ -20,7 +20,8 @@ class StableDiffusionLoraArguments(TrainingArgs): }) lora_rank: int = field( - default=4, metadata={ + default=4, + metadata={ 'help': 'The rank size of lora intermediate linear.', }) From 7b249400d9c7568ef3625b520fb2757d04f9f51f Mon Sep 17 00:00:00 2001 From: wenmeng zhou Date: Tue, 25 Jul 2023 10:13:02 +0800 Subject: [PATCH 71/87] refine class wrapper (#409) --- modelscope/utils/hf_util.py | 140 ++++++++++-------------------------- 1 file changed, 39 insertions(+), 101 deletions(-) diff --git a/modelscope/utils/hf_util.py b/modelscope/utils/hf_util.py index 64465092..8d877ef9 100644 --- a/modelscope/utils/hf_util.py +++ b/modelscope/utils/hf_util.py @@ -70,109 +70,47 @@ patch_tokenizer_base() patch_model_base() -class AutoModel(AutoModelHF): +def get_wrapped_class(module_class, ignore_file_pattern=[], **kwargs): + """Get a custom wrapper class for auto classes to download the models from the ModelScope hub + Args: + module_class: The actual module class + ignore_file_pattern (`str` or `List`, *optional*, default to `None`): + Any file pattern to be ignored in downloading, like exact file names or file extensions. + Returns: + The wrapper + """ - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.safetensors'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern, - user_agent=user_agent()) - else: - model_dir = pretrained_model_name_or_path + class ClassWrapper(module_class): - return super().from_pretrained(model_dir, *model_args, **kwargs) + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, *model_args, + **kwargs): + if not os.path.exists(pretrained_model_name_or_path): + revision = kwargs.pop('revision', None) + model_dir = snapshot_download( + pretrained_model_name_or_path, + revision=revision, + ignore_file_pattern=ignore_file_pattern, + user_agent=user_agent()) + else: + model_dir = pretrained_model_name_or_path + + return module_class.from_pretrained(model_dir, *model_args, + **kwargs) + + return ClassWrapper -class AutoModelForCausalLM(AutoModelForCausalLMHF): +AutoModel = get_wrapped_class( + AutoModelHF, ignore_file_pattern=[r'\w+\.safetensors']) +AutoModelForCausalLM = get_wrapped_class( + AutoModelForCausalLMHF, ignore_file_pattern=[r'\w+\.safetensors']) +AutoModelForSeq2SeqLM = get_wrapped_class( + AutoModelForSeq2SeqLMHF, ignore_file_pattern=[r'\w+\.safetensors']) - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.safetensors'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern, - user_agent=user_agent()) - else: - model_dir = pretrained_model_name_or_path - - 
return super().from_pretrained(model_dir, *model_args, **kwargs) - - -class AutoModelForSeq2SeqLM(AutoModelForSeq2SeqLMHF): - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.safetensors'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern, - user_agent=user_agent()) - else: - model_dir = pretrained_model_name_or_path - - return super().from_pretrained(model_dir, *model_args, **kwargs) - - -class AutoTokenizer(AutoTokenizerHF): - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.bin', r'\w+\.safetensors'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern) - else: - model_dir = pretrained_model_name_or_path - return super().from_pretrained(model_dir, *model_args, **kwargs) - - -class AutoConfig(AutoConfigHF): - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.bin', r'\w+\.py'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern) - else: - model_dir = pretrained_model_name_or_path - return super().from_pretrained(model_dir, *model_args, **kwargs) - - -class GenerationConfig(GenerationConfigHF): - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, - **kwargs): - ignore_file_pattern = [r'\w+\.bin', r'\w+\.py'] - if not os.path.exists(pretrained_model_name_or_path): - revision = kwargs.pop('revision', None) - model_dir = snapshot_download( - pretrained_model_name_or_path, - revision=revision, - ignore_file_pattern=ignore_file_pattern) - else: - model_dir = pretrained_model_name_or_path - return super().from_pretrained(model_dir, *model_args, **kwargs) +AutoTokenizer = get_wrapped_class( + AutoTokenizerHF, ignore_file_pattern=[r'\w+\.bin', r'\w+\.safetensors']) +AutoConfig = get_wrapped_class( + AutoConfigHF, ignore_file_pattern=[r'\w+\.bin', r'\w+\.safetensors']) +GenerationConfig = get_wrapped_class( + GenerationConfigHF, ignore_file_pattern=[r'\w+\.bin', r'\w+\.safetensors']) From ffbf77fcf25c4253dd850ae1d83fd54cbec61c28 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Tue, 25 Jul 2023 14:47:45 +0800 Subject: [PATCH 72/87] update --- .../custom/finetune_stable_diffusion_custom.py | 8 ++++++++ .../pytorch/stable_diffusion/custom/run_train_custom.sh | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py index 35eb3792..47089bef 100644 --- a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -89,6 +89,13 @@ class StableDiffusionCustomArguments(TrainingArgs): 'help': 'The instance data local dir or online ID.', }) + concepts_list: str = field( + default=None, + metadata={ + 'help': 'Path to json containing 
multiple concepts.', + }) + + training_args = StableDiffusionCustomArguments( task='text-to-image-synthesis').parse_cli() @@ -134,6 +141,7 @@ kwargs = dict( sample_batch_size=args.sample_batch_size, initializer_token=args.initializer_token, class_data_dir=args.class_data_dir, + concepts_list=args.concepts_list, resolution=args.resolution, prior_loss_weight=args.prior_loss_weight, freeze_model=args.freeze_model, diff --git a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh index fe8de203..3e1ff024 100644 --- a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh +++ b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh @@ -7,7 +7,8 @@ PYTHONPATH=. torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_d --class_data_dir './tmp/class_data' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune-dog' \ --max_epochs 250 \ - --modifier_token "" \ + --concepts_list '/mnt/user/E-yijing.wq-401594/github/custom_diffusion/modelscope/json/concept_list.json' \ + --modifier_token "+" \ --num_class_images=200 \ --save_ckpt_strategy 'by_epoch' \ --logging_interval 1 \ From 3412a074c559f496d8d81d705e4a058552bd6607 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Tue, 25 Jul 2023 15:00:28 +0800 Subject: [PATCH 73/87] precommit --- .../custom/finetune_stable_diffusion_custom.py | 3 +-- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py | 5 +++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py index 47089bef..007ea82b 100644 --- a/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py +++ b/examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py @@ -94,7 +94,6 @@ class StableDiffusionCustomArguments(TrainingArgs): metadata={ 'help': 'Path to json containing multiple concepts.', }) - training_args = StableDiffusionCustomArguments( @@ -160,7 +159,7 @@ pipe = pipeline( task=Tasks.text_to_image_synthesis, model=training_args.model, custom_dir=training_args.work_dir + '/output', - modifier_token='', + modifier_token='+', model_revision=args.model_revision) output = pipe({'text': args.instance_prompt}) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 3e6450d7..2421d145 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -7,6 +7,7 @@ import warnings from pathlib import Path from typing import Union +import json import numpy as np import torch import torch.nn.functional as F @@ -309,9 +310,9 @@ class CustomDiffusionTrainer(EpochBasedTrainer): 'class_data_dir': self.class_data_dir, }] else: - with open(args.concepts_list, "r") as f: + with open(self.concepts_list, 'r') as f: self.concepts_list = json.load(f) - print("--------self.concepts_list: ", self.concepts_list) + print('--------self.concepts_list: ', self.concepts_list) # Adding a modifier token which is optimized self.modifier_token_id = [] From f03898626ec2871de321cbbea36ede8c2ad80c3e Mon Sep 17 00:00:00 2001 From: Jintao Date: Tue, 25 Jul 2023 19:27:11 +0800 Subject: [PATCH 74/87] ckpt output directory ignore *.safetensors (#410) * ckpt output file ignore *.safetensors * 
update --- examples/pytorch/llm/_common.py | 22 +++++++++++++--------- examples/pytorch/llm/llm_infer.py | 13 +++++++------ examples/pytorch/llm/llm_sft.py | 11 ++++++++--- examples/pytorch/llm/run_infer.sh | 2 +- modelscope/utils/checkpoint.py | 1 + 5 files changed, 30 insertions(+), 19 deletions(-) diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/_common.py index 86531b0e..b8921581 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/_common.py @@ -394,7 +394,11 @@ def get_model_tokenizer(model_type: str, model, tokenizer = get_chatglm2_model_tokenizer( model_dir, load_model, add_special_token, torch_dtype) elif model_type == 'llama2-7b': - model_dir = snapshot_download('modelscope/Llama-2-7b-ms', 'v1.0.2') + # use `.safetensors` + model_dir = snapshot_download( + 'modelscope/Llama-2-7b-ms', + 'v1.0.2', + ignore_file_pattern=[r'.+\.bin$']) model, tokenizer = get_llama2_model_tokenizer(model_dir, load_model, add_special_token, torch_dtype) @@ -464,13 +468,13 @@ def tensorboard_smoothing(values: List[float], return res -def plot_image(tb_dir: str, - smooth_key: List[str], - smooth_val: float = 0.9, - figsize: Tuple[int, int] = (8, 5), - dpi: int = 100) -> None: - image_dir = os.path.join(os.path.dirname(tb_dir), 'images') - os.makedirs(image_dir, exist_ok=True) +def plot_images(tb_dir: str, + smooth_key: List[str], + smooth_val: float = 0.9, + figsize: Tuple[int, int] = (8, 5), + dpi: int = 100) -> None: + images_dir = os.path.join(os.path.dirname(tb_dir), 'images') + os.makedirs(images_dir, exist_ok=True) fname = os.listdir(tb_dir)[0] tb_path = os.path.join(tb_dir, fname) @@ -492,7 +496,7 @@ def plot_image(tb_dir: str, ax.plot(steps, values_s, color=COLOR_S) else: ax.plot(steps, values, color=COLOR_S) - fpath = os.path.join(image_dir, k.replace('/', '_')) + fpath = os.path.join(images_dir, k.replace('/', '_')) plt.savefig(fpath, dpi=dpi, bbox_inches='tight') diff --git a/examples/pytorch/llm/llm_infer.py b/examples/pytorch/llm/llm_infer.py index a83dd6fe..8b9c1bb1 100644 --- a/examples/pytorch/llm/llm_infer.py +++ b/examples/pytorch/llm/llm_infer.py @@ -13,10 +13,10 @@ class InferArguments: }) sft_type: str = field( default='lora', metadata={'choices': ['lora', 'full']}) - ckpt_fpath: str = '/path/to/your/iter_xxx.pth' + ckpt_path: str = '/path/to/your/iter_xxx.pth' eval_human: bool = False # False: eval test_dataset data_sample: Optional[int] = None - # sft_type: lora + lora_target_modules: Optional[List[str]] = None lora_rank: int = 8 lora_alpha: int = 32 @@ -38,8 +38,9 @@ class InferArguments: else: raise ValueError(f'model_type: {self.model_type}') - if not os.path.isfile(self.ckpt_fpath): - raise ValueError(f'Please enter a valid fpath: {self.ckpt_fpath}') + if not os.path.isfile(self.ckpt_path): + raise ValueError( + f'Please enter a valid ckpt_path: {self.ckpt_path}') def parse_args() -> InferArguments: @@ -69,11 +70,11 @@ def llm_infer(args: InferArguments) -> None: rank=args.lora_rank, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout_p, - pretrained_weights=args.ckpt_fpath) + pretrained_weights=args.ckpt_path) logger.info(f'lora_config: {lora_config}') Swift.prepare_model(model, lora_config) elif args.sft_type == 'full': - state_dict = torch.load(args.ckpt_fpath, map_location='cpu') + state_dict = torch.load(args.ckpt_path, map_location='cpu') model.load_state_dict(state_dict) else: raise ValueError(f'args.sft_type: {args.sft_type}') diff --git a/examples/pytorch/llm/llm_sft.py b/examples/pytorch/llm/llm_sft.py index 
eb830abf..07f1fd5e 100644 --- a/examples/pytorch/llm/llm_sft.py +++ b/examples/pytorch/llm/llm_sft.py @@ -110,7 +110,7 @@ def llm_sft(args: SftArguments) -> None: if args.gradient_checkpoint: # baichuan-13b does not implement the `get_input_embeddings` function if args.model_type == 'baichuan-13b': - model.__class__.get_input_embeddings = MethodType( + model.get_input_embeddings = MethodType( lambda self: self.model.embed_tokens, model) model.gradient_checkpointing_enable() model.enable_input_require_grads() @@ -239,6 +239,11 @@ def llm_sft(args: SftArguments) -> None: cfg.update(config) return cfg + device_kwargs = {} + if torch.cuda.device_count() > 1: + # No placement for model, leave the model to `device_map` + device_kwargs['device'] = 'cpu' + trainer = EpochBasedTrainer( model=model, cfg_file=cfg_file, @@ -247,15 +252,15 @@ def llm_sft(args: SftArguments) -> None: eval_dataset=val_dataset, remove_unused_data=True, seed=42, - device='cpu', # No placement for model, leave the model to `device_map` cfg_modify_fn=cfg_modify_fn, + **device_kwargs, ) trainer.train() # ### Visualization tb_dir = os.path.join(work_dir, 'tensorboard_output') - plot_image(tb_dir, ['loss'], 0.9) + plot_images(tb_dir, ['loss'], 0.9) if __name__ == '__main__': diff --git a/examples/pytorch/llm/run_infer.sh b/examples/pytorch/llm/run_infer.sh index 5bb008a8..efe48958 100644 --- a/examples/pytorch/llm/run_infer.sh +++ b/examples/pytorch/llm/run_infer.sh @@ -1,5 +1,5 @@ python llm_infer.py \ --device 0 \ --model_type llama2-7b \ - --ckpt_fpath "runs/llama2-7b/vx_xxx/output_best/pytorch_model.bin" \ + --ckpt_path "runs/llama2-7b/vx_xxx/output_best/pytorch_model.bin" \ --eval_human true diff --git a/modelscope/utils/checkpoint.py b/modelscope/utils/checkpoint.py index bbde6034..147b80e9 100644 --- a/modelscope/utils/checkpoint.py +++ b/modelscope/utils/checkpoint.py @@ -622,6 +622,7 @@ def save_pretrained(model, origin_file_to_be_ignored = [save_checkpoint_name] ignore_file_set = set(origin_file_to_be_ignored) ignore_file_set.add(ModelFile.CONFIGURATION) + ignore_file_set.add('*.safetensors') ignore_file_set.add('.*') if hasattr(model, 'model_dir') and model.model_dir is not None and is_master(): From 0db3d1d53bf888b66a469ba349445efc20972675 Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Tue, 25 Jul 2023 19:28:00 +0800 Subject: [PATCH 75/87] Fix bug of amp and device_map (#397) * fix amp * remove useless code * Fix bug --- examples/pytorch/baichuan/finetune_baichuan.py | 4 +--- examples/pytorch/chatglm6b/finetune.py | 6 +----- examples/pytorch/llm_agent/baichuan_sft.ipynb | 1 - examples/pytorch/llm_agent/chatglm2_sft.ipynb | 1 - modelscope/pipelines/base.py | 3 ++- modelscope/trainers/trainer.py | 5 +++-- modelscope/utils/torch_utils.py | 5 +++++ 7 files changed, 12 insertions(+), 13 deletions(-) diff --git a/examples/pytorch/baichuan/finetune_baichuan.py b/examples/pytorch/baichuan/finetune_baichuan.py index c7a240f9..2788302e 100644 --- a/examples/pytorch/baichuan/finetune_baichuan.py +++ b/examples/pytorch/baichuan/finetune_baichuan.py @@ -226,9 +226,7 @@ kwargs = dict( train_dataset=train_dataset, eval_dataset=validation_dataset, seed=args.seed, - cfg_modify_fn=cfg_modify_fn, - # No placement for model, leave the model to `device_map` - device='cpu') + cfg_modify_fn=cfg_modify_fn) trainer: EpochBasedTrainer = build_trainer( name=args.trainer, default_args=kwargs) diff --git a/examples/pytorch/chatglm6b/finetune.py b/examples/pytorch/chatglm6b/finetune.py index 
feed76f2..a98a576c 100644 --- a/examples/pytorch/chatglm6b/finetune.py +++ b/examples/pytorch/chatglm6b/finetune.py @@ -206,11 +206,8 @@ model_config['model']['prefix_projection'] = args.prefix_projection tokenizer = ChatGLMTokenizer.from_pretrained(model_dir, trust_remote_code=True) device_map_kwargs = {} -device_kwargs = {} if args.use_lora != 0 and torch.cuda.device_count() > 1: device_map_kwargs['device_map'] = 'auto' - # No placement for model, leave the model to `device_map` - device_kwargs['device'] = 'cpu' model = Model.from_pretrained( model_dir, cfg_dict=model_config, **device_map_kwargs) @@ -396,7 +393,6 @@ trainer = Seq2SeqTrainer( seed=args.seed, data_collator=data_collator, remove_unused_data=True, - cfg_modify_fn=cfg_modify_fn, - **device_kwargs) + cfg_modify_fn=cfg_modify_fn) trainer.tokenizer = tokenizer trainer.train() diff --git a/examples/pytorch/llm_agent/baichuan_sft.ipynb b/examples/pytorch/llm_agent/baichuan_sft.ipynb index 75a9240e..6c41ff25 100644 --- a/examples/pytorch/llm_agent/baichuan_sft.ipynb +++ b/examples/pytorch/llm_agent/baichuan_sft.ipynb @@ -1698,7 +1698,6 @@ " eval_dataset=val_dataset,\n", " remove_unused_data=True,\n", " seed=42,\n", - " device='cpu', # No placement for model, leave the model to `device_map`\n", " cfg_modify_fn=cfg_modify_fn,\n", ")\n", "\n", diff --git a/examples/pytorch/llm_agent/chatglm2_sft.ipynb b/examples/pytorch/llm_agent/chatglm2_sft.ipynb index 4810e4b9..f1943086 100644 --- a/examples/pytorch/llm_agent/chatglm2_sft.ipynb +++ b/examples/pytorch/llm_agent/chatglm2_sft.ipynb @@ -1797,7 +1797,6 @@ " eval_dataset=val_dataset,\n", " remove_unused_data=True,\n", " seed=42,\n", - " device='cpu', # No placement for model, leave the model to `device_map`\n", " cfg_modify_fn=cfg_modify_fn,\n", ")\n", "\n", diff --git a/modelscope/pipelines/base.py b/modelscope/pipelines/base.py index 48d328d9..5968dba8 100644 --- a/modelscope/pipelines/base.py +++ b/modelscope/pipelines/base.py @@ -144,7 +144,8 @@ class Pipeline(ABC): if not isinstance(model, torch.nn.Module): return model.eval() - if self.device_map is None: + from modelscope.utils.torch_utils import is_on_same_device + if is_on_same_device(model): model.to(self.device) if not self._model_prepare: diff --git a/modelscope/trainers/trainer.py b/modelscope/trainers/trainer.py index 2e62be89..2dc300c5 100644 --- a/modelscope/trainers/trainer.py +++ b/modelscope/trainers/trainer.py @@ -43,7 +43,8 @@ from modelscope.utils.logger import get_logger from modelscope.utils.registry import build_from_cfg from modelscope.utils.torch_utils import (compile_model, get_dist_info, get_local_rank, init_dist, is_dist, - is_master, set_random_seed) + is_master, is_on_same_device, + set_random_seed) from ..swift import Swift from .base import BaseTrainer from .builder import TRAINERS @@ -257,7 +258,7 @@ class EpochBasedTrainer(BaseTrainer): # If not working in parallel scenario, put model to device as a default logic. 
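# [Editor's sketch, not part of the patch] A minimal illustration of the device-placement
# guard this change relies on: a model dispatched across GPUs via `device_map='auto'` has
# parameters on several devices, so the trainer/pipeline must not call `.to(device)` on it
# again. The helper name below is hypothetical; the patch adds the equivalent
# `is_on_same_device` to modelscope/utils/torch_utils.py.
import torch

def _params_on_one_device(model: torch.nn.Module) -> bool:
    # True only when every parameter already lives on a single device
    return len({p.device for p in model.parameters()}) == 1

toy_model = torch.nn.Linear(4, 4)  # stand-in for a real model
if _params_on_one_device(toy_model):
    toy_model.to('cuda' if torch.cuda.is_available() else 'cpu')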
device_name = self.device if self.device is not None else 'gpu' self.device = create_device(device_name) - if self.device.type == 'cuda': + if self.device.type == 'cuda' and is_on_same_device(self.model): self.model.to(self.device) self.print_cfg() diff --git a/modelscope/utils/torch_utils.py b/modelscope/utils/torch_utils.py index 1b2e1094..cb87c788 100644 --- a/modelscope/utils/torch_utils.py +++ b/modelscope/utils/torch_utils.py @@ -354,3 +354,8 @@ def all_gather(data, group=None): data_list.append(pickle.loads(buffer)) return data_list + + +def is_on_same_device(model: torch.nn.Module) -> bool: + device_set = set(map(lambda p: p.device, model.parameters())) + return len(device_set) == 1 From 41c2c95083e72d07b11f98e2211b501d35418366 Mon Sep 17 00:00:00 2001 From: "Xingjun.Wang" Date: Tue, 25 Jul 2023 22:18:25 +0800 Subject: [PATCH 76/87] dataset fix version (#413) * modify datasets version --- requirements/framework.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/framework.txt b/requirements/framework.txt index d98765bf..e748026e 100644 --- a/requirements/framework.txt +++ b/requirements/framework.txt @@ -1,6 +1,6 @@ addict attrs -datasets +datasets>=2.8.0,<=2.13.0 einops filelock>=3.3.0 gast>=0.2.2 From c0b62cb0fba24a81d6130cae0d00205ce234a9e6 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 15:03:46 +0800 Subject: [PATCH 77/87] fix bugs --- .../stable_diffusion_pipeline.py | 6 +- .../custom_diffusion_trainer.py | 65 ++++++++++--------- 2 files changed, 39 insertions(+), 32 deletions(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py index a95e3555..7a2b6c1d 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py @@ -63,8 +63,10 @@ class StableDiffusionPipeline(DiffusersPipeline): assert os.path.exists(custom_dir), f"{custom_dir} isn't exist" self.pipeline.unet.load_attn_procs( custom_dir, weight_name='pytorch_custom_diffusion_weights.bin') - self.pipeline.load_textual_inversion( - custom_dir, weight_name=f'{modifier_token}.bin') + modifier_token = modifier_token.split('+') + for modifier_token_name in modifier_token: + self.pipeline.load_textual_inversion( + custom_dir, weight_name=f'{modifier_token_name}.bin') def preprocess(self, inputs: Dict[str, Any], **kwargs) -> Dict[str, Any]: return inputs diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 2421d145..84a3eeb4 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -59,7 +59,9 @@ class CustomCheckpointProcessor(CheckpointProcessor): learned_embeds = trainer.model.text_encoder.get_input_embeddings( ).weight - for x, y in zip([self.modifier_token_id], self.modifier_token): + if not isinstance(self.modifier_token_id, list): + self.modifier_token_id = [self.modifier_token_id] + for x, y in zip(self.modifier_token_id, self.modifier_token): learned_embeds_dict = {} learned_embeds_dict[y] = learned_embeds[x] torch.save(learned_embeds_dict, f'{output_dir}/{y}.bin') @@ -276,11 +278,11 @@ class 
CustomDiffusionTrainer(EpochBasedTrainer): """ self.with_prior_preservation = kwargs.pop('with_prior_preservation', True) - self.instance_prompt = kwargs.pop('instance_prompt', + instance_prompt = kwargs.pop('instance_prompt', 'a photo of sks dog') - self.class_prompt = kwargs.pop('class_prompt', 'dog') + class_prompt = kwargs.pop('class_prompt', 'dog') + class_data_dir = kwargs.pop('class_data_dir', '/tmp/class_data') self.real_prior = kwargs.pop('real_prior', False) - self.class_data_dir = kwargs.pop('class_data_dir', '/tmp/class_data') self.num_class_images = kwargs.pop('num_class_images', 200) self.resolution = kwargs.pop('resolution', 512) self.prior_loss_weight = kwargs.pop('prior_loss_weight', 1.0) @@ -294,26 +296,27 @@ class CustomDiffusionTrainer(EpochBasedTrainer): instance_data_name = kwargs.pop( 'instance_data_name', 'buptwq/lora-stable-diffusion-finetune-dog') - # Extract downloaded image folder - if os.path.isdir(instance_data_name): - self.instance_data_dir = instance_data_name - else: - ds = MsDataset.load(instance_data_name, split='train') - self.instance_data_dir = os.path.dirname( - next(iter(ds))['Target:FILE']) - if self.concepts_list is None: self.concepts_list = [{ - 'instance_prompt': self.instance_prompt, - 'class_prompt': self.class_prompt, - 'instance_data_dir': self.instance_data_dir, - 'class_data_dir': self.class_data_dir, + 'instance_prompt': instance_prompt, + 'class_prompt': class_prompt, + 'instance_data_dir': instance_data_dir, + 'class_data_dir': class_data_dir, }] else: with open(self.concepts_list, 'r') as f: self.concepts_list = json.load(f) print('--------self.concepts_list: ', self.concepts_list) + # Extract downloaded image folder + for concept in self.concepts_list: + if os.path.isdir(instance_data_name): + concept['instance_data_dir'] = instance_data_name + else: + ds = MsDataset.load(instance_data_name, split='train') + concept['instance_data_dir'] = os.path.dirname( + next(iter(ds))['Target:FILE']) + # Adding a modifier token which is optimized self.modifier_token_id = [] initializer_token_id = [] @@ -437,20 +440,22 @@ class CustomDiffusionTrainer(EpochBasedTrainer): # Check for conflicts and conflicts if self.with_prior_preservation: - if self.class_data_dir is None: - raise ValueError( - 'You must specify a data directory for class images.') - if self.class_prompt is None: - raise ValueError('You must specify prompt for class images.') + for concept in self.concepts_list: + if concept['class_data_dir'] is None: + raise ValueError( + 'You must specify a data directory for class images.') + if concept['class_prompt'] is None: + raise ValueError('You must specify prompt for class images.') else: - if self.class_data_dir is not None: - warnings.warn( - 'You need not use --class_data_dir without --with_prior_preservation.' - ) - if self.class_prompt is not None: - warnings.warn( - 'You need not use --class_prompt without --with_prior_preservation.' - ) + for concept in self.concepts_list: + if concept['class_data_dir'] is not None: + warnings.warn( + 'You need not use --class_data_dir without --with_prior_preservation.' + ) + if concept['class_prompt'] is not None: + warnings.warn( + 'You need not use --class_prompt without --with_prior_preservation.' + ) # Generate class images if prior preservation is enabled. 
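# [Editor's sketch, not part of the patch] An assumed example of the multi-concept JSON
# consumed via `--concepts_list`. The keys mirror the single-concept fallback built above
# (instance_prompt / class_prompt / instance_data_dir / class_data_dir); all prompts and
# paths here are illustrative placeholders.
import json

example_concepts = [
    {
        'instance_prompt': 'photo of a sks dog',
        'class_prompt': 'dog',
        'instance_data_dir': './data/dog_instances',
        'class_data_dir': './tmp/class_data/dog',
    },
    {
        'instance_prompt': 'photo of a sks cat',
        'class_prompt': 'cat',
        'instance_data_dir': './data/cat_instances',
        'class_data_dir': './tmp/class_data/cat',
    },
]
with open('concept_list.json', 'w') as f:
    json.dump(example_concepts, f, indent=2)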
if self.with_prior_preservation: @@ -529,7 +534,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): num_new_images = self.num_class_images - cur_class_images - sample_dataset = PromptDataset(self.class_prompt, + sample_dataset = PromptDataset(concept['class_prompt'], num_new_images) sample_dataloader = torch.utils.data.DataLoader( sample_dataset, batch_size=self.sample_batch_size) From a7678bbb774c11b2e28367217c79536b67e86416 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 15:06:42 +0800 Subject: [PATCH 78/87] precommit --- .../custom_diffusion/custom_diffusion_trainer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 84a3eeb4..7de3cb9e 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -278,8 +278,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): """ self.with_prior_preservation = kwargs.pop('with_prior_preservation', True) - instance_prompt = kwargs.pop('instance_prompt', - 'a photo of sks dog') + instance_prompt = kwargs.pop('instance_prompt', 'a photo of sks dog') class_prompt = kwargs.pop('class_prompt', 'dog') class_data_dir = kwargs.pop('class_data_dir', '/tmp/class_data') self.real_prior = kwargs.pop('real_prior', False) @@ -445,7 +444,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): raise ValueError( 'You must specify a data directory for class images.') if concept['class_prompt'] is None: - raise ValueError('You must specify prompt for class images.') + raise ValueError( + 'You must specify prompt for class images.') else: for concept in self.concepts_list: if concept['class_data_dir'] is not None: From 70da8b7809ac5be497e6db48ad996e8f758f6faa Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 15:54:37 +0800 Subject: [PATCH 79/87] fix bugs --- .../custom_diffusion_trainer.py | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 7de3cb9e..08987215 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -295,6 +295,15 @@ class CustomDiffusionTrainer(EpochBasedTrainer): instance_data_name = kwargs.pop( 'instance_data_name', 'buptwq/lora-stable-diffusion-finetune-dog') + # Extract downloaded image folder + if self.concepts_list is None: + if os.path.isdir(instance_data_name): + instance_data_dir = instance_data_name + else: + ds = MsDataset.load(instance_data_name, split='train') + instance_data_dir = os.path.dirname(next(iter(ds))['Target:FILE']) + + # construct concept list if self.concepts_list is None: self.concepts_list = [{ 'instance_prompt': instance_prompt, @@ -307,14 +316,11 @@ class CustomDiffusionTrainer(EpochBasedTrainer): self.concepts_list = json.load(f) print('--------self.concepts_list: ', self.concepts_list) - # Extract downloaded image folder for concept in self.concepts_list: - if os.path.isdir(instance_data_name): - concept['instance_data_dir'] = instance_data_name - else: - ds = MsDataset.load(instance_data_name, split='train') - concept['instance_data_dir'] = os.path.dirname( 
- next(iter(ds))['Target:FILE']) + if not os.path.exists(concept['class_data_dir']): + os.makedirs(concept['class_data_dir']) + if not os.path.exists(concept['instance_data_dir']): + raise Exception(f"instance dataset {concept['instance_data_dir']} does not exist.") # Adding a modifier token which is optimized self.modifier_token_id = [] @@ -518,6 +524,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): """ for i, concept in enumerate(self.concepts_list): class_images_dir = Path(concept['class_data_dir']) + print("-------class_images_dir: ", class_images_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True, exist_ok=True) From 25d67a0b83d6ded1497bca46cbd580e7c4c8eec2 Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 16:08:24 +0800 Subject: [PATCH 80/87] pre --- .../custom_diffusion/custom_diffusion_trainer.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index 08987215..c07f2e06 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -301,7 +301,8 @@ class CustomDiffusionTrainer(EpochBasedTrainer): instance_data_dir = instance_data_name else: ds = MsDataset.load(instance_data_name, split='train') - instance_data_dir = os.path.dirname(next(iter(ds))['Target:FILE']) + instance_data_dir = os.path.dirname( + next(iter(ds))['Target:FILE']) # construct concept list if self.concepts_list is None: @@ -320,7 +321,9 @@ class CustomDiffusionTrainer(EpochBasedTrainer): if not os.path.exists(concept['class_data_dir']): os.makedirs(concept['class_data_dir']) if not os.path.exists(concept['instance_data_dir']): - raise Exception(f"instance dataset {concept['instance_data_dir']} does not exist.") + raise Exception( + f"instance dataset {concept['instance_data_dir']} does not exist." + ) # Adding a modifier token which is optimized self.modifier_token_id = [] @@ -524,7 +527,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): """ for i, concept in enumerate(self.concepts_list): class_images_dir = Path(concept['class_data_dir']) - print("-------class_images_dir: ", class_images_dir) + print('-------class_images_dir: ', class_images_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True, exist_ok=True) From 99aa7079952afa677b033f35fdb5df2c666826ee Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 16:35:40 +0800 Subject: [PATCH 81/87] fix bugs --- examples/pytorch/stable_diffusion/custom/run_train_custom.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh index 3e1ff024..fab8e059 100644 --- a/examples/pytorch/stable_diffusion/custom/run_train_custom.sh +++ b/examples/pytorch/stable_diffusion/custom/run_train_custom.sh @@ -1,5 +1,5 @@ PYTHONPATH=. torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_diffusion_custom.py \ - --model 'AI-ModelScope/stable-diffusion-v1-5' \ + --model 'AI-ModelScope/stable-diffusion-v2-1' \ --model_revision 'v1.0.9' \ --class_prompt "dog" \ --instance_prompt="photo of a dog" \ @@ -7,7 +7,6 @@ PYTHONPATH=. 
torchrun examples/pytorch/stable_diffusion/custom/finetune_stable_d --class_data_dir './tmp/class_data' \ --train_dataset_name 'buptwq/lora-stable-diffusion-finetune-dog' \ --max_epochs 250 \ - --concepts_list '/mnt/user/E-yijing.wq-401594/github/custom_diffusion/modelscope/json/concept_list.json' \ --modifier_token "+" \ --num_class_images=200 \ --save_ckpt_strategy 'by_epoch' \ From 3a629bb2b43ffe2871d9b8ffe5fc83016cbeef5a Mon Sep 17 00:00:00 2001 From: XDUWQ <1300964705@qq.com> Date: Wed, 26 Jul 2023 16:58:58 +0800 Subject: [PATCH 82/87] pre --- .../multi_modal/custom_diffusion/custom_diffusion_trainer.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py index c07f2e06..28140fb2 100644 --- a/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py +++ b/modelscope/trainers/multi_modal/custom_diffusion/custom_diffusion_trainer.py @@ -272,7 +272,7 @@ class CustomDiffusionTrainer(EpochBasedTrainer): sample_batch_size: Batch size (per device) for sampling images. train_batch_size: Batch size (per device) for the training dataloader. center_crop: execute center crop or not. - concepts_list: Path to json containing multiple concepts, will overwrite parameters like instance_prompt etc. + concepts_list: Path to json containing multiple concepts, will overwrite parameters. instance_data_name: The instance data local dir or online ID. """ @@ -315,7 +315,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): else: with open(self.concepts_list, 'r') as f: self.concepts_list = json.load(f) - print('--------self.concepts_list: ', self.concepts_list) for concept in self.concepts_list: if not os.path.exists(concept['class_data_dir']): @@ -527,7 +526,6 @@ class CustomDiffusionTrainer(EpochBasedTrainer): """ for i, concept in enumerate(self.concepts_list): class_images_dir = Path(concept['class_data_dir']) - print('-------class_images_dir: ', class_images_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True, exist_ok=True) From ba4db97507c5343f430240f2150e656076dab3c7 Mon Sep 17 00:00:00 2001 From: Zackary Shen Date: Wed, 26 Jul 2023 17:20:13 +0800 Subject: [PATCH 83/87] upload cv_nerf_3d-reconstruction_vector-quantize-compression (#407) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add vq_compression model * add vq_compression model * check pre-commit for lint test * fix by flake8 * update * update * update * the last update * the laast update * update test_level>=0 --------- Co-authored-by: 剑匣 --- modelscope/metainfo.py | 6 + .../cv/nerf_recon_vq_compression/__init__.py | 24 + .../dataloader/__init__.py | 12 + .../dataloader/blender.py | 173 ++++++ .../dataloader/llff.py | 278 +++++++++ .../dataloader/nsvf.py | 182 ++++++ .../dataloader/ray_utils.py | 295 +++++++++ .../dataloader/tankstemple.py | 249 ++++++++ .../nerf_recon_vq_compression.py | 116 ++++ .../network/__init__.py | 2 + .../network/tensoRF.py | 579 ++++++++++++++++++ .../network/tensoRF_VQ.py | 292 +++++++++ .../network/tensorBase.py | 526 ++++++++++++++++ .../network/weighted_vq.py | 504 +++++++++++++++ .../cv/nerf_recon_vq_compression/renderer.py | 211 +++++++ .../cv/nerf_recon_vq_compression/utils.py | 269 ++++++++ modelscope/outputs/outputs.py | 1 + .../cv/nerf_recon_vq_compression_pipeline.py | 94 +++ modelscope/utils/constant.py | 1 + tests/pipelines/test_vqrf.py | 90 +++ 20 
files changed, 3904 insertions(+) create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/__init__.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/__init__.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/blender.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/llff.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/nsvf.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/ray_utils.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/dataloader/tankstemple.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/network/__init__.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF_VQ.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/network/tensorBase.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/network/weighted_vq.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/renderer.py create mode 100644 modelscope/models/cv/nerf_recon_vq_compression/utils.py create mode 100644 modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py create mode 100644 tests/pipelines/test_vqrf.py diff --git a/modelscope/metainfo.py b/modelscope/metainfo.py index ab22b9ba..c2f5f2e6 100644 --- a/modelscope/metainfo.py +++ b/modelscope/metainfo.py @@ -111,6 +111,7 @@ class Models(object): image_quality_assessment_degradation = 'image-quality-assessment-degradation' m2fp = 'm2fp' nerf_recon_acc = 'nerf-recon-acc' + nerf_recon_vq_compression = 'nerf-recon-vq-compression' bts_depth_estimation = 'bts-depth-estimation' vision_efficient_tuning = 'vision-efficient-tuning' bad_image_detecting = 'bad-image-detecting' @@ -404,6 +405,7 @@ class Pipelines(object): image_human_parsing = 'm2fp-image-human-parsing' object_detection_3d_depe = 'object-detection-3d-depe' nerf_recon_acc = 'nerf-recon-acc' + nerf_recon_vq_compression = 'nerf-recon-vq-compression' bad_image_detecting = 'bad-image-detecting' controllable_image_generation = 'controllable-image-generation' fast_instance_segmentation = 'fast-instance-segmentation' @@ -844,6 +846,9 @@ DEFAULT_MODEL_FOR_PIPELINE = { 'damo/cv_mobilenet-v2_bad-image-detecting'), Tasks.nerf_recon_acc: (Pipelines.nerf_recon_acc, 'damo/cv_nerf-3d-reconstruction-accelerate_damo'), + Tasks.nerf_recon_vq_compression: ( + Pipelines.nerf_recon_vq_compression, + 'damo/cv_nerf-3d-reconstruction-vq-compression_damo'), Tasks.siamese_uie: (Pipelines.siamese_uie, 'damo/nlp_structbert_siamese-uie_chinese-base'), Tasks.pedestrian_attribute_recognition: ( @@ -983,6 +988,7 @@ class Preprocessors(object): ocr_detection = 'ocr-detection' bad_image_detecting_preprocessor = 'bad-image-detecting-preprocessor' nerf_recon_acc_preprocessor = 'nerf-recon-acc-preprocessor' + nerf_recon_vq_compression_preprocessor = 'nerf-recon-vq-compression-preprocessor' controllable_image_generation_preprocessor = 'controllable-image-generation-preprocessor' image_classification_preprocessor = 'image-classification-preprocessor' diff --git a/modelscope/models/cv/nerf_recon_vq_compression/__init__.py b/modelscope/models/cv/nerf_recon_vq_compression/__init__.py new file mode 100644 index 00000000..40bdbffb --- /dev/null +++ 
b/modelscope/models/cv/nerf_recon_vq_compression/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from typing import TYPE_CHECKING + +from modelscope.utils.import_utils import LazyImportModule + +if TYPE_CHECKING: + from .nerf_recon_vq_compression import NeRFReconVQCompression + # from .nerf_preprocess import NeRFReconPreprocessor + +else: + _import_structure = { + 'nerf_recon_vq_compression': ['NeRFReconVQCompression'] + } + # _import_structure = {'nerf_preprocess': ['NeRFReconPreprocessor']} + + import sys + + sys.modules[__name__] = LazyImportModule( + __name__, + globals()['__file__'], + _import_structure, + module_spec=__spec__, + extra_objects={}, + ) diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/__init__.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/__init__.py new file mode 100644 index 00000000..4e3a7cd6 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/__init__.py @@ -0,0 +1,12 @@ +from .blender import BlenderDataset +from .llff import LLFFDataset +from .nsvf import NSVF +from .ray_utils import get_rays, ndc_rays_blender +from .tankstemple import TanksTempleDataset + +dataset_dict = { + 'blender': BlenderDataset, + 'llff': LLFFDataset, + 'tankstemple': TanksTempleDataset, + 'nsvf': NSVF +} diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/blender.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/blender.py new file mode 100644 index 00000000..7174514c --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/blender.py @@ -0,0 +1,173 @@ +import os + +import cv2 +import json +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T +from tqdm import tqdm + +from .ray_utils import * + + +def trans_t(t): + return torch.Tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], + [0, 0, 0, 1]]).float() + + +def rot_phi(phi): + return torch.Tensor([[1, 0, 0, 0], [0, np.cos(phi), -np.sin(phi), 0], + [0, np.sin(phi), np.cos(phi), 0], [0, 0, 0, + 1]]).float() + + +def rot_theta(th): + return torch.Tensor([[np.cos(th), 0, -np.sin(th), 0], [0, 1, 0, 0], + [np.sin(th), 0, np.cos(th), 0], [0, 0, 0, + 1]]).float() + + +def pose_spherical(theta, phi, radius): + c2w = trans_t(radius) + c2w = rot_phi(phi / 180. * np.pi) @ c2w + c2w = rot_theta(theta / 180. 
* np.pi) @ c2w + c2w = torch.Tensor( + np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1] + ])) @ c2w + return c2w + + +class BlenderDataset(Dataset): + + def __init__(self, + datadir, + split='train', + downsample=1.0, + is_stack=False, + N_vis=-1): + + self.N_vis = N_vis + self.root_dir = datadir + self.split = split + self.is_stack = is_stack + self.img_wh = (int(800 / downsample), int(800 / downsample)) + self.define_transforms() + + self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], + [0, 0, -1, 0], [0, 0, 0, 1]]) + self.read_meta() + self.define_proj_mat() + + self.white_bg = True + self.near_far = [2.0, 6.0] + + self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3) + self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3) + self.downsample = downsample + + def read_depth(self, filename): + depth = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800) + return depth + + def read_meta(self): + + with open( + os.path.join(self.root_dir, f'transforms_{self.split}.json'), + 'r') as f: + self.meta = json.load(f) + + w, h = self.img_wh + self.focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) + self.focal *= self.img_wh[0] / 800 + + # ray directions for all pixels, same for all images (same H, W, focal) + self.directions = get_ray_directions( + h, w, [self.focal, self.focal]) # (h, w, 3) + self.directions = self.directions / torch.norm( + self.directions, dim=-1, keepdim=True) + self.intrinsics = torch.tensor([[self.focal, 0, w / 2], + [0, self.focal, h / 2], [0, 0, + 1]]).float() + + self.image_paths = [] + self.poses = [] + self.all_rays = [] + self.all_rgbs = [] + self.all_masks = [] + self.all_depth = [] + self.downsample = 1.0 + + img_eval_interval = 1 if self.N_vis < 0 else len( + self.meta['frames']) // self.N_vis + idxs = list(range(0, len(self.meta['frames']), img_eval_interval)) + for i in tqdm(idxs, desc=f'Loading data {self.split} ({len(idxs)})'): + + frame = self.meta['frames'][i] + pose = np.array(frame['transform_matrix']) @ self.blender2opencv + c2w = torch.FloatTensor(pose) + self.poses += [c2w] + + image_path = os.path.join(self.root_dir, + f"{frame['file_path']}.png") + self.image_paths += [image_path] + img = Image.open(image_path) + + if self.downsample != 1.0: + img = img.resize(self.img_wh, Image.LANCZOS) + img = self.transform(img) # (4, h, w) + img = img.view(4, -1).permute(1, 0) # (h*w, 4) RGBA + img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:]) + self.all_rgbs += [img] + + rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3) + self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6) + + self.poses = torch.stack(self.poses) + if not self.is_stack: + self.all_rays = torch.cat(self.all_rays, 0) + self.all_rgbs = torch.cat(self.all_rgbs, 0) + + else: + self.all_rays = torch.stack(self.all_rays, 0) + self.all_rgbs = torch.stack(self.all_rgbs, + 0).reshape(-1, *self.img_wh[::-1], 3) + + def define_transforms(self): + self.transform = T.ToTensor() + + def define_proj_mat(self): + self.proj_mat = self.intrinsics.unsqueeze(0) @ torch.inverse( + self.poses)[:, :3] + + def world2ndc(self, points, lindisp=None): + device = points.device + return (points - self.center.to(device)) / self.radius.to(device) + + def __len__(self): + return len(self.all_rgbs) + + def __getitem__(self, idx): + + if self.split == 'train': # use data in the buffers + sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]} + + 
else: # create data for each image separately + + img = self.all_rgbs[idx] + rays = self.all_rays[idx] + mask = self.all_masks[idx] # for quantity evaluation + + sample = {'rays': rays, 'rgbs': img, 'mask': mask} + return sample + + def get_render_pose(self, N_cameras=120): + render_poses = torch.stack([ + pose_spherical(angle, -30.0, 4.0) + for angle in np.linspace(-180, 180, N_cameras + 1)[:-1] + ], 0) + blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], + [0, 0, 0, 1]]) + return render_poses @ torch.Tensor(blender2opencv).float() diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/llff.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/llff.py new file mode 100644 index 00000000..5d25e358 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/llff.py @@ -0,0 +1,278 @@ +import glob +import os + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T + +from .ray_utils import * + + +def normalize(v): + """Normalize a vector.""" + return v / np.linalg.norm(v) + + +def average_poses(poses): + """ + Calculate the average pose, which is then used to center all poses + using @center_poses. Its computation is as follows: + 1. Compute the center: the average of pose centers. + 2. Compute the z axis: the normalized average z axis. + 3. Compute axis y': the average y axis. + 4. Compute x' = y' cross product z, then normalize it as the x axis. + 5. Compute the y axis: z cross product x. + + Note that at step 3, we cannot directly use y' as y axis since it's + not necessarily orthogonal to z axis. We need to pass from x to y. + Inputs: + poses: (N_images, 3, 4) + Outputs: + pose_avg: (3, 4) the average pose + """ + # 1. Compute the center + center = poses[..., 3].mean(0) # (3) + + # 2. Compute the z axis + z = normalize(poses[..., 2].mean(0)) # (3) + + # 3. Compute axis y' (no need to normalize as it's not the final output) + y_ = poses[..., 1].mean(0) # (3) + + # 4. Compute the x axis + x = normalize(np.cross(z, y_)) # (3) + + # 5. Compute the y axis (as z and x are normalized, y is already of norm 1) + y = np.cross(x, z) # (3) + + pose_avg = np.stack([x, y, z, center], 1) # (3, 4) + + return pose_avg + + +def center_poses(poses, blender2opencv): + """ + Center the poses so that we can use NDC. 
+ See https://github.com/bmild/nerf/issues/34 + Inputs: + poses: (N_images, 3, 4) + Outputs: + poses_centered: (N_images, 3, 4) the centered poses + pose_avg: (3, 4) the average pose + """ + poses = poses @ blender2opencv + pose_avg = average_poses(poses) # (3, 4) + pose_avg_homo = np.eye(4) + pose_avg_homo[: + 3] = pose_avg # convert to homogeneous coordinate for faster computation + pose_avg_homo = pose_avg_homo + # by simply adding 0, 0, 0, 1 as the last row + last_row = np.tile(np.array([0, 0, 0, 1]), + (len(poses), 1, 1)) # (N_images, 1, 4) + poses_homo = \ + np.concatenate([poses, last_row], 1) # (N_images, 4, 4) homogeneous coordinate + + poses_centered = np.linalg.inv( + pose_avg_homo) @ poses_homo # (N_images, 4, 4) + # poses_centered = poses_centered @ blender2opencv + poses_centered = poses_centered[:, :3] # (N_images, 3, 4) + + return poses_centered, pose_avg_homo + + +def viewmatrix(z, up, pos): + vec2 = normalize(z) + vec1_avg = up + vec0 = normalize(np.cross(vec1_avg, vec2)) + vec1 = normalize(np.cross(vec2, vec0)) + m = np.eye(4) + m[:3] = np.stack([-vec0, vec1, vec2, pos], 1) + return m + + +def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, N_rots=2, N=120): + render_poses = [] + rads = np.array(list(rads) + [1.]) + + for theta in np.linspace(0., 2. * np.pi * N_rots, N + 1)[:-1]: + c = np.dot( + c2w[:3, :4], + np.array( + [np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.]) + * rads) + z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.]))) + render_poses.append(viewmatrix(z, up, c)) + return render_poses + + +def get_spiral(c2ws_all, near_fars, rads_scale=1.0, N_views=120): + # center pose + c2w = average_poses(c2ws_all) + + # Get average pose + up = normalize(c2ws_all[:, :3, 1].sum(0)) + + # Find a reasonable "focus depth" for this dataset + dt = 0.75 + close_depth, inf_depth = near_fars.min() * 0.9, near_fars.max() * 5.0 + focal = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth)) + + # Get radii for spiral path + zdelta = near_fars.min() * .2 + tt = c2ws_all[:, :3, 3] + rads = np.percentile(np.abs(tt), 90, 0) * rads_scale + render_poses = render_path_spiral( + c2w, up, rads, focal, zdelta, zrate=.5, N=N_views) + return np.stack(render_poses) + + +class LLFFDataset(Dataset): + + def __init__(self, + datadir, + split='train', + downsample=4, + is_stack=False, + hold_every=8): + """ + spheric_poses: whether the images are taken in a spheric inward-facing manner + default: False (forward-facing) + val_num: number of val images (used for multigpu training, validate same image for all gpus) + """ + + self.root_dir = datadir + self.split = split + self.hold_every = hold_every + self.is_stack = is_stack + self.downsample = downsample + self.define_transforms() + + self.blender2opencv = np.eye(4) + # np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) + self.read_meta() + self.white_bg = False + + # self.near_far = [np.min(self.near_fars[:,0]),np.max(self.near_fars[:,1])] + self.near_far = [0.0, 1.0] + self.scene_bbox = torch.tensor([[-1.5, -1.67, -1.0], [1.5, 1.67, 1.0]]) + # self.scene_bbox = torch.tensor([[-1.67, -1.5, -1.0], [1.67, 1.5, 1.0]]) + self.center = torch.mean(self.scene_bbox, dim=0).float().view(1, 1, 3) + self.invradius = 1.0 / (self.scene_bbox[1] - self.center).float().view( + 1, 1, 3) + + def read_meta(self): + + poses_bounds = np.load( + os.path.join(self.root_dir, 'poses_bounds.npy')) # (N_images, 17) + self.image_paths = sorted( + glob.glob(os.path.join(self.root_dir, 'images_4/*'))) + # load full resolution 
image then resize + if self.split in ['train', 'test']: + assert len(poses_bounds) == len(self.image_paths), \ + 'Mismatch between number of images and number of poses! Please rerun COLMAP!' + + poses = poses_bounds[:, :15].reshape(-1, 3, 5) # (N_images, 3, 5) + self.near_fars = poses_bounds[:, -2:] # (N_images, 2) + # hwf = poses[:, :, -1] + + # Step 1: rescale focal length according to training resolution + H, W, self.focal = poses[ + 0, :, -1] # original intrinsics, same for all images + self.img_wh = np.array( + [int(W / self.downsample), + int(H / self.downsample)]) + self.focal = [ + self.focal * self.img_wh[0] / W, self.focal * self.img_wh[1] / H + ] + + # Step 2: correct poses + # Original poses has rotation in form "down right back", change to "right up back" + # See https://github.com/bmild/nerf/issues/34 + poses = np.concatenate( + [poses[..., 1:2], -poses[..., :1], poses[..., 2:4]], -1) + # (N_images, 3, 4) exclude H, W, focal + self.poses, self.pose_avg = center_poses(poses, self.blender2opencv) + + # Step 3: correct scale so that the nearest depth is at a little more than 1.0 + # See https://github.com/bmild/nerf/issues/34 + near_original = self.near_fars.min() + scale_factor = near_original * 0.75 # 0.75 is the default parameter + # the nearest depth is at 1/0.75=1.33 + self.near_fars /= scale_factor + self.poses[..., 3] /= scale_factor + + # build rendering path + N_views = 120 + # N_rots = 2 + # tt = self.poses[:, :3, 3] # ptstocam(poses[:3,3,:].T, c2w).T + # up = normalize(self.poses[:, :3, 1].sum(0)) + # rads = np.percentile(np.abs(tt), 90, 0) + + self.render_path = get_spiral( + self.poses, self.near_fars, N_views=N_views) + + # distances_from_center = np.linalg.norm(self.poses[..., 3], axis=1) + # val_idx = np.argmin(distances_from_center) # choose val image as the closest to + # center image + + # ray directions for all pixels, same for all images (same H, W, focal) + W, H = self.img_wh + self.directions = get_ray_directions_blender(H, W, + self.focal) # (H, W, 3) + + # average_pose = average_poses(self.poses) + # dists = np.sum( + # np.square(average_pose[:3, 3] - self.poses[:, :3, 3]), -1) + i_test = np.arange(0, self.poses.shape[0], + self.hold_every) # [np.argmin(dists)] + img_list = i_test if self.split != 'train' else list( + set(np.arange(len(self.poses))) - set(i_test)) + + # use first N_images-1 to train, the LAST is val + self.all_rays = [] + self.all_rgbs = [] + for i in img_list: + image_path = self.image_paths[i] + c2w = torch.FloatTensor(self.poses[i]) + + img = Image.open(image_path).convert('RGB') + if self.downsample != 1.0: + img = img.resize(self.img_wh, Image.LANCZOS) + img = self.transform(img) # (3, h, w) + + img = img.view(3, -1).permute(1, 0) # (h*w, 3) RGB + self.all_rgbs += [img] + rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3) + rays_o, rays_d = ndc_rays_blender(H, W, self.focal[0], 1.0, rays_o, + rays_d) + # viewdir = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) + + self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6) + + if not self.is_stack: + self.all_rays = torch.cat(self.all_rays, + 0) # (len(self.meta['frames])*h*w, 3) + self.all_rgbs = torch.cat(self.all_rgbs, + 0) # (len(self.meta['frames])*h*w,3) + else: + self.all_rays = torch.stack(self.all_rays, + 0) # (len(self.meta['frames]),h,w, 3) + self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape( + -1, *self.img_wh[::-1], 3) # (len(self.meta['frames]),h,w,3) + + def define_transforms(self): + self.transform = T.ToTensor() + + def __len__(self): + return 
len(self.all_rgbs) + + def __getitem__(self, idx): + + sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]} + + return sample + + def get_render_pose(self, N_cameras=120): + return get_spiral(self.poses, self.near_fars, N_views=N_cameras) diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/nsvf.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/nsvf.py new file mode 100644 index 00000000..e3cdbafa --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/nsvf.py @@ -0,0 +1,182 @@ +import os + +import torch +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T +from tqdm import tqdm + +from .ray_utils import * + + +def trans_t(t): + return torch.Tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], + [0, 0, 0, 1]]).float() + + +def rot_phi(phi): + return torch.Tensor([[1, 0, 0, 0], [0, np.cos(phi), -np.sin(phi), 0], + [0, np.sin(phi), np.cos(phi), 0], [0, 0, 0, + 1]]).float() + + +def rot_theta(th): + return torch.Tensor([[np.cos(th), 0, -np.sin(th), 0], [0, 1, 0, 0], + [np.sin(th), 0, np.cos(th), 0], [0, 0, 0, + 1]]).float() + + +def pose_spherical(theta, phi, radius): + c2w = trans_t(radius) + c2w = rot_phi(phi / 180. * np.pi) @ c2w + c2w = rot_theta(theta / 180. * np.pi) @ c2w + c2w = torch.Tensor( + np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1] + ])) @ c2w + return c2w + + +class NSVF(Dataset): + """NSVF Generic Dataset.""" + + def __init__(self, + datadir, + split='train', + downsample=1.0, + wh=[800, 800], + is_stack=False): + self.root_dir = datadir + self.split = split + self.is_stack = is_stack + self.downsample = downsample + self.img_wh = (int(wh[0] / downsample), int(wh[1] / downsample)) + self.define_transforms() + + self.white_bg = True + self.near_far = [0.5, 6.0] + self.scene_bbox = torch.from_numpy( + np.loadtxt(f'{self.root_dir}/bbox.txt')).float()[:6].view(2, 3) + self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], + [0, 0, -1, 0], [0, 0, 0, 1]]) + self.read_meta() + self.define_proj_mat() + + self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3) + self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3) + + def bbox2corners(self): + corners = self.scene_bbox.unsqueeze(0).repeat(4, 1, 1) + for i in range(3): + corners[i, [0, 1], i] = corners[i, [1, 0], i] + return corners.view(-1, 3) + + def read_meta(self): + with open(os.path.join(self.root_dir, 'intrinsics.txt')) as f: + focal = float(f.readline().split()[0]) + self.intrinsics = np.array([[focal, 0, 400.0], [0, focal, 400.0], + [0, 0, 1]]) + self.intrinsics[:2] *= (np.array(self.img_wh) + / np.array([800, 800])).reshape(2, 1) + + pose_files = sorted(os.listdir(os.path.join(self.root_dir, 'pose'))) + img_files = sorted(os.listdir(os.path.join(self.root_dir, 'rgb'))) + + if self.split == 'train': + pose_files = [x for x in pose_files if x.startswith('0_')] + img_files = [x for x in img_files if x.startswith('0_')] + elif self.split == 'val': + pose_files = [x for x in pose_files if x.startswith('1_')] + img_files = [x for x in img_files if x.startswith('1_')] + elif self.split == 'test': + test_pose_files = [x for x in pose_files if x.startswith('2_')] + test_img_files = [x for x in img_files if x.startswith('2_')] + if len(test_pose_files) == 0: + test_pose_files = [x for x in pose_files if x.startswith('1_')] + test_img_files = [x for x in img_files if x.startswith('1_')] + pose_files = test_pose_files + img_files = test_img_files + + # ray directions 
for all pixels, same for all images (same H, W, focal) + self.directions = get_ray_directions( + self.img_wh[1], + self.img_wh[0], [self.intrinsics[0, 0], self.intrinsics[1, 1]], + center=self.intrinsics[:2, 2]) # (h, w, 3) + self.directions = self.directions / torch.norm( + self.directions, dim=-1, keepdim=True) + + self.render_path = torch.stack([ + pose_spherical(angle, -30.0, 4.0) + for angle in np.linspace(-180, 180, 40 + 1)[:-1] + ], 0) + + self.poses = [] + self.all_rays = [] + self.all_rgbs = [] + + assert len(img_files) == len(pose_files) + for img_fname, pose_fname in tqdm( + zip(img_files, pose_files), + desc=f'Loading data {self.split} ({len(img_files)})'): + image_path = os.path.join(self.root_dir, 'rgb', img_fname) + img = Image.open(image_path) + if self.downsample != 1.0: + img = img.resize(self.img_wh, Image.LANCZOS) + img = self.transform(img) # (4, h, w) + img = img.view(img.shape[0], -1).permute(1, 0) # (h*w, 4) RGBA + if img.shape[-1] == 4: + img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:] + ) # blend A to RGB + self.all_rgbs += [img] + + c2w = np.loadtxt(os.path.join(self.root_dir, 'pose', pose_fname)) + c2w = torch.FloatTensor(c2w) + self.poses.append(c2w) # C2W + rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3) + self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 8) + + self.poses = torch.stack(self.poses) + if 'train' == self.split: + if self.is_stack: + self.all_rays = torch.stack(self.all_rays, + 0).reshape(-1, *self.img_wh[::-1], + 6) + self.all_rgbs = torch.stack(self.all_rgbs, + 0).reshape(-1, *self.img_wh[::-1], + 3) + else: + self.all_rays = torch.cat(self.all_rays, 0) + self.all_rgbs = torch.cat(self.all_rgbs, 0) + else: + self.all_rays = torch.stack(self.all_rays, 0) + self.all_rgbs = torch.stack(self.all_rgbs, + 0).reshape(-1, *self.img_wh[::-1], 3) + + def define_transforms(self): + self.transform = T.ToTensor() + + def define_proj_mat(self): + self.proj_mat = torch.from_numpy( + self.intrinsics[:3, :3]).unsqueeze(0).float() @ torch.inverse( + self.poses)[:, :3] + + def world2ndc(self, points): + device = points.device + return (points - self.center.to(device)) / self.radius.to(device) + + def __len__(self): + if self.split == 'train': + return len(self.all_rays) + return len(self.all_rgbs) + + def __getitem__(self, idx): + + if self.split == 'train': # use data in the buffers + sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]} + + else: # create data for each image separately + + img = self.all_rgbs[idx] + rays = self.all_rays[idx] + + sample = {'rays': rays, 'rgbs': img} + return sample diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/ray_utils.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/ray_utils.py new file mode 100644 index 00000000..39869fbc --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/ray_utils.py @@ -0,0 +1,295 @@ +import re + +import numpy as np +import torch +from kornia import create_meshgrid +from torch import searchsorted + +# from utils import index_point_feature + + +def depth2dist(z_vals, cos_angle): + # z_vals: [N_ray N_sample] + device = z_vals.device + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat( + [dists, + torch.Tensor([1e10]).to(device).expand(dists[..., :1].shape)], + -1) # [N_rays, N_samples] + dists = dists * cos_angle.unsqueeze(-1) + return dists + + +def ndc2dist(ndc_pts, cos_angle): + dists = torch.norm(ndc_pts[:, 1:] - ndc_pts[:, :-1], dim=-1) + dists = torch.cat([dists, 1e10 * 
cos_angle.unsqueeze(-1)], + -1) # [N_rays, N_samples] + return dists + + +def get_ray_directions(H, W, focal, center=None): + """ + Get ray directions for all pixels in camera coordinate. + Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ + ray-tracing-generating-camera-rays/standard-coordinate-systems + Inputs: + H, W, focal: image height, width and focal length + Outputs: + directions: (H, W, 3), the direction of the rays in camera coordinate + """ + grid = create_meshgrid(H, W, normalized_coordinates=False)[0] + 0.5 + + i, j = grid.unbind(-1) + # the direction here is without +0.5 pixel centering as calibration is not so accurate + # see https://github.com/bmild/nerf/issues/24 + cent = center if center is not None else [W / 2, H / 2] + directions = torch.stack([(i - cent[0]) / focal[0], + (j - cent[1]) / focal[1], + torch.ones_like(i)], -1) # (H, W, 3) + + return directions + + +def get_ray_directions_blender(H, W, focal, center=None): + """ + Get ray directions for all pixels in camera coordinate. + Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ + ray-tracing-generating-camera-rays/standard-coordinate-systems + Inputs: + H, W, focal: image height, width and focal length + Outputs: + directions: (H, W, 3), the direction of the rays in camera coordinate + """ + grid = create_meshgrid(H, W, normalized_coordinates=False)[0] + 0.5 + i, j = grid.unbind(-1) + # the direction here is without +0.5 pixel centering as calibration is not so accurate + # see https://github.com/bmild/nerf/issues/24 + cent = center if center is not None else [W / 2, H / 2] + directions = torch.stack([(i - cent[0]) / focal[0], + -(j - cent[1]) / focal[1], -torch.ones_like(i)], + -1) # (H, W, 3) + + return directions + + +def get_rays(directions, c2w): + """ + Get ray origin and normalized directions in world coordinate for all pixels in one image. + Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ + ray-tracing-generating-camera-rays/standard-coordinate-systems + Inputs: + directions: (H, W, 3) precomputed ray directions in camera coordinate + c2w: (3, 4) transformation matrix from camera coordinate to world coordinate + Outputs: + rays_o: (H*W, 3), the origin of the rays in world coordinate + rays_d: (H*W, 3), the normalized direction of the rays in world coordinate + """ + # Rotate ray directions from camera coordinate to the world coordinate + rays_d = directions @ c2w[:3, :3].T # (H, W, 3) + # rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True) + # The origin of all rays is the camera origin in world coordinate + rays_o = c2w[:3, 3].expand(rays_d.shape) # (H, W, 3) + + rays_d = rays_d.view(-1, 3) + rays_o = rays_o.view(-1, 3) + + return rays_o, rays_d + + +def ndc_rays_blender(H, W, focal, near, rays_o, rays_d): + # Shift ray origins to near plane + t = -(near + rays_o[..., 2]) / rays_d[..., 2] + rays_o = rays_o + t[..., None] * rays_d + + # Projection + o0 = -1. / (W / (2. * focal)) * rays_o[..., 0] / rays_o[..., 2] + o1 = -1. / (H / (2. * focal)) * rays_o[..., 1] / rays_o[..., 2] + o2 = 1. + 2. * near / rays_o[..., 2] + + d0 = -1. / (W / (2. * focal)) * ( + rays_d[..., 0] / rays_d[..., 2] - rays_o[..., 0] / rays_o[..., 2]) + d1 = -1. / (H / (2. * focal)) * ( + rays_d[..., 1] / rays_d[..., 2] - rays_o[..., 1] / rays_o[..., 2]) + d2 = -2. 
* near / rays_o[..., 2] + + rays_o = torch.stack([o0, o1, o2], -1) + rays_d = torch.stack([d0, d1, d2], -1) + + return rays_o, rays_d + + +def ndc_rays(H, W, focal, near, rays_o, rays_d): + # Shift ray origins to near plane + t = (near - rays_o[..., 2]) / rays_d[..., 2] + rays_o = rays_o + t[..., None] * rays_d + + # Projection + o0 = 1. / (W / (2. * focal)) * rays_o[..., 0] / rays_o[..., 2] + o1 = 1. / (H / (2. * focal)) * rays_o[..., 1] / rays_o[..., 2] + o2 = 1. - 2. * near / rays_o[..., 2] + + d0 = 1. / (W / (2. * focal)) * ( + rays_d[..., 0] / rays_d[..., 2] - rays_o[..., 0] / rays_o[..., 2]) + d1 = 1. / (H / (2. * focal)) * ( + rays_d[..., 1] / rays_d[..., 2] - rays_o[..., 1] / rays_o[..., 2]) + d2 = 2. * near / rays_o[..., 2] + + rays_o = torch.stack([o0, o1, o2], -1) + rays_d = torch.stack([d0, d1, d2], -1) + + return rays_o, rays_d + + +# Hierarchical sampling (section 5.2) +def sample_pdf(bins, weights, N_samples, det=False, pytest=False): + device = weights.device + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], + -1) # (batch, len(bins)) + + # Take uniform samples + if det: + u = torch.linspace(0., 1., steps=N_samples, device=device) + u = u.expand(list(cdf.shape[:-1]) + [N_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [N_samples], device=device) + + # Pytest, overwrite u with numpy's fixed random numbers + if pytest: + np.random.seed(0) + new_shape = list(cdf.shape[:-1]) + [N_samples] + if det: + u = np.linspace(0., 1., N_samples) + u = np.broadcast_to(u, new_shape) + else: + u = np.random.rand(*new_shape) + u = torch.Tensor(u) + + # Invert CDF + u = u.contiguous() + inds = searchsorted(cdf.detach(), u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def dda(rays_o, rays_d, bbox_3D): + inv_ray_d = 1.0 / (rays_d + 1e-6) + t_min = (bbox_3D[:1] - rays_o) * inv_ray_d # N_rays 3 + t_max = (bbox_3D[1:] - rays_o) * inv_ray_d + t = torch.stack((t_min, t_max)) # 2 N_rays 3 + t_min = torch.max(torch.min(t, dim=0)[0], dim=-1, keepdim=True)[0] + t_max = torch.min(torch.max(t, dim=0)[0], dim=-1, keepdim=True)[0] + return t_min, t_max + + +def ray_marcher(rays, N_samples=64, lindisp=False, perturb=0, bbox_3D=None): + """ + sample points along the rays + Inputs: + rays: () + + Returns: + + """ + + # Decompose the inputs + N_rays = rays.shape[0] + rays_o, rays_d = rays[:, 0:3], rays[:, 3:6] # both (N_rays, 3) + near, far = rays[:, 6:7], rays[:, 7:8] # both (N_rays, 1) + + if bbox_3D is not None: + # cal aabb boundles + near, far = dda(rays_o, rays_d, bbox_3D) + + # Sample depth points + z_steps = torch.linspace( + 0, 1, N_samples, device=rays.device) # (N_samples) + if not lindisp: # use linear sampling in depth space + z_vals = near * (1 - z_steps) + far * z_steps + else: # use linear sampling in disparity space + z_vals = 1 / (1 / 
near * (1 - z_steps) + 1 / far * z_steps) + + z_vals = z_vals.expand(N_rays, N_samples) + + if perturb > 0: # perturb sampling depths (z_vals) + z_vals_mid = 0.5 * (z_vals[:, :-1] + z_vals[:, 1:] + ) # (N_rays, N_samples-1) interval mid points + # get intervals between samples + upper = torch.cat([z_vals_mid, z_vals[:, -1:]], -1) + lower = torch.cat([z_vals[:, :1], z_vals_mid], -1) + + perturb_rand = perturb * torch.rand(z_vals.shape, device=rays.device) + z_vals = lower + (upper - lower) * perturb_rand + + # (N_rays, N_samples, 3) + xyz_coarse_sampled = rays_o.unsqueeze( + 1) + rays_d.unsqueeze(1) * z_vals.unsqueeze(2) + + return xyz_coarse_sampled, rays_o, rays_d, z_vals + + +def read_pfm(filename): + file = open(filename, 'rb') + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().decode('utf-8').rstrip() + if header == 'PF': + color = True + elif header == 'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8')) + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + file.close() + return data, scale + + +def ndc_bbox(all_rays): + near_min = torch.min(all_rays[..., :3].view(-1, 3), dim=0)[0] + near_max = torch.max(all_rays[..., :3].view(-1, 3), dim=0)[0] + far_min = torch.min( + (all_rays[..., :3] + all_rays[..., 3:6]).view(-1, 3), dim=0)[0] + far_max = torch.max( + (all_rays[..., :3] + all_rays[..., 3:6]).view(-1, 3), dim=0)[0] + print( + f'===> ndc bbox near_min:{near_min} near_max:{near_max} far_min:{far_min} far_max:{far_max}' + ) + return torch.stack( + (torch.minimum(near_min, far_min), torch.maximum(near_max, far_max))) diff --git a/modelscope/models/cv/nerf_recon_vq_compression/dataloader/tankstemple.py b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/tankstemple.py new file mode 100644 index 00000000..ba6aa717 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/dataloader/tankstemple.py @@ -0,0 +1,249 @@ +import os + +import torch +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms as T +from tqdm import tqdm + +from .ray_utils import * + + +def circle(radius=3.5, h=0.0, axis='z', t0=0, r=1): + if axis == 'z': + return lambda t: [ + radius * np.cos(r * t + t0), radius * np.sin(r * t + t0), h + ] + elif axis == 'y': + return lambda t: [ + radius * np.cos(r * t + t0), h, radius * np.sin(r * t + t0) + ] + else: + return lambda t: [ + h, radius * np.cos(r * t + t0), radius * np.sin(r * t + t0) + ] + + +def cross(x, y, axis=0): + T = torch if isinstance(x, torch.Tensor) else np + return T.cross(x, y, axis) + + +def normalize(x, axis=-1, order=2): + if isinstance(x, torch.Tensor): + l2 = x.norm(p=order, dim=axis, keepdim=True) + return x / (l2 + 1e-8), l2 + + else: + l2 = np.linalg.norm(x, order, axis) + l2 = np.expand_dims(l2, axis) + l2[l2 == 0] = 1 + return x / l2, + + +def cat(x, axis=1): + if isinstance(x[0], torch.Tensor): + return torch.cat(x, dim=axis) + return np.concatenate(x, axis=axis) + + +def look_at_rotation(camera_position, + at=None, + up=None, + inverse=False, + cv=False): + """ + This function 
takes a vector 'camera_position' which specifies the location + of the camera in world coordinates and two vectors `at` and `up` which + indicate the position of the object and the up directions of the world + coordinate system respectively. The object is assumed to be centered at + the origin. + The output is a rotation matrix representing the transformation + from world coordinates -> view coordinates. + Input: + camera_position: 3 + at: 1 x 3 or N x 3 (0, 0, 0) in default + up: 1 x 3 or N x 3 (0, 1, 0) in default + """ + + if at is None: + at = torch.zeros_like(camera_position) + else: + at = torch.tensor(at).type_as(camera_position) + if up is None: + up = torch.zeros_like(camera_position) + up[2] = -1 + else: + up = torch.tensor(up).type_as(camera_position) + + z_axis = normalize(at - camera_position)[0] + x_axis = normalize(cross(up, z_axis))[0] + y_axis = normalize(cross(z_axis, x_axis))[0] + + R = cat([x_axis[:, None], y_axis[:, None], z_axis[:, None]], axis=1) + return R + + +def gen_path(pos_gen, at=(0, 0, 0), up=(0, -1, 0), frames=180): + c2ws = [] + for t in range(frames): + c2w = torch.eye(4) + cam_pos = torch.tensor(pos_gen(t * (360.0 / frames) / 180 * np.pi)) + cam_rot = look_at_rotation( + cam_pos, at=at, up=up, inverse=False, cv=True) + c2w[:3, 3], c2w[:3, :3] = cam_pos, cam_rot + c2ws.append(c2w) + return torch.stack(c2ws) + + +class TanksTempleDataset(Dataset): + """NSVF Generic Dataset.""" + + def __init__(self, + datadir, + split='train', + downsample=1.0, + wh=[1920, 1080], + is_stack=False): + self.root_dir = datadir + self.split = split + self.is_stack = is_stack + self.downsample = downsample + self.img_wh = (int(wh[0] / downsample), int(wh[1] / downsample)) + self.define_transforms() + + self.white_bg = True + self.near_far = [0.01, 6.0] + self.scene_bbox = torch.from_numpy( + np.loadtxt(f'{self.root_dir}/bbox.txt')).float()[:6].view(2, + 3) * 1.2 + + self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], + [0, 0, -1, 0], [0, 0, 0, 1]]) + self.read_meta() + self.define_proj_mat() + + self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3) + self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3) + + def bbox2corners(self): + corners = self.scene_bbox.unsqueeze(0).repeat(4, 1, 1) + for i in range(3): + corners[i, [0, 1], i] = corners[i, [1, 0], i] + return corners.view(-1, 3) + + def read_meta(self): + + self.intrinsics = np.loadtxt( + os.path.join(self.root_dir, 'intrinsics.txt')) + self.intrinsics[:2] *= (np.array(self.img_wh) + / np.array([1920, 1080])).reshape(2, 1) + pose_files = sorted(os.listdir(os.path.join(self.root_dir, 'pose'))) + img_files = sorted(os.listdir(os.path.join(self.root_dir, 'rgb'))) + + if self.split == 'train': + pose_files = [x for x in pose_files if x.startswith('0_')] + img_files = [x for x in img_files if x.startswith('0_')] + elif self.split == 'val': + pose_files = [x for x in pose_files if x.startswith('1_')] + img_files = [x for x in img_files if x.startswith('1_')] + elif self.split == 'test': + test_pose_files = [x for x in pose_files if x.startswith('2_')] + test_img_files = [x for x in img_files if x.startswith('2_')] + if len(test_pose_files) == 0: + test_pose_files = [x for x in pose_files if x.startswith('1_')] + test_img_files = [x for x in img_files if x.startswith('1_')] + pose_files = test_pose_files + img_files = test_img_files + + # ray directions for all pixels, same for all images (same H, W, focal) + self.directions = get_ray_directions( + self.img_wh[1], + self.img_wh[0], 
[self.intrinsics[0, 0], self.intrinsics[1, 1]], + center=self.intrinsics[:2, 2]) # (h, w, 3) + self.directions = self.directions / torch.norm( + self.directions, dim=-1, keepdim=True) + + self.poses = [] + self.all_rays = [] + self.all_rgbs = [] + + assert len(img_files) == len(pose_files) + for img_fname, pose_fname in tqdm( + zip(img_files, pose_files), + desc=f'Loading data {self.split} ({len(img_files)})'): + image_path = os.path.join(self.root_dir, 'rgb', img_fname) + img = Image.open(image_path) + if self.downsample != 1.0: + img = img.resize(self.img_wh, Image.LANCZOS) + img = self.transform(img) # (4, h, w) + img = img.view(img.shape[0], -1).permute(1, 0) # (h*w, 4) RGBA + if img.shape[-1] == 4: + img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:] + ) # blend A to RGB + self.all_rgbs.append(img) + + c2w = np.loadtxt(os.path.join(self.root_dir, 'pose', + pose_fname)) # @ cam_trans + c2w = torch.FloatTensor(c2w) + self.poses.append(c2w) # C2W + rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3) + self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 8) + + self.poses = torch.stack(self.poses) + + center = torch.mean(self.scene_bbox, dim=0) + radius = torch.norm(self.scene_bbox[1] - center) * 1.2 + up = torch.mean(self.poses[:, :3, 1], dim=0).tolist() + pos_gen = circle(radius=radius, h=-0.2 * up[1], axis='y') + self.render_path = gen_path(pos_gen, up=up, frames=200) + self.render_path[:, :3, 3] += center + + if 'train' == self.split: + if self.is_stack: + self.all_rays = torch.stack(self.all_rays, 0).reshape( + -1, *self.img_wh[::-1], + 6) # (len(self.meta['frames])*h*w, 3) + self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape( + -1, *self.img_wh[::-1], + 3) # (len(self.meta['frames])*h*w, 3) + else: + self.all_rays = torch.cat( + self.all_rays, 0) # (len(self.meta['frames])*h*w, 3) + self.all_rgbs = torch.cat( + self.all_rgbs, 0) # (len(self.meta['frames])*h*w, 3) + else: + self.all_rays = torch.stack(self.all_rays, + 0) # (len(self.meta['frames]),h*w, 3) + self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape( + -1, *self.img_wh[::-1], 3) # (len(self.meta['frames]),h,w,3) + + def define_transforms(self): + self.transform = T.ToTensor() + + def define_proj_mat(self): + self.proj_mat = torch.from_numpy( + self.intrinsics[:3, :3]).unsqueeze(0).float() @ torch.inverse( + self.poses)[:, :3] + + def world2ndc(self, points): + device = points.device + return (points - self.center.to(device)) / self.radius.to(device) + + def __len__(self): + if self.split == 'train': + return len(self.all_rays) + return len(self.all_rgbs) + + def __getitem__(self, idx): + + if self.split == 'train': # use data in the buffers + sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]} + + else: # create data for each image separately + + img = self.all_rgbs[idx] + rays = self.all_rays[idx] + + sample = {'rays': rays, 'rgbs': img} + return sample diff --git a/modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py b/modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py new file mode 100644 index 00000000..041a7af8 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/nerf_recon_vq_compression.py @@ -0,0 +1,116 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
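+# This module registers a vector-quantized TensoRF (VQRF) NeRF reconstruction model.
+# It loads a compressed checkpoint through TensorVMSplitVQ.extreme_load, builds the
+# test split of the configured dataset, and exposes evaluation() and render_path()
+# for test-set PSNR computation and novel-view path rendering. A usage sketch based
+# on the constructor and methods below (paths, dataset name and flag values are
+# illustrative placeholders, not fixed defaults):
+#   model = NeRFReconVQCompression(
+#       model_dir='/path/to/model_dir',
+#       dataset_name='blender',
+#       data_dir='/path/to/scene',
+#       downsample=1.0,
+#       ndc_ray=False,
+#       ckpt_path='ficus_demo.pt')
+#   model.evaluation(render_dir='./render_out')    # mean PSNR over the test split
+#   model.render_path(render_dir='./render_out')   # render a spiral/path video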
+import glob +import os +import time +from functools import partial + +import cv2 +import numpy as np +import torch +import tqdm + +from modelscope.metainfo import Models +from modelscope.models.base import Tensor, TorchModel +from modelscope.models.builder import MODELS +from modelscope.utils.constant import ModelFile, Tasks +from modelscope.utils.logger import get_logger +from .dataloader import dataset_dict +from .network.tensoRF import TensorVM, raw2alpha +from .network.tensoRF_VQ import TensorVMSplitVQ +from .renderer import OctreeRender_trilinear_fast +from .renderer import evaluation as evaluation_render +from .renderer import render_path + +logger = get_logger() + +__all__ = ['NeRFReconVQCompression'] + + +@MODELS.register_module( + Tasks.nerf_recon_vq_compression, + module_name=Models.nerf_recon_vq_compression) +class NeRFReconVQCompression(TorchModel): + + def __init__(self, model_dir=None, **kwargs): + super().__init__(model_dir, **kwargs) + + if not torch.cuda.is_available(): + raise Exception('GPU is required') + + self.device = torch.device('cuda') + self.data_type = kwargs['dataset_name'] + self.data_dir = kwargs['data_dir'] + self.downsample = kwargs['downsample'] + self.ndc_ray = kwargs['ndc_ray'] + self.ckpt_path = os.path.join(model_dir, kwargs['ckpt_path']) + + if self.ckpt_path == '' or self.ckpt_path is None: + self.ckpt_path = os.path.join(model_dir, 'ficus_demo.pt') + if not os.path.exists(self.ckpt_path): + raise Exception('ckpt path not found') + + # load model + ckpt = torch.load(self.ckpt_path, map_location=self.device) + model_kwargs = ckpt['kwargs'] + model_kwargs['device'] = self.device + self.model = TensorVMSplitVQ(**model_kwargs) + self.model.extreme_load(ckpt) + + self.renderer = OctreeRender_trilinear_fast + + # load data + dataset = dataset_dict[self.data_type] + self.test_dataset = dataset( + self.data_dir, + split='test', + downsample=self.downsample, + is_stack=True) + + def evaluation(self, render_dir, N_vis=-1): + white_bg = self.test_dataset.white_bg + ndc_ray = self.ndc_ray + evaluation_test = partial( + evaluation_render, + test_dataset=self.test_dataset, + renderer=self.renderer, + white_bg=white_bg, + ndc_ray=ndc_ray, + device=self.device, + compute_extra_metrics=True, + im_save=True) + + logfolder = render_dir + os.makedirs(f'{logfolder}/evalution_test', exist_ok=True) + PSNRs = evaluation_test( + tensorf=self.model, + N_vis=N_vis, + savePath=f'{logfolder}/evalution_test') + logger.info( + f'VQRF-Evaluation: {self.ckpt_path} mean PSNR: {np.mean(PSNRs)}') + + def render_path(self, render_dir, N_vis=120): + white_bg = self.test_dataset.white_bg + ndc_ray = self.ndc_ray + + logfolder = render_dir + os.makedirs(f'{logfolder}/render_path', exist_ok=True) + + render_poses = self.get_render_pose(N_cameras=N_vis) + render_path( + self.test_dataset, + self.model, + render_poses, + self.renderer, + savePath=f'{logfolder}/render_path', + white_bg=white_bg, + ndc_ray=ndc_ray, + device=self.device) + logger.info( + f'VQRF-Render: {self.ckpt_path} render path video result saved in {logfolder}/render_path' + ) + + def get_render_pose(self, N_cameras=120): + if self.data_type == 'blender': + return self.test_dataset.get_render_pose(N_cameras=N_cameras) + elif self.data_type == 'llff': + return self.test_dataset.get_render_pose(N_cameras=N_cameras) diff --git a/modelscope/models/cv/nerf_recon_vq_compression/network/__init__.py b/modelscope/models/cv/nerf_recon_vq_compression/network/__init__.py new file mode 100644 index 00000000..4bd3630b --- /dev/null +++ 
b/modelscope/models/cv/nerf_recon_vq_compression/network/__init__.py @@ -0,0 +1,2 @@ +from .tensorBase import * +from .tensoRF import TensorVMSplit diff --git a/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF.py b/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF.py new file mode 100644 index 00000000..7ec0d867 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF.py @@ -0,0 +1,579 @@ +from .tensorBase import * + + +class TensorVM(TensorBase): + + def __init__(self, aabb, gridSize, device, **kargs): + super(TensorVM, self).__init__(aabb, gridSize, device, **kargs) + + def init_svd_volume(self, res, device): + self.plane_coef = torch.nn.Parameter(0.1 * torch.randn( + (3, self.app_n_comp + self.density_n_comp, res, res), + device=device)) + self.line_coef = torch.nn.Parameter(0.1 * torch.randn( + (3, self.app_n_comp + self.density_n_comp, res, 1), device=device)) + self.basis_mat = torch.nn.Linear( + self.app_n_comp * 3, self.app_dim, bias=False, device=device) + + def get_optparam_groups(self, + lr_init_spatialxyz=0.02, + lr_init_network=0.001): + grad_vars = [{ + 'params': self.line_coef, + 'lr': lr_init_spatialxyz + }, { + 'params': self.plane_coef, + 'lr': lr_init_spatialxyz + }, { + 'params': self.basis_mat.parameters(), + 'lr': lr_init_network + }] + if isinstance(self.renderModule, torch.nn.Module): + grad_vars += [{ + 'params': self.renderModule.parameters(), + 'lr': lr_init_network + }] + return grad_vars + + def compute_features(self, xyz_sampled): + + coordinate_plane = torch.stack( + (xyz_sampled[..., self.matMode[0]], xyz_sampled[..., + self.matMode[1]], + xyz_sampled[..., self.matMode[2]])).detach() + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach() + + plane_feats = F.grid_sample( + self.plane_coef[:, -self.density_n_comp:], + coordinate_plane, + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_feats = F.grid_sample( + self.line_coef[:, -self.density_n_comp:], + coordinate_line, + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + + sigma_feature = torch.sum(plane_feats * line_feats, dim=0) + + plane_feats = F.grid_sample( + self.plane_coef[:, :self.app_n_comp], + coordinate_plane, + align_corners=True).view(3 * self.app_n_comp, -1) + line_feats = F.grid_sample( + self.line_coef[:, :self.app_n_comp], + coordinate_line, + align_corners=True).view(3 * self.app_n_comp, -1) + + app_features = self.basis_mat((plane_feats * line_feats).T) + + return sigma_feature, app_features + + def compute_densityfeature(self, xyz_sampled): + coordinate_plane = torch.stack( + (xyz_sampled[..., self.matMode[0]], xyz_sampled[..., + self.matMode[1]], + xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2) + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + + plane_feats = F.grid_sample( + self.plane_coef[:, -self.density_n_comp:], + coordinate_plane, + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_feats = F.grid_sample( + self.line_coef[:, -self.density_n_comp:], + coordinate_line, + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + + sigma_feature = torch.sum(plane_feats * 
line_feats, dim=0) + + return sigma_feature + + def compute_appfeature(self, xyz_sampled): + coordinate_plane = torch.stack( + (xyz_sampled[..., self.matMode[0]], xyz_sampled[..., + self.matMode[1]], + xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2) + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + + plane_feats = F.grid_sample( + self.plane_coef[:, :self.app_n_comp], + coordinate_plane, + align_corners=True).view(3 * self.app_n_comp, -1) + line_feats = F.grid_sample( + self.line_coef[:, :self.app_n_comp], + coordinate_line, + align_corners=True).view(3 * self.app_n_comp, -1) + + app_features = self.basis_mat((plane_feats * line_feats).T) + + return app_features + + def vectorDiffs(self, vector_comps): + total = 0 + + for idx in range(len(vector_comps)): + n_comp, n_size = vector_comps[idx].shape[:-1] + + dotp = torch.matmul( + vector_comps[idx].view(n_comp, n_size), + vector_comps[idx].view(n_comp, n_size).transpose(-1, -2)) + non_diagonal = dotp.view(-1)[1:].view(n_comp - 1, + n_comp + 1)[..., :-1] + total = total + torch.mean(torch.abs(non_diagonal)) + return total + + def vector_comp_diffs(self): + + return self.vectorDiffs( + self.line_coef[:, -self.density_n_comp:]) + self.vectorDiffs( + self.line_coef[:, :self.app_n_comp]) + + @torch.no_grad() + def up_sampling_VM(self, plane_coef, line_coef, res_target): + + for i in range(len(self.vecMode)): + vec_id = self.vecMode[i] + mat_id_0, mat_id_1 = self.matMode[i] + + plane_coef[i] = torch.nn.Parameter( + F.interpolate( + plane_coef[i].data, + size=(res_target[mat_id_1], res_target[mat_id_0]), + mode='bilinear', + align_corners=True)) + line_coef[i] = torch.nn.Parameter( + F.interpolate( + line_coef[i].data, + size=(res_target[vec_id], 1), + mode='bilinear', + align_corners=True)) + return plane_coef, line_coef + + @torch.no_grad() + def upsample_volume_grid(self, res_target): + # assuming xyz have the same scale + scale = res_target[0] / self.line_coef.shape[2] + plane_coef = F.interpolate( + self.plane_coef.detach().data, + scale_factor=scale, + mode='bilinear', + align_corners=True) + line_coef = F.interpolate( + self.line_coef.detach().data, + size=(res_target[0], 1), + mode='bilinear', + align_corners=True) + self.plane_coef, self.line_coef = torch.nn.Parameter( + plane_coef), torch.nn.Parameter(line_coef) + self.compute_stepSize(res_target) + print(f'upsamping to {res_target}') + + +class TensorVMSplit(TensorBase): + + def __init__(self, aabb, gridSize, device, **kargs): + super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs) + + def init_svd_volume(self, res, device): + self.density_plane, self.density_line = self.init_one_svd( + self.density_n_comp, self.gridSize, 0.1, device) + self.app_plane, self.app_line = self.init_one_svd( + self.app_n_comp, self.gridSize, 0.1, device) + self.basis_mat = torch.nn.Linear( + sum(self.app_n_comp), self.app_dim, bias=False).to(device) + print(self.basis_mat) + + def init_one_svd(self, n_component, gridSize, scale, device): + plane_coef, line_coef = [], [] + for i in range(len(self.vecMode)): + vec_id = self.vecMode[i] + mat_id_0, mat_id_1 = self.matMode[i] + plane_coef.append( + torch.nn.Parameter(scale * torch.randn( + (1, n_component[i], gridSize[mat_id_1], + gridSize[mat_id_0])))) # + line_coef.append( + torch.nn.Parameter(scale * torch.randn( 
+ (1, n_component[i], gridSize[vec_id], 1)))) + + return torch.nn.ParameterList(plane_coef).to( + device), torch.nn.ParameterList(line_coef).to(device) + + def get_optparam_groups(self, + lr_init_spatialxyz=0.02, + lr_init_network=0.001): + grad_vars = [{ + 'params': self.density_line, + 'lr': lr_init_spatialxyz + }, { + 'params': self.density_plane, + 'lr': lr_init_spatialxyz + }, { + 'params': self.app_line, + 'lr': lr_init_spatialxyz + }, { + 'params': self.app_plane, + 'lr': lr_init_spatialxyz + }, { + 'params': self.basis_mat.parameters(), + 'lr': lr_init_network + }] + if isinstance(self.renderModule, torch.nn.Module): + grad_vars += [{ + 'params': self.renderModule.parameters(), + 'lr': lr_init_network + }] + return grad_vars + + def vectorDiffs(self, vector_comps): + total = 0 + + for idx in range(len(vector_comps)): + n_comp, n_size = vector_comps[idx].shape[1:-1] + + dotp = torch.matmul( + vector_comps[idx].view(n_comp, n_size), + vector_comps[idx].view(n_comp, n_size).transpose(-1, -2)) + non_diagonal = dotp.view(-1)[1:].view(n_comp - 1, + n_comp + 1)[..., :-1] + total = total + torch.mean(torch.abs(non_diagonal)) + return total + + def vector_comp_diffs(self): + return self.vectorDiffs(self.density_line) + self.vectorDiffs( + self.app_line) + + def density_L1(self): + total = 0 + for idx in range(len(self.density_plane)): + total = total + torch.mean(torch.abs( + self.density_plane[idx])) + torch.mean( + torch.abs(self.density_line[idx])) + return total + + def TV_loss_density(self, reg): + total = 0 + for idx in range(len(self.density_plane)): + total = total + reg(self.density_plane[idx]) * 1e-2 + return total + + def TV_loss_app(self, reg): + total = 0 + for idx in range(len(self.app_plane)): + total = total + reg(self.app_plane[idx]) * 1e-2 + return total + + def compute_densityfeature(self, xyz_sampled): + + # plane + line basis + coordinate_plane = torch.stack( + (xyz_sampled[..., self.matMode[0]], xyz_sampled[..., + self.matMode[1]], + xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2) + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + + sigma_feature = torch.zeros((xyz_sampled.shape[0], ), + device=xyz_sampled.device) + for idx_plane in range(len(self.density_plane)): + plane_coef_point = F.grid_sample( + self.density_plane[idx_plane], + coordinate_plane[[idx_plane]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_coef_point = F.grid_sample( + self.density_line[idx_plane], + coordinate_line[[idx_plane]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + sigma_feature = sigma_feature + torch.sum( + plane_coef_point * line_coef_point, dim=0) + + return sigma_feature + + def compute_appfeature(self, xyz_sampled): + + # plane + line basis + coordinate_plane = torch.stack( + (xyz_sampled[..., self.matMode[0]], xyz_sampled[..., + self.matMode[1]], + xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2) + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + # import ipdb; ipdb.set_trace() + plane_coef_point, line_coef_point = [], [] + for idx_plane in range(len(self.app_plane)): + 
plane_coef_point.append( + F.grid_sample( + self.app_plane[idx_plane], + coordinate_plane[[idx_plane]], + align_corners=True).view(-1, *xyz_sampled.shape[:1])) + line_coef_point.append( + F.grid_sample( + self.app_line[idx_plane], + coordinate_line[[idx_plane]], + align_corners=True).view(-1, *xyz_sampled.shape[:1])) + plane_coef_point, line_coef_point = torch.cat( + plane_coef_point), torch.cat(line_coef_point) + + return self.basis_mat((plane_coef_point * line_coef_point).T) + + @torch.no_grad() + def up_sampling_VM(self, plane_coef, line_coef, res_target): + + for i in range(len(self.vecMode)): + vec_id = self.vecMode[i] + mat_id_0, mat_id_1 = self.matMode[i] + plane_coef[i] = torch.nn.Parameter( + F.interpolate( + plane_coef[i].data, + size=(res_target[mat_id_1], res_target[mat_id_0]), + mode='bilinear', + align_corners=True)) + line_coef[i] = torch.nn.Parameter( + F.interpolate( + line_coef[i].data, + size=(res_target[vec_id], 1), + mode='bilinear', + align_corners=True)) + + return plane_coef, line_coef + + @torch.no_grad() + def upsample_volume_grid(self, res_target): + self.app_plane, self.app_line = self.up_sampling_VM( + self.app_plane, self.app_line, res_target) + self.density_plane, self.density_line = self.up_sampling_VM( + self.density_plane, self.density_line, res_target) + + self.update_stepSize(res_target) + print(f'upsamping to {res_target}') + + @torch.no_grad() + def shrink(self, new_aabb): + print('====> shrinking ...') + xyz_min, xyz_max = new_aabb + t_l, b_r = (xyz_min - self.aabb[0]) / self.units, ( + xyz_max - self.aabb[0]) / self.units + # print(new_aabb, self.aabb) + # print(t_l, b_r,self.alphaMask.alpha_volume.shape) + t_l, b_r = torch.round( + torch.round(t_l)).long(), torch.round(b_r).long() + 1 + b_r = torch.stack([b_r, self.gridSize]).amin(0) + + for i in range(len(self.vecMode)): + mode0 = self.vecMode[i] + self.density_line[i] = torch.nn.Parameter( + self.density_line[i].data[..., t_l[mode0]:b_r[mode0], :]) + self.app_line[i] = torch.nn.Parameter( + self.app_line[i].data[..., t_l[mode0]:b_r[mode0], :]) + mode0, mode1 = self.matMode[i] + self.density_plane[i] = torch.nn.Parameter( + self.density_plane[i].data[..., t_l[mode1]:b_r[mode1], + t_l[mode0]:b_r[mode0]]) + self.app_plane[i] = torch.nn.Parameter( + self.app_plane[i].data[..., t_l[mode1]:b_r[mode1], + t_l[mode0]:b_r[mode0]]) + + if not torch.all(self.alphaMask.gridSize == self.gridSize): + t_l_r, b_r_r = t_l / (self.gridSize - 1), (b_r - 1) / ( + self.gridSize - 1) + correct_aabb = torch.zeros_like(new_aabb) + correct_aabb[0] = (1 - t_l_r) * self.aabb[0] + t_l_r * self.aabb[1] + correct_aabb[1] = (1 - b_r_r) * self.aabb[0] + b_r_r * self.aabb[1] + print('aabb', new_aabb, '\ncorrect aabb', correct_aabb) + new_aabb = correct_aabb + + newSize = b_r - t_l + self.aabb = new_aabb + self.update_stepSize((newSize[0], newSize[1], newSize[2])) + + +class TensorCP(TensorBase): + + def __init__(self, aabb, gridSize, device, **kargs): + super(TensorCP, self).__init__(aabb, gridSize, device, **kargs) + + def init_svd_volume(self, res, device): + self.density_line = self.init_one_svd(self.density_n_comp[0], + self.gridSize, 0.2, device) + self.app_line = self.init_one_svd(self.app_n_comp[0], self.gridSize, + 0.2, device) + self.basis_mat = torch.nn.Linear( + self.app_n_comp[0], self.app_dim, bias=False).to(device) + + def init_one_svd(self, n_component, gridSize, scale, device): + line_coef = [] + for i in range(len(self.vecMode)): + vec_id = self.vecMode[i] + line_coef.append( + torch.nn.Parameter(scale * 
torch.randn( + (1, n_component, gridSize[vec_id], 1)))) + return torch.nn.ParameterList(line_coef).to(device) + + def get_optparam_groups(self, + lr_init_spatialxyz=0.02, + lr_init_network=0.001): + grad_vars = [{ + 'params': self.density_line, + 'lr': lr_init_spatialxyz + }, { + 'params': self.app_line, + 'lr': lr_init_spatialxyz + }, { + 'params': self.basis_mat.parameters(), + 'lr': lr_init_network + }] + if isinstance(self.renderModule, torch.nn.Module): + grad_vars += [{ + 'params': self.renderModule.parameters(), + 'lr': lr_init_network + }] + return grad_vars + + def compute_densityfeature(self, xyz_sampled): + + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + + line_coef_point = F.grid_sample( + self.density_line[0], coordinate_line[[0]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_coef_point = line_coef_point * F.grid_sample( + self.density_line[1], coordinate_line[[1]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_coef_point = line_coef_point * F.grid_sample( + self.density_line[2], coordinate_line[[2]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + sigma_feature = torch.sum(line_coef_point, dim=0) + + return sigma_feature + + def compute_appfeature(self, xyz_sampled): + + coordinate_line = torch.stack( + (xyz_sampled[..., self.vecMode[0]], + xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., + self.vecMode[2]])) + coordinate_line = torch.stack( + (torch.zeros_like(coordinate_line), coordinate_line), + dim=-1).detach().view(3, -1, 1, 2) + + line_coef_point = F.grid_sample( + self.app_line[0], coordinate_line[[0]], + align_corners=True).view(-1, *xyz_sampled.shape[:1]) + line_coef_point = line_coef_point * F.grid_sample( + self.app_line[1], coordinate_line[[1]], align_corners=True).view( + -1, *xyz_sampled.shape[:1]) + line_coef_point = line_coef_point * F.grid_sample( + self.app_line[2], coordinate_line[[2]], align_corners=True).view( + -1, *xyz_sampled.shape[:1]) + + return self.basis_mat(line_coef_point.T) + + @torch.no_grad() + def up_sampling_Vector(self, density_line_coef, app_line_coef, res_target): + + for i in range(len(self.vecMode)): + vec_id = self.vecMode[i] + density_line_coef[i] = torch.nn.Parameter( + F.interpolate( + density_line_coef[i].data, + size=(res_target[vec_id], 1), + mode='bilinear', + align_corners=True)) + app_line_coef[i] = torch.nn.Parameter( + F.interpolate( + app_line_coef[i].data, + size=(res_target[vec_id], 1), + mode='bilinear', + align_corners=True)) + + return density_line_coef, app_line_coef + + @torch.no_grad() + def upsample_volume_grid(self, res_target): + self.density_line, self.app_line = self.up_sampling_Vector( + self.density_line, self.app_line, res_target) + + self.update_stepSize(res_target) + print(f'upsamping to {res_target}') + + @torch.no_grad() + def shrink(self, new_aabb): + print('====> shrinking ...') + xyz_min, xyz_max = new_aabb + t_l, b_r = (xyz_min - self.aabb[0]) / self.units, ( + xyz_max - self.aabb[0]) / self.units + + t_l, b_r = torch.round( + torch.round(t_l)).long(), torch.round(b_r).long() + 1 + b_r = torch.stack([b_r, self.gridSize]).amin(0) + + for i in range(len(self.vecMode)): + mode0 = self.vecMode[i] + self.density_line[i] = torch.nn.Parameter( + self.density_line[i].data[..., t_l[mode0]:b_r[mode0], :]) + self.app_line[i] = 
torch.nn.Parameter( + self.app_line[i].data[..., t_l[mode0]:b_r[mode0], :]) + + if not torch.all(self.alphaMask.gridSize == self.gridSize): + t_l_r, b_r_r = t_l / (self.gridSize - 1), (b_r - 1) / ( + self.gridSize - 1) + correct_aabb = torch.zeros_like(new_aabb) + correct_aabb[0] = (1 - t_l_r) * self.aabb[0] + t_l_r * self.aabb[1] + correct_aabb[1] = (1 - b_r_r) * self.aabb[0] + b_r_r * self.aabb[1] + print('aabb', new_aabb, '\ncorrect aabb', correct_aabb) + new_aabb = correct_aabb + + newSize = b_r - t_l + self.aabb = new_aabb + self.update_stepSize((newSize[0], newSize[1], newSize[2])) + + def density_L1(self): + total = 0 + for idx in range(len(self.density_line)): + total = total + torch.mean(torch.abs(self.density_line[idx])) + return total + + def TV_loss_density(self, reg): + total = 0 + for idx in range(len(self.density_line)): + total = total + reg(self.density_line[idx]) * 1e-3 + return total + + def TV_loss_app(self, reg): + total = 0 + for idx in range(len(self.app_line)): + total = total + reg(self.app_line[idx]) * 1e-3 + return total diff --git a/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF_VQ.py b/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF_VQ.py new file mode 100644 index 00000000..ee58de56 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/network/tensoRF_VQ.py @@ -0,0 +1,292 @@ +import os +import random +from typing import Callable, Iterator, List, Optional, Union + +import torch.nn as nn +from tqdm import tqdm + +from .tensorBase import * +from .tensoRF import TensorVMSplit +from .weighted_vq import VectorQuantize + + +class Timing: + """ + Timing environment + usage: + with Timing("message"): + your commands here + will print CUDA runtime in ms + """ + + def __init__(self, name, debug=False): + self.name = name + self.debug = debug + + def __enter__(self): + if not self.debug: + return + + self.start = torch.cuda.Event(enable_timing=True) + self.end = torch.cuda.Event(enable_timing=True) + self.start.record() + + def __exit__(self, type, value, traceback): + if not self.debug: + return + + self.end.record() + torch.cuda.synchronize() + print(self.name, 'elapsed', self.start.elapsed_time(self.end), 'ms') + + +def dec2bin(x, bits): + mask = 2**torch.arange(bits - 1, -1, -1).to(x.device, x.dtype) + return x.unsqueeze(-1).bitwise_and(mask).ne(0).float() + + +def bin2dec(b, bits): + mask = 2**torch.arange(bits - 1, -1, -1).to(b.device, b.dtype) + return torch.sum(mask * b, -1) + + +class TensorVMSplitVQ(TensorVMSplit): + + def __init__(self, aabb, gridSize, device, **kargs): + super(TensorVMSplitVQ, self).__init__(aabb, gridSize, device, **kargs) + self.codebook_size = kargs['codebook_size'] + print('codebook size: ' + str(self.codebook_size)) + self.use_cosine_sim = kargs['use_cosine_sim'] == 1 + self.codebook_dim = None if kargs['codebook_dim'] == 0 else kargs[ + 'codebook_dim'] + self.vq = nn.ModuleList([ + VectorQuantize( + dim=self.app_n_comp[0], + codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device), + VectorQuantize( + dim=self.app_n_comp[1], + codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device), + VectorQuantize( + dim=self.app_n_comp[2], + 
codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device) + ]) + self.den_vq = nn.ModuleList([ + VectorQuantize( + dim=self.density_n_comp[0], + codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device), + VectorQuantize( + dim=self.density_n_comp[1], + codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device), + VectorQuantize( + dim=self.density_n_comp[2], + codebook_size=self.codebook_size, # codebook size + decay=0.8, # specify number of quantizer + commitment_weight=1.0, + use_cosine_sim=self.use_cosine_sim, + codebook_dim=self.codebook_dim, + threshold_ema_dead_code=2.0, + ).to(self.device) + ]) + self.importance = kargs.get('importance', None) + self.plane_mask = kargs.get('plane_mask', None) + self.all_indices = kargs.get('all_indices', None) + + def extreme_load(self, ckpt): + if 'alphaMask.aabb' in ckpt.keys(): + length = np.prod(ckpt['alphaMask.shape']) + alpha_volume = torch.from_numpy( + np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape( + ckpt['alphaMask.shape'])) + self.alphaMask = AlphaGridMask( + self.device, ckpt['alphaMask.aabb'].to(self.device), + alpha_volume.float().to(self.device)) + + # 1. load non-vq part + self.density_line.load_state_dict(ckpt['density_line']) + self.app_line.load_state_dict(ckpt['app_line']) + self.basis_mat.load_state_dict(ckpt['basis_mat']) + self.renderModule.load_state_dict(ckpt['mlp']) + + # 2. 
load vq part + # load vq_mask, keep_mask + self.plane_mask = [] + for i in range(3): + mask_shape = self.app_plane[i].shape[-2:] + vq_mask = np.unpackbits( + ckpt[f'vq_mask_{i}'], + count=np.prod(mask_shape)).reshape(mask_shape).astype(bool) + keep_mask = np.unpackbits( + ckpt[f'keep_mask_{i}'], + count=np.prod(mask_shape)).reshape(mask_shape).astype(bool) + self.plane_mask.append((vq_mask, keep_mask)) + + # recover app_plane, density_plane + import math + bits = int(math.log2(self.codebook_size)) + for idx_plane in range(3): + (vq_mask, keep_mask) = self.plane_mask[idx_plane] + # load appearance keep data from quantized data + int_repr = ckpt[f'quant_keep_data_{idx_plane}.int_repr'] + scale = ckpt[f'quant_keep_data_{idx_plane}.scale'] + zero_points = ckpt[f'quant_keep_data_{idx_plane}.zero_points'] + dequant = (int_repr - zero_points) * scale + keep_data = dequant.T.reshape( + *self.app_plane[idx_plane][:, :, keep_mask].shape) + self.app_plane[idx_plane].data[:, :, keep_mask] = keep_data + + # load appearance vq data from codebook + codebook = ckpt[f'codebook_{idx_plane}'].float() # + vq_count = int(vq_mask.sum()) + unpack1 = np.unpackbits( + ckpt[f'vq_indice_{idx_plane}'], count=vq_count * bits) + unpack2 = bin2dec( + torch.from_numpy(unpack1).reshape(vq_count, bits).long(), + bits=bits) + vq_data = codebook[0, unpack2, :] # N*len + vq_data = vq_data.T.reshape( + *(self.app_plane[idx_plane][:, :, vq_mask].shape)) + self.app_plane[idx_plane].data[:, :, vq_mask] = vq_data + + for idx_plane in range(3): + (vq_mask, keep_mask) = self.plane_mask[idx_plane] + # load density keep data from quantized data + int_repr = ckpt[f'quant_den_data_{idx_plane}.int_repr'] + scale = ckpt[f'quant_den_data_{idx_plane}.scale'] + zero_points = ckpt[f'quant_den_data_{idx_plane}.zero_points'] + dequant = (int_repr - zero_points) * scale + keep_data = dequant.T.reshape( + *self.density_plane[idx_plane][:, :, keep_mask].shape) + self.density_plane[idx_plane].data[:, :, keep_mask] = keep_data + + # load density vq data from codebook + codebook = ckpt[f'codebook_den_{idx_plane}'].float() # + vq_count = int(vq_mask.sum()) + unpack1 = np.unpackbits( + ckpt[f'den_vq_indice_{idx_plane}'], count=vq_count * bits) + unpack2 = bin2dec( + torch.from_numpy(unpack1).reshape(vq_count, bits).long(), + bits=bits) + vq_data = codebook[0, unpack2, :] # N*len + vq_data = vq_data.T.reshape( + *(self.density_plane[idx_plane][:, :, vq_mask].shape)) + self.density_plane[idx_plane].data[:, :, vq_mask] = vq_data + + def forward(self, + rays_chunk, + white_bg=True, + is_train=False, + ndc_ray=False, + N_samples=-1, + isvq=False): + # sample points + viewdirs = rays_chunk[:, 3:6] + if ndc_ray: + xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc( + rays_chunk[:, :3], + viewdirs, + is_train=is_train, + N_samples=N_samples) + dists = torch.cat( + (z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like( + z_vals[:, :1])), + dim=-1) + rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True) + dists = dists * rays_norm + viewdirs = viewdirs / rays_norm + else: + xyz_sampled, z_vals, ray_valid = self.sample_ray( + rays_chunk[:, :3], + viewdirs, + is_train=is_train, + N_samples=N_samples) + dists = torch.cat( + (z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like( + z_vals[:, :1])), + dim=-1) + viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape) + + if self.alphaMask is not None: + alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid]) + alpha_mask = alphas > 0 + ray_invalid = ~ray_valid + ray_invalid[ray_valid] |= (~alpha_mask) + ray_valid 
= ~ray_invalid + + sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device) + rgb = torch.zeros((*xyz_sampled.shape[:2], 3), + device=xyz_sampled.device) + + if ray_valid.any(): + xyz_sampled = self.normalize_coord(xyz_sampled) + sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid]) + + validsigma = self.feature2density(sigma_feature) + sigma[ray_valid] = validsigma + + alpha, weight, bg_weight = raw2alpha(sigma, + dists * self.distance_scale) + + app_mask = weight > self.rayMarch_weight_thres + + if app_mask.any(): + app_features = self.compute_appfeature(xyz_sampled[app_mask]) + valid_rgbs = self.renderModule(xyz_sampled[app_mask], + viewdirs[app_mask], app_features) + rgb[app_mask] = valid_rgbs + + acc_map = torch.sum(weight, -1) + rgb_map = torch.sum(weight[..., None] * rgb, -2) + + if white_bg or (is_train and torch.rand((1, )) < 0.5): + rgb_map = rgb_map + (1. - acc_map[..., None]) + + rgb_map = rgb_map.clamp(0, 1) + + with torch.no_grad(): + depth_map = torch.sum(weight * z_vals, -1) + depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1] + + return rgb_map, depth_map + + +def getsize(compressed_file, tag='MB'): + size = os.path.getsize(compressed_file) + if tag == 'B': + pass + elif tag == 'KB': + size = size / 1024 + elif tag == 'MB': + size = size / 1024 / 1024 + elif tag == 'GB': + size = size / 1024 / 1024 / 1024 + return f'{size} {tag}' diff --git a/modelscope/models/cv/nerf_recon_vq_compression/network/tensorBase.py b/modelscope/models/cv/nerf_recon_vq_compression/network/tensorBase.py new file mode 100644 index 00000000..2e87227a --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/network/tensorBase.py @@ -0,0 +1,526 @@ +import time + +import numpy as np +import torch +import torch.nn +import torch.nn.functional as F + + +def positional_encoding(positions, freqs): + + freq_bands = (2**torch.arange(freqs).float()).to(positions.device) # (F,) + pts = (positions[..., None] * freq_bands).reshape( + positions.shape[:-1] + (freqs * positions.shape[-1], )) # (..., DF) + pts = torch.cat([torch.sin(pts), torch.cos(pts)], dim=-1) + return pts + + +def raw2alpha(sigma, dist): + # sigma, dist [N_rays, N_samples] + alpha = 1. - torch.exp(-sigma * dist) + + T = torch.cumprod( + torch.cat([ + torch.ones(alpha.shape[0], 1).to(alpha.device), 1. 
- alpha + 1e-10 + ], -1), -1) + + weights = alpha * T[:, :-1] # [N_rays, N_samples] + return alpha, weights, T[:, -1:] + + +def RGBRender(xyz_sampled, viewdirs, features): + + rgb = features + return rgb + + +class AlphaGridMask(torch.nn.Module): + + def __init__(self, device, aabb, alpha_volume): + super(AlphaGridMask, self).__init__() + self.device = device + + self.aabb = aabb.to(self.device) + self.aabbSize = self.aabb[1] - self.aabb[0] + self.invgridSize = 1.0 / self.aabbSize * 2 + self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:]) + self.gridSize = torch.LongTensor([ + alpha_volume.shape[-1], alpha_volume.shape[-2], + alpha_volume.shape[-3] + ]).to(self.device) + + def sample_alpha(self, xyz_sampled): + xyz_sampled = self.normalize_coord(xyz_sampled) + alpha_vals = F.grid_sample( + self.alpha_volume, + xyz_sampled.view(1, -1, 1, 1, 3), + align_corners=True).view(-1) + + return alpha_vals + + def normalize_coord(self, xyz_sampled): + return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1 + + +class MLPRender_Fea(torch.nn.Module): + + def __init__(self, inChanel, viewpe=6, feape=6, featureC=128): + super(MLPRender_Fea, self).__init__() + + self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel + self.viewpe = viewpe + self.feape = feape + layer1 = torch.nn.Linear(self.in_mlpC, featureC) + layer2 = torch.nn.Linear(featureC, featureC) + layer3 = torch.nn.Linear(featureC, 3) + + self.mlp = torch.nn.Sequential(layer1, + torch.nn.ReLU(inplace=True), layer2, + torch.nn.ReLU(inplace=True), layer3) + torch.nn.init.constant_(self.mlp[-1].bias, 0) + + def forward(self, pts, viewdirs, features): + indata = [features, viewdirs] + if self.feape > 0: + indata += [positional_encoding(features, self.feape)] + if self.viewpe > 0: + indata += [positional_encoding(viewdirs, self.viewpe)] + mlp_in = torch.cat(indata, dim=-1) + rgb = self.mlp(mlp_in) + rgb = torch.sigmoid(rgb) + + return rgb + + +class MLPRender_PE(torch.nn.Module): + + def __init__(self, inChanel, viewpe=6, pospe=6, featureC=128): + super(MLPRender_PE, self).__init__() + + self.in_mlpC = (3 + 2 * viewpe * 3) + (3 + 2 * pospe * 3) + inChanel # + self.viewpe = viewpe + self.pospe = pospe + layer1 = torch.nn.Linear(self.in_mlpC, featureC) + layer2 = torch.nn.Linear(featureC, featureC) + layer3 = torch.nn.Linear(featureC, 3) + + self.mlp = torch.nn.Sequential(layer1, + torch.nn.ReLU(inplace=True), layer2, + torch.nn.ReLU(inplace=True), layer3) + torch.nn.init.constant_(self.mlp[-1].bias, 0) + + def forward(self, pts, viewdirs, features): + indata = [features, viewdirs] + if self.pospe > 0: + indata += [positional_encoding(pts, self.pospe)] + if self.viewpe > 0: + indata += [positional_encoding(viewdirs, self.viewpe)] + mlp_in = torch.cat(indata, dim=-1) + rgb = self.mlp(mlp_in) + rgb = torch.sigmoid(rgb) + + return rgb + + +class MLPRender(torch.nn.Module): + + def __init__(self, inChanel, viewpe=6, featureC=128): + super(MLPRender, self).__init__() + + self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel + self.viewpe = viewpe + + layer1 = torch.nn.Linear(self.in_mlpC, featureC) + layer2 = torch.nn.Linear(featureC, featureC) + layer3 = torch.nn.Linear(featureC, 3) + + self.mlp = torch.nn.Sequential(layer1, + torch.nn.ReLU(inplace=True), layer2, + torch.nn.ReLU(inplace=True), layer3) + torch.nn.init.constant_(self.mlp[-1].bias, 0) + + def forward(self, pts, viewdirs, features): + indata = [features, viewdirs] + if self.viewpe > 0: + indata += [positional_encoding(viewdirs, self.viewpe)] + mlp_in = 
torch.cat(indata, dim=-1) + rgb = self.mlp(mlp_in) + rgb = torch.sigmoid(rgb) + + return rgb + + +class TensorBase(torch.nn.Module): + + def __init__(self, + aabb, + gridSize, + device, + density_n_comp=8, + appearance_n_comp=24, + app_dim=27, + shadingMode='MLP_PE', + alphaMask=None, + near_far=[2.0, 6.0], + density_shift=-10, + alphaMask_thres=0.001, + distance_scale=25, + rayMarch_weight_thres=0.0001, + pos_pe=6, + view_pe=6, + fea_pe=6, + featureC=128, + step_ratio=2.0, + fea2denseAct='softplus', + **kargs): + super(TensorBase, self).__init__() + + self.density_n_comp = density_n_comp + self.app_n_comp = appearance_n_comp + self.app_dim = app_dim + self.aabb = aabb + self.alphaMask = alphaMask + self.device = device + + self.density_shift = density_shift + self.alphaMask_thres = alphaMask_thres + self.distance_scale = distance_scale + self.rayMarch_weight_thres = rayMarch_weight_thres + self.fea2denseAct = fea2denseAct + + self.near_far = near_far + self.step_ratio = step_ratio + + self.update_stepSize(gridSize) + + self.matMode = [[0, 1], [0, 2], [1, 2]] + self.vecMode = [2, 1, 0] + self.comp_w = [1, 1, 1] + + self.init_svd_volume(gridSize[0], device) + + self.shadingMode, self.pos_pe, self.view_pe = shadingMode, pos_pe, view_pe + self.fea_pe, self.featureC = fea_pe, featureC + self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, + device) + + def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, + device): + if shadingMode == 'MLP_PE': + self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, + featureC).to(device) + elif shadingMode == 'MLP_Fea': + self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, + featureC).to(device) + elif shadingMode == 'MLP': + self.renderModule = MLPRender(self.app_dim, view_pe, + featureC).to(device) + elif shadingMode == 'RGB': + assert self.app_dim == 3 + self.renderModule = RGBRender + else: + print('Unrecognized shading module') + exit() + print(self.renderModule) + + def update_stepSize(self, gridSize): + self.aabbSize = self.aabb[1] - self.aabb[0] + self.invaabbSize = 2.0 / self.aabbSize + self.gridSize = torch.LongTensor(gridSize).to(self.device) + self.units = self.aabbSize / (self.gridSize - 1) + self.stepSize = torch.mean(self.units) * self.step_ratio + self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize))) + self.nSamples = int((self.aabbDiag / self.stepSize).item()) + 1 + + def init_svd_volume(self, res, device): + pass + + def compute_features(self, xyz_sampled): + pass + + def compute_densityfeature(self, xyz_sampled): + pass + + def compute_appfeature(self, xyz_sampled): + pass + + def normalize_coord(self, xyz_sampled): + return (xyz_sampled - self.aabb[0]) * self.invaabbSize - 1 + + def get_optparam_groups(self, lr_init_spatial=0.02, lr_init_network=0.001): + pass + + def get_kwargs(self): + return { + 'aabb': self.aabb, + 'gridSize': self.gridSize.tolist(), + 'density_n_comp': self.density_n_comp, + 'appearance_n_comp': self.app_n_comp, + 'app_dim': self.app_dim, + 'density_shift': self.density_shift, + 'alphaMask_thres': self.alphaMask_thres, + 'distance_scale': self.distance_scale, + 'rayMarch_weight_thres': self.rayMarch_weight_thres, + 'fea2denseAct': self.fea2denseAct, + 'near_far': self.near_far, + 'step_ratio': self.step_ratio, + 'shadingMode': self.shadingMode, + 'pos_pe': self.pos_pe, + 'view_pe': self.view_pe, + 'fea_pe': self.fea_pe, + 'featureC': self.featureC + } + + def save(self, path): + kwargs = self.get_kwargs() + ckpt = {'kwargs': kwargs, 
'state_dict': self.state_dict()} + if self.alphaMask is not None: + alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy() + ckpt.update({'alphaMask.shape': alpha_volume.shape}) + ckpt.update( + {'alphaMask.mask': np.packbits(alpha_volume.reshape(-1))}) + ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()}) + torch.save(ckpt, path) + + def load(self, ckpt): + if 'alphaMask.aabb' in ckpt.keys(): + length = np.prod(ckpt['alphaMask.shape']) + alpha_volume = torch.from_numpy( + np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape( + ckpt['alphaMask.shape'])) + self.alphaMask = AlphaGridMask( + self.device, ckpt['alphaMask.aabb'].to(self.device), + alpha_volume.float().to(self.device)) + self.load_state_dict(ckpt['state_dict']) + + def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1): + N_samples = N_samples if N_samples > 0 else self.nSamples + near, far = self.near_far + interpx = torch.linspace(near, far, N_samples).unsqueeze(0).to(rays_o) + if is_train: + interpx += torch.rand_like(interpx).to(rays_o) * ( + (far - near) / N_samples) + + rays_pts = rays_o[..., + None, :] + rays_d[..., None, :] * interpx[..., None] + mask_outbbox = ((self.aabb[0] > rays_pts) + | (rays_pts > self.aabb[1])).any(dim=-1) + return rays_pts, interpx, ~mask_outbbox + + def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1): + N_samples = N_samples if N_samples > 0 else self.nSamples + stepsize = self.stepSize + near, far = self.near_far + vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d) + rate_a = (self.aabb[1] - rays_o) / vec + rate_b = (self.aabb[0] - rays_o) / vec + t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far) + + rng = torch.arange(N_samples)[None].float() + if is_train: + rng = rng.repeat(rays_d.shape[-2], 1) + rng += torch.rand_like(rng[:, [0]]) + step = stepsize * rng.to(rays_o.device) + interpx = (t_min[..., None] + step) + + rays_pts = rays_o[..., + None, :] + rays_d[..., None, :] * interpx[..., None] + mask_outbbox = ((self.aabb[0] > rays_pts) + | (rays_pts > self.aabb[1])).any(dim=-1) + + return rays_pts, interpx, ~mask_outbbox + + def shrink(self, new_aabb, voxel_size): + pass + + @torch.no_grad() + def getDenseAlpha(self, gridSize=None): + gridSize = self.gridSize if gridSize is None else gridSize + + samples = torch.stack( + torch.meshgrid( + torch.linspace(0, 1, gridSize[0]), + torch.linspace(0, 1, gridSize[1]), + torch.linspace(0, 1, gridSize[2]), + ), -1).to(self.device) + dense_xyz = self.aabb[0] * (1 - samples) + self.aabb[1] * samples + + alpha = torch.zeros_like(dense_xyz[..., 0]) + for i in range(gridSize[0]): + alpha[i] = self.compute_alpha(dense_xyz[i].view(-1, 3), + self.stepSize).view( + (gridSize[1], gridSize[2])) + return alpha, dense_xyz + + @torch.no_grad() + def updateAlphaMask(self, gridSize=(200, 200, 200)): + + alpha, dense_xyz = self.getDenseAlpha(gridSize) + dense_xyz = dense_xyz.transpose(0, 2).contiguous() + alpha = alpha.clamp(0, 1).transpose(0, 2).contiguous()[None, None] + total_voxels = gridSize[0] * gridSize[1] * gridSize[2] + + ks = 3 + alpha = F.max_pool3d( + alpha, kernel_size=ks, padding=ks // 2, + stride=1).view(gridSize[::-1]) + alpha[alpha >= self.alphaMask_thres] = 1 + alpha[alpha < self.alphaMask_thres] = 0 + + self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha) + + valid_xyz = dense_xyz[alpha > 0.5] + + xyz_min = valid_xyz.amin(0) + xyz_max = valid_xyz.amax(0) + + new_aabb = torch.stack((xyz_min, xyz_max)) + + total = torch.sum(alpha) + print(f'bbox: {xyz_min, 
xyz_max} alpha rest %%%f' % + (total / total_voxels * 100)) + return new_aabb + + @torch.no_grad() + def filtering_rays(self, + all_rays, + all_rgbs, + N_samples=256, + chunk=10240 * 5, + bbox_only=False): + print('========> filtering rays ...') + tt = time.time() + + N = torch.tensor(all_rays.shape[:-1]).prod() + + mask_filtered = [] + idx_chunks = torch.split(torch.arange(N), chunk) + for idx_chunk in idx_chunks: + rays_chunk = all_rays[idx_chunk].to(self.device) + + rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6] + if bbox_only: + vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), + rays_d) + rate_a = (self.aabb[1] - rays_o) / vec + rate_b = (self.aabb[0] - rays_o) / vec + t_min = torch.minimum(rate_a, rate_b).amax(-1) + t_max = torch.maximum(rate_a, rate_b).amin(-1) + mask_inbbox = t_max > t_min + + else: + xyz_sampled, _, _ = self.sample_ray( + rays_o, rays_d, N_samples=N_samples, is_train=False) + mask_inbbox = (self.alphaMask.sample_alpha(xyz_sampled).view( + xyz_sampled.shape[:-1]) > 0).any(-1) + + mask_filtered.append(mask_inbbox.cpu()) + + mask_filtered = torch.cat(mask_filtered).view(all_rgbs.shape[:-1]) + + print(f'Ray filtering done! takes {time.time()-tt} s.' + f' ray mask ratio: {torch.sum(mask_filtered) / N}') + return all_rays[mask_filtered], all_rgbs[mask_filtered] + + def feature2density(self, density_features): + if self.fea2denseAct == 'softplus': + return F.softplus(density_features + self.density_shift) + elif self.fea2denseAct == 'relu': + return F.relu(density_features) + + def compute_alpha(self, xyz_locs, length=1): + + if self.alphaMask is not None: + alphas = self.alphaMask.sample_alpha(xyz_locs) + alpha_mask = alphas > 0 + else: + alpha_mask = torch.ones_like(xyz_locs[:, 0], dtype=bool) + + sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device) + + if alpha_mask.any(): + xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask]) + sigma_feature = self.compute_densityfeature(xyz_sampled) + validsigma = self.feature2density(sigma_feature) + sigma[alpha_mask] = validsigma + + alpha = 1 - torch.exp(-sigma * length).view(xyz_locs.shape[:-1]) + + return alpha + + def forward(self, + rays_chunk, + white_bg=True, + is_train=False, + ndc_ray=False, + N_samples=-1): + + # sample points + viewdirs = rays_chunk[:, 3:6] + if ndc_ray: + xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc( + rays_chunk[:, :3], + viewdirs, + is_train=is_train, + N_samples=N_samples) + dists = torch.cat( + (z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like( + z_vals[:, :1])), + dim=-1) + rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True) + dists = dists * rays_norm + viewdirs = viewdirs / rays_norm + else: + xyz_sampled, z_vals, ray_valid = self.sample_ray( + rays_chunk[:, :3], + viewdirs, + is_train=is_train, + N_samples=N_samples) + dists = torch.cat( + (z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like( + z_vals[:, :1])), + dim=-1) + viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape) + + if self.alphaMask is not None: + alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid]) + alpha_mask = alphas > 0 + ray_invalid = ~ray_valid + ray_invalid[ray_valid] |= (~alpha_mask) + ray_valid = ~ray_invalid + + sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device) + rgb = torch.zeros((*xyz_sampled.shape[:2], 3), + device=xyz_sampled.device) + + if ray_valid.any(): + xyz_sampled = self.normalize_coord(xyz_sampled) + sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid]) + + validsigma = self.feature2density(sigma_feature) + 
sigma[ray_valid] = validsigma + + alpha, weight, bg_weight = raw2alpha(sigma, + dists * self.distance_scale) + + app_mask = weight > self.rayMarch_weight_thres + + if app_mask.any(): + app_features = self.compute_appfeature(xyz_sampled[app_mask]) + valid_rgbs = self.renderModule(xyz_sampled[app_mask], + viewdirs[app_mask], app_features) + rgb[app_mask] = valid_rgbs + + acc_map = torch.sum(weight, -1) + rgb_map = torch.sum(weight[..., None] * rgb, -2) + + if white_bg or (is_train and torch.rand((1, )) < 0.5): + rgb_map = rgb_map + (1. - acc_map[..., None]) + + rgb_map = rgb_map.clamp(0, 1) + + with torch.no_grad(): + depth_map = torch.sum(weight * z_vals, -1) + depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1] + + return rgb_map, depth_map diff --git a/modelscope/models/cv/nerf_recon_vq_compression/network/weighted_vq.py b/modelscope/models/cv/nerf_recon_vq_compression/network/weighted_vq.py new file mode 100644 index 00000000..eea10f18 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/network/weighted_vq.py @@ -0,0 +1,504 @@ +from contextlib import contextmanager + +import torch +import torch.distributed as distributed +import torch.nn.functional as F +from einops import rearrange, repeat +from torch import einsum, nn +from torch.cuda.amp import autocast + + +def exists(val): + return val is not None + + +def default(val, d): + return val if exists(val) else d + + +def noop(*args, **kwargs): + pass + + +def l2norm(t): + return F.normalize(t, p=2, dim=-1) + + +def log(t, eps=1e-20): + return torch.log(t.clamp(min=eps)) + + +def uniform_init(*shape): + t = torch.empty(shape) + nn.init.kaiming_uniform_(t) + return t + + +def gumbel_noise(t): + noise = torch.zeros_like(t).uniform_(0, 1) + return -log(-log(noise)) + + +def gumbel_sample(t, temperature=1., dim=-1): + if temperature == 0: + return t.argmax(dim=dim) + + return ((t / temperature) + gumbel_noise(t)).argmax(dim=dim) + + +def ema_inplace(moving_avg, new, decay): + moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) + + +def laplace_smoothing(x, n_categories, eps=1e-5): + return (x + eps) / (x.sum() + n_categories * eps) + + +def sample_vectors(samples, num): + num_samples, device = samples.shape[0], samples.device + if num_samples >= num: + indices = torch.randperm(num_samples, device=device)[:num] + else: + indices = torch.randint(0, num_samples, (num, ), device=device) + + return samples[indices] + + +def batched_sample_vectors(samples, num): + return torch.stack( + [sample_vectors(sample, num) for sample in samples.unbind(dim=0)], + dim=0) + + +def pad_shape(shape, size, dim=0): + return [size if i == dim else s for i, s in enumerate(shape)] + + +def sample_multinomial(total_count, probs): + device = probs.device + probs = probs.cpu() + + total_count = probs.new_full((), total_count) + remainder = probs.new_ones(()) + sample = torch.empty_like(probs, dtype=torch.long) + + for i, p in enumerate(probs): + s = torch.binomial(total_count, p / remainder) + sample[i] = s + total_count -= s + remainder -= p + + return sample.to(device) + + +def all_gather_sizes(x, dim): + size = torch.tensor(x.shape[dim], dtype=torch.long, device=x.device) + all_sizes = [ + torch.empty_like(size) for _ in range(distributed.get_world_size()) + ] + distributed.all_gather(all_sizes, size) + + return torch.stack(all_sizes) + + +def all_gather_variably_sized(x, sizes, dim=0): + rank = distributed.get_rank() + all_x = [] + + for i, size in enumerate(sizes): + t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim)) + 
distributed.broadcast(t, src=i, async_op=True) + all_x.append(t) + + distributed.barrier() + return all_x + + +def sample_vectors_distributed(local_samples, num): + rank = distributed.get_rank() + all_num_samples = all_gather_sizes(local_samples, dim=0) + + if rank == 0: + samples_per_rank = sample_multinomial( + num, all_num_samples / all_num_samples.sum()) + else: + samples_per_rank = torch.empty_like(all_num_samples) + + distributed.broadcast(samples_per_rank, src=0) + samples_per_rank = samples_per_rank.tolist() + + local_samples = batched_sample_vectors(local_samples, + samples_per_rank[rank]) + all_samples = all_gather_variably_sized( + local_samples, samples_per_rank, dim=0) + return torch.cat(all_samples, dim=0) + + +def batched_bincount(x, *, minlength): + batch, dtype, device = x.shape[0], x.dtype, x.device + target = torch.zeros(batch, minlength, dtype=dtype, device=device) + values = torch.ones_like(x) + target.scatter_add_(-1, x, values) + return target + + +def kmeans(samples, + num_clusters, + num_iters=10, + use_cosine_sim=False, + sample_fn=batched_sample_vectors, + all_reduce_fn=noop): + num_codebooks, dim, dtype = samples.shape[0], samples.shape[ + -1], samples.dtype + + means = sample_fn(samples, num_clusters) + + for _ in range(num_iters): + if use_cosine_sim: + dists = samples @ rearrange(means, 'h n d -> h d n') + else: + dists = -torch.cdist(samples, means, p=2) + + buckets = torch.argmax(dists, dim=-1) + bins = batched_bincount(buckets, minlength=num_clusters) + all_reduce_fn(bins) + + zero_mask = bins == 0 + bins_min_clamped = bins.masked_fill(zero_mask, 1) + + new_means = buckets.new_zeros( + num_codebooks, num_clusters, dim, dtype=dtype) + + new_means.scatter_add_(1, repeat(buckets, 'h n -> h n d', d=dim), + samples) + new_means = new_means / rearrange(bins_min_clamped, '... -> ... 1') + all_reduce_fn(new_means) + + if use_cosine_sim: + new_means = l2norm(new_means) + + means = torch.where( + rearrange(zero_mask, '... -> ... 
1'), means, new_means) + + return means, bins + + +def batched_embedding(indices, embeds): + batch, dim = indices.shape[1], embeds.shape[-1] + indices = repeat(indices, 'h b n -> h b n d', d=dim) + embeds = repeat(embeds, 'h c d -> h b c d', b=batch) + return embeds.gather(2, indices) + + +# regularization losses + + +def orthogonal_loss_fn(t): + # eq (2) from https://arxiv.org/abs/2112.00384 + h, n = t.shape[:2] + normed_codes = l2norm(t) + identity = repeat(torch.eye(n, device=t.device), 'i j -> h i j', h=h) + cosine_sim = einsum('h i d, h j d -> h i j', normed_codes, normed_codes) + return ((cosine_sim - identity)**2).sum() / (h * n**2) + + +# distance types + + +class EuclideanCodebook(nn.Module): + + def __init__(self, + dim, + codebook_size, + num_codebooks=1, + kmeans_init=False, + kmeans_iters=10, + decay=0.8, + eps=1e-5, + threshold_ema_dead_code=2, + use_ddp=False, + learnable_codebook=False, + sample_codebook_temp=0): + super().__init__() + self.decay = decay + init_fn = uniform_init if not kmeans_init else torch.zeros + embed = init_fn(num_codebooks, codebook_size, dim) + + self.codebook_size = codebook_size + self.num_codebooks = num_codebooks + + self.kmeans_iters = kmeans_iters + self.eps = eps + self.threshold_ema_dead_code = threshold_ema_dead_code + self.sample_codebook_temp = sample_codebook_temp + + self.sample_fn = sample_vectors_distributed if use_ddp else batched_sample_vectors + self.all_reduce_fn = distributed.all_reduce if use_ddp else noop + + self.register_buffer('initted', torch.Tensor([not kmeans_init])) + self.register_buffer('cluster_size', + torch.zeros(num_codebooks, codebook_size)) + self.register_buffer('embed_avg', embed.clone()) + + self.learnable_codebook = learnable_codebook + if learnable_codebook: + self.embed = nn.Parameter(embed) + else: + self.register_buffer('embed', embed) + + @torch.jit.ignore + def init_embed_(self, data): + if self.initted: + return + + embed, cluster_size = kmeans( + data, + self.codebook_size, + self.kmeans_iters, + sample_fn=self.sample_fn, + all_reduce_fn=self.all_reduce_fn) + + self.embed.data.copy_(embed) + self.embed_avg.data.copy_(embed.clone()) + self.cluster_size.data.copy_(cluster_size) + self.initted.data.copy_(torch.Tensor([True])) + + def replace(self, batch_samples, batch_mask): + batch_samples = l2norm(batch_samples) + + for ind, (samples, mask) in enumerate( + zip(batch_samples.unbind(dim=0), batch_mask.unbind(dim=0))): + if not torch.any(mask): + continue + + sampled = self.sample_fn( + rearrange(samples, '... -> 1 ...'), + mask.sum().item()) + self.embed.data[ind][mask] = rearrange(sampled, '1 ... -> ...') + + def expire_codes_(self, batch_samples, verbose): + if self.threshold_ema_dead_code == 0: + return + + expired_codes = self.cluster_size < self.threshold_ema_dead_code + + if not torch.any(expired_codes): + return + if verbose: + print(f'expire code count: {expired_codes.sum()}') + batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d') + self.replace(batch_samples, batch_mask=expired_codes) + + @autocast(enabled=False) + def forward(self, x, weight=None, verbose=False): + if weight is not None: + weight = weight * weight.numel() / weight.sum() + needs_codebook_dim = x.ndim < 4 + + x = x.float() + + if needs_codebook_dim: + x = rearrange(x, '... -> 1 ...') + + shape, dtype = x.shape, x.dtype + flatten = rearrange(x, 'h ... d -> h (...) 
d') + + self.init_embed_(flatten) + + embed = self.embed if not self.learnable_codebook else self.embed.detach( + ) + + dist = -torch.cdist(flatten, embed, p=2) + + embed_ind = gumbel_sample( + dist, dim=-1, temperature=self.sample_codebook_temp) + embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype) + embed_ind = embed_ind.view(*shape[:-1]) + + quantize = batched_embedding(embed_ind, self.embed) + + if self.training: + + if weight is not None: + cluster_size = (embed_onehot * weight).sum(dim=1) + else: + cluster_size = embed_onehot.sum(dim=1) + self.all_reduce_fn(cluster_size) + ema_inplace(self.cluster_size, cluster_size, self.decay) + + if weight is not None: + + embed_sum = einsum('h n d, h n c -> h c d', flatten * weight, + embed_onehot) + else: + embed_sum = einsum('h n d, h n c -> h c d', flatten, + embed_onehot) + self.all_reduce_fn(embed_sum) + cluster_size = laplace_smoothing( + self.cluster_size, self.codebook_size, + self.eps) * self.cluster_size.sum() + + # embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1') + # print("embed_normalized: ",embed_normalized, + # "\n embed_avg: ",self.embed_avg, + # "\n cluster_size: ", cluster_size) + # self.embed.data.copy_(embed_normalized) + # print("before ema: self.embed:", self.embed, "embed_sum: ", embed_sum) + ema_inplace(self.embed, + embed_sum / rearrange(cluster_size, '... -> ... 1'), + self.decay) + # print("after ema: self.embed:", self.embed, "embed_sum: ", embed_sum) + self.expire_codes_(x, verbose) + # print("after expire: self.embed:", self.embed, "embed_sum: ", embed_sum) + + if needs_codebook_dim: + quantize, embed_ind = map(lambda t: rearrange(t, '1 ... -> ...'), + (quantize, embed_ind)) + + return quantize, embed_ind + + +# main class + + +class VectorQuantize(nn.Module): + + def __init__(self, + dim, + codebook_size, + codebook_dim=None, + heads=1, + separate_codebook_per_head=False, + decay=0.8, + eps=1e-5, + kmeans_init=False, + kmeans_iters=10, + use_cosine_sim=False, + threshold_ema_dead_code=0, + channel_last=True, + accept_image_fmap=False, + commitment_weight=1., + orthogonal_reg_weight=0., + orthogonal_reg_active_codes_only=False, + orthogonal_reg_max_codes=None, + sample_codebook_temp=0., + sync_codebook=False): + super().__init__() + self.heads = heads + self.separate_codebook_per_head = separate_codebook_per_head + + codebook_dim = default(codebook_dim, dim) + codebook_input_dim = codebook_dim * heads + + requires_projection = codebook_input_dim != dim + self.project_in = nn.Linear( + dim, codebook_input_dim) if requires_projection else nn.Identity() + self.project_out = nn.Linear( + codebook_input_dim, dim) if requires_projection else nn.Identity() + + self.eps = eps + self.commitment_weight = commitment_weight + + has_codebook_orthogonal_loss = orthogonal_reg_weight > 0 + self.orthogonal_reg_weight = orthogonal_reg_weight + self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only + self.orthogonal_reg_max_codes = orthogonal_reg_max_codes + + codebook_class = EuclideanCodebook + + self._codebook = codebook_class( + dim=codebook_dim, + num_codebooks=heads if separate_codebook_per_head else 1, + codebook_size=codebook_size, + kmeans_init=kmeans_init, + kmeans_iters=kmeans_iters, + decay=decay, + eps=eps, + threshold_ema_dead_code=threshold_ema_dead_code, + use_ddp=sync_codebook, + learnable_codebook=has_codebook_orthogonal_loss, + sample_codebook_temp=sample_codebook_temp) + + self.codebook_size = codebook_size + + self.accept_image_fmap = accept_image_fmap + 
self.channel_last = channel_last + + @property + def codebook(self): + codebook = self._codebook.embed + if self.separate_codebook_per_head: + return codebook + + return rearrange(codebook, '1 ... -> ...') + + def forward(self, x, weight=None, verbose=False): + device, heads, is_multiheaded = x.device, self.heads, self.heads > 1 + + need_transpose = not self.channel_last and not self.accept_image_fmap + + if self.accept_image_fmap: + height, width = x.shape[-2:] + x = rearrange(x, 'b c h w -> b (h w) c') + + if need_transpose: + x = rearrange(x, 'b d n -> b n d') + + x = self.project_in(x) + + if is_multiheaded: + ein_rhs_eq = 'h b n d' if self.separate_codebook_per_head else '1 (b h) n d' + x = rearrange(x, f'b n (h d) -> {ein_rhs_eq}', h=heads) + + quantize, embed_ind = self._codebook(x, weight, verbose) + + if self.training: + quantize = x + (quantize - x).detach() + + loss = torch.tensor([0.], device=device, requires_grad=self.training) + + if self.training: + if self.commitment_weight > 0: + commit_loss = F.mse_loss(quantize.detach(), x) + loss = loss + commit_loss * self.commitment_weight + + if self.orthogonal_reg_weight > 0: + codebook = self._codebook.embed + + if self.orthogonal_reg_active_codes_only: + # only calculate orthogonal loss for the activated codes for this batch + unique_code_ids = torch.unique(embed_ind) + codebook = codebook[unique_code_ids] + + num_codes = codebook.shape[0] + if exists(self.orthogonal_reg_max_codes + ) and num_codes > self.orthogonal_reg_max_codes: + rand_ids = torch.randperm( + num_codes, + device=device)[:self.orthogonal_reg_max_codes] + codebook = codebook[rand_ids] + + orthogonal_reg_loss = orthogonal_loss_fn(codebook) + loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight + + if is_multiheaded: + if self.separate_codebook_per_head: + quantize = rearrange(quantize, 'h b n d -> b n (h d)', h=heads) + embed_ind = rearrange(embed_ind, 'h b n -> b n h', h=heads) + else: + quantize = rearrange( + quantize, '1 (b h) n d -> b n (h d)', h=heads) + embed_ind = rearrange(embed_ind, '1 (b h) n -> b n h', h=heads) + + quantize = self.project_out(quantize) + + if need_transpose: + quantize = rearrange(quantize, 'b n d -> b d n') + + if self.accept_image_fmap: + quantize = rearrange( + quantize, 'b (h w) c -> b c h w', h=height, w=width) + embed_ind = rearrange( + embed_ind, 'b (h w) ... 
-> b h w ...', h=height, w=width) + + return quantize, embed_ind, loss diff --git a/modelscope/models/cv/nerf_recon_vq_compression/renderer.py b/modelscope/models/cv/nerf_recon_vq_compression/renderer.py new file mode 100644 index 00000000..03a96870 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/renderer.py @@ -0,0 +1,211 @@ +import os +import sys + +import imageio +import numpy as np +import torch +from tqdm.auto import tqdm + +from .dataloader.ray_utils import get_rays, ndc_rays_blender +from .network.tensoRF import (AlphaGridMask, TensorCP, TensorVM, TensorVMSplit, + raw2alpha) +from .network.tensoRF_VQ import TensorVMSplitVQ +from .utils import rgb_lpips, rgb_ssim, visualize_depth_numpy + + +def OctreeRender_trilinear_fast(rays, + tensorf, + chunk=4096, + N_samples=-1, + ndc_ray=False, + white_bg=True, + is_train=False, + device='cuda', + **kwargs): + + rgbs, depth_maps = [], [] + N_rays_all = rays.shape[0] + for chunk_idx in range(N_rays_all // chunk + int(N_rays_all % chunk > 0)): + rays_chunk = rays[chunk_idx * chunk:(chunk_idx + 1) * chunk].to(device) + + rgb_map, depth_map = tensorf( + rays_chunk, + is_train=is_train, + white_bg=white_bg, + ndc_ray=ndc_ray, + N_samples=N_samples, + **kwargs) + + rgbs.append(rgb_map) + depth_maps.append(depth_map) + + return torch.cat(rgbs), None, torch.cat(depth_maps), None, None + + +@torch.no_grad() +def evaluation(test_dataset, + tensorf, + renderer, + savePath=None, + N_vis=5, + prtx='', + N_samples=-1, + white_bg=False, + ndc_ray=False, + compute_extra_metrics=True, + device='cuda', + im_save=False): + if prtx is not None and len(prtx) > 0: + prtx = prtx + '_' + result_path = f'{savePath}/{prtx}res.txt' + PSNRs, rgb_maps, depth_maps = [], [], [] + ssims, l_alex, l_vgg = [], [], [] + if savePath is not None: + os.makedirs(savePath, exist_ok=True) + os.makedirs(savePath + '/rgbd', exist_ok=True) + + try: + tqdm._instances.clear() + except Exception: + pass + + near_far = test_dataset.near_far + img_eval_interval = 1 if N_vis < 0 else max( + test_dataset.all_rays.shape[0] // N_vis, 1) + # img_eval_interval = max(img_eval_interval, test_dataset.all_rays.shape[0]//49) + idxs = list(range(0, test_dataset.all_rays.shape[0], img_eval_interval)) + for idx, samples in tqdm( + enumerate(test_dataset.all_rays[0::img_eval_interval]), + file=sys.stdout): + + W, H = test_dataset.img_wh + rays = samples.view(-1, samples.shape[-1]) + + rgb_map, _, depth_map, _, _ = renderer( + rays, + tensorf, + chunk=4096, + N_samples=N_samples, + ndc_ray=ndc_ray, + white_bg=white_bg, + device=device) + rgb_map = rgb_map.clamp(0.0, 1.0) + + rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape( + H, W).cpu() + + depth_map, _ = visualize_depth_numpy(depth_map.numpy(), near_far) + if len(test_dataset.all_rgbs): + gt_rgb = test_dataset.all_rgbs[idxs[idx]].view(H, W, 3) + loss = torch.mean((rgb_map - gt_rgb)**2) + PSNRs.append(-10.0 * np.log(loss.item()) / np.log(10.0)) + + if compute_extra_metrics: + ssim = rgb_ssim(rgb_map, gt_rgb, 1) + l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', + tensorf.device) + l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', + tensorf.device) + ssims.append(ssim) + l_alex.append(l_a) + l_vgg.append(l_v) + + rgb_map = (rgb_map.numpy() * 255).astype('uint8') + # rgb_map = np.concatenate((rgb_map, depth_map), axis=1) + rgb_maps.append(rgb_map) + depth_maps.append(depth_map) + if savePath is not None and im_save: + imageio.imwrite(f'{savePath}/{prtx}{idx:03d}.png', rgb_map) + rgb_map = 
np.concatenate((rgb_map, depth_map), axis=1) + imageio.imwrite(f'{savePath}/rgbd/{prtx}{idx:03d}.png', rgb_map) + if savePath is not None: + imageio.mimwrite( + f'{savePath}/{prtx}video.mp4', + np.stack(rgb_maps), + fps=30, + quality=10) + imageio.mimwrite( + f'{savePath}/{prtx}depthvideo.mp4', + np.stack(depth_maps), + fps=30, + quality=10) + + if PSNRs: + psnr = np.mean(np.asarray(PSNRs)) + if compute_extra_metrics: + ssim = np.mean(np.asarray(ssims)) + l_a = np.mean(np.asarray(l_alex)) + l_v = np.mean(np.asarray(l_vgg)) + if savePath is not None: + np.savetxt(result_path, np.asarray([psnr, ssim, l_a, l_v])) + else: + if savePath is not None: + np.savetxt(result_path, np.asarray([psnr])) + + return PSNRs + + +@torch.no_grad() +def render_path(test_dataset, + tensorf, + c2ws, + renderer, + savePath=None, + prtx='', + N_samples=-1, + white_bg=False, + ndc_ray=False, + device='cuda'): + rgb_maps, depth_maps = [], [] + os.makedirs(savePath, exist_ok=True) + os.makedirs(savePath + '/rgbd', exist_ok=True) + + try: + tqdm._instances.clear() + except Exception: + pass + + near_far = test_dataset.near_far + for idx, c2w in tqdm(enumerate(c2ws)): + + W, H = test_dataset.img_wh + + c2w = torch.FloatTensor(c2w) + rays_o, rays_d = get_rays(test_dataset.directions, + c2w) # both (h*w, 3) + if ndc_ray: + rays_o, rays_d = ndc_rays_blender(H, W, test_dataset.focal[0], 1.0, + rays_o, rays_d) + rays = torch.cat([rays_o, rays_d], 1) # (h*w, 6) + + rgb_map, _, depth_map, _, _ = renderer( + rays, + tensorf, + chunk=8192, + N_samples=N_samples, + ndc_ray=ndc_ray, + white_bg=white_bg, + device=device) + rgb_map = rgb_map.clamp(0.0, 1.0) + + rgb_map, depth_map = rgb_map.reshape(H, W, 3).cpu(), depth_map.reshape( + H, W).cpu() + + depth_map, _ = visualize_depth_numpy(depth_map.numpy(), near_far) + + rgb_map = (rgb_map.numpy() * 255).astype('uint8') + # rgb_map = np.concatenate((rgb_map, depth_map), axis=1) + rgb_maps.append(rgb_map) + depth_maps.append(depth_map) + if savePath is not None: + imageio.imwrite(f'{savePath}/{prtx}{idx:03d}.png', rgb_map) + rgb_map = np.concatenate((rgb_map, depth_map), axis=1) + imageio.imwrite(f'{savePath}/rgbd/{prtx}{idx:03d}.png', rgb_map) + + imageio.mimwrite( + f'{savePath}/{prtx}video.mp4', np.stack(rgb_maps), fps=30, quality=8) + imageio.mimwrite( + f'{savePath}/{prtx}depthvideo.mp4', + np.stack(depth_maps), + fps=30, + quality=8) diff --git a/modelscope/models/cv/nerf_recon_vq_compression/utils.py b/modelscope/models/cv/nerf_recon_vq_compression/utils.py new file mode 100644 index 00000000..2addb5f8 --- /dev/null +++ b/modelscope/models/cv/nerf_recon_vq_compression/utils.py @@ -0,0 +1,269 @@ +import cv2 +import numpy as np +import plyfile +import scipy.signal +import skimage.measure +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision.transforms as T +from PIL import Image + + +def mse2psnr(x): + return -10. 
* torch.log(x) / torch.log(torch.Tensor([10.])) + + +def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET): + """ + depth: (H, W) + """ + + x = np.nan_to_num(depth) # change nan to 0 + if minmax is None: + mi = np.min(x[x > 0]) # get minimum positive depth (ignore background) + ma = np.max(x) + else: + mi, ma = minmax + + x = (x - mi) / (ma - mi + 1e-8) # normalize to 0~1 + x = (255 * x).astype(np.uint8) + x_ = cv2.applyColorMap(x, cmap) + return x_, [mi, ma] + + +def init_log(log, keys): + for key in keys: + log[key] = torch.tensor([0.0], dtype=float) + return log + + +def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET): + """ + depth: (H, W) + """ + if type(depth) is not np.ndarray: + depth = depth.cpu().numpy() + + x = np.nan_to_num(depth) # change nan to 0 + if minmax is None: + mi = np.min(x[x > 0]) # get minimum positive depth (ignore background) + ma = np.max(x) + else: + mi, ma = minmax + + x = (x - mi) / (ma - mi + 1e-8) # normalize to 0~1 + x = (255 * x).astype(np.uint8) + x_ = Image.fromarray(cv2.applyColorMap(x, cmap)) + x_ = T.ToTensor()(x_) # (3, H, W) + return x_, [mi, ma] + + +def N_to_reso(n_voxels, bbox): + xyz_min, xyz_max = bbox + dim = len(xyz_min) + voxel_size = ((xyz_max - xyz_min).prod() / n_voxels).pow(1 / dim) + return ((xyz_max - xyz_min) / voxel_size).long().tolist() + + +def cal_n_samples(reso, step_ratio=0.5): + return int(np.linalg.norm(reso) / step_ratio) + + +__LPIPS__ = {} + + +def init_lpips(net_name, device): + assert net_name in ['alex', 'vgg'] + import lpips + print(f'init_lpips: lpips_{net_name}') + return lpips.LPIPS(net=net_name, version='0.1').eval().to(device) + + +def rgb_lpips(np_gt, np_im, net_name, device): + if net_name not in __LPIPS__: + __LPIPS__[net_name] = init_lpips(net_name, device) + gt = torch.from_numpy(np_gt).permute([2, 0, 1]).contiguous().to(device) + im = torch.from_numpy(np_im).permute([2, 0, 1]).contiguous().to(device) + return __LPIPS__[net_name](gt, im, normalize=True).item() + + +def findItem(items, target): + for one in items: + if one[:len(target)] == target: + return one + return None + + +''' Evaluation metrics (ssim, lpips) +''' + + +def rgb_ssim(img0, + img1, + max_val, + filter_size=11, + filter_sigma=1.5, + k1=0.01, + k2=0.03, + return_map=False): + # Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58 + assert len(img0.shape) == 3 + assert img0.shape[-1] == 3 + assert img0.shape == img1.shape + + # Construct a 1D Gaussian blur filter. + hw = filter_size // 2 + shift = (2 * hw - filter_size + 1) / 2 + f_i = ((np.arange(filter_size) - hw + shift) / filter_sigma)**2 + filt = np.exp(-0.5 * f_i) + filt /= np.sum(filt) + + # Blur in x and y (faster than the 2D convolution). + def convolve2d(z, f): + return scipy.signal.convolve2d(z, f, mode='valid') + + def filt_fn(z): + return np.stack([ + convolve2d(convolve2d(z[..., i], filt[:, None]), filt[None, :]) + for i in range(z.shape[-1]) + ], -1) + + mu0 = filt_fn(img0) + mu1 = filt_fn(img1) + mu00 = mu0 * mu0 + mu11 = mu1 * mu1 + mu01 = mu0 * mu1 + sigma00 = filt_fn(img0**2) - mu00 + sigma11 = filt_fn(img1**2) - mu11 + sigma01 = filt_fn(img0 * img1) - mu01 + + # Clip the variances and covariances to valid values. 
+ # Variance must be non-negative: + sigma00 = np.maximum(0., sigma00) + sigma11 = np.maximum(0., sigma11) + sigma01 = np.sign(sigma01) * np.minimum( + np.sqrt(sigma00 * sigma11), np.abs(sigma01)) + c1 = (k1 * max_val)**2 + c2 = (k2 * max_val)**2 + numer = (2 * mu01 + c1) * (2 * sigma01 + c2) + denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2) + ssim_map = numer / denom + ssim = np.mean(ssim_map) + return ssim_map if return_map else ssim + + +class TVLoss(nn.Module): + + def __init__(self, TVLoss_weight=1): + super(TVLoss, self).__init__() + self.TVLoss_weight = TVLoss_weight + + def forward(self, x): + batch_size = x.size()[0] + h_x = x.size()[2] + w_x = x.size()[3] + count_h = self._tensor_size(x[:, :, 1:, :]) + count_w = self._tensor_size(x[:, :, :, 1:]) + h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum() + w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum() + return self.TVLoss_weight * 2 * (h_tv / count_h + + w_tv / count_w) / batch_size + + def _tensor_size(self, t): + return t.size()[1] * t.size()[2] * t.size()[3] + + +def convert_sdf_samples_to_ply( + pytorch_3d_sdf_tensor, + ply_filename_out, + bbox, + level=0.5, + offset=None, + scale=None, +): + """ + Convert sdf samples to .ply + + :param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n) + :voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid + :voxel_size: float, the size of the voxels + :ply_filename_out: string, path of the filename to save to + + This function adapted from: https://github.com/RobotLocomotion/spartan + """ + + numpy_3d_sdf_tensor = pytorch_3d_sdf_tensor.numpy() + voxel_size = list( + (bbox[1] - bbox[0]) / np.array(pytorch_3d_sdf_tensor.shape)) + + verts, faces, normals, values = skimage.measure.marching_cubes( + numpy_3d_sdf_tensor, level=level, spacing=voxel_size) + faces = faces[..., ::-1] # inverse face orientation + + # transform from voxel coordinates to camera coordinates + # note x and y are flipped in the output of marching_cubes + mesh_points = np.zeros_like(verts) + mesh_points[:, 0] = bbox[0, 0] + verts[:, 0] + mesh_points[:, 1] = bbox[0, 1] + verts[:, 1] + mesh_points[:, 2] = bbox[0, 2] + verts[:, 2] + + # apply additional offset and scale + if scale is not None: + mesh_points = mesh_points / scale + if offset is not None: + mesh_points = mesh_points - offset + + # try writing to the ply file + + num_verts = verts.shape[0] + num_faces = faces.shape[0] + + verts_tuple = np.zeros((num_verts, ), + dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]) + + for i in range(0, num_verts): + verts_tuple[i] = tuple(mesh_points[i, :]) + + faces_building = [] + for i in range(0, num_faces): + faces_building.append(((faces[i, :].tolist(), ))) + faces_tuple = np.array( + faces_building, dtype=[('vertex_indices', 'i4', (3, ))]) + + el_verts = plyfile.PlyElement.describe(verts_tuple, 'vertex') + el_faces = plyfile.PlyElement.describe(faces_tuple, 'face') + + ply_data = plyfile.PlyData([el_verts, el_faces]) + print('saving mesh to %s' % (ply_filename_out)) + ply_data.write(ply_filename_out) + + +class Timing: + """ + Timing environment + usage: + with Timing("message"): + your commands here + will print CUDA runtime in ms + """ + + def __init__(self, name, debug=False): + self.name = name + self.debug = debug + + def __enter__(self): + if not self.debug: + return + + self.start = torch.cuda.Event(enable_timing=True) + self.end = torch.cuda.Event(enable_timing=True) + self.start.record() + + def __exit__(self, type, value, traceback): + 
if not self.debug:
+            return
+
+        self.end.record()
+        torch.cuda.synchronize()
+        print(self.name, 'elapsed', self.start.elapsed_time(self.end), 'ms')
diff --git a/modelscope/outputs/outputs.py b/modelscope/outputs/outputs.py
index ab24a34c..a838d4d9 100644
--- a/modelscope/outputs/outputs.py
+++ b/modelscope/outputs/outputs.py
@@ -720,6 +720,7 @@ TASK_OUTPUTS = {
     Tasks.video_super_resolution: [OutputKeys.OUTPUT_VIDEO],
     Tasks.video_deinterlace: [OutputKeys.OUTPUT_VIDEO],
     Tasks.nerf_recon_acc: [OutputKeys.OUTPUT],
+    Tasks.nerf_recon_vq_compression: [OutputKeys.OUTPUT],
     Tasks.video_colorization: [OutputKeys.OUTPUT_VIDEO],
 
     # image quality assessment degradation result for single image
diff --git a/modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py b/modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py
new file mode 100644
index 00000000..5ee6a753
--- /dev/null
+++ b/modelscope/pipelines/cv/nerf_recon_vq_compression_pipeline.py
@@ -0,0 +1,94 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+from typing import Any, Dict
+
+from modelscope.metainfo import Pipelines
+from modelscope.outputs import OutputKeys
+from modelscope.pipelines.base import Input, Model, Pipeline
+from modelscope.pipelines.builder import PIPELINES
+from modelscope.pipelines.util import is_model, is_official_hub_path
+from modelscope.utils.constant import Invoke, Tasks
+from modelscope.utils.logger import get_logger
+
+logger = get_logger()
+
+
+@PIPELINES.register_module(
+    Tasks.nerf_recon_vq_compression,
+    module_name=Pipelines.nerf_recon_vq_compression)
+class NeRFReconVQCompressionPipeline(Pipeline):
+    """ NeRF reconstruction VQ compression pipeline
+    Example:
+
+    ```python
+    >>> from modelscope.pipelines import pipeline
+    >>> nerf_recon_vq_compress = pipeline(Tasks.nerf_recon_vq_compression,
+                'damo/cv_nerf-3d-reconstruction-vq-compression_damo')
+    >>> nerf_recon_vq_compress({
+            'data_dir': '/data/lego',    # data dir path (str)
+            'render_dir': 'save_dir',    # save dir path (str)
+            'ckpt_path': 'ckpt_path',    # ckpt path (str)
+        })
+    ```
+    """
+
+    def __init__(self,
+                 model,
+                 dataset_name='blender',
+                 data_dir='',
+                 downsample=1,
+                 ndc_ray=False,
+                 ckpt_path='',
+                 device='gpu',
+                 **kwargs):
+        """
+        Use the model to create a NeRF reconstruction VQ compression pipeline.
+        Args:
+            model (str or Model): model_id on modelscope hub
+            dataset_name (str): currently only support 'blender' and 'llff'
+            data_dir (str): path to the scene data directory
+            downsample (int): downsample factor for the input images
+            ndc_ray (bool): use ndc rays, required for llff forward-facing scenes
+            ckpt_path (str): path to the compressed checkpoint file
+            device (str): only support gpu
+        """
+        model = Model.from_pretrained(
+            model,
+            device=device,
+            model_prefetched=True,
+            invoked_by=Invoke.PIPELINE,
+            dataset_name=dataset_name,
+            data_dir=data_dir,
+            downsample=downsample,
+            ndc_ray=ndc_ray,
+            ckpt_path=ckpt_path) if is_model(model) else model
+
+        super().__init__(model=model, **kwargs)
+        if not isinstance(self.model, Model):
+            logger.error('model object is not initialized.')
+            raise Exception('model object is not initialized.')
+        logger.info('init model done')
+
+    def preprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        test_mode = inputs['test_mode']
+        if 'test' in test_mode or 'eval' in test_mode:
+            inputs['test_mode'] = 'evaluation_test'
+        elif 'path' in test_mode:
+            inputs['test_mode'] = 'render_path'
+        return inputs
+
+    def forward(self, input: Dict[str, Any]) -> Dict[str, Any]:
+        render_dir = input['render_dir']
+        test_mode = input['test_mode']
+        N_vis = input.get('N_vis', 5)
+        if test_mode == 'evaluation_test':
+            self.model.evaluation(render_dir, N_vis)
+        elif test_mode == 'render_path':
+            self.model.render_path(render_dir, N_vis)
+        else:
+            raise Exception('test mode {} is not supported'.format(test_mode))
+        return {OutputKeys.OUTPUT: 'Done'}
+
+    def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
+        return inputs
diff --git a/modelscope/utils/constant.py b/modelscope/utils/constant.py
index 32b1e2da..9152484d 100644
--- a/modelscope/utils/constant.py
+++ b/modelscope/utils/constant.py
@@ -152,6 +152,7 @@ class CVTasks(object):
     motion_generation = 'motion-generation'
     # 3d reconstruction
     nerf_recon_acc = 'nerf-recon-acc'
+    nerf_recon_vq_compression = 'nerf-recon-vq-compression'
 
     # vision efficient tuning
     vision_efficient_tuning = 'vision-efficient-tuning'
diff --git a/tests/pipelines/test_vqrf.py b/tests/pipelines/test_vqrf.py
new file mode 100644
index 00000000..d8cb8c88
--- /dev/null
+++ b/tests/pipelines/test_vqrf.py
@@ -0,0 +1,90 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import os +import unittest + +import torch + +from modelscope.hub.snapshot_download import snapshot_download +from modelscope.msdatasets import MsDataset +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.utils.constant import DownloadMode, Tasks +from modelscope.utils.test_utils import test_level + + +class NeRFReconVQCompressionBlender(unittest.TestCase): + + def setUp(self) -> None: + self.model_id = 'DAMOXR/cv_nerf_3d-reconstruction_vector-quantize-compression' + pretrained_model = 'ficus_demo.pt' + data_dir = MsDataset.load( + 'nerf_recon_dataset', namespace='damo', + split='train').config_kwargs['split_config']['train'] + nerf_synthetic_dataset = os.path.join(data_dir, 'nerf_synthetic') + self.blender_scene = 'ficus' + data_dir = os.path.join(nerf_synthetic_dataset, self.blender_scene) + + self.pipeline = pipeline( + Tasks.nerf_recon_vq_compression, + model=self.model_id, + dataset_name='blender', + data_dir=data_dir, + downsample=1, + ndc_ray=False, + ckpt_path=pretrained_model) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest only') + def test_evalutaion(self): + render_dir = f'./exp/{self.blender_scene}' + self.pipeline( + dict(test_mode='evaluation_test', render_dir=render_dir, N_vis=5)) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest only') + def test_render_path(self): + render_dir = f'./exp/{self.blender_scene}' + self.pipeline( + dict(test_mode='render_path', render_dir=render_dir, N_vis=30)) + + +class NeRFReconVQCompressionLLFF(unittest.TestCase): + + def setUp(self) -> None: + self.model_id = 'DAMOXR/cv_nerf_3d-reconstruction_vector-quantize-compression' + pretrained_model = 'fern_demo.pt' + data_dir = MsDataset.load( + 'DAMOXR/nerf_llff_data', + subset_name='default', + split='test', + ).config_kwargs['split_config']['test'] + nerf_llff = os.path.join(data_dir, 'nerf_llff_data') + self.llff_scene = 'fern' + data_dir = os.path.join(nerf_llff, self.llff_scene) + + self.pipeline = pipeline( + Tasks.nerf_recon_vq_compression, + model=self.model_id, + dataset_name='llff', + data_dir=data_dir, + downsample=4, + ndc_ray=True, + ckpt_path=pretrained_model) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest only') + def test_evalutaion(self): + render_dir = f'./exp/{self.llff_scene}' + self.pipeline( + dict(test_mode='evaluation_test', render_dir=render_dir, N_vis=5)) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + @unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest only') + def test_render_path(self): + render_dir = f'./exp/{self.llff_scene}' + self.pipeline( + dict(test_mode='render_path', render_dir=render_dir, N_vis=10)) + + +if __name__ == '__main__': + unittest.main() From 4ca937d2bad2899003d7b6deb3f94f7e8212dee1 Mon Sep 17 00:00:00 2001 From: Jintao Date: Wed, 26 Jul 2023 18:12:55 +0800 Subject: [PATCH 84/87] support openbuddy-llama2-13b (#416) --- examples/pytorch/llm/_parser.py | 69 ++++++ examples/pytorch/llm/llm_infer.py | 77 ++++--- examples/pytorch/llm/llm_sft.py | 94 ++++---- examples/pytorch/llm/run_infer.sh | 8 +- examples/pytorch/llm/run_sft.sh | 7 +- examples/pytorch/llm/utils/__init__.py | 5 + examples/pytorch/llm/utils/dataset.py | 72 ++++++ examples/pytorch/llm/utils/models.py | 133 
+++++++++++ .../llm/{_common.py => utils/utils.py} | 216 +----------------- 9 files changed, 385 insertions(+), 296 deletions(-) create mode 100644 examples/pytorch/llm/_parser.py create mode 100644 examples/pytorch/llm/utils/__init__.py create mode 100644 examples/pytorch/llm/utils/dataset.py create mode 100644 examples/pytorch/llm/utils/models.py rename examples/pytorch/llm/{_common.py => utils/utils.py} (54%) diff --git a/examples/pytorch/llm/_parser.py b/examples/pytorch/llm/_parser.py new file mode 100644 index 00000000..480cfdce --- /dev/null +++ b/examples/pytorch/llm/_parser.py @@ -0,0 +1,69 @@ +import os +from dataclasses import dataclass, field +from typing import List, Optional, Tuple, Type, TypeVar, Union + +import torch +from torch import device as Device +from transformers import HfArgumentParser + +from modelscope import get_logger + +logger = get_logger() + + +def _format_device(device: Union[List[int], str]) -> Tuple[List[int], str]: + if isinstance(device, list): + device_ids = device + device_str = ','.join([str(d) for d in device]) + else: + device_ids = [int(d) for d in device.split(',') if d != '-1'] + device_str = device + device_str = device_str.replace(' ', '') + return device_ids, device_str + + +def select_device(device: Union[List[int], str]) -> Device: + """Call this function before cuda is initialized. + device: e.g. []: 'cpu', [0], [0, 1, 2] + e.g. '-1': 'cpu', '0', '0,1,2' + """ + if torch.cuda.is_initialized(): + logger.warning('CUDA has been initialized! Device selection fails!') + return torch.device('cuda:0') + + device_ids, device_str = _format_device(device) + os.environ['CUDA_VISIBLE_DEVICES'] = device_str + log_s = 'Using device: ' + if len(device_ids) == 0: + master_device: str = 'cpu' + log_s += 'cpu' + else: + assert torch.cuda.is_available( + ) and torch.cuda.device_count() >= len(device_ids) + master_device = 'cuda:0' + log_s += f'cuda:{device_str}' + logger.info(log_s) + return torch.device(master_device) + + +_T = TypeVar('_T') + + +def parse_args(class_type: Type[_T], + argv: Optional[List[str]] = None) -> Tuple[_T, List[str]]: + parser = HfArgumentParser([class_type]) + args, remaining_args = parser.parse_args_into_dataclasses( + argv, return_remaining_strings=True) + logger.info(f'args: {args}') + return args, remaining_args + + +@dataclass +class DeviceArguments: + device: str = '0' # e.g. '-1'; '0'; '0,1' + + +def parse_device(argv: Optional[List[str]] = None) -> List[str]: + args, remaining_args = parse_args(DeviceArguments, argv) + select_device(args.device) + return remaining_args diff --git a/examples/pytorch/llm/llm_infer.py b/examples/pytorch/llm/llm_infer.py index 8b9c1bb1..614e3d36 100644 --- a/examples/pytorch/llm/llm_infer.py +++ b/examples/pytorch/llm/llm_infer.py @@ -1,21 +1,32 @@ # ### Setting up experimental environment. -from _common import * + +if __name__ == '__main__': + # Avoid cuda initialization caused by library import (e.g. peft, accelerate) + from _parser import * + # argv = parse_device(['--device', '1']) + argv = parse_device() + +from utils import * @dataclass class InferArguments: - device: str = '0' # e.g. 
'-1'; '0'; '0,1' model_type: str = field( - default='baichuan-7b', - metadata={ - 'choices': - ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] - }) + default='baichuan-7b', metadata={'choices': list(MODEL_MAPPER.keys())}) sft_type: str = field( default='lora', metadata={'choices': ['lora', 'full']}) ckpt_path: str = '/path/to/your/iter_xxx.pth' eval_human: bool = False # False: eval test_dataset - data_sample: Optional[int] = None + ignore_args_error: bool = True # False: notebook compatibility + + dataset: str = field( + default='alpaca-en,alpaca-zh', + metadata={'help': f'dataset choices: {list(DATASET_MAPPER.keys())}'}) + dataset_seed: int = 42 + dataset_sample: Optional[int] = None + dataset_test_size: float = 0.01 + prompt: str = DEFAULT_PROMPT + max_length: Optional[int] = 2048 lora_target_modules: Optional[List[str]] = None lora_rank: int = 8 @@ -29,33 +40,14 @@ class InferArguments: def __post_init__(self): if self.lora_target_modules is None: - if self.model_type in {'baichuan-7b', 'baichuan-13b'}: - self.lora_target_modules = ['W_pack'] - elif self.model_type == 'chatglm2': - self.lora_target_modules = ['query_key_value'] - elif self.model_type == 'llama2-7b': - self.lora_target_modules = ['q_proj', 'k_proj', 'v_proj'] - else: - raise ValueError(f'model_type: {self.model_type}') + self.lora_target_modules = MODEL_MAPPER[self.model_type]['lora_TM'] if not os.path.isfile(self.ckpt_path): raise ValueError( f'Please enter a valid ckpt_path: {self.ckpt_path}') -def parse_args() -> InferArguments: - # return_remaining_strings=True for notebook compatibility - args, remaining_args = HfArgumentParser([ - InferArguments - ]).parse_args_into_dataclasses(return_remaining_strings=True) - logger.info(f'args: {args}') - if len(remaining_args) > 0: - logger.warning(f'remaining_args: {remaining_args}') - return args - - def llm_infer(args: InferArguments) -> None: - select_device(args.device) # ### Loading Model and Tokenizer support_bf16 = torch.cuda.is_bf16_supported() if not support_bf16: @@ -72,7 +64,7 @@ def llm_infer(args: InferArguments) -> None: lora_dropout=args.lora_dropout_p, pretrained_weights=args.ckpt_path) logger.info(f'lora_config: {lora_config}') - Swift.prepare_model(model, lora_config) + model = Swift.prepare_model(model, lora_config) elif args.sft_type == 'full': state_dict = torch.load(args.ckpt_path, map_location='cpu') model.load_state_dict(state_dict) @@ -80,6 +72,11 @@ def llm_infer(args: InferArguments) -> None: raise ValueError(f'args.sft_type: {args.sft_type}') # ### Inference + tokenize_func = partial( + tokenize_function, + tokenizer=tokenizer, + prompt=args.prompt, + max_length=args.max_length) streamer = TextStreamer( tokenizer, skip_prompt=True, skip_special_tokens=True) generation_config = GenerationConfig( @@ -94,17 +91,22 @@ def llm_infer(args: InferArguments) -> None: if args.eval_human: while True: instruction = input('<<< ') - data = {'instruction': instruction, 'input': None, 'output': None} - inference(data, model, tokenizer, streamer, generation_config) + data = {'instruction': instruction} + input_ids = tokenize_func(data)['input_ids'] + inference(input_ids, model, tokenizer, streamer, generation_config) print('-' * 80) else: - _, test_dataset = get_alpaca_en_zh_dataset( - None, True, split_seed=42, data_sample=args.data_sample) + dataset = get_dataset(args.dataset) + _, test_dataset = process_dataset(dataset, args.dataset_test_size, + args.dataset_sample, + args.dataset_seed) mini_test_dataset = test_dataset.select(range(10)) + del dataset for 
data in mini_test_dataset: output = data['output'] data['output'] = None - inference(data, model, tokenizer, streamer, generation_config) + input_ids = tokenize_func(data)['input_ids'] + inference(input_ids, model, tokenizer, streamer, generation_config) print() print(f'[LABELS]{output}') print('-' * 80) @@ -112,5 +114,10 @@ def llm_infer(args: InferArguments) -> None: if __name__ == '__main__': - args = parse_args() + args, remaining_argv = parse_args(InferArguments, argv) + if len(remaining_argv) > 0: + if args.ignore_args_error: + logger.warning(f'remaining_argv: {remaining_argv}') + else: + raise ValueError(f'remaining_argv: {remaining_argv}') llm_infer(args) diff --git a/examples/pytorch/llm/llm_sft.py b/examples/pytorch/llm/llm_sft.py index 07f1fd5e..a7dabf77 100644 --- a/examples/pytorch/llm/llm_sft.py +++ b/examples/pytorch/llm/llm_sft.py @@ -1,37 +1,45 @@ # ### Setting up experimental environment. """ -pip install numpy pandas matplotlib scikit-learn -pip install transformers datasets -conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia -pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer -pip install accelerate transformers_stream_generator - # Install the latest version of modelscope from source git clone https://github.com/modelscope/modelscope.git cd modelscope pip install . -# Resolve torchmetrics dependencies and update numpy -pip install numpy -U +conda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia +pip install numpy pandas -U # Resolve torchmetrics dependencies and update numpy +pip install matplotlib scikit-learn -U +pip install transformers datasets -U +pip install tqdm tensorboard torchmetrics sentencepiece charset_normalizer -U +pip install accelerate transformers_stream_generator -U """ -from _common import * +if __name__ == '__main__': + # Avoid cuda initialization caused by library import (e.g. peft, accelerate) + from _parser import * + # argv = parse_device(['--device', '1']) + argv = parse_device() + +from utils import * @dataclass class SftArguments: - device: str = '0,1' # e.g. 
'-1'; '0'; '0,1' seed: int = 42 model_type: str = field( - default='baichuan-7b', - metadata={ - 'choices': - ['baichuan-7b', 'baichuan-13b', 'chatglm2', 'llama2-7b'] - }) + default='baichuan-7b', metadata={'choices': list(MODEL_MAPPER.keys())}) # baichuan-7b: 'lora': 16G; 'full': 80G sft_type: str = field( default='lora', metadata={'choices': ['lora', 'full']}) - data_sample: Optional[int] = None + ignore_args_error: bool = True # False: notebook compatibility + + dataset: str = field( + default='alpaca-en,alpaca-zh', + metadata={'help': f'dataset choices: {list(DATASET_MAPPER.keys())}'}) + dataset_seed: int = 42 + dataset_sample: Optional[int] = None + dataset_test_size: float = 0.01 + prompt: str = DEFAULT_PROMPT + max_length: Optional[int] = 2048 lora_target_modules: Optional[List[str]] = None lora_rank: int = 8 @@ -75,29 +83,10 @@ class SftArguments: raise ValueError(f'sft_type: {self.sft_type}') if self.lora_target_modules is None: - if self.model_type in {'baichuan-7b', 'baichuan-13b'}: - self.lora_target_modules = ['W_pack'] - elif self.model_type == 'chatglm2': - self.lora_target_modules = ['query_key_value'] - elif self.model_type == 'llama2-7b': - self.lora_target_modules = ['q_proj', 'k_proj', 'v_proj'] - else: - raise ValueError(f'model_type: {self.model_type}') - - -def parse_args() -> SftArguments: - # return_remaining_strings=True for notebook compatibility - args, remaining_args = HfArgumentParser([ - SftArguments - ]).parse_args_into_dataclasses(return_remaining_strings=True) - logger.info(f'args: {args}') - if len(remaining_args) > 0: - logger.warning(f'remaining_args: {remaining_args}') - return args + self.lora_target_modules = MODEL_MAPPER[self.model_type]['lora_TM'] def llm_sft(args: SftArguments) -> None: - select_device(args.device) seed_everything(args.seed) # ### Loading Model and Tokenizer @@ -123,18 +112,28 @@ def llm_sft(args: SftArguments) -> None: lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout_p) logger.info(f'lora_config: {lora_config}') - Swift.prepare_model(model, lora_config) + model = Swift.prepare_model(model, lora_config) show_freeze_layers(model) print_model_info(model) # check the device and dtype of the model - _p: Parameter = list(model.parameters())[-1] + _p: Tensor = list(model.parameters())[-1] logger.info(f'device: {_p.device}, dtype: {_p.dtype}') # ### Loading Dataset - tokenize_func = partial(tokenize_function, tokenizer=tokenizer) - train_dataset, val_dataset = get_alpaca_en_zh_dataset( - tokenize_func, split_seed=42, data_sample=args.data_sample) + dataset = get_dataset(args.dataset) + train_dataset, val_dataset = process_dataset(dataset, + args.dataset_test_size, + args.dataset_sample, + args.dataset_seed) + tokenize_func = partial( + tokenize_function, + tokenizer=tokenizer, + prompt=args.prompt, + max_length=args.max_length) + train_dataset = train_dataset.map(tokenize_func) + val_dataset = val_dataset.map(tokenize_func) + del dataset # Data analysis stat_dataset(train_dataset) stat_dataset(val_dataset) @@ -239,11 +238,6 @@ def llm_sft(args: SftArguments) -> None: cfg.update(config) return cfg - device_kwargs = {} - if torch.cuda.device_count() > 1: - # No placement for model, leave the model to `device_map` - device_kwargs['device'] = 'cpu' - trainer = EpochBasedTrainer( model=model, cfg_file=cfg_file, @@ -253,7 +247,6 @@ def llm_sft(args: SftArguments) -> None: remove_unused_data=True, seed=42, cfg_modify_fn=cfg_modify_fn, - **device_kwargs, ) trainer.train() @@ -264,5 +257,10 @@ def llm_sft(args: SftArguments) -> None: if 
__name__ == '__main__': - args = parse_args() + args, remaining_argv = parse_args(SftArguments, argv) + if len(remaining_argv) > 0: + if args.ignore_args_error: + logger.warning(f'remaining_argv: {remaining_argv}') + else: + raise ValueError(f'remaining_argv: {remaining_argv}') llm_sft(args) diff --git a/examples/pytorch/llm/run_infer.sh b/examples/pytorch/llm/run_infer.sh index efe48958..aa1a1a04 100644 --- a/examples/pytorch/llm/run_infer.sh +++ b/examples/pytorch/llm/run_infer.sh @@ -1,5 +1,7 @@ +#!/bin/bash + python llm_infer.py \ - --device 0 \ - --model_type llama2-7b \ - --ckpt_path "runs/llama2-7b/vx_xxx/output_best/pytorch_model.bin" \ + --device 0,1 \ + --model_type openbuddy-llama2-13b \ + --ckpt_path "runs/openbuddy-llama2-13b/vx_xxx/output_best/pytorch_model.bin" \ --eval_human true diff --git a/examples/pytorch/llm/run_sft.sh b/examples/pytorch/llm/run_sft.sh index 98ae2460..3a6d9ff4 100644 --- a/examples/pytorch/llm/run_sft.sh +++ b/examples/pytorch/llm/run_sft.sh @@ -2,7 +2,8 @@ DATE=$(date +"%Y%m%d-%H%M%S") nohup python llm_sft.py \ - --device 0 \ - --model_type llama2-7b \ - --data_sample 25000 \ + --device 0,1 \ + --model_type openbuddy-llama2-13b \ + --dataset alpaca-en,alpaca-zh \ + --dataset_sample 20000 \ &> train_$DATE.out & diff --git a/examples/pytorch/llm/utils/__init__.py b/examples/pytorch/llm/utils/__init__.py new file mode 100644 index 00000000..e4772c03 --- /dev/null +++ b/examples/pytorch/llm/utils/__init__.py @@ -0,0 +1,5 @@ +from _parser import * + +from .dataset import * +from .models import * +from .utils import * diff --git a/examples/pytorch/llm/utils/dataset.py b/examples/pytorch/llm/utils/dataset.py new file mode 100644 index 00000000..3035ba78 --- /dev/null +++ b/examples/pytorch/llm/utils/dataset.py @@ -0,0 +1,72 @@ +from typing import Optional, Tuple + +import numpy as np +from datasets import Dataset as HfDataset +from datasets import concatenate_datasets +from numpy.random import RandomState + +from modelscope import MsDataset + + +def _processing_alpaca(dataset: HfDataset) -> HfDataset: + instruction = dataset['instruction'] + input_ = dataset['input'] + res = [] + for inst, inp in zip(instruction, input_): + if inp is not None and inp != '': + if inp.startswith('输入:'): + inp = inp[3:] + inst = f'{inst}\n{inp}' + res.append(inst) + dataset = HfDataset.from_dict({ + 'instruction': res, + 'output': dataset['output'] + }) + return dataset + + +def get_alpaca_en_dataset() -> HfDataset: + dataset_en: HfDataset = MsDataset.load( + 'AI-ModelScope/alpaca-gpt4-data-en', split='train').to_hf_dataset() + dataset_en = dataset_en.remove_columns(['text']) + return _processing_alpaca(dataset_en) + + +def get_alpaca_zh_dataset() -> HfDataset: + dataset_zh: HfDataset = MsDataset.load( + 'AI-ModelScope/alpaca-gpt4-data-zh', split='train').to_hf_dataset() + return _processing_alpaca(dataset_zh) + + +def get_seed(random_state: RandomState) -> int: + seed_max = np.iinfo(np.int32).max + seed = random_state.randint(0, seed_max) + return seed + + +def process_dataset(dataset: HfDataset, dataset_test_size: float, + dataset_sample: Optional[int], + dataset_seed: int) -> Tuple[HfDataset, HfDataset]: + random_state = np.random.RandomState(dataset_seed) + if dataset_sample is not None: + index = random_state.permutation(len(dataset))[:dataset_sample] + dataset = dataset.select(index) + dataset = dataset.train_test_split( + dataset_test_size, seed=get_seed(random_state)) + return dataset['train'], dataset['test'] + + +DATASET_MAPPER = { + 'alpaca-en': 
get_alpaca_en_dataset, + 'alpaca-zh': get_alpaca_zh_dataset, +} + + +def get_dataset(dataset_names: str) -> HfDataset: + dataset_name_list = dataset_names.split(',') + dataset_list = [] + for dataset_name in dataset_name_list: + get_function = DATASET_MAPPER[dataset_name] + dataset_list.append(get_function()) + dataset = concatenate_datasets(dataset_list) + return dataset diff --git a/examples/pytorch/llm/utils/models.py b/examples/pytorch/llm/utils/models.py new file mode 100644 index 00000000..c95df561 --- /dev/null +++ b/examples/pytorch/llm/utils/models.py @@ -0,0 +1,133 @@ +from typing import NamedTuple + +import torch +from torch import dtype as Dtype + +from modelscope import (AutoConfig, AutoModelForCausalLM, AutoTokenizer, Model, + get_logger, read_config, snapshot_download) +from modelscope.models.nlp.chatglm2 import ChatGLM2Config, ChatGLM2Tokenizer + +logger = get_logger() + + +def _add_special_token(tokenizer): + if tokenizer.eos_token_id is None: + tokenizer.eos_token_id = 2 + if tokenizer.bos_token_id is None: + tokenizer.bos_token_id = 1 + if tokenizer.pad_token_id is None: + tokenizer.pad_token_id = 0 + logger.info(f'bos_token_id: {tokenizer.bos_token_id}, ' + f'eos_token_id: {tokenizer.eos_token_id}, ' + f'pad_token_id: {tokenizer.pad_token_id}') + + +def get_model_tokenizer_default(model_dir: str, + load_model: bool = True, + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): + """load from an independent repository""" + model_config = AutoConfig.from_pretrained( + model_dir, trust_remote_code=True) + model_config.torch_dtype = torch_dtype + logger.info(f'model_config: {model_config}') + tokenizer = AutoTokenizer.from_pretrained( + model_dir, trust_remote_code=True) + model = None + if load_model: + model = AutoModelForCausalLM.from_pretrained( + model_dir, + config=model_config, + device_map='auto', + torch_dtype=torch_dtype, + trust_remote_code=True) + + if add_special_token: + _add_special_token(tokenizer) + return model, tokenizer + + +def get_model_tokenizer_chatglm2(model_dir: str, + load_model: bool = True, + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): + """load from ms library""" + config = read_config(model_dir) + logger.info(config) + model_config = ChatGLM2Config.from_pretrained(model_dir) + model_config.torch_dtype = torch_dtype + logger.info(model_config) + tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) + model = None + if load_model: + model = Model.from_pretrained( + model_dir, + cfg_dict=config, + config=model_config, + device_map='auto', + torch_dtype=torch_dtype) + if add_special_token: + _add_special_token(tokenizer) + return model, tokenizer + + +class LoRATM(NamedTuple): + # default lora target modules + baichuan = ['W_pack'] + chatglm2 = ['query_key_value'] + llama2 = ['q_proj', 'k_proj', 'v_proj'] + + +# Reference: 'https://modelscope.cn/models/{model_id}/summary' +MODEL_MAPPER = { + 'baichuan-7b': { + 'model_id': 'baichuan-inc/baichuan-7B', + 'revision': 'v1.0.7', + 'lora_TM': LoRATM.baichuan + }, + 'baichuan-13b': { + 'model_id': 'baichuan-inc/Baichuan-13B-Base', + 'revision': 'v1.0.3', + 'lora_TM': LoRATM.baichuan + }, + 'chatglm2': { + 'model_id': 'ZhipuAI/chatglm2-6b', + 'revision': 'v1.0.6', + 'get_function': get_model_tokenizer_chatglm2, + 'lora_TM': LoRATM.chatglm2 + }, + 'llama2-7b': { + 'model_id': 'modelscope/Llama-2-7b-ms', + 'revision': 'v1.0.2', + 'ignore_file_pattern': [r'.+\.bin$'], # use safetensors + 'lora_TM': LoRATM.llama2 + }, + 'llama2-13b': { + 'model_id': 
'modelscope/Llama-2-13b-ms', + 'revision': 'v1.0.2', + 'ignore_file_pattern': [r'.+\.bin$'], + 'lora_TM': LoRATM.llama2 + }, + 'openbuddy-llama2-13b': { + 'model_id': 'OpenBuddy/openbuddy-llama2-13b-v8.1-fp16', + 'lora_TM': LoRATM.llama2 + } +} + + +def get_model_tokenizer(model_type: str, + load_model: bool = True, + add_special_token: bool = True, + torch_dtype: Dtype = torch.float16): + data = MODEL_MAPPER.get(model_type) + if data is None: + raise ValueError(f'model_type: {model_type}') + model_id = data['model_id'] + revision = data.get('revision', 'master') + get_function = data.get('get_function', get_model_tokenizer_default) + ignore_file_pattern = data.get('ignore_file_pattern', []) + model_dir = snapshot_download( + model_id, revision, ignore_file_pattern=ignore_file_pattern) + model, tokenizer = get_function(model_dir, load_model, add_special_token, + torch_dtype) + return model, tokenizer, model_dir diff --git a/examples/pytorch/llm/_common.py b/examples/pytorch/llm/utils/utils.py similarity index 54% rename from examples/pytorch/llm/_common.py rename to examples/pytorch/llm/utils/utils.py index b8921581..5b8ee163 100644 --- a/examples/pytorch/llm/_common.py +++ b/examples/pytorch/llm/utils/utils.py @@ -9,16 +9,10 @@ from functools import partial from types import MethodType from typing import Any, Callable, Dict, List, Optional, Tuple, Union -import json import matplotlib.pyplot as plt import numpy as np import torch -import torch.nn as nn -import torch.optim as optim from datasets import Dataset as HfDataset -from datasets import concatenate_datasets -from matplotlib.axes import Axes -from matplotlib.figure import Figure from numpy import ndarray from tensorboard.backend.event_processing.event_accumulator import \ EventAccumulator @@ -26,23 +20,14 @@ from torch import Tensor from torch import device as Device from torch import dtype as Dtype from torch.nn import Module -from torch.nn.parameter import Parameter from torch.nn.utils.rnn import pad_sequence -from torch.optim import Optimizer -from torch.optim import lr_scheduler as lrs -from torch.optim.lr_scheduler import _LRScheduler as LRScheduler -from torch.utils.data import Dataset from torchmetrics import Accuracy, MeanMetric from tqdm import tqdm -from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer, - GenerationConfig, HfArgumentParser, TextStreamer) +from transformers import GenerationConfig, TextStreamer -from modelscope import (Model, MsDataset, get_logger, read_config, - snapshot_download) +from modelscope import get_logger from modelscope.metrics.base import Metric from modelscope.metrics.builder import METRICS -from modelscope.models.nlp.chatglm2 import ChatGLM2Config, ChatGLM2Tokenizer -from modelscope.models.nlp.llama2 import Llama2Config, Llama2Tokenizer from modelscope.swift import LoRAConfig, Swift from modelscope.trainers import EpochBasedTrainer from modelscope.utils.config import Config, ConfigDict @@ -50,7 +35,7 @@ from modelscope.utils.registry import default_group COLOR, COLOR_S = '#FFE2D9', '#FF7043' -PROMPT = """Here's a conversation between a human and an AI assistant. \ +DEFAULT_PROMPT = """Here's a conversation between a human and an AI assistant. \ The AI assistant provides detailed, friendly answers for the human. 
### Human: @@ -89,41 +74,6 @@ def get_work_dir(work_dir: str) -> str: return work_dir -def _format_device(device: Union[List[int], str]) -> Tuple[List[int], str]: - if isinstance(device, list): - device_ids = device - device_str = ','.join([str(d) for d in device]) - else: - device_ids = [int(d) for d in device.split(',') if d != '-1'] - device_str = device - device_str = device_str.replace(' ', '') - return device_ids, device_str - - -def select_device(device: Union[List[int], str]) -> Device: - """Call this function before cuda is initialized. - device: e.g. []: 'cpu', [0], [0, 1, 2] - e.g. '-1': 'cpu', '0', '0,1,2' - """ - if torch.cuda.is_initialized(): - logger.warning('CUDA has been initialized! Device selection fails!') - return torch.device('cuda:0') - - device_ids, device_str = _format_device(device) - os.environ['CUDA_VISIBLE_DEVICES'] = device_str - log_s = 'Using device: ' - if len(device_ids) == 0: - master_device: str = 'cpu' - log_s += 'cpu' - else: - assert torch.cuda.is_available( - ) and torch.cuda.device_count() >= len(device_ids) - master_device = 'cuda:0' - log_s += f'cuda:{device_str}' - logger.info(log_s) - return torch.device(master_device) - - def seed_everything(seed: Optional[int] = None, gpu_dtm: bool = False) -> int: if seed is None: seed_max = np.iinfo(np.int32).max @@ -154,16 +104,11 @@ def get_T_max(dataset_len: int, batch_size: int, max_epochs: int, def tokenize_function(example: Dict[str, Optional[str]], tokenizer, + prompt: str = DEFAULT_PROMPT, max_length: Optional[int] = 2048) -> Dict[str, Any]: instruction: str = example['instruction'] - input_ = example['input'] - if input_ is not None and input_ != '': - if input_.startswith('输入:'): - instruction = instruction + input_[3:] - else: - instruction = instruction + input_ - output = example['output'] - src_text = PROMPT.format(instruction=instruction) + output = example.get('output') + src_text = prompt.format(instruction=instruction) src_input_ids: List[int] = tokenizer( src_text, return_attention_mask=False, add_special_tokens=True)['input_ids'] @@ -271,7 +216,7 @@ class MyMetric(Metric): def add(self, outputs: Dict[str, Any], inputs: Dict[str, Any]) -> None: loss: Tensor = outputs.loss - self.loss.update(loss) + self.loss.update(loss.cpu()) labels: Tensor = inputs['labels'] labels = labels[:, 1:] @@ -280,7 +225,7 @@ class MyMetric(Metric): logits = logits[labels_mask].contiguous().view(-1, logits.shape[-1]) pred = logits.argmax(dim=-1) labels = labels[labels_mask].to(logits.device) - self.acc.update(pred, labels) + self.acc.update(pred.cpu(), labels.cpu()) def evaluate(self): return { @@ -293,148 +238,6 @@ class MyMetric(Metric): raise NotImplementedError -def _add_special_token(tokenizer): - if tokenizer.eos_token_id is None: - tokenizer.eos_token_id = 2 - if tokenizer.bos_token_id is None: - tokenizer.bos_token_id = 1 - if tokenizer.pad_token_id is None: - tokenizer.pad_token_id = 0 - logger.info(f'bos_token_id: {tokenizer.bos_token_id}, ' - f'eos_token_id: {tokenizer.eos_token_id}, ' - f'pad_token_id: {tokenizer.pad_token_id}') - - -def get_baichuan_model_tokenizer(model_dir: str, - load_model: bool = True, - add_special_token: bool = True, - torch_dtype: Dtype = torch.float16): - model_config = AutoConfig.from_pretrained( - model_dir, trust_remote_code=True) - model_config.torch_dtype = torch_dtype - logger.info(f'model_config: {model_config}') - tokenizer = AutoTokenizer.from_pretrained( - model_dir, trust_remote_code=True) - model = None - if load_model: - model = 
AutoModelForCausalLM.from_pretrained( - model_dir, - config=model_config, - device_map='auto', - torch_dtype=torch_dtype, - trust_remote_code=True) - - if add_special_token: - _add_special_token(tokenizer) - return model, tokenizer - - -def get_chatglm2_model_tokenizer(model_dir: str, - load_model: bool = True, - add_special_token: bool = True, - torch_dtype: Dtype = torch.float16): - config = read_config(model_dir) - logger.info(config) - model_config = ChatGLM2Config.from_pretrained(model_dir) - model_config.torch_dtype = torch_dtype - logger.info(model_config) - tokenizer = ChatGLM2Tokenizer.from_pretrained(model_dir) - model = None - if load_model: - model = Model.from_pretrained( - model_dir, - cfg_dict=config, - config=model_config, - device_map='auto', - torch_dtype=torch_dtype) - if add_special_token: - _add_special_token(tokenizer) - return model, tokenizer - - -def get_llama2_model_tokenizer(model_dir: str, - load_model: bool = True, - add_special_token: bool = True, - torch_dtype: Dtype = torch.float16): - config = read_config(model_dir) - logger.info(config) - model_config = Llama2Config.from_pretrained(model_dir) - model_config.torch_dtype = torch_dtype - logger.info(model_config) - tokenizer = Llama2Tokenizer.from_pretrained(model_dir) - model = None - if load_model: - model = Model.from_pretrained( - model_dir, - cfg_dict=config, - config=model_config, - device_map='auto', - torch_dtype=torch_dtype) - if add_special_token: - _add_special_token(tokenizer) - return model, tokenizer - - -def get_model_tokenizer(model_type: str, - load_model: bool = True, - add_special_token: bool = True, - torch_dtype: Dtype = torch.float16): - # ### Loading Model and Tokenizer - if model_type == 'baichuan-7b': - model_dir = snapshot_download('baichuan-inc/baichuan-7B', 'v1.0.7') - model, tokenizer = get_baichuan_model_tokenizer( - model_dir, load_model, add_special_token, torch_dtype) - elif model_type == 'baichuan-13b': - model_dir = snapshot_download('baichuan-inc/Baichuan-13B-Base', - 'v1.0.3') - model, tokenizer = get_baichuan_model_tokenizer( - model_dir, load_model, add_special_token, torch_dtype) - elif model_type == 'chatglm2': - model_dir = snapshot_download('ZhipuAI/chatglm2-6b', 'v1.0.6') - model, tokenizer = get_chatglm2_model_tokenizer( - model_dir, load_model, add_special_token, torch_dtype) - elif model_type == 'llama2-7b': - # use `.safetensors` - model_dir = snapshot_download( - 'modelscope/Llama-2-7b-ms', - 'v1.0.2', - ignore_file_pattern=[r'.+\.bin$']) - model, tokenizer = get_llama2_model_tokenizer(model_dir, load_model, - add_special_token, - torch_dtype) - else: - raise ValueError(f'model_type: {model_type}') - return model, tokenizer, model_dir - - -def get_alpaca_en_zh_dataset( - tokenize_function, - only_val: bool = False, - test_split_p: float = 0.01, - split_seed: int = 42, - data_sample: Optional[int] = None) -> Tuple[HfDataset, HfDataset]: - dataset_en: HfDataset = MsDataset.load( - 'AI-ModelScope/alpaca-gpt4-data-en', split='train').to_hf_dataset() - dataset_zh: HfDataset = MsDataset.load( - 'AI-ModelScope/alpaca-gpt4-data-zh', split='train').to_hf_dataset() - dataset_en = dataset_en.remove_columns(['text']) - dataset: HfDataset = concatenate_datasets([dataset_zh, dataset_en]) - - if data_sample is not None: - dataset = dataset.select(range(data_sample)) - dataset = dataset.train_test_split(test_split_p, seed=split_seed) - if only_val: - dataset = dataset['test'] - if tokenize_function is not None: - dataset = dataset.map(tokenize_function) - dataset = 
dataset.remove_columns(['instruction', 'input', 'output']) - - if only_val: - return None, dataset - else: - return dataset['train'], dataset['test'] - - Item = Dict[str, float] @@ -500,13 +303,12 @@ def plot_images(tb_dir: str, plt.savefig(fpath, dpi=dpi, bbox_inches='tight') -def inference(data: Dict[str, Optional[str]], +def inference(input_ids: List[int], model, tokenizer, streamer: Optional[TextStreamer] = None, generation_config: Optional[GenerationConfig] = None, tag: str = '[INFERENCE]') -> str: - input_ids = tokenize_function(data, tokenizer)['input_ids'] print(f'{tag}{tokenizer.decode(input_ids)}', end='') input_ids = torch.tensor(input_ids)[None].cuda() attention_mask = torch.ones_like(input_ids) From 00c79428d50740dd6534e9619abb69f8e3d77f5f Mon Sep 17 00:00:00 2001 From: "yuze.zyz" Date: Wed, 26 Jul 2023 21:30:38 +0800 Subject: [PATCH 85/87] fix --- modelscope/utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modelscope/utils/torch_utils.py b/modelscope/utils/torch_utils.py index cb87c788..b8e48aa1 100644 --- a/modelscope/utils/torch_utils.py +++ b/modelscope/utils/torch_utils.py @@ -357,5 +357,6 @@ def all_gather(data, group=None): def is_on_same_device(model: torch.nn.Module) -> bool: - device_set = set(map(lambda p: p.device, model.parameters())) + device_set = set(map(lambda p: p.device.type, + model.parameters())) - {'cpu'} return len(device_set) == 1 From 9802dfe93b186d35bbbb5140c85912cf231ed50e Mon Sep 17 00:00:00 2001 From: tastelikefeet <58414341+tastelikefeet@users.noreply.github.com> Date: Thu, 27 Jul 2023 16:23:21 +0800 Subject: [PATCH 86/87] fix device error (#419) --- modelscope/utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modelscope/utils/torch_utils.py b/modelscope/utils/torch_utils.py index b8e48aa1..1a673458 100644 --- a/modelscope/utils/torch_utils.py +++ b/modelscope/utils/torch_utils.py @@ -359,4 +359,4 @@ def all_gather(data, group=None): def is_on_same_device(model: torch.nn.Module) -> bool: device_set = set(map(lambda p: p.device.type, model.parameters())) - {'cpu'} - return len(device_set) == 1 + return len(device_set) <= 1 From dca6143b8b72e1e8d51847d36414f1db46f12fc6 Mon Sep 17 00:00:00 2001 From: Wang Qiang <37444407+XDUWQ@users.noreply.github.com> Date: Thu, 27 Jul 2023 16:49:01 +0800 Subject: [PATCH 87/87] support safetensors weight pipeline (#421) --- .../stable_diffusion/stable_diffusion_pipeline.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py index ec267e47..7e56f24c 100644 --- a/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py +++ b/modelscope/pipelines/multi_modal/diffusers_wrapped/stable_diffusion/stable_diffusion_pipeline.py @@ -37,7 +37,9 @@ class StableDiffusionPipeline(DiffusersPipeline): lora_dir: lora weight dir for unet. custom_dir: custom diffusion weight dir for unet. modifier_token: token to use as a modifier for the concept of custom diffusion. + use_safetensors: load safetensors weights. 
""" + use_safetensors = kwargs.pop('use_safetensors', False) # check custom diffusion input value if custom_dir is None and modifier_token is not None: raise ValueError( @@ -50,7 +52,7 @@ class StableDiffusionPipeline(DiffusersPipeline): # load pipeline torch_type = torch.float16 if self.device == 'cuda' else torch.float32 self.pipeline = DiffusionPipeline.from_pretrained( - model, torch_dtype=torch_type) + model, use_safetensors=use_safetensors, torch_dtype=torch_type) self.pipeline = self.pipeline.to(self.device) # load lora moudle to unet