diff --git a/tracker/base_tracker.py b/tracker/base_tracker.py
index a544408..71f9f63 100644
--- a/tracker/base_tracker.py
+++ b/tracker/base_tracker.py
@@ -7,14 +7,13 @@
 from PIL import Image
 import torch
 import yaml
+import torch.nn.functional as F
 
 from model.network import XMem
 from inference.inference_core import InferenceCore
-from inference.data.mask_mapper import MaskMapper
-
-# for data transormation
+from util.mask_mapper import MaskMapper
 from torchvision import transforms
-from dataset.range_transform import im_normalization
-import torch.nn.functional as F
+from util.range_transform import im_normalization
+
 import sys
 sys.path.insert(0, sys.path[0]+"/../")
@@ -39,9 +38,11 @@ class BaseTracker:
             transforms.ToTensor(),
             im_normalization,
         ])
-        self.mapper = MaskMapper()
         self.device = device
+        self.mapper = MaskMapper()
+        self.initialised = False
+
 
     @torch.no_grad()
     def resize_mask(self, mask):
         # mask transform is applied AFTER mapper, so we need to post-process it in eval.py
@@ -51,37 +52,42 @@ class BaseTracker:
             mode='nearest')
 
     @torch.no_grad()
-    def track(self, frames, first_frame_annotation):
+    def track(self, frame, first_frame_annotation=None):
         """
         Input:
-        frames: numpy arrays: T, H, W, 3 (T: number of frames)
-        first_frame_annotation: numpy array: H, W
+        frame: numpy array (H, W, 3)
+        first_frame_annotation: numpy array (H, W)
         Output:
-        masks: numpy arrays: H, W
+        mask: numpy array (H, W)
+        prob: numpy array, probability map (H, W)
+        painted_image: numpy array (H, W, 3)
         """
 
-        vid_length = len(frames)
-        masks = []
+        if first_frame_annotation is not None:
+            # initialisation
+            mask, labels = self.mapper.convert_mask(first_frame_annotation)
+            mask = torch.Tensor(mask).to(self.device)
+            self.tracker.set_all_labels(list(self.mapper.remappings.values()))
+        else:
+            mask = None
+            labels = None
 
-        for ti, frame in enumerate(frames):
-            # convert to tensor
-            frame_tensor = self.im_transform(frame).to(self.device)
-            if ti == 0:
-                mask, labels = self.mapper.convert_mask(first_frame_annotation)
-                mask = torch.Tensor(mask).to(self.device)
-                self.tracker.set_all_labels(list(self.mapper.remappings.values()))
-            else:
-                mask = None
-                labels = None
-
-            # track one frame
-            prob = self.tracker.step(frame_tensor, mask, labels, end=(ti==vid_length-1))
-            # convert to mask
-            out_mask = torch.argmax(prob, dim=0)
-            out_mask = (out_mask.detach().cpu().numpy()).astype(np.uint8)
-            masks.append(out_mask)
+        # prepare inputs
+        frame_tensor = self.im_transform(frame).to(self.device)
+        # track one frame
+        prob = self.tracker.step(frame_tensor, mask, labels)
+        # convert to mask
+        out_mask = torch.argmax(prob, dim=0)
+        out_mask = (out_mask.detach().cpu().numpy()).astype(np.uint8)
+        painted_image = mask_painter(frame, out_mask)
 
-        return np.stack(masks, 0)
+        # mask, _, painted_frame
+        return out_mask, prob, painted_image
+
+    @torch.no_grad()
+    def clear_memory(self):
+        self.tracker.clear_memory()
+        self.mapper.clear_labels()
 
 
 if __name__ == '__main__':
@@ -106,11 +112,40 @@ if __name__ == '__main__':
     tracker = BaseTracker(device, XMEM_checkpoint)
 
     # track anything given in the first frame annotation
-    masks = tracker.track(frames, first_frame_annotation)
-
-    # save
-    for ti, (frame, mask) in enumerate(zip(frames, masks)):
-        painted_image = mask_painter(frame, mask)
+    for ti, frame in enumerate(frames):
+        if ti == 0:
+            mask, prob, painted_image = tracker.track(frame, first_frame_annotation)
+        else:
+            mask, prob, painted_image = tracker.track(frame)
         # save
         painted_image = Image.fromarray(painted_image)
-        painted_image.save(f'/ssd1/gaomingqi/results/TrackA/{ti:05d}.png')
+        painted_image.save(f'/ssd1/gaomingqi/results/TrackA/dance-twirl/{ti:05d}.png')
+
+    # ----------------------------------------------------------
+    # another video
+    # ----------------------------------------------------------
+    # video frames
+    video_path_list = glob.glob(os.path.join('/ssd1/gaomingqi/datasets/davis/JPEGImages/480p/camel', '*.jpg'))
+    video_path_list.sort()
+    # first frame
+    first_frame_path = '/ssd1/gaomingqi/datasets/davis/Annotations/480p/camel/00000.png'
+    # load frames
+    frames = []
+    for video_path in video_path_list:
+        frames.append(np.array(Image.open(video_path).convert('RGB')))
+    frames = np.stack(frames, 0)    # N, H, W, C
+    # load first frame annotation
+    first_frame_annotation = np.array(Image.open(first_frame_path).convert('P'))    # H, W
+
+    print('first video done. clear.')
+
+    tracker.clear_memory()
+    # track anything given in the first frame annotation
+    for ti, frame in enumerate(frames):
+        if ti == 0:
+            mask, prob, painted_image = tracker.track(frame, first_frame_annotation)
+        else:
+            mask, prob, painted_image = tracker.track(frame)
+        # save
+        painted_image = Image.fromarray(painted_image)
+        painted_image.save(f'/ssd1/gaomingqi/results/TrackA/camel/{ti:05d}.png')
diff --git a/tracker/dataset/__init__.py b/tracker/dataset/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tracker/dataset/reseed.py b/tracker/dataset/reseed.py
deleted file mode 100644
index 600c998..0000000
--- a/tracker/dataset/reseed.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import torch
-import random
-
-def reseed(seed):
-    random.seed(seed)
-    torch.manual_seed(seed)
\ No newline at end of file
diff --git a/tracker/dataset/static_dataset.py b/tracker/dataset/static_dataset.py
deleted file mode 100644
index 5800f5f..0000000
--- a/tracker/dataset/static_dataset.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import os
-from os import path
-
-import torch
-from torch.utils.data.dataset import Dataset
-from torchvision import transforms
-from torchvision.transforms import InterpolationMode
-from PIL import Image
-import numpy as np
-
-from dataset.range_transform import im_normalization, im_mean
-from dataset.tps import random_tps_warp
-from dataset.reseed import reseed
-
-
-class StaticTransformDataset(Dataset):
-    """
-    Generate pseudo VOS data by applying random transforms on static images.
-    Single-object only.
- - Method 0 - FSS style (class/1.jpg class/1.png) - Method 1 - Others style (XXX.jpg XXX.png) - """ - def __init__(self, parameters, num_frames=3, max_num_obj=1): - self.num_frames = num_frames - self.max_num_obj = max_num_obj - - self.im_list = [] - for parameter in parameters: - root, method, multiplier = parameter - if method == 0: - # Get images - classes = os.listdir(root) - for c in classes: - imgs = os.listdir(path.join(root, c)) - jpg_list = [im for im in imgs if 'jpg' in im[-3:].lower()] - - joint_list = [path.join(root, c, im) for im in jpg_list] - self.im_list.extend(joint_list * multiplier) - - elif method == 1: - self.im_list.extend([path.join(root, im) for im in os.listdir(root) if '.jpg' in im] * multiplier) - - print(f'{len(self.im_list)} images found.') - - # These set of transform is the same for im/gt pairs, but different among the 3 sampled frames - self.pair_im_lone_transform = transforms.Compose([ - transforms.ColorJitter(0.1, 0.05, 0.05, 0), # No hue change here as that's not realistic - ]) - - self.pair_im_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=20, scale=(0.9,1.1), shear=10, interpolation=InterpolationMode.BICUBIC, fill=im_mean), - transforms.Resize(384, InterpolationMode.BICUBIC), - transforms.RandomCrop((384, 384), pad_if_needed=True, fill=im_mean), - ]) - - self.pair_gt_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=20, scale=(0.9,1.1), shear=10, interpolation=InterpolationMode.BICUBIC, fill=0), - transforms.Resize(384, InterpolationMode.NEAREST), - transforms.RandomCrop((384, 384), pad_if_needed=True, fill=0), - ]) - - - # These transform are the same for all pairs in the sampled sequence - self.all_im_lone_transform = transforms.Compose([ - transforms.ColorJitter(0.1, 0.05, 0.05, 0.05), - transforms.RandomGrayscale(0.05), - ]) - - self.all_im_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=0, scale=(0.8, 1.5), fill=im_mean), - transforms.RandomHorizontalFlip(), - ]) - - self.all_gt_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=0, scale=(0.8, 1.5), fill=0), - transforms.RandomHorizontalFlip(), - ]) - - # Final transform without randomness - self.final_im_transform = transforms.Compose([ - transforms.ToTensor(), - im_normalization, - ]) - - self.final_gt_transform = transforms.Compose([ - transforms.ToTensor(), - ]) - - def _get_sample(self, idx): - im = Image.open(self.im_list[idx]).convert('RGB') - gt = Image.open(self.im_list[idx][:-3]+'png').convert('L') - - sequence_seed = np.random.randint(2147483647) - - images = [] - masks = [] - for _ in range(self.num_frames): - reseed(sequence_seed) - this_im = self.all_im_dual_transform(im) - this_im = self.all_im_lone_transform(this_im) - reseed(sequence_seed) - this_gt = self.all_gt_dual_transform(gt) - - pairwise_seed = np.random.randint(2147483647) - reseed(pairwise_seed) - this_im = self.pair_im_dual_transform(this_im) - this_im = self.pair_im_lone_transform(this_im) - reseed(pairwise_seed) - this_gt = self.pair_gt_dual_transform(this_gt) - - # Use TPS only some of the times - # Not because TPS is bad -- just that it is too slow and I need to speed up data loading - if np.random.rand() < 0.33: - this_im, this_gt = random_tps_warp(this_im, this_gt, scale=0.02) - - this_im = self.final_im_transform(this_im) - this_gt = self.final_gt_transform(this_gt) - - images.append(this_im) - masks.append(this_gt) - - images = torch.stack(images, 0) - masks = torch.stack(masks, 0) - - return images, masks.numpy() - - def 
__getitem__(self, idx): - additional_objects = np.random.randint(self.max_num_obj) - indices = [idx, *np.random.randint(self.__len__(), size=additional_objects)] - - merged_images = None - merged_masks = np.zeros((self.num_frames, 384, 384), dtype=np.int64) - - for i, list_id in enumerate(indices): - images, masks = self._get_sample(list_id) - if merged_images is None: - merged_images = images - else: - merged_images = merged_images*(1-masks) + images*masks - merged_masks[masks[:,0]>0.5] = (i+1) - - masks = merged_masks - - labels = np.unique(masks[0]) - # Remove background - labels = labels[labels!=0] - target_objects = labels.tolist() - - # Generate one-hot ground-truth - cls_gt = np.zeros((self.num_frames, 384, 384), dtype=np.int64) - first_frame_gt = np.zeros((1, self.max_num_obj, 384, 384), dtype=np.int64) - for i, l in enumerate(target_objects): - this_mask = (masks==l) - cls_gt[this_mask] = i+1 - first_frame_gt[0,i] = (this_mask[0]) - cls_gt = np.expand_dims(cls_gt, 1) - - info = {} - info['name'] = self.im_list[idx] - info['num_objects'] = max(1, len(target_objects)) - - # 1 if object exist, 0 otherwise - selector = [1 if i < info['num_objects'] else 0 for i in range(self.max_num_obj)] - selector = torch.FloatTensor(selector) - - data = { - 'rgb': merged_images, - 'first_frame_gt': first_frame_gt, - 'cls_gt': cls_gt, - 'selector': selector, - 'info': info - } - - return data - - - def __len__(self): - return len(self.im_list) diff --git a/tracker/dataset/tps.py b/tracker/dataset/tps.py deleted file mode 100644 index 9ee3747..0000000 --- a/tracker/dataset/tps.py +++ /dev/null @@ -1,37 +0,0 @@ -import numpy as np -from PIL import Image -import cv2 -import thinplate as tps - -cv2.setNumThreads(0) - -def pick_random_points(h, w, n_samples): - y_idx = np.random.choice(np.arange(h), size=n_samples, replace=False) - x_idx = np.random.choice(np.arange(w), size=n_samples, replace=False) - return y_idx/h, x_idx/w - - -def warp_dual_cv(img, mask, c_src, c_dst): - dshape = img.shape - theta = tps.tps_theta_from_points(c_src, c_dst, reduced=True) - grid = tps.tps_grid(theta, c_dst, dshape) - mapx, mapy = tps.tps_grid_to_remap(grid, img.shape) - return cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR), cv2.remap(mask, mapx, mapy, cv2.INTER_NEAREST) - - -def random_tps_warp(img, mask, scale, n_ctrl_pts=12): - """ - Apply a random TPS warp of the input image and mask - Uses randomness from numpy - """ - img = np.asarray(img) - mask = np.asarray(mask) - - h, w = mask.shape - points = pick_random_points(h, w, n_ctrl_pts) - c_src = np.stack(points, 1) - c_dst = c_src + np.random.normal(scale=scale, size=c_src.shape) - warp_im, warp_gt = warp_dual_cv(img, mask, c_src, c_dst) - - return Image.fromarray(warp_im), Image.fromarray(warp_gt) - diff --git a/tracker/dataset/util.py b/tracker/dataset/util.py deleted file mode 100644 index f8e5523..0000000 --- a/tracker/dataset/util.py +++ /dev/null @@ -1,13 +0,0 @@ -import numpy as np - - -def all_to_onehot(masks, labels): - if len(masks.shape) == 3: - Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8) - else: - Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8) - - for ni, l in enumerate(labels): - Ms[ni] = (masks == l).astype(np.uint8) - - return Ms diff --git a/tracker/dataset/vos_dataset.py b/tracker/dataset/vos_dataset.py deleted file mode 100644 index 2b5d365..0000000 --- a/tracker/dataset/vos_dataset.py +++ /dev/null @@ -1,216 +0,0 @@ -import os -from os import path, replace - -import torch 
-from torch.utils.data.dataset import Dataset -from torchvision import transforms -from torchvision.transforms import InterpolationMode -from PIL import Image -import numpy as np - -from dataset.range_transform import im_normalization, im_mean -from dataset.reseed import reseed - - -class VOSDataset(Dataset): - """ - Works for DAVIS/YouTubeVOS/BL30K training - For each sequence: - - Pick three frames - - Pick two objects - - Apply some random transforms that are the same for all frames - - Apply random transform to each of the frame - - The distance between frames is controlled - """ - def __init__(self, im_root, gt_root, max_jump, is_bl, subset=None, num_frames=3, max_num_obj=3, finetune=False): - self.im_root = im_root - self.gt_root = gt_root - self.max_jump = max_jump - self.is_bl = is_bl - self.num_frames = num_frames - self.max_num_obj = max_num_obj - - self.videos = [] - self.frames = {} - - vid_list = sorted(os.listdir(self.im_root)) - # Pre-filtering - for vid in vid_list: - if subset is not None: - if vid not in subset: - continue - frames = sorted(os.listdir(os.path.join(self.im_root, vid))) - if len(frames) < num_frames: - continue - self.frames[vid] = frames - self.videos.append(vid) - - print('%d out of %d videos accepted in %s.' % (len(self.videos), len(vid_list), im_root)) - - # These set of transform is the same for im/gt pairs, but different among the 3 sampled frames - self.pair_im_lone_transform = transforms.Compose([ - transforms.ColorJitter(0.01, 0.01, 0.01, 0), - ]) - - self.pair_im_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=0 if finetune or self.is_bl else 15, shear=0 if finetune or self.is_bl else 10, interpolation=InterpolationMode.BILINEAR, fill=im_mean), - ]) - - self.pair_gt_dual_transform = transforms.Compose([ - transforms.RandomAffine(degrees=0 if finetune or self.is_bl else 15, shear=0 if finetune or self.is_bl else 10, interpolation=InterpolationMode.NEAREST, fill=0), - ]) - - # These transform are the same for all pairs in the sampled sequence - self.all_im_lone_transform = transforms.Compose([ - transforms.ColorJitter(0.1, 0.03, 0.03, 0), - transforms.RandomGrayscale(0.05), - ]) - - if self.is_bl: - # Use a different cropping scheme for the blender dataset because the image size is different - self.all_im_dual_transform = transforms.Compose([ - transforms.RandomHorizontalFlip(), - transforms.RandomResizedCrop((384, 384), scale=(0.25, 1.00), interpolation=InterpolationMode.BILINEAR) - ]) - - self.all_gt_dual_transform = transforms.Compose([ - transforms.RandomHorizontalFlip(), - transforms.RandomResizedCrop((384, 384), scale=(0.25, 1.00), interpolation=InterpolationMode.NEAREST) - ]) - else: - self.all_im_dual_transform = transforms.Compose([ - transforms.RandomHorizontalFlip(), - transforms.RandomResizedCrop((384, 384), scale=(0.36,1.00), interpolation=InterpolationMode.BILINEAR) - ]) - - self.all_gt_dual_transform = transforms.Compose([ - transforms.RandomHorizontalFlip(), - transforms.RandomResizedCrop((384, 384), scale=(0.36,1.00), interpolation=InterpolationMode.NEAREST) - ]) - - # Final transform without randomness - self.final_im_transform = transforms.Compose([ - transforms.ToTensor(), - im_normalization, - ]) - - def __getitem__(self, idx): - video = self.videos[idx] - info = {} - info['name'] = video - - vid_im_path = path.join(self.im_root, video) - vid_gt_path = path.join(self.gt_root, video) - frames = self.frames[video] - - trials = 0 - while trials < 5: - info['frames'] = [] # Appended with actual frames - - 
num_frames = self.num_frames - length = len(frames) - this_max_jump = min(len(frames), self.max_jump) - - # iterative sampling - frames_idx = [np.random.randint(length)] - acceptable_set = set(range(max(0, frames_idx[-1]-this_max_jump), min(length, frames_idx[-1]+this_max_jump+1))).difference(set(frames_idx)) - while(len(frames_idx) < num_frames): - idx = np.random.choice(list(acceptable_set)) - frames_idx.append(idx) - new_set = set(range(max(0, frames_idx[-1]-this_max_jump), min(length, frames_idx[-1]+this_max_jump+1))) - acceptable_set = acceptable_set.union(new_set).difference(set(frames_idx)) - - frames_idx = sorted(frames_idx) - if np.random.rand() < 0.5: - # Reverse time - frames_idx = frames_idx[::-1] - - sequence_seed = np.random.randint(2147483647) - images = [] - masks = [] - target_objects = [] - for f_idx in frames_idx: - jpg_name = frames[f_idx][:-4] + '.jpg' - png_name = frames[f_idx][:-4] + '.png' - info['frames'].append(jpg_name) - - reseed(sequence_seed) - this_im = Image.open(path.join(vid_im_path, jpg_name)).convert('RGB') - this_im = self.all_im_dual_transform(this_im) - this_im = self.all_im_lone_transform(this_im) - reseed(sequence_seed) - this_gt = Image.open(path.join(vid_gt_path, png_name)).convert('P') - this_gt = self.all_gt_dual_transform(this_gt) - - pairwise_seed = np.random.randint(2147483647) - reseed(pairwise_seed) - this_im = self.pair_im_dual_transform(this_im) - this_im = self.pair_im_lone_transform(this_im) - reseed(pairwise_seed) - this_gt = self.pair_gt_dual_transform(this_gt) - - this_im = self.final_im_transform(this_im) - this_gt = np.array(this_gt) - - images.append(this_im) - masks.append(this_gt) - - images = torch.stack(images, 0) - - labels = np.unique(masks[0]) - # Remove background - labels = labels[labels!=0] - - if self.is_bl: - # Find large enough labels - good_lables = [] - for l in labels: - pixel_sum = (masks[0]==l).sum() - if pixel_sum > 10*10: - # OK if the object is always this small - # Not OK if it is actually much bigger - if pixel_sum > 30*30: - good_lables.append(l) - elif max((masks[1]==l).sum(), (masks[2]==l).sum()) < 20*20: - good_lables.append(l) - labels = np.array(good_lables, dtype=np.uint8) - - if len(labels) == 0: - target_objects = [] - trials += 1 - else: - target_objects = labels.tolist() - break - - if len(target_objects) > self.max_num_obj: - target_objects = np.random.choice(target_objects, size=self.max_num_obj, replace=False) - - info['num_objects'] = max(1, len(target_objects)) - - masks = np.stack(masks, 0) - - # Generate one-hot ground-truth - cls_gt = np.zeros((self.num_frames, 384, 384), dtype=np.int64) - first_frame_gt = np.zeros((1, self.max_num_obj, 384, 384), dtype=np.int64) - for i, l in enumerate(target_objects): - this_mask = (masks==l) - cls_gt[this_mask] = i+1 - first_frame_gt[0,i] = (this_mask[0]) - cls_gt = np.expand_dims(cls_gt, 1) - - # 1 if object exist, 0 otherwise - selector = [1 if i < info['num_objects'] else 0 for i in range(self.max_num_obj)] - selector = torch.FloatTensor(selector) - - data = { - 'rgb': images, - 'first_frame_gt': first_frame_gt, - 'cls_gt': cls_gt, - 'selector': selector, - 'info': info, - } - - return data - - def __len__(self): - return len(self.videos) diff --git a/tracker/eval.py b/tracker/eval.py deleted file mode 100644 index 8d4e186..0000000 --- a/tracker/eval.py +++ /dev/null @@ -1,257 +0,0 @@ -import os -from os import path -from argparse import ArgumentParser -import shutil - -import torch -import torch.nn.functional as F -from torch.utils.data import 
DataLoader -import numpy as np -from PIL import Image - -from inference.data.test_datasets import LongTestDataset, DAVISTestDataset, YouTubeVOSTestDataset -from inference.data.mask_mapper import MaskMapper -from model.network import XMem -from inference.inference_core import InferenceCore - -from progressbar import progressbar - -try: - import hickle as hkl -except ImportError: - print('Failed to import hickle. Fine if not using multi-scale testing.') - - -""" -Arguments loading -""" -parser = ArgumentParser() -parser.add_argument('--model', default='/ssd1/gaomingqi/checkpoints/XMem-s012.pth') - -# Data options -parser.add_argument('--d16_path', default='../DAVIS/2016') -parser.add_argument('--d17_path', default='../DAVIS/2017') -parser.add_argument('--y18_path', default='/ssd1/gaomingqi/datasets/youtube-vos/2018') -parser.add_argument('--y19_path', default='../YouTube') -parser.add_argument('--lv_path', default='../long_video_set') -# For generic (G) evaluation, point to a folder that contains "JPEGImages" and "Annotations" -parser.add_argument('--generic_path') - -parser.add_argument('--dataset', help='D16/D17/Y18/Y19/LV1/LV3/G', default='D17') -parser.add_argument('--split', help='val/test', default='val') -parser.add_argument('--output', default=None) -parser.add_argument('--save_all', action='store_true', - help='Save all frames. Useful only in YouTubeVOS/long-time video', ) - -parser.add_argument('--benchmark', action='store_true', help='enable to disable amp for FPS benchmarking') - -# Long-term memory options -parser.add_argument('--disable_long_term', action='store_true') -parser.add_argument('--max_mid_term_frames', help='T_max in paper, decrease to save memory', type=int, default=10) -parser.add_argument('--min_mid_term_frames', help='T_min in paper, decrease to save memory', type=int, default=5) -parser.add_argument('--max_long_term_elements', help='LT_max in paper, increase if objects disappear for a long time', - type=int, default=10000) -parser.add_argument('--num_prototypes', help='P in paper', type=int, default=128) - -parser.add_argument('--top_k', type=int, default=30) -parser.add_argument('--mem_every', help='r in paper. Increase to improve running speed.', type=int, default=5) -parser.add_argument('--deep_update_every', help='Leave -1 normally to synchronize with mem_every', type=int, default=-1) - -# Multi-scale options -parser.add_argument('--save_scores', action='store_true') -parser.add_argument('--flip', action='store_true') -parser.add_argument('--size', default=480, type=int, - help='Resize the shorter side to this size. -1 to use original resolution. ') - -args = parser.parse_args() -config = vars(args) -config['enable_long_term'] = not config['disable_long_term'] - -if args.output is None: - args.output = f'../output/{args.dataset}_{args.split}' - print(f'Output path not provided. 
Defaulting to {args.output}') - -""" -Data preparation -""" -is_youtube = args.dataset.startswith('Y') -is_davis = args.dataset.startswith('D') -is_lv = args.dataset.startswith('LV') - -if is_youtube or args.save_scores: - out_path = path.join(args.output, 'Annotations') -else: - out_path = args.output - -if is_youtube: - if args.dataset == 'Y18': - yv_path = args.y18_path - elif args.dataset == 'Y19': - yv_path = args.y19_path - - if args.split == 'val': - args.split = 'valid' - meta_dataset = YouTubeVOSTestDataset(data_root=yv_path, split='valid', size=args.size) - elif args.split == 'test': - meta_dataset = YouTubeVOSTestDataset(data_root=yv_path, split='test', size=args.size) - else: - raise NotImplementedError - -elif is_davis: - if args.dataset == 'D16': - if args.split == 'val': - # Set up Dataset, a small hack to use the image set in the 2017 folder because the 2016 one is of a different format - meta_dataset = DAVISTestDataset(args.d16_path, imset='../../2017/trainval/ImageSets/2016/val.txt', size=args.size) - else: - raise NotImplementedError - palette = None - elif args.dataset == 'D17': - if args.split == 'val': - meta_dataset = DAVISTestDataset(path.join(args.d17_path, 'trainval'), imset='2017/val.txt', size=args.size) - elif args.split == 'test': - meta_dataset = DAVISTestDataset(path.join(args.d17_path, 'test-dev'), imset='2017/test-dev.txt', size=args.size) - else: - raise NotImplementedError - -elif is_lv: - if args.dataset == 'LV1': - meta_dataset = LongTestDataset(path.join(args.lv_path, 'long_video')) - elif args.dataset == 'LV3': - meta_dataset = LongTestDataset(path.join(args.lv_path, 'long_video_x3')) - else: - raise NotImplementedError -elif args.dataset == 'G': - meta_dataset = LongTestDataset(path.join(args.generic_path), size=args.size) - if not args.save_all: - args.save_all = True - print('save_all is forced to be true in generic evaluation mode.') -else: - raise NotImplementedError - -torch.autograd.set_grad_enabled(False) - -# Set up loader -meta_loader = meta_dataset.get_datasets() - -# Load our checkpoint -network = XMem(config, args.model).cuda().eval() -if args.model is not None: - model_weights = torch.load(args.model) - network.load_weights(model_weights, init_as_zero_if_needed=True) -else: - print('No model loaded.') - -total_process_time = 0 -total_frames = 0 - -# Start eval -for vid_reader in progressbar(meta_loader, max_value=len(meta_dataset), redirect_stdout=True): - - loader = DataLoader(vid_reader, batch_size=1, shuffle=False, num_workers=2) - vid_name = vid_reader.vid_name - vid_length = len(loader) - # no need to count usage for LT if the video is not that long anyway - config['enable_long_term_count_usage'] = ( - config['enable_long_term'] and - (vid_length - / (config['max_mid_term_frames']-config['min_mid_term_frames']) - * config['num_prototypes']) - >= config['max_long_term_elements'] - ) - - mapper = MaskMapper() - processor = InferenceCore(network, config=config) - first_mask_loaded = False - - for ti, data in enumerate(loader): - with torch.cuda.amp.autocast(enabled=not args.benchmark): - rgb = data['rgb'].cuda()[0] - msk = data.get('mask') - info = data['info'] - frame = info['frame'][0] - shape = info['shape'] - need_resize = info['need_resize'][0] - - """ - For timing see https://discuss.pytorch.org/t/how-to-measure-time-in-pytorch/26964 - Seems to be very similar in testing as my previous timing method - with two cuda sync + time.time() in STCN though - """ - start = torch.cuda.Event(enable_timing=True) - end = 
torch.cuda.Event(enable_timing=True) - start.record() - - if not first_mask_loaded: - if msk is not None: - first_mask_loaded = True - else: - # no point to do anything without a mask - continue - - if args.flip: - rgb = torch.flip(rgb, dims=[-1]) - msk = torch.flip(msk, dims=[-1]) if msk is not None else None - - # Map possibly non-continuous labels to continuous ones - if msk is not None: - msk, labels = mapper.convert_mask(msk[0].numpy()) - msk = torch.Tensor(msk).cuda() - if need_resize: - msk = vid_reader.resize_mask(msk.unsqueeze(0))[0] - processor.set_all_labels(list(mapper.remappings.values())) - else: - labels = None - - # Run the model on this frame - prob = processor.step(rgb, msk, labels, end=(ti==vid_length-1)) # 0, background, >0, objects - - # Upsample to original size if needed - if need_resize: - prob = F.interpolate(prob.unsqueeze(1), shape, mode='bilinear', align_corners=False)[:,0] - - end.record() - torch.cuda.synchronize() - total_process_time += (start.elapsed_time(end)/1000) - total_frames += 1 - - if args.flip: - prob = torch.flip(prob, dims=[-1]) - - # Probability mask -> index mask - out_mask = torch.argmax(prob, dim=0) - out_mask = (out_mask.detach().cpu().numpy()).astype(np.uint8) - - if args.save_scores: - prob = (prob.detach().cpu().numpy()*255).astype(np.uint8) - - # Save the mask - if args.save_all or info['save'][0]: - this_out_path = path.join(out_path, vid_name) - os.makedirs(this_out_path, exist_ok=True) - out_mask = mapper.remap_index_mask(out_mask) - out_img = Image.fromarray(out_mask) - if vid_reader.get_palette() is not None: - out_img.putpalette(vid_reader.get_palette()) - out_img.save(os.path.join(this_out_path, frame[:-4]+'.png')) - - if args.save_scores: - np_path = path.join(args.output, 'Scores', vid_name) - os.makedirs(np_path, exist_ok=True) - if ti==len(loader)-1: - hkl.dump(mapper.remappings, path.join(np_path, f'backward.hkl'), mode='w') - if args.save_all or info['save'][0]: - hkl.dump(prob, path.join(np_path, f'{frame[:-4]}.hkl'), mode='w', compression='lzf') - - -print(f'Total processing time: {total_process_time}') -print(f'Total processed frames: {total_frames}') -print(f'FPS: {total_frames / total_process_time}') -print(f'Max allocated memory (MB): {torch.cuda.max_memory_allocated() / (2**20)}') - -if not args.save_scores: - if is_youtube: - print('Making zip for YouTubeVOS...') - shutil.make_archive(path.join(args.output, path.basename(args.output)), 'zip', args.output, 'Annotations') - elif is_davis and args.split == 'test': - print('Making zip for DAVIS test-dev...') - shutil.make_archive(args.output, 'zip', args.output) diff --git a/tracker/inference/data/__init__.py b/tracker/inference/data/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/tracker/inference/data/test_datasets.py b/tracker/inference/data/test_datasets.py deleted file mode 100644 index 3a4446e..0000000 --- a/tracker/inference/data/test_datasets.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from os import path -import json - -from inference.data.video_reader import VideoReader - - -class LongTestDataset: - def __init__(self, data_root, size=-1): - self.image_dir = path.join(data_root, 'JPEGImages') - self.mask_dir = path.join(data_root, 'Annotations') - self.size = size - - self.vid_list = sorted(os.listdir(self.image_dir)) - - def get_datasets(self): - for video in self.vid_list: - yield VideoReader(video, - path.join(self.image_dir, video), - path.join(self.mask_dir, video), - to_save = [ - name[:-4] for name in 
os.listdir(path.join(self.mask_dir, video)) - ], - size=self.size, - ) - - def __len__(self): - return len(self.vid_list) - - -class DAVISTestDataset: - def __init__(self, data_root, imset='2017/val.txt', size=-1): - if size != 480: - self.image_dir = path.join(data_root, 'JPEGImages', 'Full-Resolution') - self.mask_dir = path.join(data_root, 'Annotations', 'Full-Resolution') - if not path.exists(self.image_dir): - print(f'{self.image_dir} not found. Look at other options.') - self.image_dir = path.join(data_root, 'JPEGImages', '1080p') - self.mask_dir = path.join(data_root, 'Annotations', '1080p') - assert path.exists(self.image_dir), 'path not found' - else: - self.image_dir = path.join(data_root, 'JPEGImages', '480p') - self.mask_dir = path.join(data_root, 'Annotations', '480p') - self.size_dir = path.join(data_root, 'JPEGImages', '480p') - self.size = size - - with open(path.join(data_root, 'ImageSets', imset)) as f: - self.vid_list = sorted([line.strip() for line in f]) - - def get_datasets(self): - for video in self.vid_list: - yield VideoReader(video, - path.join(self.image_dir, video), - path.join(self.mask_dir, video), - size=self.size, - size_dir=path.join(self.size_dir, video), - ) - - def __len__(self): - return len(self.vid_list) - - -class YouTubeVOSTestDataset: - def __init__(self, data_root, split, size=480): - self.image_dir = path.join(data_root, 'all_frames', split+'_all_frames', 'JPEGImages') - self.mask_dir = path.join(data_root, split, 'Annotations') - self.size = size - - self.vid_list = sorted(os.listdir(self.image_dir)) - self.req_frame_list = {} - - with open(path.join(data_root, split, 'meta.json')) as f: - # read meta.json to know which frame is required for evaluation - meta = json.load(f)['videos'] - - for vid in self.vid_list: - req_frames = [] - objects = meta[vid]['objects'] - for value in objects.values(): - req_frames.extend(value['frames']) - - req_frames = list(set(req_frames)) - self.req_frame_list[vid] = req_frames - - def get_datasets(self): - for video in self.vid_list: - yield VideoReader(video, - path.join(self.image_dir, video), - path.join(self.mask_dir, video), - size=self.size, - to_save=self.req_frame_list[video], - use_all_mask=True - ) - - def __len__(self): - return len(self.vid_list) diff --git a/tracker/inference/data/video_reader.py b/tracker/inference/data/video_reader.py deleted file mode 100644 index 28cc4c6..0000000 --- a/tracker/inference/data/video_reader.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -from os import path - -from torch.utils.data.dataset import Dataset -from torchvision import transforms -from torchvision.transforms import InterpolationMode -import torch.nn.functional as F -from PIL import Image -import numpy as np - -from dataset.range_transform import im_normalization - - -class VideoReader(Dataset): - """ - This class is used to read a video, one frame at a time - """ - def __init__(self, vid_name, image_dir, mask_dir, size=-1, to_save=None, use_all_mask=False, size_dir=None): - """ - image_dir - points to a directory of jpg images - mask_dir - points to a directory of png masks - size - resize min. side to size. Does nothing if <0. - to_save - optionally contains a list of file names without extensions - where the segmentation mask is required - use_all_mask - when true, read all available mask in mask_dir. - Default false. Set to true for YouTubeVOS validation. 
- """ - self.vid_name = vid_name - self.image_dir = image_dir - self.mask_dir = mask_dir - self.to_save = to_save - self.use_all_mask = use_all_mask - if size_dir is None: - self.size_dir = self.image_dir - else: - self.size_dir = size_dir - - self.frames = sorted(os.listdir(self.image_dir)) - self.palette = Image.open(path.join(mask_dir, sorted(os.listdir(mask_dir))[0])).getpalette() - self.first_gt_path = path.join(self.mask_dir, sorted(os.listdir(self.mask_dir))[0]) - - if size < 0: - self.im_transform = transforms.Compose([ - transforms.ToTensor(), - im_normalization, - ]) - else: - self.im_transform = transforms.Compose([ - transforms.ToTensor(), - im_normalization, - transforms.Resize(size, interpolation=InterpolationMode.BILINEAR), - ]) - self.size = size - - - def __getitem__(self, idx): - frame = self.frames[idx] - info = {} - data = {} - info['frame'] = frame - info['save'] = (self.to_save is None) or (frame[:-4] in self.to_save) - - im_path = path.join(self.image_dir, frame) - img = Image.open(im_path).convert('RGB') - - if self.image_dir == self.size_dir: - shape = np.array(img).shape[:2] - else: - size_path = path.join(self.size_dir, frame) - size_im = Image.open(size_path).convert('RGB') - shape = np.array(size_im).shape[:2] - - gt_path = path.join(self.mask_dir, frame[:-4]+'.png') - img = self.im_transform(img) - - load_mask = self.use_all_mask or (gt_path == self.first_gt_path) - if load_mask and path.exists(gt_path): - mask = Image.open(gt_path).convert('P') - mask = np.array(mask, dtype=np.uint8) - data['mask'] = mask - - info['shape'] = shape - info['need_resize'] = not (self.size < 0) - data['rgb'] = img - data['info'] = info - - return data - - def resize_mask(self, mask): - # mask transform is applied AFTER mapper, so we need to post-process it in eval.py - h, w = mask.shape[-2:] - min_hw = min(h, w) - return F.interpolate(mask, (int(h/min_hw*self.size), int(w/min_hw*self.size)), - mode='nearest') - - def get_palette(self): - return self.palette - - def __len__(self): - return len(self.frames) \ No newline at end of file diff --git a/tracker/inference/inference_core.py b/tracker/inference/inference_core.py index c4cbffd..8061f51 100644 --- a/tracker/inference/inference_core.py +++ b/tracker/inference/inference_core.py @@ -63,8 +63,6 @@ class InferenceCore: if need_segment: memory_readout = self.memory.match_memory(key, selection).unsqueeze(0) - - hidden, _, pred_prob_with_bg = self.network.segment(multi_scale_features, memory_readout, self.memory.get_hidden(), h_out=is_normal_update, strip_bg=False) # remove batch dim diff --git a/tracker/util/configuration.py b/tracker/util/configuration.py deleted file mode 100644 index ba7d9e8..0000000 --- a/tracker/util/configuration.py +++ /dev/null @@ -1,138 +0,0 @@ -from argparse import ArgumentParser - - -def none_or_default(x, default): - return x if x is not None else default - -class Configuration(): - def parse(self, unknown_arg_ok=False): - parser = ArgumentParser() - - # Enable torch.backends.cudnn.benchmark -- Faster in some cases, test in your own environment - parser.add_argument('--benchmark', action='store_true') - parser.add_argument('--no_amp', action='store_true') - - # Save paths - parser.add_argument('--save_path', default='/ssd1/gaomingqi/output/xmem-sam') - - # Data parameters - parser.add_argument('--static_root', help='Static training data root', default='/ssd1/gaomingqi/datasets/static') - parser.add_argument('--bl_root', help='Blender training data root', default='../BL30K') - 
parser.add_argument('--yv_root', help='YouTubeVOS data root', default='/ssd1/gaomingqi/datasets/youtube-vos/2018') - parser.add_argument('--davis_root', help='DAVIS data root', default='/ssd1/gaomingqi/datasets/davis') - parser.add_argument('--num_workers', help='Total number of dataloader workers across all GPUs processes', type=int, default=16) - - parser.add_argument('--key_dim', default=64, type=int) - parser.add_argument('--value_dim', default=512, type=int) - parser.add_argument('--hidden_dim', default=64, help='Set to =0 to disable', type=int) - - parser.add_argument('--deep_update_prob', default=0.2, type=float) - - parser.add_argument('--stages', help='Training stage (0-static images, 1-Blender dataset, 2-DAVIS+YouTubeVOS)', default='02') - - """ - Stage-specific learning parameters - Batch sizes are effective -- you don't have to scale them when you scale the number processes - """ - # Stage 0, static images - parser.add_argument('--s0_batch_size', default=16, type=int) - parser.add_argument('--s0_iterations', default=150000, type=int) - parser.add_argument('--s0_finetune', default=0, type=int) - parser.add_argument('--s0_steps', nargs="*", default=[], type=int) - parser.add_argument('--s0_lr', help='Initial learning rate', default=1e-5, type=float) - parser.add_argument('--s0_num_ref_frames', default=2, type=int) - parser.add_argument('--s0_num_frames', default=3, type=int) - parser.add_argument('--s0_start_warm', default=20000, type=int) - parser.add_argument('--s0_end_warm', default=70000, type=int) - - # Stage 1, BL30K - parser.add_argument('--s1_batch_size', default=8, type=int) - parser.add_argument('--s1_iterations', default=250000, type=int) - # fine-tune means fewer augmentations to train the sensory memory - parser.add_argument('--s1_finetune', default=0, type=int) - parser.add_argument('--s1_steps', nargs="*", default=[200000], type=int) - parser.add_argument('--s1_lr', help='Initial learning rate', default=1e-5, type=float) - parser.add_argument('--s1_num_ref_frames', default=3, type=int) - parser.add_argument('--s1_num_frames', default=8, type=int) - parser.add_argument('--s1_start_warm', default=20000, type=int) - parser.add_argument('--s1_end_warm', default=70000, type=int) - - # Stage 2, DAVIS+YoutubeVOS, longer - parser.add_argument('--s2_batch_size', default=8, type=int) - parser.add_argument('--s2_iterations', default=150000, type=int) - # fine-tune means fewer augmentations to train the sensory memory - parser.add_argument('--s2_finetune', default=10000, type=int) - parser.add_argument('--s2_steps', nargs="*", default=[120000], type=int) - parser.add_argument('--s2_lr', help='Initial learning rate', default=1e-5, type=float) - parser.add_argument('--s2_num_ref_frames', default=3, type=int) - parser.add_argument('--s2_num_frames', default=8, type=int) - parser.add_argument('--s2_start_warm', default=20000, type=int) - parser.add_argument('--s2_end_warm', default=70000, type=int) - - # Stage 3, DAVIS+YoutubeVOS, shorter - parser.add_argument('--s3_batch_size', default=8, type=int) - parser.add_argument('--s3_iterations', default=100000, type=int) - # fine-tune means fewer augmentations to train the sensory memory - parser.add_argument('--s3_finetune', default=10000, type=int) - parser.add_argument('--s3_steps', nargs="*", default=[80000], type=int) - parser.add_argument('--s3_lr', help='Initial learning rate', default=1e-5, type=float) - parser.add_argument('--s3_num_ref_frames', default=3, type=int) - parser.add_argument('--s3_num_frames', default=8, type=int) 
- parser.add_argument('--s3_start_warm', default=20000, type=int) - parser.add_argument('--s3_end_warm', default=70000, type=int) - - parser.add_argument('--gamma', help='LR := LR*gamma at every decay step', default=0.1, type=float) - parser.add_argument('--weight_decay', default=0.05, type=float) - - # Loading - parser.add_argument('--load_network', help='Path to pretrained network weight only') - parser.add_argument('--load_checkpoint', help='Path to the checkpoint file, including network, optimizer and such') - - # Logging information - parser.add_argument('--log_text_interval', default=100, type=int) - parser.add_argument('--log_image_interval', default=1000, type=int) - parser.add_argument('--save_network_interval', default=25000, type=int) - parser.add_argument('--save_checkpoint_interval', default=50000, type=int) - parser.add_argument('--exp_id', help='Experiment UNIQUE id, use NULL to disable logging to tensorboard', default='NULL') - parser.add_argument('--debug', help='Debug mode which logs information more often', action='store_true') - - # # Multiprocessing parameters, not set by users - # parser.add_argument('--local_rank', default=0, type=int, help='Local rank of this process') - - if unknown_arg_ok: - args, _ = parser.parse_known_args() - self.args = vars(args) - else: - self.args = vars(parser.parse_args()) - - self.args['amp'] = not self.args['no_amp'] - - # check if the stages are valid - stage_to_perform = list(self.args['stages']) - for s in stage_to_perform: - if s not in ['0', '1', '2', '3']: - raise NotImplementedError - - def get_stage_parameters(self, stage): - parameters = { - 'batch_size': self.args['s%s_batch_size'%stage], - 'iterations': self.args['s%s_iterations'%stage], - 'finetune': self.args['s%s_finetune'%stage], - 'steps': self.args['s%s_steps'%stage], - 'lr': self.args['s%s_lr'%stage], - 'num_ref_frames': self.args['s%s_num_ref_frames'%stage], - 'num_frames': self.args['s%s_num_frames'%stage], - 'start_warm': self.args['s%s_start_warm'%stage], - 'end_warm': self.args['s%s_end_warm'%stage], - } - - return parameters - - def __getitem__(self, key): - return self.args[key] - - def __setitem__(self, key, value): - self.args[key] = value - - def __str__(self): - return str(self.args) diff --git a/tracker/util/davis_subset.txt b/tracker/util/davis_subset.txt deleted file mode 100644 index 875c240..0000000 --- a/tracker/util/davis_subset.txt +++ /dev/null @@ -1,60 +0,0 @@ -bear -bmx-bumps -boat -boxing-fisheye -breakdance-flare -bus -car-turn -cat-girl -classic-car -color-run -crossing -dance-jump -dancing -disc-jockey -dog-agility -dog-gooses -dogs-scale -drift-turn -drone -elephant -flamingo -hike -hockey -horsejump-low -kid-football -kite-walk -koala -lady-running -lindy-hop -longboard -lucia -mallard-fly -mallard-water -miami-surf -motocross-bumps -motorbike -night-race -paragliding -planes-water -rallye -rhino -rollerblade -schoolgirls -scooter-board -scooter-gray -sheep -skate-park -snowboard -soccerball -stroller -stunt -surf -swing -tennis -tractor-sand -train -tuk-tuk -upside-down -varanus-cage -walking \ No newline at end of file diff --git a/tracker/util/image_saver.py b/tracker/util/image_saver.py deleted file mode 100644 index c43d9de..0000000 --- a/tracker/util/image_saver.py +++ /dev/null @@ -1,136 +0,0 @@ -import cv2 -import numpy as np - -import torch -from dataset.range_transform import inv_im_trans -from collections import defaultdict - -def tensor_to_numpy(image): - image_np = (image.numpy() * 255).astype('uint8') - return image_np - -def 
tensor_to_np_float(image): - image_np = image.numpy().astype('float32') - return image_np - -def detach_to_cpu(x): - return x.detach().cpu() - -def transpose_np(x): - return np.transpose(x, [1,2,0]) - -def tensor_to_gray_im(x): - x = detach_to_cpu(x) - x = tensor_to_numpy(x) - x = transpose_np(x) - return x - -def tensor_to_im(x): - x = detach_to_cpu(x) - x = inv_im_trans(x).clamp(0, 1) - x = tensor_to_numpy(x) - x = transpose_np(x) - return x - -# Predefined key <-> caption dict -key_captions = { - 'im': 'Image', - 'gt': 'GT', -} - -""" -Return an image array with captions -keys in dictionary will be used as caption if not provided -values should contain lists of cv2 images -""" -def get_image_array(images, grid_shape, captions={}): - h, w = grid_shape - cate_counts = len(images) - rows_counts = len(next(iter(images.values()))) - - font = cv2.FONT_HERSHEY_SIMPLEX - - output_image = np.zeros([w*cate_counts, h*(rows_counts+1), 3], dtype=np.uint8) - col_cnt = 0 - for k, v in images.items(): - - # Default as key value itself - caption = captions.get(k, k) - - # Handles new line character - dy = 40 - for i, line in enumerate(caption.split('\n')): - cv2.putText(output_image, line, (10, col_cnt*w+100+i*dy), - font, 0.8, (255,255,255), 2, cv2.LINE_AA) - - # Put images - for row_cnt, img in enumerate(v): - im_shape = img.shape - if len(im_shape) == 2: - img = img[..., np.newaxis] - - img = (img * 255).astype('uint8') - - output_image[(col_cnt+0)*w:(col_cnt+1)*w, - (row_cnt+1)*h:(row_cnt+2)*h, :] = img - - col_cnt += 1 - - return output_image - -def base_transform(im, size): - im = tensor_to_np_float(im) - if len(im.shape) == 3: - im = im.transpose((1, 2, 0)) - else: - im = im[:, :, None] - - # Resize - if im.shape[1] != size: - im = cv2.resize(im, size, interpolation=cv2.INTER_NEAREST) - - return im.clip(0, 1) - -def im_transform(im, size): - return base_transform(inv_im_trans(detach_to_cpu(im)), size=size) - -def mask_transform(mask, size): - return base_transform(detach_to_cpu(mask), size=size) - -def out_transform(mask, size): - return base_transform(detach_to_cpu(torch.sigmoid(mask)), size=size) - -def pool_pairs(images, size, num_objects): - req_images = defaultdict(list) - - b, t = images['rgb'].shape[:2] - - # limit the number of images saved - b = min(2, b) - - # find max num objects - max_num_objects = max(num_objects[:b]) - - GT_suffix = '' - for bi in range(b): - GT_suffix += ' \n%s' % images['info']['name'][bi][-25:-4] - - for bi in range(b): - for ti in range(t): - req_images['RGB'].append(im_transform(images['rgb'][bi,ti], size)) - for oi in range(max_num_objects): - if ti == 0 or oi >= num_objects[bi]: - req_images['Mask_%d'%oi].append(mask_transform(images['first_frame_gt'][bi][0,oi], size)) - # req_images['Mask_X8_%d'%oi].append(mask_transform(images['first_frame_gt'][bi][0,oi], size)) - # req_images['Mask_X16_%d'%oi].append(mask_transform(images['first_frame_gt'][bi][0,oi], size)) - else: - req_images['Mask_%d'%oi].append(mask_transform(images['masks_%d'%ti][bi][oi], size)) - # req_images['Mask_%d'%oi].append(mask_transform(images['masks_%d'%ti][bi][oi][2], size)) - # req_images['Mask_X8_%d'%oi].append(mask_transform(images['masks_%d'%ti][bi][oi][1], size)) - # req_images['Mask_X16_%d'%oi].append(mask_transform(images['masks_%d'%ti][bi][oi][0], size)) - req_images['GT_%d_%s'%(oi, GT_suffix)].append(mask_transform(images['cls_gt'][bi,ti,0]==(oi+1), size)) - # print((images['cls_gt'][bi,ti,0]==(oi+1)).shape) - # print(mask_transform(images['cls_gt'][bi,ti,0]==(oi+1), size).shape) - 
- - return get_image_array(req_images, size, key_captions) \ No newline at end of file diff --git a/tracker/util/load_subset.py b/tracker/util/load_subset.py deleted file mode 100644 index 3191f4f..0000000 --- a/tracker/util/load_subset.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -load_subset.py - Presents a subset of data -DAVIS - only the training set -YouTubeVOS - I manually filtered some erroneous ones out but I haven't checked all -""" - - -def load_sub_davis(path='util/davis_subset.txt'): - with open(path, mode='r') as f: - subset = set(f.read().splitlines()) - return subset - -def load_sub_yv(path='util/yv_subset.txt'): - with open(path, mode='r') as f: - subset = set(f.read().splitlines()) - return subset diff --git a/tracker/util/log_integrator.py b/tracker/util/log_integrator.py deleted file mode 100644 index e4b26d5..0000000 --- a/tracker/util/log_integrator.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Integrate numerical values for some iterations -Typically used for loss computation / logging to tensorboard -Call finalize and create a new Integrator when you want to display/log -""" - -import torch - - -class Integrator: - def __init__(self, logger, distributed=True, local_rank=0, world_size=1): - self.values = {} - self.counts = {} - self.hooks = [] # List is used here to maintain insertion order - - self.logger = logger - - self.distributed = distributed - self.local_rank = local_rank - self.world_size = world_size - - def add_tensor(self, key, tensor): - if key not in self.values: - self.counts[key] = 1 - if type(tensor) == float or type(tensor) == int: - self.values[key] = tensor - else: - self.values[key] = tensor.mean().item() - else: - self.counts[key] += 1 - if type(tensor) == float or type(tensor) == int: - self.values[key] += tensor - else: - self.values[key] += tensor.mean().item() - - def add_dict(self, tensor_dict): - for k, v in tensor_dict.items(): - self.add_tensor(k, v) - - def add_hook(self, hook): - """ - Adds a custom hook, i.e. compute new metrics using values in the dict - The hook takes the dict as argument, and returns a (k, v) tuple - e.g. 
for computing IoU - """ - if type(hook) == list: - self.hooks.extend(hook) - else: - self.hooks.append(hook) - - def reset_except_hooks(self): - self.values = {} - self.counts = {} - - # Average and output the metrics - def finalize(self, prefix, it, f=None): - - for hook in self.hooks: - k, v = hook(self.values) - self.add_tensor(k, v) - - for k, v in self.values.items(): - - if k[:4] == 'hide': - continue - - avg = v / self.counts[k] - - if self.distributed: - # Inplace operation - avg = torch.tensor(avg).cuda() - torch.distributed.reduce(avg, dst=0) - - if self.local_rank == 0: - avg = (avg/self.world_size).cpu().item() - self.logger.log_metrics(prefix, k, avg, it, f) - else: - # Simple does it - self.logger.log_metrics(prefix, k, avg, it, f) - diff --git a/tracker/util/logger.py b/tracker/util/logger.py deleted file mode 100644 index c3a0d3c..0000000 --- a/tracker/util/logger.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Dumps things to tensorboard and console -""" - -import os -import warnings - -import torchvision.transforms as transforms -from torch.utils.tensorboard import SummaryWriter - - -def tensor_to_numpy(image): - image_np = (image.numpy() * 255).astype('uint8') - return image_np - -def detach_to_cpu(x): - return x.detach().cpu() - -def fix_width_trunc(x): - return ('{:.9s}'.format('{:0.9f}'.format(x))) - -class TensorboardLogger: - def __init__(self, short_id, id, git_info): - self.short_id = short_id - if self.short_id == 'NULL': - self.short_id = 'DEBUG' - - if id is None: - self.no_log = True - warnings.warn('Logging has been disbaled.') - else: - self.no_log = False - - self.inv_im_trans = transforms.Normalize( - mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], - std=[1/0.229, 1/0.224, 1/0.225]) - - self.inv_seg_trans = transforms.Normalize( - mean=[-0.5/0.5], - std=[1/0.5]) - - log_path = os.path.join('.', 'logs', '%s' % id) - self.logger = SummaryWriter(log_path) - - self.log_string('git', git_info) - - def log_scalar(self, tag, x, step): - if self.no_log: - warnings.warn('Logging has been disabled.') - return - self.logger.add_scalar(tag, x, step) - - def log_metrics(self, l1_tag, l2_tag, val, step, f=None): - tag = l1_tag + '/' + l2_tag - text = '{:s} - It {:6d} [{:5s}] [{:13}]: {:s}'.format(self.short_id, step, l1_tag.upper(), l2_tag, fix_width_trunc(val)) - print(text) - if f is not None: - f.write(text + '\n') - f.flush() - self.log_scalar(tag, val, step) - - def log_im(self, tag, x, step): - if self.no_log: - warnings.warn('Logging has been disabled.') - return - x = detach_to_cpu(x) - x = self.inv_im_trans(x) - x = tensor_to_numpy(x) - self.logger.add_image(tag, x, step) - - def log_cv2(self, tag, x, step): - if self.no_log: - warnings.warn('Logging has been disabled.') - return - x = x.transpose((2, 0, 1)) - self.logger.add_image(tag, x, step) - - def log_seg(self, tag, x, step): - if self.no_log: - warnings.warn('Logging has been disabled.') - return - x = detach_to_cpu(x) - x = self.inv_seg_trans(x) - x = tensor_to_numpy(x) - self.logger.add_image(tag, x, step) - - def log_gray(self, tag, x, step): - if self.no_log: - warnings.warn('Logging has been disabled.') - return - x = detach_to_cpu(x) - x = tensor_to_numpy(x) - self.logger.add_image(tag, x, step) - - def log_string(self, tag, x): - print(tag, x) - if self.no_log: - warnings.warn('Logging has been disabled.') - return - self.logger.add_text(tag, x) - \ No newline at end of file diff --git a/tracker/inference/data/mask_mapper.py b/tracker/util/mask_mapper.py similarity index 80% rename from 
tracker/inference/data/mask_mapper.py rename to tracker/util/mask_mapper.py index 8e5b38d..815807b 100644 --- a/tracker/inference/data/mask_mapper.py +++ b/tracker/util/mask_mapper.py @@ -1,8 +1,16 @@ import numpy as np import torch -from dataset.util import all_to_onehot +def all_to_onehot(masks, labels): + if len(masks.shape) == 3: + Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8) + else: + Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8) + for ni, l in enumerate(labels): + Ms[ni] = (masks == l).astype(np.uint8) + + return Ms class MaskMapper: """ @@ -23,6 +31,12 @@ class MaskMapper: # if coherent, no mapping is required self.coherent = True + def clear_labels(self): + self.labels = [] + self.remappings = {} + # if coherent, no mapping is required + self.coherent = True + def convert_mask(self, mask, exhaustive=False): # mask is in index representation, H*W numpy array labels = np.unique(mask).astype(np.uint8) diff --git a/tracker/util/palette.py b/tracker/util/palette.py deleted file mode 100644 index d254165..0000000 --- a/tracker/util/palette.py +++ /dev/null @@ -1,3 +0,0 @@ -davis_palette = b'\x00\x00\x00\x80\x00\x00\x00\x80\x00\x80\x80\x00\x00\x00\x80\x80\x00\x80\x00\x80\x80\x80\x80\x80@\x00\x00\xc0\x00\x00@\x80\x00\xc0\x80\x00@\x00\x80\xc0\x00\x80@\x80\x80\xc0\x80\x80\x00@\x00\x80@\x00\x00\xc0\x00\x80\xc0\x00\x00@\x80\x80@\x80\x00\xc0\x80\x80\xc0\x80@@\x00\xc0@\x00@\xc0\x00\xc0\xc0\x00@@\x80\xc0@\x80@\xc0\x80\xc0\xc0\x80\x00\x00@\x80\x00@\x00\x80@\x80\x80@\x00\x00\xc0\x80\x00\xc0\x00\x80\xc0\x80\x80\xc0@\x00@\xc0\x00@@\x80@\xc0\x80@@\x00\xc0\xc0\x00\xc0@\x80\xc0\xc0\x80\xc0\x00@@\x80@@\x00\xc0@\x80\xc0@\x00@\xc0\x80@\xc0\x00\xc0\xc0\x80\xc0\xc0@@@\xc0@@@\xc0@\xc0\xc0@@@\xc0\xc0@\xc0@\xc0\xc0\xc0\xc0\xc0 \x00\x00\xa0\x00\x00 \x80\x00\xa0\x80\x00 \x00\x80\xa0\x00\x80 \x80\x80\xa0\x80\x80`\x00\x00\xe0\x00\x00`\x80\x00\xe0\x80\x00`\x00\x80\xe0\x00\x80`\x80\x80\xe0\x80\x80 @\x00\xa0@\x00 \xc0\x00\xa0\xc0\x00 @\x80\xa0@\x80 \xc0\x80\xa0\xc0\x80`@\x00\xe0@\x00`\xc0\x00\xe0\xc0\x00`@\x80\xe0@\x80`\xc0\x80\xe0\xc0\x80 \x00@\xa0\x00@ \x80@\xa0\x80@ \x00\xc0\xa0\x00\xc0 \x80\xc0\xa0\x80\xc0`\x00@\xe0\x00@`\x80@\xe0\x80@`\x00\xc0\xe0\x00\xc0`\x80\xc0\xe0\x80\xc0 @@\xa0@@ \xc0@\xa0\xc0@ @\xc0\xa0@\xc0 \xc0\xc0\xa0\xc0\xc0`@@\xe0@@`\xc0@\xe0\xc0@`@\xc0\xe0@\xc0`\xc0\xc0\xe0\xc0\xc0\x00 \x00\x80 \x00\x00\xa0\x00\x80\xa0\x00\x00 \x80\x80 \x80\x00\xa0\x80\x80\xa0\x80@ \x00\xc0 \x00@\xa0\x00\xc0\xa0\x00@ \x80\xc0 \x80@\xa0\x80\xc0\xa0\x80\x00`\x00\x80`\x00\x00\xe0\x00\x80\xe0\x00\x00`\x80\x80`\x80\x00\xe0\x80\x80\xe0\x80@`\x00\xc0`\x00@\xe0\x00\xc0\xe0\x00@`\x80\xc0`\x80@\xe0\x80\xc0\xe0\x80\x00 @\x80 @\x00\xa0@\x80\xa0@\x00 \xc0\x80 \xc0\x00\xa0\xc0\x80\xa0\xc0@ @\xc0 @@\xa0@\xc0\xa0@@ \xc0\xc0 \xc0@\xa0\xc0\xc0\xa0\xc0\x00`@\x80`@\x00\xe0@\x80\xe0@\x00`\xc0\x80`\xc0\x00\xe0\xc0\x80\xe0\xc0@`@\xc0`@@\xe0@\xc0\xe0@@`\xc0\xc0`\xc0@\xe0\xc0\xc0\xe0\xc0 \x00\xa0 \x00 \xa0\x00\xa0\xa0\x00 \x80\xa0 \x80 \xa0\x80\xa0\xa0\x80` \x00\xe0 \x00`\xa0\x00\xe0\xa0\x00` \x80\xe0 \x80`\xa0\x80\xe0\xa0\x80 `\x00\xa0`\x00 \xe0\x00\xa0\xe0\x00 `\x80\xa0`\x80 \xe0\x80\xa0\xe0\x80``\x00\xe0`\x00`\xe0\x00\xe0\xe0\x00``\x80\xe0`\x80`\xe0\x80\xe0\xe0\x80 @\xa0 @ \xa0@\xa0\xa0@ \xc0\xa0 \xc0 \xa0\xc0\xa0\xa0\xc0` @\xe0 @`\xa0@\xe0\xa0@` \xc0\xe0 \xc0`\xa0\xc0\xe0\xa0\xc0 `@\xa0`@ \xe0@\xa0\xe0@ `\xc0\xa0`\xc0 \xe0\xc0\xa0\xe0\xc0``@\xe0`@`\xe0@\xe0\xe0@``\xc0\xe0`\xc0`\xe0\xc0\xe0\xe0\xc0' - -youtube_palette = 
b'\x00\x00\x00\xec_g\xf9\x91W\xfa\xc8c\x99\xc7\x94b\xb3\xb2f\x99\xcc\xc5\x94\xc5\xabyg\xff\xff\xffes~\x0b\x0b\x0b\x0c\x0c\x0c\r\r\r\x0e\x0e\x0e\x0f\x0f\x0f' diff --git a/tracker/dataset/range_transform.py b/tracker/util/range_transform.py similarity index 100% rename from tracker/dataset/range_transform.py rename to tracker/util/range_transform.py diff --git a/tracker/util/yv_subset.txt b/tracker/util/yv_subset.txt deleted file mode 100644 index a26e50a..0000000 --- a/tracker/util/yv_subset.txt +++ /dev/null @@ -1,3464 +0,0 @@ -003234408d -0043f083b5 -0044fa5fba -005a527edd -0065b171f9 -00917dcfc4 -00a23ccf53 -00ad5016a4 -01082ae388 -011ac0a06f -013099c098 -0155498c85 -01694ad9c8 -017ac35701 -01b80e8e1a -01baa5a4e1 -01c3111683 -01c4cb5ffe -01c76f0a82 -01c783268c -01ed275c6e -01ff60d1fa -020cd28cd2 -02264db755 -0248626d9a -02668dbffa -0274193026 -02d28375aa -02f3a5c4df -031ccc99b1 -0321b18c10 -0348a45bca -0355e92655 -0358b938c1 -0368107cf1 -0379ddf557 -038b2cc71d -038c15a5dd -03a06cc98a -03a63e187f -03c95b4dae -03e2b57b0e -04194e1248 -0444918a5f -04460a7a52 -04474174a4 -0450095513 -045f00aed2 -04667fabaa -04735c5030 -04990d1915 -04d62d9d98 -04f21da964 -04fbad476e -04fe256562 -0503bf89c9 -0536c9eed0 -054acb238f -05579ca250 -056c200404 -05774f3a2c -058a7592c8 -05a0a513df -05a569d8aa -05aa652648 -05d7715782 -05e0b0f28f -05fdbbdd7a -05ffcfed85 -0630391881 -06840b2bbe -068f7dce6f -0693719753 -06ce2b51fb -06e224798e -06ee361788 -06fbb3fa2c -0700264286 -070c918ca7 -07129e14a4 -07177017e9 -07238ffc58 -07353b2a89 -0738493cbf -075926c651 -075c701292 -0762ea9a30 -07652ee4af -076f206928 -077d32af19 -079049275c -07913cdda7 -07a11a35e8 -07ac33b6df -07b6e8fda8 -07c62c3d11 -07cc1c7d74 -080196ef01 -081207976e -081ae4fa44 -081d8250cb -082900c5d4 -0860df21e2 -0866d4c5e3 -0891ac2eb6 -08931bc458 -08aa2705d5 -08c8450db7 -08d50b926c -08e1e4de15 -08e48c1a48 -08f561c65e -08feb87790 -09049f6fe3 -092e4ff450 -09338adea8 -093c335ccc -0970d28339 -0974a213dc -097b471ed8 -0990941758 -09a348f4fa -09a6841288 -09c5bad17b -09c9ce80c7 -09ff54fef4 -0a23765d15 -0a275e7f12 -0a2f2bd294 -0a7a2514aa -0a7b27fde9 -0a8c467cc3 -0ac8c560ae -0b1627e896 -0b285c47f6 -0b34ec1d55 -0b5b5e8e5a -0b68535614 -0b6f9105fc -0b7dbfa3cb -0b9cea51ca -0b9d012be8 -0bcfc4177d -0bd37b23c1 -0bd864064c -0c11c6bf7b -0c26bc77ac -0c3a04798c -0c44a9d545 -0c817cc390 -0ca839ee9a -0cd7ac0ac0 -0ce06e0121 -0cfe974a89 -0d2fcc0dcd -0d3aad05d2 -0d40b015f4 -0d97fba242 -0d9cc80d7e -0dab85b6d3 -0db5c427a5 -0dbaf284f1 -0de4923598 -0df28a9101 -0e04f636c4 -0e05f0e232 -0e0930474b -0e27472bea -0e30020549 -0e621feb6c -0e803c7d73 -0e9ebe4e3c -0e9f2785ec -0ea68d418b -0eb403a222 -0ee92053d6 -0eefca067f -0f17fa6fcb -0f1ac8e9a3 -0f202e9852 -0f2ab8b1ff -0f51a78756 -0f5fbe16b0 -0f6072077b -0f6b69b2f4 -0f6c2163de -0f74ec5599 -0f9683715b -0fa7b59356 -0fb173695b -0fc958cde2 -0fe7b1a621 -0ffcdb491c -101caff7d4 -1022fe8417 -1032e80b37 -103f501680 -104e64565f -104f1ab997 -106242403f -10b31f5431 -10eced835e -110d26fa3a -1122c1d16a -1145b49a5f -11485838c2 -114e7676ec -1157472b95 -115ee1072c -1171141012 -117757b4b8 -1178932d2f -117cc76bda -1180cbf814 -1187bbd0e3 -1197e44b26 -119cf20728 -119dd54871 -11a0c3b724 -11a6ba8c94 -11c722a456 -11cbcb0b4d -11ccf5e99d -11ce6f452e -11e53de6f2 -11feabe596 -120cb9514d -12156b25b3 -122896672d -1232b2f1d4 -1233ac8596 -1239c87234 -1250423f7c -1257a1bc67 -125d1b19dd -126d203967 -1295e19071 -12ad198c54 -12bddb2bcb -12ec9b93ee -12eebedc35 -132852e094 -1329409f2a -13325cfa14 -134d06dbf9 -135625b53d -13870016f9 -13960b3c84 -13adaad9d9 -13ae097e20 -13e3070469 
-13f6a8c20d -1416925cf2 -142d2621f5 -145d5d7c03 -145fdc3ac5 -1471274fa7 -14a6b5a139 -14c21cea0d -14dae0dc93 -14f9bd22b5 -14fd28ae99 -15097d5d4e -150ea711f2 -1514e3563f -152aaa3a9e -152b7d3bd7 -15617297cc -15abbe0c52 -15d1fb3de5 -15f67b0fab -161eb59aad -16288ea47f -164410ce62 -165c3c8cd4 -165c42b41b -165ec9e22b -1669502269 -16763cccbb -16adde065e -16af445362 -16afd538ad -16c3fa4d5d -16d1d65c27 -16e8599e94 -16fe9fb444 -1705796b02 -1724db7671 -17418e81ea -175169edbb -17622326fd -17656bae77 -17b0d94172 -17c220e4f6 -17c7bcd146 -17cb4afe89 -17cd79a434 -17d18604c3 -17d8ca1a37 -17e33f4330 -17f7a6d805 -180abc8378 -183ba3d652 -185bf64702 -18913cc690 -1892651815 -189ac8208a -189b44e92c -18ac264b76 -18b245ab49 -18b5cebc34 -18bad52083 -18bb5144d5 -18c6f205c5 -1903f9ea15 -1917b209f2 -191e74c01d -19367bb94e -193ffaa217 -19696b67d3 -197f3ab6f3 -1981e763cc -198afe39ae -19a6e62b9b -19b60d5335 -19c00c11f9 -19e061eb88 -19e8bc6178 -19ee80dac6 -1a25a9170a -1a359a6c1a -1a3e87c566 -1a5fe06b00 -1a6c0fbd1e -1a6f3b5a4b -1a8afbad92 -1a8bdc5842 -1a95752aca -1a9c131cb7 -1aa3da3ee3 -1ab27ec7ea -1abf16d21d -1acd0f993b -1ad202e499 -1af8d2395d -1afd39a1fa -1b2d31306f -1b3fa67f0e -1b43fa74b4 -1b73ea9fc2 -1b7e8bb255 -1b8680f8cd -1b883843c0 -1b8898785b -1b88ba1aa4 -1b96a498e5 -1bbc4c274f -1bd87fe9ab -1c4090c75b -1c41934f84 -1c72b04b56 -1c87955a3a -1c9f9eb792 -1ca240fede -1ca5673803 -1cada35274 -1cb44b920d -1cd10e62be -1d3087d5e5 -1d3685150a -1d6ff083aa -1d746352a6 -1da256d146 -1da4e956b1 -1daf812218 -1dba687bce -1dce57d05d -1de4a9e537 -1dec5446c8 -1dfbe6f586 -1e1a18c45a -1e1e42529d -1e4be70796 -1eb60959c8 -1ec8b2566b -1ecdc2941c -1ee0ac70ff -1ef8e17def -1f1a2a9fc0 -1f1beb8daa -1f2609ee13 -1f3876f8d0 -1f4ec0563d -1f64955634 -1f7d31b5b2 -1f8014b7fd -1f9c7d10f1 -1fa350df76 -1fc9538993 -1fe2f0ec59 -2000c02f9d -20142b2f05 -201a8d75e5 -2023b3ee4f -202b767bbc -203594a418 -2038987336 -2039c3aecb -204a90d81f -207bc6cf01 -208833d1d1 -20c6d8b362 -20e3e52e0a -2117fa0c14 -211bc5d102 -2120d9c3c3 -2125235a49 -21386f5978 -2142af8795 -215dfc0f73 -217bae91e5 -217c0d44e4 -219057c87b -21d0edbf81 -21df87ad76 -21f1d089f5 -21f4019116 -222597030f -222904eb5b -223a0e0657 -223bd973ab -22472f7395 -224e7c833e -225aba51d9 -2261d421ea -2263a8782b -2268cb1ffd -2268e93b0a -2293c99f3f -22a1141970 -22b13084b2 -22d9f5ab0c -22f02efe3a -232c09b75b -2350d71b4b -2376440551 -2383d8aafd -238b84e67f -238d4b86f6 -238d947c6b -23993ce90d -23b0c8a9ab -23b3beafcc -23d80299fe -23f404a9fc -240118e58a -2431dec2fd -24440e0ac7 -2457274dbc -2465bf515d -246b142c4d -247d729e36 -2481ceafeb -24866b4e6a -2489d78320 -24ab0b83e8 -24b0868d92 -24b5207cd9 -24ddf05c03 -250116161c -256ad2e3fc -256bd83d5e -256dcc8ab8 -2589956baa -258b3b33c6 -25ad437e29 -25ae395636 -25c750c6db -25d2c3fe5d -25dc80db7c -25f97e926f -26011bc28b -260846ffbe -260dd9ad33 -267964ee57 -2680861931 -268ac7d3fc -26b895d91e -26bc786d4f -26ddd2ef12 -26de3d18ca -26f7784762 -2703e52a6a -270ed80c12 -2719b742ab -272f4163d0 -27303333e1 -27659fa7d6 -279214115d -27a5f92a9c -27cf2af1f3 -27f0d5f8a2 -28075f33c1 -281629cb41 -282b0d51f5 -282fcab00b -28449fa0dc -28475208ca -285580b7c4 -285b69e223 -288c117201 -28a8eb9623 -28bf9c3cf3 -28c6b8f86a -28c972dacd -28d9fa6016 -28e392de91 -28f4a45190 -298c844fc9 -29a0356a2b -29d779f9e3 -29dde5f12b -29de7b6579 -29e630bdd0 -29f2332d30 -2a18873352 -2a3824ff31 -2a559dd27f -2a5c09acbd -2a63eb1524 -2a6a30a4ea -2a6d9099d1 -2a821394e3 -2a8c5b1342 -2abc8d66d2 -2ac9ef904a -2b08f37364 -2b351bfd7d -2b659a49d7 -2b69ee5c26 -2b6c30bbbd -2b88561cf2 -2b8b14954e -2ba621c750 -2bab50f9a7 -2bb00c2434 -2bbde474ef 
-2bdd82fb86 -2be06fb855 -2bf545c2f5 -2bffe4cf9a -2c04b887b7 -2c05209105 -2c0ad8cf39 -2c11fedca8 -2c1a94ebfb -2c1e8c8e2f -2c29fabcf1 -2c2c076c01 -2c3ea7ee7d -2c41fa0648 -2c44bb6d1c -2c54cfbb78 -2c5537eddf -2c6e63b7de -2cb10c6a7e -2cbcd5ccd1 -2cc5d9c5f6 -2cd01cf915 -2cdbf5f0a7 -2ce660f123 -2cf114677e -2d01eef98e -2d03593bdc -2d183ac8c4 -2d33ad3935 -2d3991d83e -2d4333577b -2d4d015c64 -2d8f5e5025 -2d900bdb8e -2d9a1a1d49 -2db0576a5c -2dc0838721 -2dcc417f82 -2df005b843 -2df356de14 -2e00393d96 -2e03b8127a -2e0f886168 -2e2bf37e6d -2e42410932 -2ea78f46e4 -2ebb017a26 -2ee2edba2a -2efb07554a -2f17e4fc1e -2f2c65c2f3 -2f2d9b33be -2f309c206b -2f53822e88 -2f53998171 -2f5b0c89b1 -2f680909e6 -2f710f66bd -2f724132b9 -2f7e3517ae -2f96f5fc6f -2f97d9fecb -2fbfa431ec -2fc9520b53 -2fcd9f4c62 -2feb30f208 -2ff7f5744f -30085a2cc6 -30176e3615 -301f72ee11 -3026bb2f61 -30318465dc -3054ca937d -306121e726 -3064ad91e8 -307444a47f -307bbb7409 -30a20194ab -30c35c64a4 -30dbdb2cd6 -30fc77d72f -310021b58b -3113140ee8 -3150b2ee57 -31539918c4 -318dfe2ce2 -3193da4835 -319f725ad9 -31bbd0d793 -322505c47f -322b237865 -322da43910 -3245e049fb -324c4c38f6 -324e35111a -3252398f09 -327dc4cabf -328d918c7d -3290c0de97 -3299ae3116 -32a7cd687b -33098cedb4 -3332334ac4 -334cb835ac -3355e056eb -33639a2847 -3373891cdc -337975816b -33e29d7e91 -34046fe4f2 -3424f58959 -34370a710f -343bc6a65a -3450382ef7 -3454303a08 -346aacf439 -346e92ff37 -34a5ece7dd -34b109755a -34d1b37101 -34dd2c70a7 -34efa703df -34fbee00a6 -3504df2fda -35195a56a1 -351c822748 -351cfd6bc5 -3543d8334c -35573455c7 -35637a827f -357a710863 -358bf16f9e -35ab34cc34 -35c6235b8d -35d01a438a -3605019d3b -3609bc3f88 -360e25da17 -36299c687c -362c5bc56e -3649228783 -365b0501ea -365f459863 -369893f3ad -369c9977e1 -369dde050a -36c7dac02f -36d5b1493b -36f5cc68fd -3735480d18 -374b479880 -375a49d38f -375a5c0e09 -376bda9651 -377db65f60 -37c19d1087 -37d4ae24fc -37ddce7f8b -37e10d33af -37e45c6247 -37fa0001e8 -3802d458c0 -382caa3cb4 -383bb93111 -388843df90 -38924f4a7f -38b00f93d7 -38c197c10e -38c9c3d801 -38eb2bf67f -38fe9b3ed1 -390352cced -390c51b987 -390ca6f1d6 -392bc0f8a1 -392ecb43bd -3935291688 -3935e63b41 -394454fa9c -394638fc8b -39545e20b7 -397abeae8f -3988074b88 -398f5d5f19 -39bc49a28c -39befd99fb -39c3c7bf55 -39d584b09f -39f6f6ffb1 -3a079fb484 -3a0d3a81b7 -3a1d55d22b -3a20a7583e -3a2c1f66e5 -3a33f4d225 -3a3bf84b13 -3a4565e5ec -3a4e32ed5e -3a7ad86ce0 -3a7bdde9b8 -3a98867cbe -3aa3f1c9e8 -3aa7fce8b6 -3aa876887d -3ab807ded6 -3ab9b1a85a -3adac8d7da -3ae1a4016f -3ae2deaec2 -3ae81609d6 -3af847e62f -3b23792b84 -3b3b0af2ee -3b512dad74 -3b6c7988f6 -3b6e983b5b -3b74a0fc20 -3b7a50b80d -3b96d3492f -3b9ad0c5a9 -3b9ba0894a -3bb4e10ed7 -3bd9a9b515 -3beef45388 -3c019c0a24 -3c090704aa -3c2784fc0d -3c47ab95f8 -3c4db32d74 -3c5ff93faf -3c700f073e -3c713cbf2f -3c8320669c -3c90d225ee -3cadbcc404 -3cb9be84a5 -3cc37fd487 -3cc6f90cb2 -3cd5e035ef -3cdf03531b -3cdf828f59 -3d254b0bca -3d5aeac5ba -3d690473e1 -3d69fed2fb -3d8997aeb6 -3db0d6b07e -3db1ddb8cf -3db907ac77 -3dcbc0635b -3dd48ed55f -3de4ac4ec4 -3decd63d88 -3e04a6be11 -3e108fb65a -3e1448b01c -3e16c19634 -3e2845307e -3e38336da5 -3e3a819865 -3e3e4be915 -3e680622d7 -3e7d2aeb07 -3e7d8f363d -3e91f10205 -3ea4c49bbe -3eb39d11ab -3ec273c8d5 -3ed3f91271 -3ee062a2fd -3eede9782c -3ef2fa99cb -3efc6e9892 -3f0b0dfddd -3f0c860359 -3f18728586 -3f3b15f083 -3f45a470ad -3f4f3bc803 -3fd96c5267 -3fea675fab -3fee8cbc9f -3fff16d112 -401888b36c -4019231330 -402316532d -402680df52 -404d02e0c0 -40709263a8 -4083cfbe15 -40a96c5cb1 -40b8e50f82 -40f4026bf5 -4100b57a3a -41059fdd0b -41124e36de 
-4122aba5f9 -413bab0f0d -4164faee0b -418035eec9 -4182d51532 -418bb97e10 -41a34c20e7 -41dab05200 -41ff6d5e2a -420caf0859 -42264230ba -425a0c96e0 -42da96b87c -42eb5a5b0f -42f17cd14d -42f5c61c49 -42ffdcdee9 -432f9884f9 -43326d9940 -4350f3ab60 -4399ffade3 -43a6c21f37 -43b5555faa -43d63b752a -4416bdd6ac -4444753edd -444aa274e7 -444d4e0596 -446b8b5f7a -4478f694bb -44b1da0d87 -44b4dad8c9 -44b5ece1b9 -44d239b24e -44eaf8f51e -44f4f57099 -44f7422af2 -450787ac97 -4523656564 -4536c882e5 -453b65daa4 -454f227427 -45636d806a -456fb9362e -457e717a14 -45a89f35e1 -45bf0e947d -45c36a9eab -45d9fc1357 -45f8128b97 -4607f6c03c -46146dfd39 -4620e66b1e -4625f3f2d3 -462b22f263 -4634736113 -463c0f4fdd -46565a75f8 -46630b55ae -466839cb37 -466ba4ae0c -4680236c9d -46bf4e8709 -46e18e42f1 -46f5093c59 -47269e0499 -472da1c484 -47354fab09 -4743bb84a7 -474a796272 -4783d2ab87 -479cad5da3 -479f5d7ef6 -47a05fbd1d -4804ee2767 -4810c3fbca -482fb439c2 -48375af288 -484ab44de4 -485f3944cd -4867b84887 -486a8ac57e -486e69c5bd -48812cf33e -4894b3b9ea -48bd66517d -48d83b48a4 -49058178b8 -4918d10ff0 -4932911f80 -49405b7900 -49972c2d14 -499bf07002 -49b16e9377 -49c104258e -49c879f82d -49e7326789 -49ec3e406a -49fbf0c98a -4a0255c865 -4a088fe99a -4a341402d0 -4a3471bdf5 -4a4b50571c -4a50f3d2e9 -4a6e3faaa1 -4a7191f08a -4a86fcfc30 -4a885fa3ef -4a8af115de -4aa2e0f865 -4aa9d6527f -4abb74bb52 -4ae13de1cd -4af8cb323f -4b02c272b3 -4b19c529fb -4b2974eff4 -4b3154c159 -4b54d2587f -4b556740ff -4b67aa9ef6 -4b97cc7b8d -4baa1ed4aa -4bc8c676bb -4beaea4dbe -4bf5763d24 -4bffa92b67 -4c25dfa8ec -4c397b6fd4 -4c51e75d66 -4c7710908f -4c9b5017be -4ca2ffc361 -4cad2e93bc -4cd427b535 -4cd9a4b1ef -4cdfe3c2b2 -4cef87b649 -4cf208e9b3 -4cf5bc3e60 -4cfdd73249 -4cff5c9e42 -4d26d41091 -4d5c23c554 -4d67c59727 -4d983cad9f -4da0d00b55 -4daa179861 -4dadd57153 -4db117e6c5 -4de4ce4dea -4dfaee19e5 -4dfdd7fab0 -4e3f346aa5 -4e49c2a9c7 -4e4e06a749 -4e70279712 -4e72856cc7 -4e752f8075 -4e7a28907f -4e824b9247 -4e82b1df57 -4e87a639bc -4ea77bfd15 -4eb6fc23a2 -4ec9da329e -4efb9a0720 -4f062fbc63 -4f35be0e0b -4f37e86797 -4f414dd6e7 -4f424abded -4f470cc3ae -4f601d255a -4f7386a1ab -4f824d3dcd -4f827b0751 -4f8db33a13 -4fa160f8a3 -4fa9c30a45 -4facd8f0e8 -4fca07ad01 -4fded94004 -4fdfef4dea -4feb3ac01f -4fffec8479 -500c835a86 -50168342bf -50243cffdc -5031d5a036 -504dd9c0fd -50568fbcfb -5069c7c5b3 -508189ac91 -50b6b3d4b7 -50c6f4fe3e -50cce40173 -50efbe152f -50f290b95d -5104aa1fea -5110dc72c0 -511e8ecd7f -513aada14e -5158d6e985 -5161e1fa57 -51794ddd58 -517d276725 -51a597ee04 -51b37b6d97 -51b5dc30a0 -51e85b347b -51eea1fdac -51eef778af -51f384721c -521cfadcb4 -52355da42f -5247d4b160 -524b470fd0 -524cee1534 -5252195e8a -5255c9ca97 -525928f46f -526df007a7 -529b12de78 -52c7a3d653 -52c8ec0373 -52d225ed52 -52ee406d9e -52ff1ccd4a -53143511e8 -5316d11eb7 -53253f2362 -534a560609 -5352c4a70e -536096501f -536b17bcea -5380eaabff -5390a43a54 -53af427bb2 -53bf5964ce -53c30110b5 -53cad8e44a -53d9c45013 -53e274f1b5 -53e32d21ea -540850e1c7 -540cb31cfe -541c4da30f -541d7935d7 -545468262b -5458647306 -54657855cd -547b3fb23b -5497dc3712 -549c56f1d4 -54a4260bb1 -54b98b8d5e -54e1054b0f -54e8867b83 -54ebe34f6e -5519b4ad13 -551acbffd5 -55341f42da -5566ab97e1 -556c79bbf2 -5589637cc4 -558aa072f0 -559824b6f6 -55c1764e90 -55eda6c77e -562d173565 -5665c024cb -566cef4959 -5675d78833 -5678a91bd8 -567a2b4bd0 -569c282890 -56cc449917 -56e71f3e07 -56f09b9d92 -56fc0e8cf9 -571ca79c71 -57243657cf -57246af7d1 -57427393e9 -574b682c19 -578f211b86 -5790ac295d -579393912d -57a344ab1a -57bd3bcda4 -57bfb7fa4c -57c010175e -57c457cc75 -57c7fc2183 
-57d5289a01 -58045fde85 -58163c37cd -582d463e5c -5851739c15 -585dd0f208 -587250f3c3 -589e4cc1de -589f65f5d5 -58a07c17d5 -58adc6d8b6 -58b9bcf656 -58c374917e -58fc75fd42 -5914c30f05 -59323787d5 -5937b08d69 -594065ddd7 -595a0ceea6 -59623ec40b -597ff7ef78 -598935ef05 -598c2ad3b2 -59a6459751 -59b175e138 -59bf0a149f -59d53d1649 -59e3e6fae7 -59fe33e560 -5a13a73fe5 -5a25c22770 -5a4a785006 -5a50640995 -5a75f7a1cf -5a841e59ad -5a91c5ab6d -5ab49d9de0 -5aba1057fe -5abe46ba6d -5ac7c88d0c -5aeb95cc7d -5af15e4fc3 -5afe381ae4 -5b07b4229d -5b1001cc4f -5b1df237d2 -5b263013bf -5b27d19f0b -5b48ae16c5 -5b5babc719 -5baaebdf00 -5bab55cdbe -5bafef6e79 -5bd1f84545 -5bddc3ba25 -5bdf7c20d2 -5bf23bc9d3 -5c01f6171a -5c021681b7 -5c185cff1d -5c42aba280 -5c44bf8ab6 -5c4c574894 -5c52fa4662 -5c6ea7dac3 -5c74315dc2 -5c7668855e -5c83e96778 -5ca36173e4 -5cac477371 -5cb0cb1b2f -5cb0cfb98f -5cb49a19cf -5cbf7dc388 -5d0e07d126 -5d1e24b6e3 -5d663000ff -5da6b2dc5d -5de9b90f24 -5e08de0ed7 -5e1011df9a -5e1ce354fd -5e35512dd7 -5e418b25f9 -5e4849935a -5e4ee19663 -5e886ef78f -5e8d00b974 -5e8d59dc31 -5ed838bd5c -5edda6ee5a -5ede4d2f7a -5ede9767da -5eec4d9fe5 -5eecf07824 -5eef7ed4f4 -5ef5860ac6 -5ef6573a99 -5f1193e72b -5f29ced797 -5f32cf521e -5f51876986 -5f6ebe94a9 -5f6f14977c -5f808d0d2d -5fb8aded6a -5fba90767d -5fd1c7a3df -5fd3da9f68 -5fee2570ae -5ff66140d6 -5ff8b85b53 -600803c0f6 -600be7f53e -6024888af8 -603189a03c -6057307f6e -6061ddbb65 -606c86c455 -60c61cc2e5 -60e51ff1ae -610e38b751 -61344be2f6 -6135e27185 -614afe7975 -614e571886 -614e7078db -619812a1a7 -61b481a78b -61c7172650 -61cf7e40d2 -61d08ef5a1 -61da008958 -61ed178ecb -61f5d1282c -61fd977e49 -621584cffe -625817a927 -625892cf0b -625b89d28a -629995af95 -62a0840bb5 -62ad6e121c -62d6ece152 -62ede7b2da -62f025e1bc -6316faaebc -63281534dc -634058dda0 -6353f09384 -6363c87314 -636e4872e0 -637681cd6b -6376d49f31 -6377809ec2 -63936d7de5 -639bddef11 -63d37e9fd3 -63d90c2bae -63e544a5d6 -63ebbcf874 -63fff40b31 -6406c72e4d -64148128be -6419386729 -643092bc41 -644081b88d -64453cf61d -644bad9729 -6454f548fd -645913b63a -64750b825f -64a43876b7 -64dd6c83e3 -64e05bf46e -64f55f1478 -650b0165e4 -651066ed39 -652b67d960 -653821d680 -6538d00d73 -65866dce22 -6589565c8c -659832db64 -65ab7e1d98 -65b7dda462 -65bd5eb4f5 -65dcf115ab -65e9825801 -65f9afe51c -65ff12bcb5 -666b660284 -6671643f31 -668364b372 -66852243cb -6693a52081 -669b572898 -66e98e78f5 -670f12e88f -674c12c92d -675c27208a -675ed3e1ca -67741db50a -678a2357eb -67b0f4d562 -67cfbff9b1 -67e717d6bd -67ea169a3b -67ea809e0e -681249baa3 -683de643d9 -6846ac20df -6848e012ef -684bcd8812 -684dc1c40c -685a1fa9cf -686dafaac9 -68807d8601 -6893778c77 -6899d2dabe -68a2fad4ab -68cb45fda3 -68cc4a1970 -68dcb40675 -68ea4a8c3d -68f6e7fbf0 -68fa8300b4 -69023db81f -6908ccf557 -691a111e7c -6927723ba5 -692ca0e1a2 -692eb57b63 -69340faa52 -693cbf0c9d -6942f684ad -6944fc833b -69491c0ebf -695b61a2b0 -6979b4d83f -697d4fdb02 -69910460a4 -6997636670 -69a436750b -69aebf7669 -69b8c17047 -69c67f109f -69e0e7b868 -69ea9c09d1 -69f0af42a6 -6a078cdcc7 -6a37a91708 -6a42176f2e -6a48e4aea8 -6a5977be3a -6a5de0535f -6a80d2e2e5 -6a96c8815d -6a986084e2 -6aa8e50445 -6ab9dce449 -6abf0ba6b2 -6acc6049d9 -6adb31756c -6ade215eb0 -6afb7d50e4 -6afd692f1a -6b0b1044fe -6b17c67633 -6b1b6ef28b -6b1e04d00d -6b2261888d -6b25d6528a -6b3a24395c -6b685eb75b -6b79be238c -6b928b7ba6 -6b9c43c25a -6ba99cc41f -6bdab62bcd -6bf2e853b1 -6bf584200f -6bf95df2b9 -6c0949c51c -6c11a5f11f -6c23d89189 -6c4387daf5 -6c4ce479a4 -6c5123e4bc -6c54265f16 -6c56848429 -6c623fac5f -6c81b014e9 -6c99ea7c31 -6c9d29d509 -6c9e3b7d1a 
-6ca006e283 -6caeb928d6 -6cb2ee722a -6cbfd32c5e -6cc791250b -6cccc985e0 -6d12e30c48 -6d4bf200ad -6d6d2b8843 -6d6eea5682 -6d7a3d0c21 -6d7efa9b9e -6da21f5c91 -6da6adabc0 -6dd2827fbb -6dd36705b9 -6df3637557 -6dfe55e9e5 -6e1a21ba55 -6e2f834767 -6e36e4929a -6e4f460caf -6e618d26b6 -6ead4670f7 -6eaff19b9f -6eb2e1cd9e -6eb30b3b5a -6eca26c202 -6ecad29e52 -6ef0b44654 -6efcfe9275 -6f4789045c -6f49f522ef -6f67d7c4c4 -6f96e91d81 -6fc6fce380 -6fc9b44c00 -6fce7f3226 -6fdf1ca888 -702fd8b729 -70405185d2 -7053e4f41e -707bf4ce41 -7082544248 -708535b72a -7094ac0f60 -70a6b875fa -70c3e97e41 -7106b020ab -711dce6fe2 -7136a4453f -7143fb084f -714d902095 -7151c53b32 -715357be94 -7163b8085f -716df1aa59 -71caded286 -71d2665f35 -71d67b9e19 -71e06dda39 -720b398b9c -720e3fa04c -720e7a5f1e -721bb6f2cb -722803f4f2 -72552a07c9 -726243a205 -72690ef572 -728cda9b65 -728e81c319 -72a810a799 -72acb8cdf6 -72b01281f9 -72cac683e4 -72cadebbce -72cae058a5 -72d8dba870 -72e8d1c1ff -72edc08285 -72f04f1a38 -731b825695 -7320b49b13 -732626383b -732df1eb05 -73329902ab -733798921e -733824d431 -734ea0d7fb -735a7cf7b9 -7367a42892 -7368d5c053 -73c6ae7711 -73e1852735 -73e4e5cc74 -73eac9156b -73f8441a88 -7419e2ab3f -74267f68b9 -7435690c8c -747c44785c -747f1b1f2f -748b2d5c01 -74d4cee0a4 -74ec2b3073 -74ef677020 -750be4c4d8 -75172d4ac8 -75285a7eb1 -75504539c3 -7550949b1d -7551cbd537 -75595b453d -7559b4b0ec -755bd1fbeb -756f76f74d -7570ca7f3c -757a69746e -757cac96c6 -7584129dc3 -75a058dbcd -75b09ce005 -75cae39a8f -75cee6caf0 -75cf58fb2c -75d5c2f32a -75eaf5669d -75f7937438 -75f99bd3b3 -75fa586876 -7613df1f84 -762e1b3487 -76379a3e69 -764271f0f3 -764503c499 -7660005554 -7666351b84 -76693db153 -767856368b -768671f652 -768802b80d -76962c7ed2 -76a75f4eee -76b90809f7 -770a441457 -772a0fa402 -772f2ffc3e -774f6c2175 -77610860e0 -777e58ff3d -77920f1708 -7799df28e7 -779e847a9a -77ba4edc72 -77c834dc43 -77d8aa8691 -77e7f38f4d -77eea6845e -7806308f33 -78254660ea -7828af8bff -784398620a -784d201b12 -78613981ed -78896c6baf -78aff3ebc0 -78c7c03716 -78d3676361 -78e29dd4c3 -78f1a1a54f -79208585cd -792218456c -7923bad550 -794e6fc49f -796e6762ce -797cd21f71 -79921b21c2 -79a5778027 -79bc006280 -79bf95e624 -79d9e00c55 -79e20fc008 -79e9db913e -79f014085e -79fcbb433a -7a13a5dfaa -7a14bc9a36 -7a3c535f70 -7a446a51e9 -7a56e759c5 -7a5f46198d -7a626ec98d -7a802264c4 -7a8b5456ca -7abdff3086 -7aecf9f7ac -7b0fd09c28 -7b18b3db87 -7b39fe7371 -7b49e03d4c -7b5388c9f1 -7b5cf7837f -7b733d31d8 -7b74fd7b98 -7b918ccb8a -7ba3ce3485 -7bb0abc031 -7bb5bb25cd -7bb7dac673 -7bc7761b8c -7bf3820566 -7c03a18ec1 -7c078f211b -7c37d7991a -7c4ec17eff -7c649c2aaf -7c73340ab7 -7c78a2266d -7c88ce3c5b -7ca6843a72 -7cc9258dee -7cec7296ae -7d0ffa68a4 -7d11b4450f -7d1333fcbe -7d18074fef -7d18c8c716 -7d508fb027 -7d55f791f0 -7d74e3c2f6 -7d783f67a9 -7d83a5d854 -7dd409947e -7de45f75e5 -7e0cd25696 -7e1922575c -7e1e3bbcc1 -7e24023274 -7e2f212fd3 -7e6d1cc1f4 -7e7cdcb284 -7e9b6bef69 -7ea5b49283 -7eb2605d96 -7eb26b8485 -7ecd1f0c69 -7f02b3cfe2 -7f1723f0d5 -7f21063c3a -7f3658460e -7f54132e48 -7f559f9d4a -7f5faedf8b -7f838baf2b -7fa5f527e3 -7ff84d66dd -802b45c8c4 -804382b1ad -804c558adb -804f6338a4 -8056117b89 -806b6223ab -8088bda461 -80b790703b -80c4a94706 -80ce2e351b -80db581acd -80e12193df -80e41b608f -80f16b016d -81541b3725 -8175486e6a -8179095000 -8193671178 -81a58d2c6b -81aa1286fb -81dffd30fb -8200245704 -823e7a86e8 -824973babb -824ca5538f -827171a845 -8273a03530 -827cf4f886 -82b865c7dd -82c1517708 -82d15514d6 -82e117b900 -82fec06574 -832b5ef379 -83424c9fbf -8345358fb8 -834b50b31b -835e3b67d7 -836ea92b15 -837c618777 
-838eb3bd89 -839381063f -839bc71489 -83a8151377 -83ae88d217 -83ca8bcad0 -83ce590d7f -83d3130ba0 -83d40bcba5 -83daba503a -83de906ec0 -84044f37f3 -84696b5a5e -84752191a3 -847eeeb2e0 -848e7835a0 -84a4b29286 -84a4bf147d -84be115c09 -84d95c4350 -84e0922cf7 -84f0cfc665 -8515f6db22 -851f2f32c1 -852a4d6067 -854c48b02a -857a387c86 -859633d56a -85a4f4a639 -85ab85510c -85b1eda0d9 -85dc1041c6 -85e081f3c7 -85f75187ad -8604bb2b75 -860745b042 -863b4049d7 -8643de22d0 -8647d06439 -864ffce4fe -8662d9441a -8666521b13 -868d6a0685 -869fa45998 -86a40b655d -86a8ae4223 -86b2180703 -86c85d27df -86d3755680 -86e61829a1 -871015806c -871e409c5c -8744b861ce -8749369ba0 -878a299541 -8792c193a0 -8799ab0118 -87d1f7d741 -882b9e4500 -885673ea17 -8859dedf41 -8873ab2806 -887a93b198 -8883e991a9 -8891aa6dfa -8899d8cbcd -88b8274d67 -88d3b80af6 -88ede83da2 -88f345941b -890976d6da -8909bde9ab -8929c7d5d9 -89363acf76 -89379487e0 -8939db6354 -893f658345 -8953138465 -895c96d671 -895cbf96f9 -895e8b29a7 -898fa256c8 -89986c60be -89b874547b -89bdb021d5 -89c802ff9c -89d6336c2b -89ebb27334 -8a27e2407c -8a31f7bca5 -8a4a2fc105 -8a5d6c619c -8a75ad7924 -8aa817e4ed -8aad0591eb -8aca214360 -8ae168c71b -8b0cfbab97 -8b3645d826 -8b3805dbd4 -8b473f0f5d -8b4f6d1186 -8b4fb018b7 -8b518ee936 -8b523bdfd6 -8b52fb5fba -8b91036e5c -8b99a77ac5 -8ba04b1e7b -8ba782192f -8bbeaad78b -8bd1b45776 -8bd7a2dda6 -8bdb091ccf -8be56f165d -8be950d00f -8bf84e7d45 -8bffc4374b -8bfff50747 -8c09867481 -8c0a3251c3 -8c3015cccb -8c469815cf -8c9ccfedc7 -8ca1af9f3c -8ca3f6e6c1 -8ca6a4f60f -8cac6900fe -8cba221a1e -8cbbe62ccd -8d064b29e2 -8d167e7c08 -8d4ab94e1c -8d81f6f899 -8d87897d66 -8dcccd2bd2 -8dcfb878a8 -8dd3ab71b9 -8dda6bf10f -8ddd51ca94 -8dea22c533 -8def5bd3bf -8e1848197c -8e3a83cf2d -8e478e73f3 -8e98ae3c84 -8ea6687ab0 -8eb0d315c1 -8ec10891f9 -8ec3065ec2 -8ecf51a971 -8eddbab9f7 -8ee198467a -8ee2368f40 -8ef595ce82 -8f0a653ad7 -8f1204a732 -8f1600f7f6 -8f16366707 -8f1ce0a411 -8f2e05e814 -8f320d0e09 -8f3b4a84ad -8f3fdad3da -8f5d3622d8 -8f62a2c633 -8f81c9405a -8f8c974d53 -8f918598b6 -8ff61619f6 -9002761b41 -90107941f3 -90118a42ee -902bc16b37 -903e87e0d6 -9041a0f489 -9047bf3222 -9057bfa502 -90617b0954 -9076f4b6db -9077e69b08 -909655b4a6 -909c2eca88 -909dbd1b76 -90bc4a319a -90c7a87887 -90cc785ddd -90d300f09b -9101ea9b1b -9108130458 -911ac9979b -9151cad9b5 -9153762797 -91634ee0c9 -916942666f -9198cfb4ea -919ac864d6 -91b67d58d4 -91bb8df281 -91be106477 -91c33b4290 -91ca7dd9f3 -91d095f869 -91f107082e -920329dd5e -920c959958 -92128fbf4b -9223dacb40 -923137bb7f -9268e1f88a -927647fe08 -9276f5ba47 -92a28cd233 -92b5c1fc6d -92c46be756 -92dabbe3a0 -92e3159361 -92ebab216a -934bdc2893 -9359174efc -935d97dd2f -935feaba1b -93901858ee -939378f6d6 -939bdf742e -93a22bee7e -93da9aeddf -93e2feacce -93e6f1fdf9 -93e811e393 -93e85d8fd3 -93f623d716 -93ff35e801 -94031f12f2 -94091a4873 -94125907e3 -9418653742 -941c870569 -94209c86f0 -9437c715eb -9445c3eca2 -9467c8617c -946d71fb5d -948f3ae6fb -9498baa359 -94a33abeab -94bf1af5e3 -94cf3a8025 -94db712ac8 -94e4b66cff -94e76cbaf6 -950be91db1 -952058e2d0 -952633c37f -952ec313fe -9533fc037c -9574b81269 -9579b73761 -957f7bc48b -958073d2b0 -9582e0eb33 -9584092d0b -95b58b8004 -95bd88da55 -95f74a9959 -962781c601 -962f045bf5 -964ad23b44 -967b90590e -967bffe201 -96825c4714 -968492136a -9684ef9d64 -968c41829e -96a856ef9a -96dfc49961 -96e1a5b4f8 -96e6ff0917 -96fb88e9d7 -96fbe5fc23 -96fc924050 -9715cc83dc -9720eff40f -972c187c0d -97476eb38d -97659ed431 -9773492949 -97756b264f -977bff0d10 -97ab569ff3 -97ba838008 -97d9d008c7 -97e59f09fa -97eb642e56 -98043e2d14 -981ff580cf 
-983e66cbfc -984f0f1c36 -98595f2bb4 -985c3be474 -9869a12362 -986b5a5e18 -9877af5063 -98911292da -9893a3cf77 -9893d9202d -98a8b06e7f -98ac6f93d9 -98b6974d12 -98ba3c9417 -98c7c00a19 -98d044f206 -98e909f9d1 -98fe7f0410 -990f2742c7 -992bd0779a -994b9b47ba -9955b76bf5 -9966f3adac -997117a654 -999d53d841 -99c04108d3 -99c4277aee -99c6b1acf2 -99dc8bb20b -99fcba71e5 -99fecd4efb -9a02c70ba2 -9a08e7a6f8 -9a2f2c0f86 -9a3254a76e -9a3570a020 -9a39112493 -9a4e9fd399 -9a50af4bfb -9a68631d24 -9a72318dbf -9a767493b7 -9a7fc1548b -9a84ccf6a7 -9a9c0e15b7 -9adf06d89b -9b22b54ee4 -9b473fc8fe -9b4f081782 -9b997664ba -9bc454e109 -9bccfd04de -9bce4583a2 -9bebf1b87f -9bfc50d261 -9c166c86ff -9c293ef4d7 -9c29c047b0 -9c3bc2e2a7 -9c3ce23bd1 -9c404cac0c -9c5180d23a -9c7feca6e4 -9caa49d3ff -9cb2f1b646 -9ce6f765c3 -9cfee34031 -9d01f08ec6 -9d04c280b8 -9d12ceaddc -9d15f8cb3c -9d2101e9bf -9d407c3aeb -9ddefc6165 -9df0b1e298 -9e16f115d8 -9e249b4982 -9e29b1982c -9e493e4773 -9e4c752cd0 -9e4de40671 -9e6319faeb -9e6ddbb52d -9eadcea74f -9ecec5f8ea -9efb47b595 -9f30bfe61e -9f3734c3a4 -9f5b858101 -9f66640cda -9f913803e9 -9f97bc74c8 -9fbad86e20 -9fc2bad316 -9fc5c3af78 -9fcb310255 -9fcc256871 -9fd2fd4d47 -a0071ae316 -a023141022 -a046399a74 -a066e739c1 -a06722ba82 -a07a15dd64 -a07b47f694 -a09c39472e -a0b208fe2e -a0b61c959e -a0bc6c611d -a0e6da5ba2 -a1193d6490 -a14ef483ff -a14f709908 -a15ccc5658 -a16062456f -a174e8d989 -a177c2733c -a17c62e764 -a18ad065fc -a1aaf63216 -a1bb65fb91 -a1bd8e5349 -a1dfdd0cac -a2052e4f6c -a20fd34693 -a21ffe4d81 -a22349e647 -a235d01ec1 -a24f63e8a2 -a2554c9f6d -a263ce8a87 -a29bfc29ec -a2a80072d4 -a2a800ab63 -a2bcd10a33 -a2bdaff3b0 -a2c146ab0d -a2c996e429 -a2dc51ebe8 -a2e6608bfa -a2f2a55f01 -a301869dea -a31fccd2cc -a34f440f33 -a35e0206da -a36bdc4cab -a36e8c79d8 -a378053b20 -a37db3a2b3 -a38950ebc2 -a39a0eb433 -a39c9bca52 -a3a945dc8c -a3b40a0c1e -a3b8588550 -a3c502bec3 -a3f2878017 -a3f4d58010 -a3f51855c3 -a402dc0dfe -a4065a7eda -a412bb2fef -a416b56b53 -a41ec95906 -a43299e362 -a4757bd7af -a48c53c454 -a49dcf9ad5 -a4a506521f -a4ba7753d9 -a4bac06849 -a4f05d681c -a50c10060f -a50eb5a0ea -a5122c6ec6 -a522b1aa79 -a590915345 -a5b5b59139 -a5b77abe43 -a5c2b2c3e1 -a5cd17bb11 -a5da03aef1 -a5dd11de0d -a5ea2b93b6 -a5eaeac80b -a5ec5b0265 -a5f350a87e -a5f472caf4 -a6027a53cf -a61715bb1b -a61cf4389d -a61d9bbd9b -a6470dbbf5 -a64a40f3eb -a653d5c23b -a65bd23cb5 -a66e0b7ad4 -a66fc5053c -a68259572b -a6a810a92c -a6bc36937f -a6c3a374e9 -a6d8a4228d -a6f4e0817f -a71e0481f5 -a7203deb2d -a7392d4438 -a73d3c3902 -a7491f1578 -a74b9ca19c -a77b7a91df -a78195a5f5 -a78758d4ce -a7e6d6c29a -a800d85e88 -a832fa8790 -a83d06410d -a8999af004 -a8f78125b9 -a907b18df1 -a919392446 -a965504e88 -a96b84b8d2 -a973f239cd -a977126596 -a9804f2a08 -a984e56893 -a99738f24c -a99bdd0079 -a9c9c1517e -a9cbf9c41b -a9e42e3c0c -aa07b7c1c0 -aa175e5ec7 -aa1a338630 -aa27d7b868 -aa45f1caaf -aa49e46432 -aa51934e1b -aa6287bb6c -aa6d999971 -aa85278334 -aab33f0e2a -aaba004362 -aade4cf385 -aae78feda4 -aaed233bf3 -aaff16c2db -ab199e8dfb -ab23b78715 -ab2e1b5577 -ab33a18ded -ab45078265 -ab56201494 -ab90f0d24b -abab2e6c20 -abb50c8697 -abbe2d15a0 -abbe73cd21 -abe61a11bb -abeae8ce21 -ac2b431d5f -ac2cb1b9eb -ac31fcd6d0 -ac3d3a126d -ac46bd8087 -ac783ef388 -acb73e4297 -acbf581760 -accafc3531 -acf2c4b745 -acf44293a2 -acf736a27b -acff336758 -ad1fe56886 -ad28f9b9d9 -ad2de9f80e -ad397527b2 -ad3d1cfbcb -ad3fada9d9 -ad4108ee8e -ad54468654 -ad573f7d31 -ad6255bc29 -ad65ebaa07 -ad97cc064a -adabbd1cc4 -adb0b5a270 -adc648f890 -add21ee467 -adfd15ceef -adfdd52eac -ae01cdab63 -ae0b50ff4f -ae13ee3d70 -ae1bcbd423 
-ae20d09dea -ae2cecf5f6 -ae3bc4a0ef -ae499c7514 -ae628f2cd4 -ae8545d581 -ae93214fe6 -ae9cd16dbf -aeba9ac967 -aebb242b5c -aed4e0b4c4 -aedd71f125 -aef3e2cb0e -af0b54cee3 -af3de54c7a -af5fd24a36 -af8826d084 -af8ad72057 -afb71e22c5 -afcb331e1f -afe1a35c1e -b01080b5d3 -b05ad0d345 -b0623a6232 -b064dbd4b7 -b06ed37831 -b06f5888e6 -b08dcc490e -b0a68228dc -b0aece727f -b0b0731606 -b0c7f11f9f -b0cca8b830 -b0dd580a89 -b0de66ca08 -b0df7c5c5c -b0f5295608 -b11099eb09 -b132a53086 -b1399fac64 -b13abc0c69 -b1457e3b5e -b15bf4453b -b179c4a82d -b17ee70e8c -b190b1aa65 -b19b3e22c0 -b19c561fab -b1d1cd2e6e -b1d7c03927 -b1d7fe2753 -b1f540a4bd -b1fc9c64e1 -b1fcbb3ced -b220939e93 -b22099b419 -b241e95235 -b2432ae86d -b2456267df -b247940d01 -b24af1c35c -b24f600420 -b24fe36b2a -b258fb0b7d -b26b219919 -b26d9904de -b274456ce1 -b27b28d581 -b2a26bc912 -b2a9c51e1b -b2b0baf470 -b2b2756fe7 -b2ce7699e3 -b2edc76bd2 -b2f6b52100 -b30bf47bcd -b34105a4e9 -b372a82edf -b3779a1962 -b379ab4ff5 -b37a1d69e3 -b37c01396e -b382b09e25 -b3996e4ba5 -b3d9ca2aee -b3dde1e1e9 -b3eb7f05eb -b40b25055c -b41e0f1f19 -b44e32a42b -b4805ae9cd -b4807569a5 -b48efceb3e -b493c25c7f -b4b565aba1 -b4b715a15b -b4d0c90bf4 -b4d84bc371 -b4e5ad97aa -b4eaea9e6b -b50f4b90d5 -b53f675641 -b54278cd43 -b554843889 -b573c0677a -b58d853734 -b5943b18ab -b5a09a83f3 -b5aae1fe25 -b5b9da5364 -b5eb64d419 -b5ebb1d000 -b5f1c0c96a -b5f7fece90 -b6070de1bb -b60a76fe73 -b61f998772 -b62c943664 -b63094ba0c -b64fca8100 -b673e7dcfb -b678b7db00 -b68fc1b217 -b69926d9fa -b6a1df3764 -b6a4859528 -b6b4738b78 -b6b4f847b7 -b6b8d502d4 -b6bb00e366 -b6d65a9eef -b6d79a0845 -b6e9ec577f -b6ec609f7b -b6f92a308d -b70a2c0ab1 -b70a5a0d50 -b70c052f2f -b70d231781 -b72ac6e10b -b7302d8226 -b73867d769 -b751e767f2 -b76df6e059 -b77e5eddef -b7a2c2c83c -b7bcbe6466 -b7c2a469c4 -b7d69da8f0 -b7f31b7c36 -b7f675fb98 -b7fb871660 -b82e5ad1c9 -b841cfb932 -b84b8ae665 -b85b78ac2b -b86c17caa6 -b86e50d82d -b871db031a -b87d56925a -b8aaa59b75 -b8c03d1091 -b8c3210036 -b8e16df00b -b8f34cf72e -b8fb75864e -b9004db86c -b9166cbae9 -b920b256a6 -b938d79dff -b93963f214 -b941aef1a0 -b94d34d14e -b964c57da4 -b96a95bc7a -b96c57d2c7 -b9b6bdde0c -b9bcb3e0f2 -b9d3b92169 -b9dd4b306c -b9f43ef41e -ba1f03c811 -ba3a775d7b -ba3c7f2a31 -ba3fcd417d -ba5e1f4faa -ba795f3089 -ba8a291e6a -ba98512f97 -bac9db04f5 -baedae3442 -baff40d29d -bb04e28695 -bb1b0ee89f -bb1c770fe7 -bb1fc34f99 -bb2d220506 -bb334e5cdb -bb337f9830 -bb721eb9aa -bb87ff58bd -bb89a6b18a -bbaa9a036a -bbb4302dda -bbd31510cf -bbe0256a75 -bc141b9ad5 -bc17ab8a99 -bc318160de -bc3b9ee033 -bc4240b43c -bc4ce49105 -bc4f71372d -bc6b8d6371 -bcaad44ad7 -bcc241b081 -bcc5d8095e -bcd1d39afb -bd0d849da4 -bd0e9ed437 -bd2c94730f -bd321d2be6 -bd3ec46511 -bd5b2e2848 -bd7e02b139 -bd96f9943a -bda224cb25 -bda4a82837 -bdb74e333f -bdccd69dde -bddcc15521 -be116aab29 -be15e18f1e -be1a284edb -be2a367a7b -be376082d0 -be3e3cffbd -be5d1d89a0 -be8b72fe37 -be9b29e08e -bea1f6e62c -bea83281b5 -beb921a4c9 -bec5e9edcd -beeb8a3f92 -bf2232b58d -bf28751739 -bf443804e8 -bf461df850 -bf5374f122 -bf551a6f60 -bf8d0f5ada -bf961167a6 -bfab1ad8f9 -bfcb05d88d -bfd8f6e6c9 -bfd91d0742 -bfe262322f -c013f42ed7 -c01878083f -c01faff1ed -c046fd0edb -c053e35f97 -c079a6482d -c0847b521a -c0a1e06710 -c0e8d4635c -c0e973ad85 -c0f49c6579 -c0f5b222d7 -c10d07c90d -c1268d998c -c130c3fc0c -c14826ad5e -c15b922281 -c16f09cb63 -c18e19d922 -c1c830a735 -c1e8aeea45 -c20a5ccc99 -c20fd5e597 -c219d6f8dc -c2406ae462 -c26f7b5824 -c279e641ee -c27adaeac5 -c2a35c1cda -c2a9903b8b -c2b62567c1 -c2b974ec8c -c2baaff7bf -c2be6900f2 -c304dd44d5 -c307f33da2 -c30a7b62c9 -c3128733ee 
-c31fa6c598 -c325c8201e -c32d4aa5d1 -c33f28249a -c34365e2d7 -c3457af795 -c34d120a88 -c3509e728d -c35e4fa6c4 -c36240d96f -c3641dfc5a -c37b17a4a9 -c39559ddf6 -c3b0c6e180 -c3b3d82e6c -c3be369fdb -c3bf1e40c2 -c3c760b015 -c3dd38bf98 -c3e4274614 -c3edc48cbd -c41e6587f5 -c4272227b0 -c42917fe82 -c438858117 -c44676563f -c44beb7472 -c45411dacb -c4571bedc8 -c46deb2956 -c479ee052e -c47d551843 -c49f07d46d -c4cc40c1fc -c4f256f5d5 -c4f5b1ddcc -c4ff9b4885 -c52bce43db -c544da6854 -c55784c766 -c557b69fbf -c593a3f7ab -c598faa682 -c5ab1f09c8 -c5b6da8602 -c5b9128d94 -c5e845c6b7 -c5fba7b341 -c60897f093 -c61fe6ed7c -c62188c536 -c64035b2e2 -c69689f177 -c6a12c131f -c6bb6d2d5c -c6c18e860f -c6d9526e0d -c6e55c33f0 -c7030b28bd -c70682c7cc -c70f9be8c5 -c71f30d7b6 -c73c8e747f -c760eeb8b3 -c7637cab0a -c7a1a17308 -c7bf937af5 -c7c2860db3 -c7cef4aee2 -c7ebfc5d57 -c813dcf13c -c82235a49a -c82a7619a1 -c82ecb90cb -c844f03dc7 -c8557963f3 -c89147e6e8 -c8a46ff0c8 -c8ab107dd5 -c8b869a04a -c8c7b306a6 -c8c8b28781 -c8d79e3163 -c8edab0415 -c8f494f416 -c8f6cba9fd -c909ceea97 -c9188f4980 -c922365dd4 -c92c8c3c75 -c937eb0b83 -c94b31b5e5 -c95cd17749 -c96379c03c -c96465ee65 -c965afa713 -c9734b451f -c9862d82dc -c98b6fe013 -c9999b7c48 -c99e92aaf0 -c9b3a8fbda -c9bf64e965 -c9c3cb3797 -c9d1c60cd0 -c9de9c22c4 -ca1828fa54 -ca346f17eb -ca3787d3d3 -ca4b99cbac -ca91c69e3b -ca91e99105 -caa8e97f81 -caac5807f8 -cabba242c2 -cad5a656a9 -cad673e375 -cad8a85930 -cae7b0a02b -cae7ef3184 -caeb6b6cbb -caecf0a5db -cb15312003 -cb2e35d610 -cb35a87504 -cb3f22b0cf -cbb410da64 -cc8728052e -cc892997b8 -cce03c2a9b -cd47a23e31 -cd4dc03dc0 -cd5ae611da -cd603bb9d1 -cd8f49734c -cdc6b1c032 -cdcfe008ad -cdd57027c2 -ce1af99b4b -ce1bc5743a -ce25872021 -ce2776f78f -ce49b1f474 -ce4f0a266f -ce5641b195 -ce6866aa19 -ce712ed3c9 -ce7d1c8117 -ce7dbeaa88 -ce9b015a5e -cea7697b25 -cebbd826cf -cec3415361 -cec41ad4f4 -ced49d26df -ced7705ab2 -cef824a1e1 -cf13f5c95a -cf4376a52d -cf85ab28b5 -cfc2e50b9d -cfcd571fff -cfd9d4ae47 -cfda2dcce5 -cff035928b -cff8191891 -d01608c2a5 -d01a8f1f83 -d021d68bca -d04258ca14 -d0483573dc -d04a90aaff -d05279c0bd -d0696bd5fc -d072fda75b -d0a83bcd9f -d0ab39112e -d0acde820f -d0b4442c71 -d0c65e9e95 -d0fb600c73 -d107a1457c -d123d674c1 -d14d1e9289 -d154e3388e -d177e9878a -d1802f69f8 -d182c4483a -d195d31128 -d200838929 -d205e3cff5 -d247420c4c -d2484bff33 -d26f6ed9b0 -d280fcd1cb -d2857f0faa -d292a50c7f -d295ea2dc7 -d2a58b4fa6 -d2b026739a -d2ebe0890f -d2ede5d862 -d301ca58cc -d3069da8bb -d343d4a77d -d355e634ef -d367fb5253 -d36d16358e -d38bc77e2c -d38d1679e2 -d3932ad4bd -d3987b2930 -d39934abe3 -d3ae1c3f4c -d3b088e593 -d3e6e05e16 -d3eefae7c5 -d3f55f5ab8 -d3f5c309cc -d4034a7fdf -d4193011f3 -d429c67630 -d42c0ff975 -d44a764409 -d44e6acd1d -d45158c175 -d454e8444f -d45f62717e -d48ebdcf74 -d49ab52a25 -d4a607ad81 -d4b063c7db -d4da13e9ba -d4dd1a7d00 -d4f4f7c9c3 -d521aba02e -d535bb1b97 -d53b955f78 -d55cb7a205 -d55f247a45 -d5695544d8 -d5853d9b8b -d5b6c6d94a -d5cae12834 -d5df027f0c -d5ee40e5d0 -d600046f73 -d632fd3510 -d6476cad55 -d65a7bae86 -d664c89912 -d689658f06 -d6917db4be -d69967143e -d699d3d798 -d69f757a3f -d6ac0e065c -d6c02bfda5 -d6c1b5749e -d6e12ef6cc -d6eed152c4 -d6faaaf726 -d704766646 -d708e1350c -d7135cf104 -d7157a9f44 -d719cf9316 -d724134cfd -d73a60a244 -d7411662da -d74875ea7c -d756f5a694 -d7572b7d8a -d763bd6d96 -d7697c8b13 -d7797196b4 -d79c834768 -d7b34e5d73 -d7bb6b37a7 -d7c7e064a6 -d7fbf545b3 -d82a0aa15b -d847e24abd -d8596701b7 -d86101499c -d87069ba86 -d87160957b -d874654b52 -d88a403092 -d8aee40f3f -d8e77a222d -d8eb07c381 -d9010348a1 -d90e3cf281 -d92532c7b2 -d927fae122 
-d95707bca8 -d973b31c00 -d991cb471d -d992c69d37 -d99d770820 -d9b63abc11 -d9db6f1983 -d9e52be2d2 -d9edc82650 -da01070697 -da070ea4b7 -da080507b9 -da0e944cc4 -da28d94ff4 -da5d78b9d1 -da6003fc72 -da690fee9f -da6c68708f -da7a816676 -dac361e828 -dac71659b8 -dad980385d -daebc12b77 -db0968cdd3 -db231a7100 -db59282ace -db7f267c3f -dba35b87fd -dbba735a50 -dbca076acd -dbd66dc3ac -dbdc3c292b -dbf4a5b32b -dbfc417d28 -dc1745e0a2 -dc32a44804 -dc34b35e30 -dc504a4f79 -dc704dd647 -dc71bc6918 -dc7771b3be -dcf8c93617 -dd0f4c9fb9 -dd415df125 -dd601f9a3f -dd61d903df -dd77583736 -dd8636bd8b -dd9fe6c6ac -ddb2da4c14 -ddcd450d47 -dde8e67fb4 -ddfc3f04d3 -de2ab79dfa -de2f35b2fd -de30990a51 -de36b216da -de37403340 -de46e4943b -de4ddbccb1 -de5e480f05 -de6a9382ca -de74a601d3 -de827c510d -ded6069f7b -defb71c741 -df01f277f1 -df05214b82 -df0638b0a0 -df11931ffe -df1b0e4620 -df20a8650d -df2bc56d7c -df365282c6 -df39a0d9df -df3c430c24 -df5536cfb9 -df59cfd91d -df5e2152b3 -df741313c9 -df7626172f -df8ad5deb9 -df96aa609a -df9705605c -df9c91c4da -dfc0d3d27a -dfdbf91a99 -e00baaae9b -e0a938c6e7 -e0b2ceee6f -e0bdb5dfae -e0be1f6e17 -e0c478f775 -e0de82caa7 -e0f217dd59 -e0f7208874 -e0fb58395e -e1194c2e9d -e11adcd05d -e128124b9d -e1495354e4 -e1561d6d4b -e158805399 -e16945b951 -e19edcd34b -e1a1544285 -e1ab7957f4 -e1d26d35be -e1e957085b -e1f14510fa -e214b160f4 -e2167379b8 -e21acb20ab -e221105579 -e22ddf8a1b -e22de45950 -e22ffc469b -e23cca5244 -e252f46f0b -e25fa6cf39 -e26e486026 -e275760245 -e27bbedbfe -e29e9868a8 -e2b37ff8af -e2b608d309 -e2bef4da9a -e2c87a6421 -e2ea25542c -e2fb1d6497 -e2fcc99117 -e33c18412a -e348377191 -e352cb59c8 -e36ac982f0 -e391bc981e -e39e3e0a06 -e3bf38265f -e3d5b2cd21 -e3d60e82d5 -e3e3245492 -e3e4134877 -e3f4635e03 -e4004ee048 -e402d1afa5 -e415093d27 -e41ceb5d81 -e424653b78 -e42b6d3dbb -e42d60f0d4 -e436d0ff1e -e43d7ae2c5 -e4428801bc -e44e0b4917 -e470345ede -e48e8b4263 -e4922e3726 -e4936852bb -e495f32c60 -e499228f26 -e4af66e163 -e4b2095f58 -e4d19c8283 -e4d4872dab -e4e2983570 -e4eaa63aab -e4ef0a3a34 -e4f8e5f46e -e4ffb6d0dd -e53e21aa02 -e57f4f668b -e588433c1e -e597442c99 -e5abc0e96b -e5be628030 -e5ce96a55d -e5d6b70a9f -e5fde1574c -e625e1d27b -e6261d2348 -e6267d46bc -e6295f223f -e63463d8c6 -e6387bd1e0 -e653883384 -e65f134e0b -e668ef5664 -e672ccd250 -e674510b20 -e676107765 -e699da0cdf -e6be243065 -e6deab5e0b -e6f065f2b9 -e71629e7b5 -e72a7d7b0b -e72f6104e1 -e75a466eea -e76c55933f -e7784ec8ad -e78922e5e6 -e78d450a9c -e7c6354e77 -e7c8de1fce -e7ea10db28 -e803918710 -e8073a140b -e828dd02db -e845994987 -e8485a2615 -e85c5118a7 -e88b6736e4 -e8962324e3 -e8b3018d36 -e8cee8bf0b -e8d97ebece -e8da49ea6a -e8ed1a3ccf -e8f7904326 -e8f8341dec -e8fa21eb13 -e90c10fc4c -e914b8cac8 -e92b6bfea4 -e92e1b7623 -e93f83e512 -e9422ad240 -e9460b55f9 -e9502628f6 -e950befd5f -e9582bdd1b -e95e5afe0f -e97cfac475 -e98d57d99c -e98eda8978 -e99706b555 -e9bc0760ba -e9d3c78bf3 -e9ec1b7ea8 -ea065cc205 -ea138b6617 -ea16d3fd48 -ea2545d64b -ea286a581c -ea320da917 -ea345f3627 -ea3b94a591 -ea444a37eb -ea4a01216b -ea5672ffa8 -eaa99191cb -eaab4d746c -eac7a59bc1 -ead5d3835a -eaec65cfa7 -eaed1a87be -eb2f821c6f -eb383cb82e -eb6992fe02 -eb6ac20a01 -eb6d7ab39e -eb7921facd -eb8fce51a6 -ebbb90e9f9 -ebbf5c9ee1 -ebc4ec32e6 -ebe56e5ef8 -ec1299aee4 -ec139ff675 -ec193e1a01 -ec28252938 -ec387be051 -ec3d4fac00 -ec4186ce12 -ec579c2f96 -ecae59b782 -ecb33a0448 -ece6bc9e92 -ecfedd4035 -ecfff22fd6 -ed3291c3d6 -ed3cd5308d -ed3e6fc1a5 -ed72ae8825 -ed7455da68 -ed844e879f -ed8f814b2b -ed911a1f63 -ed9ff4f649 -eda8ab984b -edb8878849 -edbfdfe1b4 -edd22c46a2 -edd663afa3 -ede3552eae -edeab61ee0 
-ee07583fc0 -ee316eaed6 -ee3f509537 -ee40a1e491 -ee4bf100f1 -ee6f9b01f9 -ee947ed771 -ee9706ac7f -ee9a7840ae -eeb90cb569 -eebf45e5c5 -eeed0c7d73 -ef0061a309 -ef07f1a655 -ef0a8e8f35 -ef232a2aed -ef308ad2e9 -ef44945428 -ef45ce3035 -ef5dde449d -ef5e770988 -ef6359cea3 -ef65268834 -ef6cb5eae0 -ef78972bc2 -ef8cfcfc4f -ef96501dd0 -ef9a2e976b -efb24f950f -efce0c1868 -efe5ac6901 -efe828affa -efea4e0523 -f0268aa627 -f0483250c8 -f04cf99ee6 -f05b189097 -f08928c6d3 -f09d74856f -f0a7607d63 -f0ad38da27 -f0c34e1213 -f0c7f86c29 -f0dfa18ba7 -f0eb3179f7 -f119bab27d -f14409b6a3 -f1489baff4 -f14c18cf6a -f15c607b92 -f1af214222 -f1b77bd309 -f1ba9e1a3e -f1d99239eb -f1dc710cf4 -f1ec5c08fa -f22648fe12 -f22d21f1f1 -f233257395 -f23e95dbe5 -f2445b1572 -f253b3486d -f277c7a6a4 -f2ab2b84d6 -f2b7c9b1f3 -f2b83d5ce5 -f2c276018f -f2cfd94d64 -f2dd6e3add -f2e7653f16 -f2f333ad06 -f2f55d6713 -f2fdb6abec -f305a56d9f -f3085d6570 -f3325c3338 -f3400f1204 -f34497c932 -f34a56525e -f36483c824 -f3704d5663 -f3734c4913 -f38e5aa5b4 -f3986fba44 -f3a0ffc7d9 -f3b24a7d28 -f3e6c35ec3 -f3fc0ea80b -f40a683fbe -f4207ca554 -f4377499c2 -f46184f393 -f46c2d0a6d -f46c364dca -f46f7a0b63 -f46fe141b0 -f470b9aeb0 -f47eb7437f -f48b535719 -f49e4866ac -f4aa882cfd -f4daa3dbd5 -f4dd51ac35 -f507a1b9dc -f51c5ac84b -f52104164b -f54c67b9bb -f5966cadd2 -f5bddf5598 -f5d85cfd17 -f5e2e7d6a0 -f5f051e9b4 -f5f8a93a76 -f6283e8af5 -f635e9568b -f6474735be -f659251be2 -f66981af4e -f6708fa398 -f697fe8e8f -f6adb12c42 -f6c7906ca4 -f6cd0a8016 -f6d6f15ae7 -f6e501892c -f6f59d986f -f6fe8c90a5 -f714160545 -f74c3888d7 -f7782c430e -f7783ae5f2 -f77ab47923 -f788a98327 -f7961ac1f0 -f7a71e7574 -f7a8521432 -f7afbf4947 -f7b7cd5f44 -f7cf4b4a39 -f7d49799ad -f7e0c9bb83 -f7e5b84928 -f7e6bd58be -f7f2a38ac6 -f7f6cb2d6d -f83f19e796 -f85796a921 -f8603c26b2 -f8819b42ec -f891f8eaa1 -f89288d10c -f895ae8cc1 -f8b4ac12f1 -f8c3fb2b01 -f8c8de2764 -f8db369b40 -f8fcb6a78c -f94aafdeef -f95d217b70 -f9681d5103 -f9750192a4 -f9823a32c2 -f991ddb4c2 -f99d535567 -f9ae3d98b7 -f9b6217959 -f9bd1fabf5 -f9c68eaa64 -f9d3e04c4f -f9daf64494 -f9e4cc5a0a -f9ea6b7f31 -f9f3852526 -fa04c615cf -fa08e00a56 -fa4370d74d -fa67744af3 -fa88d48a92 -fa8b904cc9 -fa9526bdf1 -fa9b9d2426 -fad633fbe1 -faf5222dc3 -faff0e15f1 -fb08c64e8c -fb23455a7f -fb2e19fa6e -fb34dfbb77 -fb47fcea1e -fb49738155 -fb4cbc514b -fb4e6062f7 -fb5ba7ad6e -fb63cd1236 -fb81157a07 -fb92abdaeb -fba22a6848 -fbaca0c9df -fbc645f602 -fbd77444cd -fbe53dc8e8 -fbe541dd73 -fbe8488798 -fbfd25174f -fc28cb305e -fc33b1ffd6 -fc6186f0bb -fc918e3a40 -fc96cda9d8 -fc9832eea4 -fcb10d0f81 -fcd20a2509 -fcf637e3ab -fcfd81727f -fd31890379 -fd33551c28 -fd542da05e -fd6789b3fe -fd77828200 -fd7af75f4d -fdb28d0fbb -fdb3d1fb1e -fdb8b04124 -fdc6e3d581 -fdfce7e6fc -fe0f76d41b -fe24b0677d -fe3c02699d -fe58b48235 -fe6a5596b8 -fe6c244f63 -fe7afec086 -fe985d510a -fe9db35d15 -fea8ffcd36 -feb1080388 -fed208bfca -feda5ad1c2 -feec95b386 -ff15a5eff6 -ff204daf4b -ff25f55852 -ff2ada194f -ff2ce142e8 -ff49d36d20 -ff5a1ec4f3 -ff66152b25 -ff692fdc56 -ff773b1a1e -ff97129478 -ffb904207d -ffc43fc345 -fffe5f8df6
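For reference, a minimal standalone sketch of the all_to_onehot helper that this diff inlines into tracker/util/mask_mapper.py (it was previously imported from dataset.util). The toy annotation, the background-dropping line, and the print call below are illustrative assumptions, not code from the repository:

import numpy as np

# Same logic as the helper added to tracker/util/mask_mapper.py by this diff:
# expand an index mask (H, W), or a stack of masks (T, H, W), into one binary
# channel per label.
def all_to_onehot(masks, labels):
    if len(masks.shape) == 3:
        Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1], masks.shape[2]), dtype=np.uint8)
    else:
        Ms = np.zeros((len(labels), masks.shape[0], masks.shape[1]), dtype=np.uint8)
    for ni, l in enumerate(labels):
        Ms[ni] = (masks == l).astype(np.uint8)
    return Ms

# Hypothetical first-frame annotation: background 0, two objects labelled 1 and 2.
annotation = np.zeros((4, 4), dtype=np.uint8)
annotation[:2, :2] = 1
annotation[2:, 2:] = 2

labels = np.unique(annotation)
labels = labels[labels != 0]  # assumption: background is dropped before one-hot conversion
onehot = all_to_onehot(annotation, labels)
print(onehot.shape)  # (2, 4, 4): one binary channel per tracked object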