From 9d8b93db2673f8e123e631cdd111caef4a21c99c Mon Sep 17 00:00:00 2001
From: Adnen Abdessaied
Date: Wed, 10 Aug 2022 16:49:55 +0200
Subject: [PATCH] make code public

---
 README.md                                 |  148 ++
 clevr_utils.py                            |  224 +++
 constraints.py                            | 1049 +++++++++++++
 constraints_minecraft.py                  | 1055 +++++++++++++
 constraints_splitA.py                     | 1055 +++++++++++++
 constraints_splitB.py                     | 1055 +++++++++++++
 executor/__init__.py                      |    0
 executor/clevr_statics.py                 |   47 +
 executor/minecraft_statics.py             |   44 +
 executor/symbolic_executor.py             | 1678 +++++++++++++++
 generate_dataset.py                       |  952 ++++++++++
 generate_dataset_minecraft.py             | 1069 +++++++++++
 global_vars.py                            |   10 +
 minecraft_utils.py                        |  224 +++
 misc/method_overview.png                  |  Bin 0 -> 102188 bytes
 misc/method_smaller.png                   |  Bin 0 -> 67803 bytes
 preprocess_dialogs/preprocess.py          |  735 +++++++++
 prog_generator/clevrDialog_dataset.py     |   94 ++
 prog_generator/models.py                  |  476 ++++++
 prog_generator/optim.py                   |   79 +
 prog_generator/options_caption_parser.py  |  283 ++++
 prog_generator/options_question_parser.py |  326 ++++
 prog_generator/train_caption_parser.py    |  280 ++++
 prog_generator/train_question_parser.py   |  912 +++++++++
 utils.py                                  |   80 +
 utils_preprocess.py                       |   62 +
 26 files changed, 11937 insertions(+)
 create mode 100644 README.md
 create mode 100644 clevr_utils.py
 create mode 100644 constraints.py
 create mode 100644 constraints_minecraft.py
 create mode 100644 constraints_splitA.py
 create mode 100644 constraints_splitB.py
 create mode 100644 executor/__init__.py
 create mode 100644 executor/clevr_statics.py
 create mode 100644 executor/minecraft_statics.py
 create mode 100644 executor/symbolic_executor.py
 create mode 100644 generate_dataset.py
 create mode 100644 generate_dataset_minecraft.py
 create mode 100644 global_vars.py
 create mode 100644 minecraft_utils.py
 create mode 100644 misc/method_overview.png
 create mode 100644 misc/method_smaller.png
 create mode 100644 preprocess_dialogs/preprocess.py
 create mode 100644 prog_generator/clevrDialog_dataset.py
 create mode 100644 prog_generator/models.py
 create mode 100644 prog_generator/optim.py
 create mode 100644 prog_generator/options_caption_parser.py
 create mode 100644 prog_generator/options_question_parser.py
 create mode 100644 prog_generator/train_caption_parser.py
 create mode 100644 prog_generator/train_question_parser.py
 create mode 100644 utils.py
 create mode 100644 utils_preprocess.py

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8ed043a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,148 @@
+
+# NSVD
+
+This repository contains the official code of the paper:
+
+## Neuro-Symbolic Visual Dialog [[PDF](TODO)]
+
+[Adnen Abdessaied](https://adnenabdessaied.de), [Mihai Bace](https://perceptualui.org/people/bace/), [Andreas Bulling](https://perceptualui.org/people/bulling/)
+**Oral Presentation / Poster**
+International Conference on Computational Linguistics (COLING), 2022 / Gyeongju, Republic of Korea.
+
+If you find our code useful or use it in your own projects, please cite our paper:
+
+``TODO``
+
+# Abstract
+
+We propose Neuro-Symbolic Visual Dialog (NSVD), the first method to combine deep learning and symbolic program execution for multi-round visually-grounded reasoning. NSVD significantly outperforms existing purely-connectionist methods on two key challenges inherent to visual dialog: long-distance co-reference resolution as well as vanishing question-answering performance.
+We demonstrate the latter by proposing a more realistic and stricter evaluation scheme in which we use predicted answers for the full dialog history when calculating accuracy. We describe two variants of our model and show that, using this new scheme, our best model achieves an accuracy of 99.72% on CLEVR-Dialog, a relative improvement of more than 10% over the state of the art, while only requiring a fraction of the training data. Moreover, we demonstrate that our neuro-symbolic models have a higher mean first failure round, are more robust against incomplete dialog histories, and generalise better not only to dialogs that are up to three times longer than those seen during training but also to unseen question types and scenes.
+
+# Method
+
+<figure>
+  <p align="center"><img src="misc/method_overview.png" alt="missing"/></p>
+  <figcaption>Overview of our method NSVD.</figcaption>
+</figure>
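+
+The overview above summarises the pipeline: neural parsers translate the caption and each question into symbolic programs, and a symbolic executor runs these programs over the derendered scene while tracking a dialog state, which is how co-references to earlier rounds are resolved. A minimal sketch of this loop (for illustration only; the class and method names below are placeholders, not the actual API of ``executor/symbolic_executor.py``):
+
+```python
+import json
+
+def answer_dialog(scene_json, caption_program, question_programs, Executor):
+    """Run parsed programs over a derendered scene (illustrative sketch)."""
+    with open(scene_json) as f:
+        scene = json.load(f)  # objects, attributes, relationships
+
+    executor = Executor(scene)              # hypothetical executor class
+    executor.run_caption(caption_program)   # initialises the dialog state
+
+    answers = []
+    for program in question_programs:
+        # Each program is executed against the scene plus the dialog state
+        # accumulated in previous rounds.
+        answers.append(executor.run_question(program))
+    return answers
+```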
+
+<figure>
+  <p align="center"><img src="misc/method_smaller.png" alt="missing"/></p>
+  <figcaption>Overview of concat and stack encoders.</figcaption>
+</figure>
+
+# Requirements
+
+- PyTorch 1.3.1
+- Python 3.6
+- Ubuntu 18.04
+
+# Raw Data
+
+## Scene Data
+
+We used CLEVR and Minecraft images in this project. The raw images have a large footprint, so we do not upload them here. However, we provide their json files as well as their derendered versions. They can be found in:
+
+- ``data/scenes/raw``
+- ``data/scenes/derendered``
+
+## Dialog Data
+
+The dialog data we used can be found in ``data/dialogs``.
+You can also create your own data using the ``generate_dataset.py`` script.
+
+# Preprocessing
+
+## Scenes
+
+The derendered scenes do not need any further preprocessing and can be directly used with our neuro-symbolic executor.
+
+## Dialogs
+
+To preprocess the dialogs, follow these steps:
+
+- ``cd preprocess_dialogs``
+
+For the stack encoder, execute
+
+- ``python preprocess.py --input_dialogs_json --input_vocab_json '' --output_vocab_json --output_h5_file --split --mode stack``
+
+For the concat encoder, execute
+
+- ``python preprocess.py --input_dialogs_json --input_vocab_json '' --output_vocab_json --output_h5_file --split --mode concat``
+
+# Training
+
+First, change directory:
+
+- ``cd ../prog_generator``
+
+## Caption Program Parser
+
+To train the caption parser, execute
+
+- ``python train_caption_parser.py --mode train --run_dir --res_path --dataPathTr --dataPathVal --dataPathTest --vocab_path``
+
+## Question Program Parser
+
+To train the question program parser with the stack encoder, execute
+
+- ``python train_question_parser.py --mode train --run_dir --text_log_dir --dataPathTr --dataPathVal --dataPathTest --scenePath --vocab_path --encoder_type 2``
+
+To train the question program parser with the concat encoder, execute
+
+- ``python train_question_parser.py --mode train --run_dir --text_log_dir --dataPathTr --dataPathVal --dataPathTest --scenePath --vocab_path --encoder_type 1``
+
+## Baselines
+
+- [MAC-XXX](https://github.com/ahmedshah1494/clevr-dialog-mac-net/tree/dialog-macnet)
+
+- [HCN](https://github.com/jojonki/Hybrid-Code-Networks)
+
+# Evaluation
+
+To evaluate using the *Hist+GT* scheme, execute
+
+- ``python train_question_parser.py --mode test_with_gt --run_dir --text_log_dir --dataPathTr --dataPathVal --dataPathTest --scenePath --vocab_path --encoder_type <1/2> --questionNetPath --captionNetPath --dialogLen --last_n_rounds``
+
+To evaluate using the *Hist+Pred* scheme, execute
+
+- ``python train_question_parser.py --mode test_with_pred --run_dir --text_log_dir --dataPathTr --dataPathVal --dataPathTest --scenePath --vocab_path --encoder_type <1/2> --questionNetPath --captionNetPath --dialogLen --last_n_rounds``
+
+# Results
+
+We achieve new state-of-the-art performance on CLEVR-Dialog.
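+
+The two schemes below differ only in which answers populate the dialog history at test time: *Hist+GT* uses the ground-truth answers of all previous rounds, while *Hist+Pred* feeds the model's own predictions back in, so early mistakes can propagate. A minimal sketch of the difference (the ``predict`` interface is hypothetical, for illustration only):
+
+```python
+def evaluate_dialog(model, dialog, scheme="hist_pred"):
+    """Per-dialog accuracy under Hist+GT or Hist+Pred (illustrative sketch)."""
+    history, n_correct = [], 0
+    for rnd in dialog:  # each round holds a question and a gold answer
+        pred = model.predict(rnd["question"], history)  # hypothetical API
+        n_correct += int(pred == rnd["answer"])
+        # Hist+GT extends the history with the gold answer,
+        # Hist+Pred with the (possibly wrong) prediction.
+        fed_back = rnd["answer"] if scheme == "hist_gt" else pred
+        history.append((rnd["question"], fed_back))
+    return n_correct / len(dialog)
+```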
+
+## Hist+GT
+
+| Model | Accuracy | NFFR |
+| :---: | :---: | :---: |
+| MAC-CQ | 97.34 | 0.92 |
+| + CAA | 97.87 | 0.94 |
+| + MTM | 97.58 | 0.92 |
+| HCN | 75.88 | 0.34 |
+| **NSVD-concat (Ours)** | 99.59 | 0.98 |
+| **NSVD-stack (Ours)** | **99.72** | **0.99** |
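+
+The abstract refers to the mean first failure round, i.e., how far into a dialog a model gets on average before its first wrong answer; the NFFR column reports a normalised variant of this (values in [0, 1], higher is better). A sketch under that assumed reading (see the paper for the exact definition):
+
+```python
+def nffr(correct_per_round):
+    """correct_per_round: one list of booleans per dialog, one flag per round."""
+    scores = []
+    for rounds in correct_per_round:
+        # Index of the first wrong answer, or the dialog length if none is wrong.
+        first_fail = next((i for i, ok in enumerate(rounds) if not ok), len(rounds))
+        scores.append(first_fail / len(rounds))  # normalise by dialog length
+    return sum(scores) / len(scores)
+```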
+
+## Hist+Pred
+
+| Model | Accuracy | NFFR |
+| :---: | :---: | :---: |
+| MAC-CQ | 41.10 | 0.15 |
+| + CAA | 89.39 | 0.75 |
+| + MTM | 70.39 | 0.46 |
+| HCN | 74.42 | 0.32 |
+| **NSVD-concat (Ours)** | 99.59 | 0.98 |
+| **NSVD-stack (Ours)** | **99.72** | **0.99** |
+
+We refer to our paper for the results of the other experiments.
+
+# Acknowledgements
+
+We thank [Ahmed Shah](https://www.linkedin.com/in/mahmedshah/) for his MAC-XXX implementation, [Junki Ohmura](https://www.linkedin.com/in/junki/) for his HCN implementation, [Jiayuan Mao](https://jiayuanm.com/) for providing us with the Minecraft images, and finally [Satwik Kottur](https://satwikkottur.github.io/) for his clevr-dialog [codebase](https://github.com/satwikkottur/clevr-dialog).
+
+# Contributors
+
+- [Adnen Abdessaied](https://adnenabdessaied.de)
+
+For any questions or enquiries, do not hesitate to contact the above contributor.
+
diff --git a/clevr_utils.py b/clevr_utils.py
new file mode 100644
index 0000000..674c9da
--- /dev/null
+++ b/clevr_utils.py
@@ -0,0 +1,224 @@
+"""Utilities for CLEVR-Dialog dataset generation.
+
+Author: Satwik Kottur
+"""
+
+import copy
+import pdb
+
+import colorama
+
+
+def pretty_print_templates(templates, verbosity=1):
+  """Pretty prints templates.
+
+  Args:
+    templates: Templates to print
+    verbosity: 1 to print name and type of the templates
+  """
+
+  # Verbosity 1: Name and type.
+  print('-'*70)
+  for ii in templates:
+    print('[Name: %s] [Type: %s]' % (ii['name'], ii['type']))
+  print('-'*70)
+  print('Total of %s templates..' % len(templates))
+  print('-'*70)
+
+
+def pretty_print_scene_objects(scene):
+  """Pretty prints scene objects.
+
+  Args:
+    scene: Scene graph containing list of objects
+  """
+
+  for index, ii in enumerate(scene['objects']):
+    print_args = (index, ii['shape'], ii['color'],
+                  ii['size'], ii['material'])
+    print('\t%d : %s-%s-%s-%s' % print_args)
+
+
+def pretty_print_dialogs(dialogs):
+  """Pretty prints generated dialogs.
+
+  Args:
+    dialogs: Generated dialogs to print
+  """
+
+  for scene_id, dialog_datum in enumerate(dialogs):
+    for dialog in dialog_datum['dialogs']:
+      print(dialog['caption'])
+      for round_id, ii in enumerate(dialog['dialog']):
+        coref_id = dialog['graph']['history'][round_id+1]['dependence']
+        in_tuple = (round_id, ii['question'], str(ii['answer']),
+                    ii['template'], str(coref_id))
+        print('\t[Q-%d: %s] [A: %s] [%s] [%s]' % in_tuple)
+
+
+def merge_update_scene_graph(orig_graph, graph_item):
+  """Merges two scene graphs into one.
+
+  Args:
+    orig_graph: Original scene graph
+    graph_item: New graph item to add to the scene graph
+
+  Returns:
+    graph: Deep copy of the original scene graph after merging
+  """
+
+  graph = copy.deepcopy(orig_graph)
+  # Local alias.
+  objects = graph['objects']
+
+  # If not mergeable, return the same scene graph.
+  if not graph_item['mergeable']:
+    return graph
+
+  # 1. Go through each new object
+  # 2. Find its match in objects
+  #    a. If found, assert for a clash of attributes, update
+  #    b. If novel, just add the object as is
+  for new_obj in graph_item['objects']:
+    match_found = False
+    obj = objects.get(new_obj['id'], None)
+
+    if obj:
+      # Assert for existing entries.
+      for attr in new_obj:
+        try:
+          assert new_obj[attr] == obj.get(attr, new_obj[attr]),\
+              'Some of the attributes do not match!'
+        except AssertionError:
+          pdb.set_trace()
+
+      # Add additional keys.
+      objects[new_obj['id']].update(new_obj)
+    else:
+      # Add the new object.
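+      # (objects is keyed by object id, so later rounds that mention this id
+      # will merge into the entry inserted below.)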
+ objects[new_obj['id']] = new_obj + + # if a relation, update it + if 'relation' in graph_item: + rel = graph_item['relation'] + # update it with object 2 id + id1 = graph_item['objects'][0]['id'] + id2 = graph_item['objects'][1]['id'] + rel_objs = graph['relationships'][rel][id1] + rel_objs.append(id2) + graph['relationships'][rel][id1] = rel_objs + + # update objects in graph + graph['objects'] = objects + return graph + + +def add_object_ids(scenes): + """Adds object ids field for input scenes. + + Args: + scenes: List of CLEVR scene graphs + + Returns: + scenes: Adds object_id field for the objects in the scene graph inplace + """ + + for scene_id, scene in enumerate(scenes['scenes']): + for obj_id, _ in enumerate(scene['objects']): + scenes['scenes'][scene_id]['objects'][obj_id]['id'] = obj_id + return scenes + + +def clean_object_attributes(scenes): + """Cleans attributes for objects, keeping only attributes and id. + + Args: + scenes: Scene graph to clean + + Returns: + scenes: Cleaned up scene graphs inplace + """ + + keys = ['shape', 'size', 'material', 'color', 'id'] + for scene_id, scene in enumerate(scenes['scenes']): + for obj_id, obj in enumerate(scene['objects']): + new_obj = {key: obj[key] for key in keys} + scenes['scenes'][scene_id]['objects'][obj_id] = new_obj + return scenes + + +def pretty_print_corefs(dialog, coref_groups): + """Prints coreferences for a dialog, higlighting different groups in colors. + + Args: + dialog: Generated dialogs to print + coref_groups: Coreference groups for dialogs + """ + + colorama.init() + # Mapping of group_id -> color_ids for (foreground, background) + color_map = {} + groups = coref_groups.get(0, []) + colored, color_map = pretty_print_coref_sentence(dialog['caption'], groups, + color_map) + print('\n\nC: %s' % colored) + for round_id, round_datum in enumerate(dialog['dialog']): + question = round_datum['question'] + groups = coref_groups.get(round_id + 1, []) + colored, color_map = pretty_print_coref_sentence(question, groups, + color_map) + print('%d: %s' % (round_id, colored)) + + +def pretty_print_coref_sentence(sentence, groups, color_map): + """Prints a sentence containing difference coreference groups. + + Args: + sentence: Text sentence + groups: List of coreference groups with spans + color_map: List of groups and associated color maps + + Returns: + sentence: Text sentence with colors inserted + color_map: Updated, if new groups in the current sentence + """ + + fore_colors = ['RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA'] + back_colors = ['BLACK', 'YELLOW', 'CYAN'] + insertions = [] + for group in groups: + group_id = group['group_id'] + if group_id in color_map: + forecolor_id, backcolor_id = color_map[group_id] + else: + num_groups = len(color_map) + forecolor_id = num_groups % len(fore_colors) + backcolor_id = num_groups // len(fore_colors) + color_map[group_id] = (forecolor_id, backcolor_id) + + forecolor = fore_colors[forecolor_id] + backcolor = back_colors[backcolor_id] + insertions.append( + (group['span'][0], getattr(colorama.Fore, forecolor))) + insertions.append( + (group['span'][0], getattr(colorama.Back, backcolor))) + insertions.append((group['span'][1], + getattr(colorama.Style, 'RESET_ALL'))) + + # Perform insertions. + sentence = insert_into_sentence(sentence, insertions) + return sentence, color_map + + +def insert_into_sentence(sentence, insertions): + """Sorts and performs insertions from right. 
+ + Args: + sentence: Sentence to perform insertions into + insertions: List of insertions, format: (position, text_insert) + + Returns: + sentence: Inplace inserted sentence + """ + + insertions = sorted(insertions, key=lambda x: x[0], reverse=True) + for position, text in insertions: + sentence = sentence[:position] + text + sentence[position:] + return sentence diff --git a/constraints.py b/constraints.py new file mode 100644 index 0000000..fbeca91 --- /dev/null +++ b/constraints.py @@ -0,0 +1,1049 @@ + +"""Supporting script checks constraints for caption and question generation. +Author: Satwik Kottur +""" + +import copy +import json +import random +import numpy as np + +import global_vars as gvars + + +# Some quick methods. +def apply_immediate(hist): return (len(hist['objects']) == 1 and + hist['mergeable'] and + 'exist' not in hist['template']) + + +def apply_group(hist): return (len(hist['objects']) >= 2 and + hist['mergeable'] and + 'count' not in prev_group) + + +def caption(scene, templates): + """Constraints for caption generation. + Args: + scene: CLEVR Scene graphs to generate captions with constraints + template: List of caption templates + Returns: + sample_captions: Samples from caption hypotheses + """ + + caption_hypotheses = {} + + # Sweep through all templates to extract 'interesting' captions. + n_objs = len(scene['objects']) + rels = scene['relationships'] + + # Caption Type 1: Extreme locations. + ext_loc_templates = [ii for ii in templates if ii['type'] == 'extreme-loc'] + # number of objects in the scene + filter_objs = copy.deepcopy(scene['objects']) + attr_counts = get_attribute_counts_for_objects(scene, filter_objs) + hypotheses = [] + for template in ext_loc_templates: + # absolute location based constraint + constraint = template['constraints'][0] + extreme_type = constraint['args'][0] + + # check if there is an object that is at the center of the image + # roughly in the middle along front-back and right-left dim + if extreme_type == 'center': + for ii, obj in enumerate(filter_objs): + bla = [len(rels[kk][ii]) <= n_objs / 2 + for kk in ['front', 'behind', 'right', 'left']] + matches = np.sum([len(rels[kk][ii]) <= n_objs / 2 + for kk in ['front', 'behind', 'right', 'left']]) + if matches == 4: + hypotheses.append((extreme_type, copy.deepcopy(obj))) + else: + for ii, obj in enumerate(filter_objs): + if len(rels[extreme_type][ii]) == 0: + hypotheses.append((extreme_type, copy.deepcopy(obj))) + + # sample one at random, and create the graph item + # Filter hypothesis which are ambiguous otherwise. + for index, (_, hypothesis) in enumerate(hypotheses): + uniq_attr = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, hypothesis[attr])] == 1] + + for attr in uniq_attr: + del hypotheses[index][1][attr] + + hypotheses = [ii for ii in hypotheses if len(ii[1]) > 1] + caption_hypotheses['extreme-loc'] = hypotheses + + # Caption Type 2: Unique object and attribute. + filter_objs = copy.deepcopy(scene['objects']) + # each hypothesis is (object, attribute) pair + hypotheses = [] + for ii, obj in enumerate(filter_objs): + # get unique set of attributes + uniq_attrs = [ii for ii in gvars.METAINFO['attributes'] + if attr_counts[(ii, obj[ii])] == 1] + # for each, add it to hypothesis + for attr in uniq_attrs: + hypotheses.append((obj, attr)) + caption_hypotheses['unique-obj'] = hypotheses + + # Caption Type 3: Unique attribute count based caption. + # count unique object based constraint + # Each hypothesis is object collection. 
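+  # A hypothesis here is ((attribute, value), count), e.g. (('color', 'red'), 3)
+  # when three objects in the scene share the same attribute value.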
+ caption_hypotheses['count-attr'] = [(attr_val, count) + for attr_val, count in attr_counts.items() + if count > 1] + + # Caption Type 4: Relation between two objects. + # Out of the two, one has a unique attribute. + # find a pair of objects sharing a relation, unique + filter_objs = copy.deepcopy(scene['objects']) + n_objs = len(filter_objs) + + # get a dict of unique attributes for each object + uniq_attr = [[] for ii in range(n_objs)] + non_uniq_attr = [[] for ii in range(n_objs)] + for ind, obj in enumerate(filter_objs): + uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, obj[attr])] == 1] + non_uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, obj[attr])] > 1] + uniqueness = [len(ii) > 0 for ii in uniq_attr] + + # Hypothesis is a uniq object and non-unique obj2 sharing relation R + # global ordering for uniqueness + hypotheses = [] + for rel, order in scene['relationships'].items(): + num_rel = [(ii, len(order[ii])) for ii in range(n_objs)] + num_rel = sorted(num_rel, key=lambda x: x[1], reverse=True) + # take only the ids + num_rel = [ii[0] for ii in num_rel] + + for index, obj_id in enumerate(num_rel[:-1]): + next_obj_id = num_rel[index + 1] + # if unique, check if the next one has non-unique attributes + if uniqueness[obj_id]: + if len(non_uniq_attr[next_obj_id]) > 0: + obj1 = (obj_id, random.choice(uniq_attr[obj_id])) + obj2 = (next_obj_id, random.choice(non_uniq_attr[next_obj_id])) + hypotheses.append((obj1, rel, obj2)) + # if not unique, check if the next one has unique attributes + else: + if len(uniq_attr[next_obj_id]) > 0: + obj1 = (obj_id, random.choice(non_uniq_attr[obj_id])) + obj2 = (next_obj_id, random.choice(uniq_attr[next_obj_id])) + hypotheses.append((obj1, rel, obj2)) + caption_hypotheses['obj-relation'] = hypotheses + sample_captions = sample_from_hypotheses( + caption_hypotheses, scene, templates) + return sample_captions + + +def question(scene, dialog, template): + """Constraints question generation. 
+ Inputs: + scene:Partial scene graphs on CLEVR images with generated captions + template: List of question templates to use + Output: + list of object groups + """ + + ques_round = len(dialog['graph']['history']) - 1 + graph = dialog['graph'] + + # check for constraints and answer question + if 'group' in template['label']: + groups = [] + # Pick a group hypothesis + for ii in graph['history']: + if 'count' in ii or len(ii['objects']) == 0: + groups.append(ii) + + if template['label'] == 'count-all': + # Preliminary checks: + # (A) count-all cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = ii['id'] + obj_group.append(obj_copy) + + # create graph item + graph_item = {'round': ques_round + 1, + 'objects': copy.deepcopy(obj_group), + 'template': template['label'], + 'mergeable': True, 'count': len(obj_group)} + # clean graph item + graph_item = clean_graph_item(graph_item) + # no constraints, count the number of objects in true scene + return [{'answer': len(obj_group), 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif (template['label'] == 'count-other' or + template['label'] == 'exist-other'): + # preliminary checks: + # (A) exist-other cannot follow exist-other, count-all, count-other + # (B) count-other cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + if (prev_history['template'] == 'exist-other' and + template['label'] == 'exist-other'): + return [] + + # get a list of all objects we know + known_ids = [jj['id'] for ii in graph['history'] for jj in ii['objects']] + known_ids = list(set(known_ids)) + n_objs = len(scene['objects']) + difference = n_objs - len(known_ids) + diff_ids = [ii for ii in range(n_objs) if ii not in known_ids] + + # create empty objects for these + obj_group = [{'id': ii} for ii in diff_ids] + + # create graph item + graph_item = {'round': ques_round + 1, 'objects': obj_group, + 'template': template['label'], 'mergeable': False} + + if 'count' in template['label']: + graph_item['count'] = difference + graph_item['mergeable'] = True # merge if count is known + answer = difference + elif 'exist' in template['label']: + # If heads (> 0.5) -- difference > 0 + if random.random() > 0.5: + if difference > 0: + answer = 'yes' + else: + return [] + else: + if difference == 0: + answer = 'no' + else: + return [] + + # no constraints, count the number of objects in true scene + return [{'answer': answer, 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif template['label'] == 'count-all-group': + # we need a group in the previous round + prev_group = graph['history'][-1] + prev_label = prev_group['template'] + if not (len(prev_group['objects']) > 1 and + 'count' not in prev_group and + 'obj-relation' not in prev_label): + return [] + + # check if count is not given before + attrs = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_group] + count = 0 + for obj in prev_group['objects']: + count += all([obj[ii] == prev_group['objects'][0][ii] for ii in attrs]) + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = 
copy.deepcopy(new_obj) + obj_copy['id'] = ii['id'] + obj_group.append(obj_copy) + + # create graph item + graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], + 'mergeable': True, 'count': count} + # clean graph item + graph_item = clean_graph_item(graph_item) + # no constraints, count the number of objects in true scene + return [{'answer': count, 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif ('count-obj-exclude' in template['label'] or + 'exist-obj-exclude' in template['label']): + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + scene_counts = get_attribute_counts_for_objects(scene) + + if 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + #scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = obj_ids[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get the known attributes for the current object + focus_obj = graph['objects'][focus_id] + known_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj and + '%s_exclude_count' % attr not in focus_obj] + + # for count: only if existence if True, else count it trivially zero + if 'count' in template['label']: + for attr in known_attrs[::-1]: + if not focus_obj.get('%s_exclude_exist' % attr, True): + known_attrs.remove(attr) + # for exist: get relations without exist before + elif 'exist' in template['label']: + known_attrs = [attr for attr in known_attrs + if '%s_exclude_exist' % attr not in focus_obj] + + # select an attribute + if len(known_attrs) == 0: + return[] + + # split this into zero and non-zero + if 'exist' in template['label']: + focus_attrs = [(ii, scene['objects'][focus_id][ii]) + for ii in known_attrs] + zero_count = [ii for ii in focus_attrs if scene_counts[ii] == 1] + nonzero_count = [ii for ii in focus_attrs if scene_counts[ii] > 1] + + if random.random() > 0.5: + if len(zero_count) > 0: + attr = random.choice(zero_count)[0] + else: + return [] + else: + if len(nonzero_count) > 0: + attr = random.choice(nonzero_count)[0] + else: + return [] + else: + attr = random.choice(known_attrs) + + # create the object group + obj_group = [] + new_obj = {'required': ['attribute'], 'optional': []} + for obj in scene['objects']: + # add if same attribute value and not focus object + if obj[attr] == focus_obj[attr] and obj['id'] != focus_id: + obj_copy = 
copy.deepcopy(new_obj) + obj_copy['id'] = obj['id'] + obj_copy[attr] = focus_obj[attr] + obj_group.append(obj_copy) + answer = len(obj_group) + + ref_obj = copy.deepcopy(new_obj) + ref_obj['id'] = focus_id + ref_obj['volatile'] = True + if 'exist' in template['label']: + answer = 'yes' if answer > 0 else 'no' + ref_obj['%s_exclude_exist' % attr] = answer + elif 'count' in template['label']: + ref_obj['%s_exclude_count' % attr] = answer + obj_group.append(ref_obj) + + graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + if 'count' in template['label']: + graph_item['count'] = answer + graph_item = clean_graph_item(graph_item) + + ref_obj['attribute'] = attr + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [ref_obj, obj_desc], 'graph': graph_item}] + + elif ('count-obj-rel' in template['label'] or + 'exist-obj-rel' in template['label']): + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + + # we need a single object in the previous round + if 'imm2' in template['label']: + # we need a obj-rel-imm in previous label, same as the current one + prev_label = prev_history['template'] + cur_label = template['label'] + if 'obj-rel-imm' not in prev_label or cur_label[:5] != prev_label[:5]: + return [] + else: + focus_id = prev_history['focus_id'] + + elif 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + if len(single_count) == 0: + return [] + focus_attr = random.choice(single_count) + for focus_id, obj in graph['objects'].items(): + if obj.get(focus_attr[0], None) == focus_attr[1]: + break + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get relations with unknown counts + unknown_rels = [rel for rel in gvars.METAINFO['relations'] + if '%s_count' % rel not in graph['objects'][focus_id]] + # for count: only if existence if True, else count it trivially zero + if 'count' in template['label']: + for ii in unknown_rels[::-1]: + if not graph['objects'][focus_id].get('%s_exist' % ii, True): + unknown_rels.remove(ii) + + # for exist: get relations without exist before + elif 'exist' in template['label']: + unknown_rels = [rel for rel in unknown_rels + if '%s_exist' % rel not in graph['objects'][focus_id]] + + # select an object with some known objects + if len(unknown_rels) == 0: + return [] + + # pick between yes/no for exist questions, 50% of times + if 'exist' in template['label']: + zero_count = [ii for ii in unknown_rels + if len(scene['relationships'][ii][focus_id]) == 0] + nonzero_count = [ii for ii in unknown_rels + if len(scene['relationships'][ii][focus_id]) > 0] + + if 
random.random() > 0.5: + if len(zero_count) > 0: + rel = random.choice(zero_count) + else: + return [] + else: + if len(nonzero_count) > 0: + rel = random.choice(nonzero_count) + else: + return [] + else: + rel = random.choice(unknown_rels) + + # create the object group + obj_group = [] + new_obj = {'required': ['relation'], 'optional': []} + obj_pool = scene['relationships'][rel][focus_id] + for obj_id in obj_pool: + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = obj_id + obj_group.append(obj_copy) + answer = len(obj_pool) + + ref_obj = copy.deepcopy(new_obj) + ref_obj['id'] = focus_id + ref_obj['volatile'] = True + if 'exist' in template['label']: + answer = 'yes' if answer > 0 else 'no' + ref_obj['%s_exist' % rel] = answer + elif 'count' in template['label']: + ref_obj['%s_count' % rel] = answer + obj_group.append(ref_obj) + + graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + if 'count' in template['label']: + graph_item['count'] = answer + graph_item = clean_graph_item(graph_item) + + #ref_obj['relation'] = rel + # add attribute as argument + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [], 'optional': [], 'relation': rel, + 'objects': [ref_obj, obj_desc], 'graph': graph_item}] + + elif ('count-attribute' in template['label'] or + 'exist-attribute' in template['label']): + if 'group' in template['label']: + # we need an immediate group in the previous round + prev_history = graph['history'][-1] + prev_label = prev_history['template'] + + # if exist: > 0 is good, else > 1 is needed + min_count = 0 if 'exist' in prev_label else 1 + if (len(prev_history['objects']) > min_count and + prev_history['mergeable'] and + 'obj-relation' not in prev_label): + obj_pool = graph['history'][-1]['objects'] + else: + return [] + else: + obj_pool = scene['objects'] + + # get counts for attributes, and sample evenly with 0 and other numbers + counts = get_attribute_counts_for_objects(scene, obj_pool) + + # if exist, choose between zero and others wiht 0.5 probability + zero_prob = 0.5 if 'exist' in template['label'] else 0.7 + if random.random() > zero_prob: + pool = [ii for ii in counts if counts[ii] == 0] + else: + pool = [ii for ii in counts if counts[ii] != 0] + + # check if count is already known + attr_pool = filter_attributes_with_known_counts(graph, pool) + + # for exist: get known attributes and remove them + if 'exist' in template['label']: + known_attr = get_known_attributes(graph) + attr_pool = [ii for ii in attr_pool if ii not in known_attr] + + # if non-empty, sample it + if len(attr_pool) == 0: + return [] + + attr, value = random.choice(attr_pool) + # add a hypothesi, and return the answer + count = 0 + obj_group = [] + new_obj = {attr: value, 'required': [attr], 'optional': []} + for index, obj in enumerate(obj_pool): + if scene['objects'][obj['id']][attr] == value: + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = obj['id'] + obj_group.append(obj_copy) + count += 1 + + graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, attr: value} + + if 'count' in template['label']: + graph_item['count'] = count + answer = count + elif 'exist' in template['label']: + answer = 'yes' if count > 0 else 'no' + # Clean graph item. + graph_item = clean_graph_item(graph_item) + if count == 0: + # Fake object group, to serve for arguments. 
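+      # No scene object matches (attr, value); keep a placeholder object so the
+      # question templates still receive the attribute/value as arguments.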
+ obj_group = [{attr: value, 'required': [attr], 'optional': []}] + + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [attr], 'optional': [], + 'count': 9999, 'objects': obj_group, 'graph': graph_item}] + + elif 'seek-attr-rel' in template['label']: + # Placeholder for object description, see below. + obj_desc = None + prev_history = graph['history'][-1] + + if 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = obj_ids[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # for each relation, get the object, sample an attribute, and sample + hypotheses = [] + for rel in gvars.METAINFO['relations']: + gt_relations = scene['relationships'][rel] + objs = [(ii, len(gt_relations[ii])) for ii in gt_relations[focus_id]] + objs = sorted(objs, key=lambda x: x[1], reverse=True) + if len(objs) == 0: + # add a null hypotheses + # check if the object is known to be extreme + if ('%s_count' % rel not in graph['objects'][focus_id] and + '%s_exist' % rel not in graph['objects'][focus_id]): + random_attr = random.choice(gvars.METAINFO['attributes']) + hypotheses.append((None, rel, random_attr)) + continue + + closest_obj = objs[0][0] + # check what attributes are known/unknown + known_info = graph['objects'].get(closest_obj, {}) + for attr in gvars.METAINFO['attributes']: + if attr not in known_info: + hypotheses.append((closest_obj, rel, attr)) + + if len(hypotheses) == 0: + return [] + sample_id, rel, attr = random.choice(hypotheses) + # add the new attribute to object + new_obj = {'required': ['attribute', 'relation'], + 'optional': [], 'id': sample_id} + + if sample_id is not None: + answer = scene['objects'][sample_id][attr] + else: + answer = 'none' + new_obj[attr] = answer + + graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)], + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + # remove objects if none + if sample_id is None: + graph_item['objects'] = [] + graph_item = clean_graph_item(graph_item) + + # Add attribute as argument. 
+ new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], 'relation': rel, + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + + elif 'seek-attr' in template['label']: + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + prev_label = prev_history['template'] + implicit_attr = None + + # we need a single object in the previous round + if 'imm2' in template['label']: + # we need a seek-attr-imm/seek-attr-rel-imm in previous label + if ('seek-attr-imm' not in prev_label and + 'seek-attr-rel-imm' not in prev_label): + return [] + elif len(prev_history['objects']) == 0: + return [] + else: + focus_id = prev_history['objects'][0]['id'] + + elif 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'sim' in template['label']: + if 'seek-attr-imm' not in prev_label: + return[] + else: + prev_obj = prev_history['objects'][0] + focus_id = prev_obj['id'] + attr = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_obj] + assert len(attr) == 1, 'Something wrong in previous history!' + implicit_attr = attr[0] + + if 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + # if there is an attribute, eliminate those options + if implicit_attr is not None: + single_count = [ii for ii in single_count if ii[0] != implicit_attr] + obj_ids = get_unique_attribute_objects(graph, single_count) + + # again rule out objects whose implicit_attr is known + single_count = [ii for ii in single_count + if implicit_attr not in graph['objects'][obj_ids[ii]]] + + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = get_unique_attribute_objects(graph, [focus_attr])[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get unknown attributes, randomly sample one + if implicit_attr is None: + unknown_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr not in graph['objects'][focus_id]] + + # TODO: select an object with some known objects + if len(unknown_attrs) == 0: + return [] + attr = random.choice(unknown_attrs) + else: + attr = implicit_attr + + # add the new attribute to object + new_obj = {'required': ['attribute'], 'optional': [], 'id': focus_id} + if 'sim' in template['label']: + new_obj['required'] = [] + new_obj[attr] = scene['objects'][focus_id][attr] + + graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)], + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 
'focus_desc': obj_desc} + graph_item = clean_graph_item(graph_item) + + # add attribute as argument + new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + return [] + + +def sample_from_hypotheses(caption_hypotheses, scene, cap_templates): + """Samples from caption hypotheses given the scene and caption templates. + Args: + caption_hypotheses: List of hypotheses for objects/object pairs + scene: CLEVR image scene graph + cap_templates: List of caption templates to sample captions + Returns: + obj_groups: List of object groups and corresponding sampled captions + """ + + obj_groups = [] + + # Caption Type 1: Extreme location. + hypotheses = caption_hypotheses['extreme-loc'] + if len(hypotheses) > 0: + # extreme location hypotheses + extreme_type, focus_obj = random.choice(hypotheses) + # sample optional attributes + obj_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj] + focus_attr = random.choice(obj_attrs) + optional_attrs = [ii for ii in obj_attrs if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['%s_count' % extreme_type] = 0 + graph_item['objects'][0]['%s_exist' % extreme_type] = False + graph_item['template'] = 'extreme-%s' % extreme_type + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 2: Unique object. + hypotheses = caption_hypotheses['unique-obj'] + if len(hypotheses) > 0: + # sample one at random, and create the graph item + focus_obj, focus_attr = random.choice(hypotheses) + # sample optional attributes + optional_attrs = [ii for ii in gvars.METAINFO['attributes'] + if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['unique'] = True + graph_item['template'] = 'unique-obj' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 3: Unique attribute count based caption. + hypotheses = caption_hypotheses['count-attr'] + if len(hypotheses) > 0: + # Randomly sample one hypothesis and one template. + (attr, value), count = random.choice(hypotheses) + # Segregate counting templates. 
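+    # Only 'count'-type caption templates are kept here; the 'no'-variant
+    # handled below mentions the group without stating its count.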
+ count_templates = [ii for ii in cap_templates if 'count' in ii['type']] + template = random.choice(count_templates) + obj_group = {'group_id': 0, 'count': count, attr: value, + 'optional': [], 'required': [], 'objects': []} + + # get a list of objects which are part of this collection + for ii, obj in enumerate(scene['objects']): + if obj[attr] == value: + new_obj = {'id': obj['id'], attr: value} + new_obj['required'] = [attr] + new_obj['optional'] = [] + obj_group['objects'].append(new_obj) + + if 'no' in template['label']: + # Count is not mentioned. + del obj_group['count'] + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = False + else: + # Count is mentioned. + for index, ii in enumerate(obj_group['objects']): + obj_group['objects'][index]['required'].append('count') + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = True + + # clean up graph item + graph_item['template'] = template['label'] + graph_item = clean_graph_item(graph_item) + obj_group['graph'] = graph_item + obj_group['use_plural'] = True + obj_groups.append([obj_group]) + + # Caption Type 4: Relation between two objects (one of them is unique). + hypotheses = caption_hypotheses['obj-relation'] + if len(hypotheses) > 0: + (obj_id1, attr1), rel, (obj_id2, attr2) = random.choice(hypotheses) + obj_group = {'group_id': 0, 'relation': rel} + + # create object dictionaries + obj1 = {'optional': [], 'required': [attr1], 'id': obj_id1, + attr1: scene['objects'][obj_id1][attr1]} + obj2 = {'optional': [], 'required': [attr2], 'id': obj_id2, + attr2: scene['objects'][obj_id2][attr2]} + obj_group['objects'] = [obj2, obj1] + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['template'] = 'obj-relation' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + return obj_groups + + +def get_known_attributes(graph): + """Fetches a list of known attributes given the scene graph. + Args: + graph: Scene graph to check unique attributes from + Returns: + known_attrs: List of known attributes from the scene graph + """ + + known_attrs = [] + for obj_id, obj_info in graph['objects'].items(): + # The attribute is unique already. + # if obj_info.get('unique', False): continue + for attr in gvars.METAINFO['attributes']: + if attr in obj_info: + known_attrs.append((attr, obj_info[attr])) + + # also go over the groups + for ii in graph['history']: + # a group of objects, with unknown count + #if 'count' not in ii: continue + for attr in gvars.METAINFO['attributes']: + if attr in ii: + known_attrs.append((attr, ii[attr])) + known_attrs = list(set(known_attrs)) + return known_attrs + + +def get_known_attribute_counts(graph): + """Fetches a count of known attributes given the scene graph. + Calls get_known_attributes method internally. + Args: + graph: Scene graph to check unique attributes from + Returns: + counts: Count of known attributes from the scene graph + """ + + known_attrs = get_known_attributes(graph) + # Go through objects and count. + counts = {ii: 0 for ii in known_attrs} + for _, obj in graph['objects'].items(): + for attr, val in known_attrs: + if obj.get(attr, None) == val: + counts[(attr, val)] += 1 + return counts + + +def filter_attributes_with_known_counts(graph, known_attrs): + """Filters attributes whose counts are known, given the scene graph. 
+ Args: + graph: Scene graph from the dialog generated so far + known_attrs: List of known attributes from the ground truth scene graph + Returns: + known_attrs: List of attributes with unknown counts removed inplace + """ + + for attr, val in known_attrs[::-1]: + for ii in graph['history']: + # A group of objects, with unknown count. + if 'count' not in ii: + continue + # Count is absent. + if ii.get(attr, None) == val: + known_attrs.remove((attr, val)) + return known_attrs + + +def clean_graph_item(graph_item): + """Cleans up graph item (remove 'required' and 'optional' tags). + Args: + graph_item: Input graph item to be cleaned. + Returns: + clean_graph_item: Copy of the graph item after cleaning. + """ + + clean_graph_item = copy.deepcopy(graph_item) + if 'optional' in clean_graph_item: + del clean_graph_item['optional'] + if 'required' in clean_graph_item: + del clean_graph_item['required'] + + for index, ii in enumerate(clean_graph_item['objects']): + if 'optional' in ii: + del clean_graph_item['objects'][index]['optional'] + if 'required' in ii: + del clean_graph_item['objects'][index]['required'] + return clean_graph_item + + +def get_attribute_counts_for_objects(scene, objects=None): + """Counts attributes for a given set of objects. + Args: + scene: Scene graph for the dialog generated so far + objects: List of objects. Default = None selects all objects + Returns: + counts: Counts for the attributes for attributes + """ + + # Initialize the dictionary. + counts = {} + for attr, vals in gvars.METAINFO['values'].items(): + for val in vals: + counts[(attr, val)] = 0 + + # Now count for each given object. + if objects is None: + objects = scene['objects'] + for obj in objects: + for attr in gvars.METAINFO['attributes']: + key = (attr, scene['objects'][obj['id']][attr]) + counts[key] = counts.get(key, 0) + 1 + return counts + + +def get_unique_attribute_objects(graph, uniq_attrs): + """Fetches objects from given scene graph with unique attributes. + Args: + graph: Scene graph constructed from the dialog generated so far + uniq_attrs: List of unique attributes to get attributes + Returns: + obj_ids: List of object ids with the unique attributes + """ + + obj_ids = {} + for obj_id, obj in graph['objects'].items(): + for attr, val in uniq_attrs: + if obj.get(attr, '') == val: + # At this point the key should not be present. + assert (attr, val) not in obj_ids, 'Attributes not unique!' + obj_ids[(attr, val)] = obj_id + return obj_ids + + +def sample_optional_tags(optional, sample_probs): + """Samples additional tags depending on given sample probabilities. + Args: + optional: List of optional tags to sample from. + sample_probs: Probabilities of sampling 'n' tags. 
+ Returns: + sampled: Sampled tags from the optional list + """ + + sampled = [] + if len(optional) > 0: + n_sample = np.random.choice([0, 1], 1, p=sample_probs[:2])[0] + n_sample = min(n_sample, len(optional)) + sampled = random.sample(optional, n_sample) + return sampled diff --git a/constraints_minecraft.py b/constraints_minecraft.py new file mode 100644 index 0000000..fd3c9b3 --- /dev/null +++ b/constraints_minecraft.py @@ -0,0 +1,1055 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +# -------------------------------------------------------- +# adapted from https://github.com/satwikkottur/clevr-dialog/blob/master/constraints.py +# -------------------------------------------------------- + +import copy +import json +import random +import numpy as np + +import global_vars as gvars + + +# Some quick methods. +def apply_immediate(hist): return (len(hist['objects']) == 1 and + hist['mergeable'] and + 'exist' not in hist['template']) + + +def apply_group(hist): return (len(hist['objects']) >= 2 and + hist['mergeable'] and + 'count' not in prev_group) + + +def caption(scene, templates): + """Constraints for caption generation. + Args: + scene: CLEVR Scene graphs to generate captions with constraints + template: List of caption templates + Returns: + sample_captions: Samples from caption hypotheses + """ + + caption_hypotheses = {} + + # Sweep through all templates to extract 'interesting' captions. + n_objs = len(scene['objects']) + rels = scene['relationships'] + + # Caption Type 1: Extreme locations. + ext_loc_templates = [ii for ii in templates if ii['type'] == 'extreme-loc'] + # number of objects in the scene + filter_objs = copy.deepcopy(scene['objects']) + attr_counts = get_attribute_counts_for_objects(scene, filter_objs) + hypotheses = [] + for template in ext_loc_templates: + # absolute location based constraint + constraint = template['constraints'][0] + extreme_type = constraint['args'][0] + + # check if there is an object that is at the center of the image + # roughly in the middle along front-back and right-left dim + if extreme_type == 'center': + for ii, obj in enumerate(filter_objs): + bla = [len(rels[kk][ii]) <= n_objs / 2 + for kk in ['front', 'behind', 'right', 'left']] + matches = np.sum([len(rels[kk][ii]) <= n_objs / 2 + for kk in ['front', 'behind', 'right', 'left']]) + if matches == 4: + hypotheses.append((extreme_type, copy.deepcopy(obj))) + else: + for ii, obj in enumerate(filter_objs): + if len(rels[extreme_type][ii]) == 0: + hypotheses.append((extreme_type, copy.deepcopy(obj))) + + # sample one at random, and create the graph item + # Filter hypothesis which are ambiguous otherwise. + for index, (_, hypothesis) in enumerate(hypotheses): + uniq_attr = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, hypothesis[attr])] == 1] + + for attr in uniq_attr: + del hypotheses[index][1][attr] + + hypotheses = [ii for ii in hypotheses if len(ii[1]) > 1] + caption_hypotheses['extreme-loc'] = hypotheses + + # Caption Type 2: Unique object and attribute. 
+ filter_objs = copy.deepcopy(scene['objects']) + # each hypothesis is (object, attribute) pair + hypotheses = [] + for ii, obj in enumerate(filter_objs): + # get unique set of attributes + uniq_attrs = [ii for ii in gvars.METAINFO['attributes'] + if attr_counts[(ii, obj[ii])] == 1] + # for each, add it to hypothesis + for attr in uniq_attrs: + hypotheses.append((obj, attr)) + caption_hypotheses['unique-obj'] = hypotheses + + # Caption Type 3: Unique attribute count based caption. + # count unique object based constraint + # Each hypothesis is object collection. + caption_hypotheses['count-attr'] = [(attr_val, count) + for attr_val, count in attr_counts.items() + if count > 1] + + # Caption Type 4: Relation between two objects. + # Out of the two, one has a unique attribute. + # find a pair of objects sharing a relation, unique + filter_objs = copy.deepcopy(scene['objects']) + n_objs = len(filter_objs) + + # get a dict of unique attributes for each object + uniq_attr = [[] for ii in range(n_objs)] + non_uniq_attr = [[] for ii in range(n_objs)] + for ind, obj in enumerate(filter_objs): + uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, obj[attr])] == 1] + non_uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + if attr_counts[(attr, obj[attr])] > 1] + uniqueness = [len(ii) > 0 for ii in uniq_attr] + + # Hypothesis is a uniq object and non-unique obj2 sharing relation R + # global ordering for uniqueness + hypotheses = [] + for rel, order in scene['relationships'].items(): + num_rel = [(ii, len(order[ii])) for ii in range(n_objs)] + num_rel = sorted(num_rel, key=lambda x: x[1], reverse=True) + # take only the ids + num_rel = [ii[0] for ii in num_rel] + + for index, obj_id in enumerate(num_rel[:-1]): + next_obj_id = num_rel[index + 1] + # if unique, check if the next one has non-unique attributes + if uniqueness[obj_id]: + if len(non_uniq_attr[next_obj_id]) > 0: + obj1 = (obj_id, random.choice(uniq_attr[obj_id])) + obj2 = (next_obj_id, random.choice(non_uniq_attr[next_obj_id])) + hypotheses.append((obj1, rel, obj2)) + # if not unique, check if the next one has unique attributes + else: + if len(uniq_attr[next_obj_id]) > 0: + obj1 = (obj_id, random.choice(non_uniq_attr[obj_id])) + obj2 = (next_obj_id, random.choice(uniq_attr[next_obj_id])) + hypotheses.append((obj1, rel, obj2)) + caption_hypotheses['obj-relation'] = hypotheses + sample_captions = sample_from_hypotheses( + caption_hypotheses, scene, templates) + return sample_captions + + +def question(scene, dialog, template): + """Constraints question generation. 
+ Inputs: + scene:Partial scene graphs on CLEVR images with generated captions + template: List of question templates to use + Output: + list of object groups + """ + + ques_round = len(dialog['graph']['history']) - 1 + graph = dialog['graph'] + + # check for constraints and answer question + if 'group' in template['label']: + groups = [] + # Pick a group hypothesis + for ii in graph['history']: + if 'count' in ii or len(ii['objects']) == 0: + groups.append(ii) + + if template['label'] == 'count-all': + # Preliminary checks: + # (A) count-all cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = ii['id'] + obj_group.append(obj_copy) + + # create graph item + graph_item = {'round': ques_round + 1, + 'objects': copy.deepcopy(obj_group), + 'template': template['label'], + 'mergeable': True, 'count': len(obj_group)} + # clean graph item + graph_item = clean_graph_item(graph_item) + # no constraints, count the number of objects in true scene + return [{'answer': len(obj_group), 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif (template['label'] == 'count-other' or + template['label'] == 'exist-other'): + # preliminary checks: + # (A) exist-other cannot follow exist-other, count-all, count-other + # (B) count-other cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + if (prev_history['template'] == 'exist-other' and + template['label'] == 'exist-other'): + return [] + + # get a list of all objects we know + known_ids = [jj['id'] for ii in graph['history'] for jj in ii['objects']] + known_ids = list(set(known_ids)) + n_objs = len(scene['objects']) + difference = n_objs - len(known_ids) + diff_ids = [ii for ii in range(n_objs) if ii not in known_ids] + + # create empty objects for these + obj_group = [{'id': ii} for ii in diff_ids] + + # create graph item + graph_item = {'round': ques_round + 1, 'objects': obj_group, + 'template': template['label'], 'mergeable': False} + + if 'count' in template['label']: + graph_item['count'] = difference + graph_item['mergeable'] = True # merge if count is known + answer = difference + elif 'exist' in template['label']: + # If heads (> 0.5) -- difference > 0 + if random.random() > 0.5: + if difference > 0: + answer = 'yes' + else: + return [] + else: + if difference == 0: + answer = 'no' + else: + return [] + + # no constraints, count the number of objects in true scene + return [{'answer': answer, 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif template['label'] == 'count-all-group': + # we need a group in the previous round + prev_group = graph['history'][-1] + prev_label = prev_group['template'] + if not (len(prev_group['objects']) > 1 and + 'count' not in prev_group and + 'obj-relation' not in prev_label): + return [] + + # check if count is not given before + attrs = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_group] + count = 0 + for obj in prev_group['objects']: + count += all([obj[ii] == prev_group['objects'][0][ii] for ii in attrs]) + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = 
+ obj_copy['id'] = ii['id']
+ obj_group.append(obj_copy)
+
+ # create graph item
+ graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group),
+ 'template': template['label'],
+ 'mergeable': True, 'count': count}
+ # clean graph item
+ graph_item = clean_graph_item(graph_item)
+ # no constraints, count the number of objects in true scene
+ return [{'answer': count, 'group_id': ques_round + 1,
+ 'objects': [], 'graph': graph_item}]
+
+ elif ('count-obj-exclude' in template['label'] or
+ 'exist-obj-exclude' in template['label']):
+ # placeholder for object description, see below
+ obj_desc = None
+ prev_history = graph['history'][-1]
+ scene_counts = get_attribute_counts_for_objects(scene)
+
+ if 'imm' in template['label']:
+ # we need an immediate group in the previous round
+ if apply_immediate(prev_history):
+ focus_id = prev_history['objects'][0]['id']
+ else:
+ return []
+
+ elif 'early' in template['label']:
+ # search through history for an object with unique attribute
+ attr_counts = get_known_attribute_counts(graph)
+ # get attributes with just one count
+ single_count = [ii for ii, count in attr_counts.items() if count == 1]
+ # remove attributes that point to objects in the previous round
+ # TODO: re-think this again
+ obj_ids = get_unique_attribute_objects(graph, single_count)
+ prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+ single_count = [ii for ii in single_count if
+ obj_ids[ii] not in prev_history_obj_ids]
+
+ if len(single_count) == 0:
+ return []
+
+ # give preference to attributes with multiple counts in scene graph
+ ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1]
+ if len(ambiguous_attrs) > 0:
+ focus_attr = random.choice(ambiguous_attrs)
+ else:
+ focus_attr = random.choice(single_count)
+ focus_id = obj_ids[focus_attr]
+
+ # unique object description
+ obj_desc = {'required': [focus_attr[0]], 'optional': [],
+ focus_attr[0]: focus_attr[1]}
+
+ # get the known attributes for the current object
+ focus_obj = graph['objects'][focus_id]
+ known_attrs = [attr for attr in gvars.METAINFO['attributes']
+ if attr in focus_obj and
+ '%s_exclude_count' % attr not in focus_obj]
+
+ # for count: only if existence is True, else the count is trivially zero
+ if 'count' in template['label']:
+ for attr in known_attrs[::-1]:
+ if not focus_obj.get('%s_exclude_exist' % attr, True):
+ known_attrs.remove(attr)
+ # for exist: get attributes without exist asked before
+ elif 'exist' in template['label']:
+ known_attrs = [attr for attr in known_attrs
+ if '%s_exclude_exist' % attr not in focus_obj]
+
+ # select an attribute
+ if len(known_attrs) == 0:
+ return []
+
+ # split this into zero and non-zero
+ if 'exist' in template['label']:
+ focus_attrs = [(ii, scene['objects'][focus_id][ii])
+ for ii in known_attrs]
+ zero_count = [ii for ii in focus_attrs if scene_counts[ii] == 1]
+ nonzero_count = [ii for ii in focus_attrs if scene_counts[ii] > 1]
+
+ if random.random() > 0.5:
+ if len(zero_count) > 0:
+ attr = random.choice(zero_count)[0]
+ else:
+ return []
+ else:
+ if len(nonzero_count) > 0:
+ attr = random.choice(nonzero_count)[0]
+ else:
+ return []
+ else:
+ attr = random.choice(known_attrs)
+
+ # create the object group
+ obj_group = []
+ new_obj = {'required': ['attribute'], 'optional': []}
+ for obj in scene['objects']:
+ # add if same attribute value and not focus object
+ if obj[attr] == focus_obj[attr] and obj['id'] != focus_id:
+ obj_copy = copy.deepcopy(new_obj)
+ obj_copy['id'] = obj['id']
+ obj_copy[attr] = focus_obj[attr]
+ obj_group.append(obj_copy)
+ answer = len(obj_group)
+
+ ref_obj = copy.deepcopy(new_obj)
+ ref_obj['id'] = focus_id
+ ref_obj['volatile'] = True
+ if 'exist' in template['label']:
+ answer = 'yes' if answer > 0 else 'no'
+ ref_obj['%s_exclude_exist' % attr] = answer
+ elif 'count' in template['label']:
+ ref_obj['%s_exclude_count' % attr] = answer
+ obj_group.append(ref_obj)
+
+ graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group),
+ 'template': template['label'], 'mergeable': True,
+ 'focus_id': focus_id, 'focus_desc': obj_desc}
+ if 'count' in template['label']:
+ graph_item['count'] = answer
+ graph_item = clean_graph_item(graph_item)
+
+ ref_obj['attribute'] = attr
+ return [{'answer': answer, 'group_id': ques_round + 1,
+ 'required': [], 'optional': [],
+ 'objects': [ref_obj, obj_desc], 'graph': graph_item}]
+
+ elif ('count-obj-rel' in template['label'] or
+ 'exist-obj-rel' in template['label']):
+ # placeholder for object description, see below
+ obj_desc = None
+ prev_history = graph['history'][-1]
+
+ # we need a single object in the previous round
+ if 'imm2' in template['label']:
+ # we need an obj-rel-imm in the previous label, same as the current one
+ prev_label = prev_history['template']
+ cur_label = template['label']
+ if 'obj-rel-imm' not in prev_label or cur_label[:5] != prev_label[:5]:
+ return []
+ else:
+ focus_id = prev_history['focus_id']
+
+ elif 'imm' in template['label']:
+ # we need an immediate group in the previous round
+ if apply_immediate(prev_history):
+ focus_id = prev_history['objects'][0]['id']
+ else:
+ return []
+
+ elif 'early' in template['label']:
+ # search through history for an object with unique attribute
+ attr_counts = get_known_attribute_counts(graph)
+
+ # get attributes with just one count
+ single_count = [ii for ii, count in attr_counts.items() if count == 1]
+ # remove attributes that point to objects in the previous round
+ # TODO: re-think this again
+ obj_ids = get_unique_attribute_objects(graph, single_count)
+ prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+ single_count = [ii for ii in single_count if
+ obj_ids[ii] not in prev_history_obj_ids]
+
+ if len(single_count) == 0:
+ return []
+ focus_attr = random.choice(single_count)
+ for focus_id, obj in graph['objects'].items():
+ if obj.get(focus_attr[0], None) == focus_attr[1]:
+ break
+
+ # unique object description
+ obj_desc = {'required': [focus_attr[0]], 'optional': [],
+ focus_attr[0]: focus_attr[1]}
+
+ # get relations with unknown counts
+ unknown_rels = [rel for rel in gvars.METAINFO['relations']
+ if '%s_count' % rel not in graph['objects'][focus_id]]
+ # for count: only if existence is True, else the count is trivially zero
+ if 'count' in template['label']:
+ for ii in unknown_rels[::-1]:
+ if not graph['objects'][focus_id].get('%s_exist' % ii, True):
+ unknown_rels.remove(ii)
+
+ # for exist: get relations without exist asked before
+ elif 'exist' in template['label']:
+ unknown_rels = [rel for rel in unknown_rels
+ if '%s_exist' % rel not in graph['objects'][focus_id]]
+
+ # bail out if all relations are already known
+ if len(unknown_rels) == 0:
+ return []
+
+ # pick between yes/no for exist questions, 50% of the time
+ if 'exist' in template['label']:
+ zero_count = [ii for ii in unknown_rels
+ if len(scene['relationships'][ii][focus_id]) == 0]
+ nonzero_count = [ii for ii in unknown_rels
+ if len(scene['relationships'][ii][focus_id]) > 0]
+
+ if random.random() > 0.5:
+ if len(zero_count) > 0:
+ rel = random.choice(zero_count)
+ else:
+ return []
+ else:
+ if len(nonzero_count) > 0:
+ rel = random.choice(nonzero_count)
+ else:
+ return []
+ else:
+ rel = random.choice(unknown_rels)
+
+ # create the object group
+ obj_group = []
+ new_obj = {'required': ['relation'], 'optional': []}
+ obj_pool = scene['relationships'][rel][focus_id]
+ for obj_id in obj_pool:
+ obj_copy = copy.deepcopy(new_obj)
+ obj_copy['id'] = obj_id
+ obj_group.append(obj_copy)
+ answer = len(obj_pool)
+
+ ref_obj = copy.deepcopy(new_obj)
+ ref_obj['id'] = focus_id
+ ref_obj['volatile'] = True
+ if 'exist' in template['label']:
+ answer = 'yes' if answer > 0 else 'no'
+ ref_obj['%s_exist' % rel] = answer
+ elif 'count' in template['label']:
+ ref_obj['%s_count' % rel] = answer
+ obj_group.append(ref_obj)
+
+ graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group),
+ 'template': template['label'], 'mergeable': True,
+ 'focus_id': focus_id, 'focus_desc': obj_desc}
+ if 'count' in template['label']:
+ graph_item['count'] = answer
+ graph_item = clean_graph_item(graph_item)
+
+ #ref_obj['relation'] = rel
+ # add the relation as an argument
+ return [{'answer': answer, 'group_id': ques_round + 1,
+ 'required': [], 'optional': [], 'relation': rel,
+ 'objects': [ref_obj, obj_desc], 'graph': graph_item}]
+
+ elif ('count-attribute' in template['label'] or
+ 'exist-attribute' in template['label']):
+ if 'group' in template['label']:
+ # we need an immediate group in the previous round
+ prev_history = graph['history'][-1]
+ prev_label = prev_history['template']
+
+ # if exist: > 0 is good, else > 1 is needed
+ min_count = 0 if 'exist' in prev_label else 1
+ if (len(prev_history['objects']) > min_count and
+ prev_history['mergeable'] and
+ 'obj-relation' not in prev_label):
+ obj_pool = graph['history'][-1]['objects']
+ else:
+ return []
+ else:
+ obj_pool = scene['objects']
+
+ # get counts for attributes, and sample evenly between zero and non-zero
+ counts = get_attribute_counts_for_objects(scene, obj_pool)
+
+ # if exist, choose between zero and others with 0.5 probability
+ zero_prob = 0.5 if 'exist' in template['label'] else 0.7
+ if random.random() > zero_prob:
+ pool = [ii for ii in counts if counts[ii] == 0]
+ else:
+ pool = [ii for ii in counts if counts[ii] != 0]
+
+ # check if count is already known
+ attr_pool = filter_attributes_with_known_counts(graph, pool)
+
+ # for exist: get known attributes and remove them
+ if 'exist' in template['label']:
+ known_attr = get_known_attributes(graph)
+ attr_pool = [ii for ii in attr_pool if ii not in known_attr]
+
+ # if non-empty, sample it
+ if len(attr_pool) == 0:
+ return []
+
+ attr, value = random.choice(attr_pool)
+ # add a hypothesis, and return the answer
+ count = 0
+ obj_group = []
+ new_obj = {attr: value, 'required': [attr], 'optional': []}
+ for index, obj in enumerate(obj_pool):
+ if scene['objects'][obj['id']][attr] == value:
+ obj_copy = copy.deepcopy(new_obj)
+ obj_copy['id'] = obj['id']
+ obj_group.append(obj_copy)
+ count += 1
+
+ graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group),
+ 'template': template['label'], 'mergeable': True, attr: value}
+
+ if 'count' in template['label']:
+ graph_item['count'] = count
+ answer = count
+ elif 'exist' in template['label']:
+ answer = 'yes' if count > 0 else 'no'
+ # Clean graph item.
+ graph_item = clean_graph_item(graph_item)
+ if count == 0:
+ # Fake object group, to serve for arguments.
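+ # e.g. for (attr, value) = ('color', 'red') with no red object in the
+ # pool, the stub below is {'color': 'red', 'required': ['color'],
+ # 'optional': []}: it has no 'id', but still supplies the arguments
+ # needed to realize the question text.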
+ obj_group = [{attr: value, 'required': [attr], 'optional': []}]
+
+ return [{'answer': answer, 'group_id': ques_round + 1,
+ 'required': [attr], 'optional': [],
+ 'count': 9999, 'objects': obj_group, 'graph': graph_item}]
+
+ elif 'seek-attr-rel' in template['label']:
+ # Placeholder for object description, see below.
+ obj_desc = None
+ prev_history = graph['history'][-1]
+
+ if 'imm' in template['label']:
+ # we need an immediate group in the previous round
+ if apply_immediate(prev_history):
+ focus_id = prev_history['objects'][0]['id']
+ else:
+ return []
+
+ elif 'early' in template['label']:
+ # search through history for an object with unique attribute
+ attr_counts = get_known_attribute_counts(graph)
+
+ # get attributes with just one count
+ single_count = [ii for ii, count in attr_counts.items() if count == 1]
+ # remove attributes that point to objects in the previous round
+ # TODO: re-think this again
+ obj_ids = get_unique_attribute_objects(graph, single_count)
+ prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+ single_count = [ii for ii in single_count if
+ obj_ids[ii] not in prev_history_obj_ids]
+ if len(single_count) == 0:
+ return []
+
+ # give preference to attributes with multiple counts in scene graph
+ scene_counts = get_attribute_counts_for_objects(scene)
+ ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1]
+ if len(ambiguous_attrs) > 0:
+ focus_attr = random.choice(ambiguous_attrs)
+ else:
+ focus_attr = random.choice(single_count)
+ focus_id = obj_ids[focus_attr]
+
+ # unique object description
+ obj_desc = {'required': [focus_attr[0]], 'optional': [],
+ focus_attr[0]: focus_attr[1]}
+
+ # for each relation, take the closest object and sample an unknown attribute
+ hypotheses = []
+ for rel in gvars.METAINFO['relations']:
+ gt_relations = scene['relationships'][rel]
+ objs = [(ii, len(gt_relations[ii])) for ii in gt_relations[focus_id]]
+ objs = sorted(objs, key=lambda x: x[1], reverse=True)
+ if len(objs) == 0:
+ # add a null hypothesis
+ # check if the object is known to be extreme
+ if ('%s_count' % rel not in graph['objects'][focus_id] and
+ '%s_exist' % rel not in graph['objects'][focus_id]):
+ random_attr = random.choice(gvars.METAINFO['attributes'])
+ hypotheses.append((None, rel, random_attr))
+ continue
+
+ closest_obj = objs[0][0]
+ # check what attributes are known/unknown
+ known_info = graph['objects'].get(closest_obj, {})
+ for attr in gvars.METAINFO['attributes']:
+ if attr not in known_info:
+ hypotheses.append((closest_obj, rel, attr))
+
+ if len(hypotheses) == 0:
+ return []
+ sample_id, rel, attr = random.choice(hypotheses)
+ # add the new attribute to object
+ new_obj = {'required': ['attribute', 'relation'],
+ 'optional': [], 'id': sample_id}
+
+ if sample_id is not None:
+ answer = scene['objects'][sample_id][attr]
+ else:
+ answer = 'none'
+ new_obj[attr] = answer
+
+ graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)],
+ 'template': template['label'], 'mergeable': True,
+ 'focus_id': focus_id, 'focus_desc': obj_desc}
+ # remove objects if none
+ if sample_id is None:
+ graph_item['objects'] = []
+ graph_item = clean_graph_item(graph_item)
+
+ # Add attribute as argument.
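+ # The returned group carries the sampled relation ('relation' key) and
+ # the attribute set below as arguments; obj_desc stays None for the
+ # 'imm' variants, whose referent comes from the immediately preceding round.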
+ new_obj['attribute'] = attr
+ return [{'answer': new_obj[attr], 'group_id': ques_round + 1,
+ 'required': [], 'optional': [], 'relation': rel,
+ 'objects': [new_obj, obj_desc], 'graph': graph_item}]
+
+ elif 'seek-attr' in template['label']:
+ # placeholder for object description, see below
+ obj_desc = None
+ prev_history = graph['history'][-1]
+ prev_label = prev_history['template']
+ implicit_attr = None
+
+ # we need a single object in the previous round
+ if 'imm2' in template['label']:
+ # we need a seek-attr-imm/seek-attr-rel-imm in previous label
+ if ('seek-attr-imm' not in prev_label and
+ 'seek-attr-rel-imm' not in prev_label):
+ return []
+ elif len(prev_history['objects']) == 0:
+ return []
+ else:
+ focus_id = prev_history['objects'][0]['id']
+
+ elif 'imm' in template['label']:
+ # we need an immediate group in the previous round
+ if apply_immediate(prev_history):
+ focus_id = prev_history['objects'][0]['id']
+ else:
+ return []
+
+ elif 'sim' in template['label']:
+ if 'seek-attr-imm' not in prev_label:
+ return []
+ else:
+ prev_obj = prev_history['objects'][0]
+ focus_id = prev_obj['id']
+ attr = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_obj]
+ assert len(attr) == 1, 'Something wrong in previous history!'
+ implicit_attr = attr[0]
+
+ if 'early' in template['label']:
+ # search through history for an object with unique attribute
+ attr_counts = get_known_attribute_counts(graph)
+
+ # get attributes with just one count
+ single_count = [ii for ii, count in attr_counts.items() if count == 1]
+ # remove attributes that point to objects in the previous round
+ # TODO: re-think this again
+ obj_ids = get_unique_attribute_objects(graph, single_count)
+ prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+ single_count = [ii for ii in single_count if
+ obj_ids[ii] not in prev_history_obj_ids]
+
+ # if there is an implicit attribute, eliminate those options
+ if implicit_attr is not None:
+ single_count = [ii for ii in single_count if ii[0] != implicit_attr]
+ obj_ids = get_unique_attribute_objects(graph, single_count)
+
+ # again rule out objects whose implicit_attr is known
+ single_count = [ii for ii in single_count
+ if implicit_attr not in graph['objects'][obj_ids[ii]]]
+
+ if len(single_count) == 0:
+ return []
+
+ # give preference to attributes with multiple counts in scene graph
+ scene_counts = get_attribute_counts_for_objects(scene)
+ ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1]
+ if len(ambiguous_attrs) > 0:
+ focus_attr = random.choice(ambiguous_attrs)
+ else:
+ focus_attr = random.choice(single_count)
+ focus_id = get_unique_attribute_objects(graph, [focus_attr])[focus_attr]
+
+ # unique object description
+ obj_desc = {'required': [focus_attr[0]], 'optional': [],
+ focus_attr[0]: focus_attr[1]}
+
+ # get unknown attributes, randomly sample one
+ if implicit_attr is None:
+ unknown_attrs = [attr for attr in gvars.METAINFO['attributes']
+ if attr not in graph['objects'][focus_id]]
+
+ # TODO: select an object with some known objects
+ if len(unknown_attrs) == 0:
+ return []
+ attr = random.choice(unknown_attrs)
+ else:
+ attr = implicit_attr
+
+ # add the new attribute to object
+ new_obj = {'required': ['attribute'], 'optional': [], 'id': focus_id}
+ if 'sim' in template['label']:
+ new_obj['required'] = []
+ new_obj[attr] = scene['objects'][focus_id][attr]
+
+ graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)],
+ 'template': template['label'], 'mergeable': True,
+ 'focus_id': focus_id,
'focus_desc': obj_desc} + graph_item = clean_graph_item(graph_item) + + # add attribute as argument + new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + return [] + + +def sample_from_hypotheses(caption_hypotheses, scene, cap_templates): + """Samples from caption hypotheses given the scene and caption templates. + Args: + caption_hypotheses: List of hypotheses for objects/object pairs + scene: CLEVR image scene graph + cap_templates: List of caption templates to sample captions + Returns: + obj_groups: List of object groups and corresponding sampled captions + """ + + obj_groups = [] + + # Caption Type 1: Extreme location. + hypotheses = caption_hypotheses['extreme-loc'] + if len(hypotheses) > 0: + # extreme location hypotheses + extreme_type, focus_obj = random.choice(hypotheses) + # sample optional attributes + obj_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj] + focus_attr = random.choice(obj_attrs) + optional_attrs = [ii for ii in obj_attrs if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['%s_count' % extreme_type] = 0 + graph_item['objects'][0]['%s_exist' % extreme_type] = False + graph_item['template'] = 'extreme-%s' % extreme_type + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 2: Unique object. + hypotheses = caption_hypotheses['unique-obj'] + if len(hypotheses) > 0: + # sample one at random, and create the graph item + focus_obj, focus_attr = random.choice(hypotheses) + # sample optional attributes + optional_attrs = [ii for ii in gvars.METAINFO['attributes'] + if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['unique'] = True + graph_item['template'] = 'unique-obj' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 3: Unique attribute count based caption. + hypotheses = caption_hypotheses['count-attr'] + if len(hypotheses) > 0: + # Randomly sample one hypothesis and one template. + (attr, value), count = random.choice(hypotheses) + # Segregate counting templates. 
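+ # (templates whose label contains 'no' leave the count unmentioned;
+ # the branch below then drops 'count' and marks the group unmergeable)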
+ count_templates = [ii for ii in cap_templates if 'count' in ii['type']] + template = random.choice(count_templates) + obj_group = {'group_id': 0, 'count': count, attr: value, + 'optional': [], 'required': [], 'objects': []} + + # get a list of objects which are part of this collection + for ii, obj in enumerate(scene['objects']): + if obj[attr] == value: + new_obj = {'id': obj['id'], attr: value} + new_obj['required'] = [attr] + new_obj['optional'] = [] + obj_group['objects'].append(new_obj) + + if 'no' in template['label']: + # Count is not mentioned. + del obj_group['count'] + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = False + else: + # Count is mentioned. + for index, ii in enumerate(obj_group['objects']): + obj_group['objects'][index]['required'].append('count') + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = True + + # clean up graph item + graph_item['template'] = template['label'] + graph_item = clean_graph_item(graph_item) + obj_group['graph'] = graph_item + obj_group['use_plural'] = True + obj_groups.append([obj_group]) + + # Caption Type 4: Relation between two objects (one of them is unique). + hypotheses = caption_hypotheses['obj-relation'] + if len(hypotheses) > 0: + (obj_id1, attr1), rel, (obj_id2, attr2) = random.choice(hypotheses) + obj_group = {'group_id': 0, 'relation': rel} + + # create object dictionaries + obj1 = {'optional': [], 'required': [attr1], 'id': obj_id1, + attr1: scene['objects'][obj_id1][attr1]} + obj2 = {'optional': [], 'required': [attr2], 'id': obj_id2, + attr2: scene['objects'][obj_id2][attr2]} + obj_group['objects'] = [obj2, obj1] + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['template'] = 'obj-relation' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + return obj_groups + + +def get_known_attributes(graph): + """Fetches a list of known attributes given the scene graph. + Args: + graph: Scene graph to check unique attributes from + Returns: + known_attrs: List of known attributes from the scene graph + """ + + known_attrs = [] + for obj_id, obj_info in graph['objects'].items(): + # The attribute is unique already. + # if obj_info.get('unique', False): continue + for attr in gvars.METAINFO['attributes']: + if attr in obj_info: + known_attrs.append((attr, obj_info[attr])) + + # also go over the groups + for ii in graph['history']: + # a group of objects, with unknown count + #if 'count' not in ii: continue + for attr in gvars.METAINFO['attributes']: + if attr in ii: + known_attrs.append((attr, ii[attr])) + known_attrs = list(set(known_attrs)) + return known_attrs + + +def get_known_attribute_counts(graph): + """Fetches a count of known attributes given the scene graph. + Calls get_known_attributes method internally. + Args: + graph: Scene graph to check unique attributes from + Returns: + counts: Count of known attributes from the scene graph + """ + + known_attrs = get_known_attributes(graph) + # Go through objects and count. + counts = {ii: 0 for ii in known_attrs} + for _, obj in graph['objects'].items(): + for attr, val in known_attrs: + if obj.get(attr, None) == val: + counts[(attr, val)] += 1 + return counts + + +def filter_attributes_with_known_counts(graph, known_attrs): + """Filters attributes whose counts are known, given the scene graph. 
+ Args:
+ graph: Scene graph from the dialog generated so far
+ known_attrs: Candidate list of (attribute, value) pairs from the
+ ground truth scene graph
+ Returns:
+ known_attrs: Input list with pairs whose counts are already known
+ removed in place
+ """
+
+ for attr, val in known_attrs[::-1]:
+ for ii in graph['history']:
+ # Only groups with a known count matter.
+ if 'count' not in ii:
+ continue
+ # Count for this attribute value is already known.
+ if ii.get(attr, None) == val:
+ known_attrs.remove((attr, val))
+ return known_attrs
+
+
+def clean_graph_item(graph_item):
+ """Cleans up graph item (remove 'required' and 'optional' tags).
+ Args:
+ graph_item: Input graph item to be cleaned.
+ Returns:
+ clean_graph_item: Copy of the graph item after cleaning.
+ """
+
+ clean_graph_item = copy.deepcopy(graph_item)
+ if 'optional' in clean_graph_item:
+ del clean_graph_item['optional']
+ if 'required' in clean_graph_item:
+ del clean_graph_item['required']
+
+ for index, ii in enumerate(clean_graph_item['objects']):
+ if 'optional' in ii:
+ del clean_graph_item['objects'][index]['optional']
+ if 'required' in ii:
+ del clean_graph_item['objects'][index]['required']
+ return clean_graph_item
+
+
+def get_attribute_counts_for_objects(scene, objects=None):
+ """Counts attributes for a given set of objects.
+ Args:
+ scene: CLEVR image scene graph
+ objects: List of objects. Default = None selects all objects
+ Returns:
+ counts: Dictionary mapping each (attribute, value) pair to its count
+ """
+
+ # Initialize the dictionary.
+ counts = {}
+ for attr, vals in gvars.METAINFO['values'].items():
+ for val in vals:
+ counts[(attr, val)] = 0
+
+ # Now count for each given object.
+ if objects is None:
+ objects = scene['objects']
+ for obj in objects:
+ for attr in gvars.METAINFO['attributes']:
+ key = (attr, scene['objects'][obj['id']][attr])
+ counts[key] = counts.get(key, 0) + 1
+ return counts
+
+
+def get_unique_attribute_objects(graph, uniq_attrs):
+ """Fetches objects from given scene graph with unique attributes.
+ Args:
+ graph: Scene graph constructed from the dialog generated so far
+ uniq_attrs: List of unique (attribute, value) pairs to look up
+ Returns:
+ obj_ids: Dictionary mapping each unique (attribute, value) pair to
+ its object id
+ """
+
+ obj_ids = {}
+ for obj_id, obj in graph['objects'].items():
+ for attr, val in uniq_attrs:
+ if obj.get(attr, '') == val:
+ # At this point the key should not be present.
+ assert (attr, val) not in obj_ids, 'Attributes not unique!'
+ obj_ids[(attr, val)] = obj_id
+ return obj_ids
+
+
+def sample_optional_tags(optional, sample_probs):
+ """Samples additional tags depending on given sample probabilities.
+ Args:
+ optional: List of optional tags to sample from.
+ sample_probs: Probabilities of sampling 'n' tags.
+ Returns:
+ sampled: Sampled tags from the optional list
+ """
+
+ sampled = []
+ if len(optional) > 0:
+ n_sample = np.random.choice([0, 1], 1, p=sample_probs[:2])[0]
+ n_sample = min(n_sample, len(optional))
+ sampled = random.sample(optional, n_sample)
+ return sampled
diff --git a/constraints_splitA.py b/constraints_splitA.py
new file mode 100644
index 0000000..d4a5e3b
--- /dev/null
+++ b/constraints_splitA.py
@@ -0,0 +1,1055 @@
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+
+# --------------------------------------------------------
+# adapted from https://github.com/satwikkottur/clevr-dialog/blob/master/constraints.py
+# --------------------------------------------------------
+
+import copy
+import json
+import random
+import numpy as np
+
+import global_vars as gvars
+
+
+# Some quick methods.
+def apply_immediate(hist): return (len(hist['objects']) == 1 and
+ hist['mergeable'] and
+ 'exist' not in hist['template'])
+
+
+def apply_group(hist): return (len(hist['objects']) >= 2 and
+ hist['mergeable'] and
+ 'count' not in hist)
+
+
+def caption(scene, templates):
+ """Constraints for caption generation.
+ Args:
+ scene: CLEVR scene graph to generate captions with constraints
+ templates: List of caption templates
+ Returns:
+ sample_captions: Samples from caption hypotheses
+ """
+
+ caption_hypotheses = {}
+
+ # Sweep through all templates to extract 'interesting' captions.
+ n_objs = len(scene['objects'])
+ rels = scene['relationships']
+
+ # Caption Type 1: Extreme locations.
+ ext_loc_templates = [ii for ii in templates if ii['type'] == 'extreme-loc']
+ # number of objects in the scene
+ filter_objs = copy.deepcopy(scene['objects'])
+ attr_counts = get_attribute_counts_for_objects(scene, filter_objs)
+ hypotheses = []
+ for template in ext_loc_templates:
+ # absolute location based constraint
+ constraint = template['constraints'][0]
+ extreme_type = constraint['args'][0]
+
+ # check if there is an object that is at the center of the image
+ # roughly in the middle along front-back and right-left dim
+ if extreme_type == 'center':
+ for ii, obj in enumerate(filter_objs):
+ matches = np.sum([len(rels[kk][ii]) <= n_objs / 2
+ for kk in ['front', 'behind', 'right', 'left']])
+ if matches == 4:
+ hypotheses.append((extreme_type, copy.deepcopy(obj)))
+ else:
+ for ii, obj in enumerate(filter_objs):
+ if len(rels[extreme_type][ii]) == 0:
+ hypotheses.append((extreme_type, copy.deepcopy(obj)))
+
+ # sample one at random, and create the graph item
+ # Filter hypotheses which are ambiguous otherwise.
+ for index, (_, hypothesis) in enumerate(hypotheses):
+ uniq_attr = [attr for attr in gvars.METAINFO['attributes']
+ if attr_counts[(attr, hypothesis[attr])] == 1]
+
+ for attr in uniq_attr:
+ del hypotheses[index][1][attr]
+
+ hypotheses = [ii for ii in hypotheses if len(ii[1]) > 1]
+ caption_hypotheses['extreme-loc'] = hypotheses
+
+ # Caption Type 2: Unique object and attribute.
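+ # Each hypothesis below is an (object, attribute) pair whose attribute
+ # value occurs exactly once in the scene, e.g. (<the only red object>,
+ # 'color').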
+ filter_objs = copy.deepcopy(scene['objects']) + # each hypothesis is (object, attribute) pair + hypotheses = [] + for ii, obj in enumerate(filter_objs): + # get unique set of attributes + uniq_attrs = [ii for ii in gvars.METAINFO['attributes'] + if attr_counts[(ii, obj[ii])] == 1] + # for each, add it to hypothesis + for attr in uniq_attrs: + hypotheses.append((obj, attr)) + caption_hypotheses['unique-obj'] = hypotheses + + # Caption Type 3: Unique attribute count based caption. + # count unique object based constraint + # Each hypothesis is object collection. + caption_hypotheses['count-attr'] = [(attr_val, count) + for attr_val, count in attr_counts.items() + if count > 1] + + # Caption Type 4: Relation between two objects. + # Out of the two, one has a unique attribute. + # find a pair of objects sharing a relation, unique + # filter_objs = copy.deepcopy(scene['objects']) + # n_objs = len(filter_objs) + + # # get a dict of unique attributes for each object + # uniq_attr = [[] for ii in range(n_objs)] + # non_uniq_attr = [[] for ii in range(n_objs)] + # for ind, obj in enumerate(filter_objs): + # uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + # if attr_counts[(attr, obj[attr])] == 1] + # non_uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes'] + # if attr_counts[(attr, obj[attr])] > 1] + # uniqueness = [len(ii) > 0 for ii in uniq_attr] + + # # Hypothesis is a uniq object and non-unique obj2 sharing relation R + # # global ordering for uniqueness + # hypotheses = [] + # for rel, order in scene['relationships'].items(): + # num_rel = [(ii, len(order[ii])) for ii in range(n_objs)] + # num_rel = sorted(num_rel, key=lambda x: x[1], reverse=True) + # # take only the ids + # num_rel = [ii[0] for ii in num_rel] + + # for index, obj_id in enumerate(num_rel[:-1]): + # next_obj_id = num_rel[index + 1] + # # if unique, check if the next one has non-unique attributes + # if uniqueness[obj_id]: + # if len(non_uniq_attr[next_obj_id]) > 0: + # obj1 = (obj_id, random.choice(uniq_attr[obj_id])) + # obj2 = (next_obj_id, random.choice(non_uniq_attr[next_obj_id])) + # hypotheses.append((obj1, rel, obj2)) + # # if not unique, check if the next one has unique attributes + # else: + # if len(uniq_attr[next_obj_id]) > 0: + # obj1 = (obj_id, random.choice(non_uniq_attr[obj_id])) + # obj2 = (next_obj_id, random.choice(uniq_attr[next_obj_id])) + # hypotheses.append((obj1, rel, obj2)) + # caption_hypotheses['obj-relation'] = hypotheses + sample_captions = sample_from_hypotheses( + caption_hypotheses, scene, templates) + return sample_captions + + +def question(scene, dialog, template): + """Constraints question generation. 
+ Inputs: + scene:Partial scene graphs on CLEVR images with generated captions + template: List of question templates to use + Output: + list of object groups + """ + + ques_round = len(dialog['graph']['history']) - 1 + graph = dialog['graph'] + + # check for constraints and answer question + if 'group' in template['label']: + groups = [] + # Pick a group hypothesis + for ii in graph['history']: + if 'count' in ii or len(ii['objects']) == 0: + groups.append(ii) + + if template['label'] == 'count-all': + # Preliminary checks: + # (A) count-all cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = ii['id'] + obj_group.append(obj_copy) + + # create graph item + graph_item = {'round': ques_round + 1, + 'objects': copy.deepcopy(obj_group), + 'template': template['label'], + 'mergeable': True, 'count': len(obj_group)} + # clean graph item + graph_item = clean_graph_item(graph_item) + # no constraints, count the number of objects in true scene + return [{'answer': len(obj_group), 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif (template['label'] == 'count-other' or + template['label'] == 'exist-other'): + # preliminary checks: + # (A) exist-other cannot follow exist-other, count-all, count-other + # (B) count-other cannot follow count-all, count-other + for prev_history in graph['history'][1:]: + if prev_history['template'] in ['count-all', 'count-other']: + return [] + + if (prev_history['template'] == 'exist-other' and + template['label'] == 'exist-other'): + return [] + + # get a list of all objects we know + known_ids = [jj['id'] for ii in graph['history'] for jj in ii['objects']] + known_ids = list(set(known_ids)) + n_objs = len(scene['objects']) + difference = n_objs - len(known_ids) + diff_ids = [ii for ii in range(n_objs) if ii not in known_ids] + + # create empty objects for these + obj_group = [{'id': ii} for ii in diff_ids] + + # create graph item + graph_item = {'round': ques_round + 1, 'objects': obj_group, + 'template': template['label'], 'mergeable': False} + + if 'count' in template['label']: + graph_item['count'] = difference + graph_item['mergeable'] = True # merge if count is known + answer = difference + elif 'exist' in template['label']: + # If heads (> 0.5) -- difference > 0 + if random.random() > 0.5: + if difference > 0: + answer = 'yes' + else: + return [] + else: + if difference == 0: + answer = 'no' + else: + return [] + + # no constraints, count the number of objects in true scene + return [{'answer': answer, 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif template['label'] == 'count-all-group': + # we need a group in the previous round + prev_group = graph['history'][-1] + prev_label = prev_group['template'] + if not (len(prev_group['objects']) > 1 and + 'count' not in prev_group and + 'obj-relation' not in prev_label): + return [] + + # check if count is not given before + attrs = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_group] + count = 0 + for obj in prev_group['objects']: + count += all([obj[ii] == prev_group['objects'][0][ii] for ii in attrs]) + + # create object group + obj_group = [] + new_obj = {'required': [], 'optional': []} + for obj_id, ii in enumerate(scene['objects']): + obj_copy = 
copy.deepcopy(new_obj) + obj_copy['id'] = ii['id'] + obj_group.append(obj_copy) + + # create graph item + graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], + 'mergeable': True, 'count': count} + # clean graph item + graph_item = clean_graph_item(graph_item) + # no constraints, count the number of objects in true scene + return [{'answer': count, 'group_id': ques_round + 1, + 'objects': [], 'graph': graph_item}] + + elif ('count-obj-exclude' in template['label'] or + 'exist-obj-exclude' in template['label']): + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + scene_counts = get_attribute_counts_for_objects(scene) + + if 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + #scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = obj_ids[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get the known attributes for the current object + focus_obj = graph['objects'][focus_id] + known_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj and + '%s_exclude_count' % attr not in focus_obj] + + # for count: only if existence if True, else count it trivially zero + if 'count' in template['label']: + for attr in known_attrs[::-1]: + if not focus_obj.get('%s_exclude_exist' % attr, True): + known_attrs.remove(attr) + # for exist: get relations without exist before + elif 'exist' in template['label']: + known_attrs = [attr for attr in known_attrs + if '%s_exclude_exist' % attr not in focus_obj] + + # select an attribute + if len(known_attrs) == 0: + return[] + + # split this into zero and non-zero + if 'exist' in template['label']: + focus_attrs = [(ii, scene['objects'][focus_id][ii]) + for ii in known_attrs] + zero_count = [ii for ii in focus_attrs if scene_counts[ii] == 1] + nonzero_count = [ii for ii in focus_attrs if scene_counts[ii] > 1] + + if random.random() > 0.5: + if len(zero_count) > 0: + attr = random.choice(zero_count)[0] + else: + return [] + else: + if len(nonzero_count) > 0: + attr = random.choice(nonzero_count)[0] + else: + return [] + else: + attr = random.choice(known_attrs) + + # create the object group + obj_group = [] + new_obj = {'required': ['attribute'], 'optional': []} + for obj in scene['objects']: + # add if same attribute value and not focus object + if obj[attr] == focus_obj[attr] and obj['id'] != focus_id: + obj_copy = 
copy.deepcopy(new_obj) + obj_copy['id'] = obj['id'] + obj_copy[attr] = focus_obj[attr] + obj_group.append(obj_copy) + answer = len(obj_group) + + ref_obj = copy.deepcopy(new_obj) + ref_obj['id'] = focus_id + ref_obj['volatile'] = True + if 'exist' in template['label']: + answer = 'yes' if answer > 0 else 'no' + ref_obj['%s_exclude_exist' % attr] = answer + elif 'count' in template['label']: + ref_obj['%s_exclude_count' % attr] = answer + obj_group.append(ref_obj) + + graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + if 'count' in template['label']: + graph_item['count'] = answer + graph_item = clean_graph_item(graph_item) + + ref_obj['attribute'] = attr + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [ref_obj, obj_desc], 'graph': graph_item}] + + elif ('count-obj-rel' in template['label'] or + 'exist-obj-rel' in template['label']): + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + + # we need a single object in the previous round + if 'imm2' in template['label']: + # we need a obj-rel-imm in previous label, same as the current one + prev_label = prev_history['template'] + cur_label = template['label'] + if 'obj-rel-imm' not in prev_label or cur_label[:5] != prev_label[:5]: + return [] + else: + focus_id = prev_history['focus_id'] + + elif 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + if len(single_count) == 0: + return [] + focus_attr = random.choice(single_count) + for focus_id, obj in graph['objects'].items(): + if obj.get(focus_attr[0], None) == focus_attr[1]: + break + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get relations with unknown counts + unknown_rels = [rel for rel in gvars.METAINFO['relations'] + if '%s_count' % rel not in graph['objects'][focus_id]] + # for count: only if existence if True, else count it trivially zero + if 'count' in template['label']: + for ii in unknown_rels[::-1]: + if not graph['objects'][focus_id].get('%s_exist' % ii, True): + unknown_rels.remove(ii) + + # for exist: get relations without exist before + elif 'exist' in template['label']: + unknown_rels = [rel for rel in unknown_rels + if '%s_exist' % rel not in graph['objects'][focus_id]] + + # select an object with some known objects + if len(unknown_rels) == 0: + return [] + + # pick between yes/no for exist questions, 50% of times + if 'exist' in template['label']: + zero_count = [ii for ii in unknown_rels + if len(scene['relationships'][ii][focus_id]) == 0] + nonzero_count = [ii for ii in unknown_rels + if len(scene['relationships'][ii][focus_id]) > 0] + + if 
random.random() > 0.5: + if len(zero_count) > 0: + rel = random.choice(zero_count) + else: + return [] + else: + if len(nonzero_count) > 0: + rel = random.choice(nonzero_count) + else: + return [] + else: + rel = random.choice(unknown_rels) + + # create the object group + obj_group = [] + new_obj = {'required': ['relation'], 'optional': []} + obj_pool = scene['relationships'][rel][focus_id] + for obj_id in obj_pool: + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = obj_id + obj_group.append(obj_copy) + answer = len(obj_pool) + + ref_obj = copy.deepcopy(new_obj) + ref_obj['id'] = focus_id + ref_obj['volatile'] = True + if 'exist' in template['label']: + answer = 'yes' if answer > 0 else 'no' + ref_obj['%s_exist' % rel] = answer + elif 'count' in template['label']: + ref_obj['%s_count' % rel] = answer + obj_group.append(ref_obj) + + graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + if 'count' in template['label']: + graph_item['count'] = answer + graph_item = clean_graph_item(graph_item) + + #ref_obj['relation'] = rel + # add attribute as argument + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [], 'optional': [], 'relation': rel, + 'objects': [ref_obj, obj_desc], 'graph': graph_item}] + + elif ('count-attribute' in template['label'] or + 'exist-attribute' in template['label']): + if 'group' in template['label']: + # we need an immediate group in the previous round + prev_history = graph['history'][-1] + prev_label = prev_history['template'] + + # if exist: > 0 is good, else > 1 is needed + min_count = 0 if 'exist' in prev_label else 1 + if (len(prev_history['objects']) > min_count and + prev_history['mergeable'] and + 'obj-relation' not in prev_label): + obj_pool = graph['history'][-1]['objects'] + else: + return [] + else: + obj_pool = scene['objects'] + + # get counts for attributes, and sample evenly with 0 and other numbers + counts = get_attribute_counts_for_objects(scene, obj_pool) + + # if exist, choose between zero and others wiht 0.5 probability + zero_prob = 0.5 if 'exist' in template['label'] else 0.7 + if random.random() > zero_prob: + pool = [ii for ii in counts if counts[ii] == 0] + else: + pool = [ii for ii in counts if counts[ii] != 0] + + # check if count is already known + attr_pool = filter_attributes_with_known_counts(graph, pool) + + # for exist: get known attributes and remove them + if 'exist' in template['label']: + known_attr = get_known_attributes(graph) + attr_pool = [ii for ii in attr_pool if ii not in known_attr] + + # if non-empty, sample it + if len(attr_pool) == 0: + return [] + + attr, value = random.choice(attr_pool) + # add a hypothesi, and return the answer + count = 0 + obj_group = [] + new_obj = {attr: value, 'required': [attr], 'optional': []} + for index, obj in enumerate(obj_pool): + if scene['objects'][obj['id']][attr] == value: + obj_copy = copy.deepcopy(new_obj) + obj_copy['id'] = obj['id'] + obj_group.append(obj_copy) + count += 1 + + graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group), + 'template': template['label'], 'mergeable': True, attr: value} + + if 'count' in template['label']: + graph_item['count'] = count + answer = count + elif 'exist' in template['label']: + answer = 'yes' if count > 0 else 'no' + # Clean graph item. + graph_item = clean_graph_item(graph_item) + if count == 0: + # Fake object group, to serve for arguments. 
+ obj_group = [{attr: value, 'required': [attr], 'optional': []}] + + return [{'answer': answer, 'group_id': ques_round + 1, + 'required': [attr], 'optional': [], + 'count': 9999, 'objects': obj_group, 'graph': graph_item}] + + elif 'seek-attr-rel' in template['label']: + # Placeholder for object description, see below. + obj_desc = None + prev_history = graph['history'][-1] + + if 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = obj_ids[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # for each relation, get the object, sample an attribute, and sample + hypotheses = [] + for rel in gvars.METAINFO['relations']: + gt_relations = scene['relationships'][rel] + objs = [(ii, len(gt_relations[ii])) for ii in gt_relations[focus_id]] + objs = sorted(objs, key=lambda x: x[1], reverse=True) + if len(objs) == 0: + # add a null hypotheses + # check if the object is known to be extreme + if ('%s_count' % rel not in graph['objects'][focus_id] and + '%s_exist' % rel not in graph['objects'][focus_id]): + random_attr = random.choice(gvars.METAINFO['attributes']) + hypotheses.append((None, rel, random_attr)) + continue + + closest_obj = objs[0][0] + # check what attributes are known/unknown + known_info = graph['objects'].get(closest_obj, {}) + for attr in gvars.METAINFO['attributes']: + if attr not in known_info: + hypotheses.append((closest_obj, rel, attr)) + + if len(hypotheses) == 0: + return [] + sample_id, rel, attr = random.choice(hypotheses) + # add the new attribute to object + new_obj = {'required': ['attribute', 'relation'], + 'optional': [], 'id': sample_id} + + if sample_id is not None: + answer = scene['objects'][sample_id][attr] + else: + answer = 'none' + new_obj[attr] = answer + + graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)], + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 'focus_desc': obj_desc} + # remove objects if none + if sample_id is None: + graph_item['objects'] = [] + graph_item = clean_graph_item(graph_item) + + # Add attribute as argument. 
+ new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], 'relation': rel, + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + + elif 'seek-attr' in template['label']: + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + prev_label = prev_history['template'] + implicit_attr = None + + # we need a single object in the previous round + if 'imm2' in template['label']: + # we need a seek-attr-imm/seek-attr-rel-imm in previous label + if ('seek-attr-imm' not in prev_label and + 'seek-attr-rel-imm' not in prev_label): + return [] + elif len(prev_history['objects']) == 0: + return [] + else: + focus_id = prev_history['objects'][0]['id'] + + elif 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'sim' in template['label']: + if 'seek-attr-imm' not in prev_label: + return[] + else: + prev_obj = prev_history['objects'][0] + focus_id = prev_obj['id'] + attr = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_obj] + assert len(attr) == 1, 'Something wrong in previous history!' + implicit_attr = attr[0] + + if 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + # if there is an attribute, eliminate those options + if implicit_attr is not None: + single_count = [ii for ii in single_count if ii[0] != implicit_attr] + obj_ids = get_unique_attribute_objects(graph, single_count) + + # again rule out objects whose implicit_attr is known + single_count = [ii for ii in single_count + if implicit_attr not in graph['objects'][obj_ids[ii]]] + + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = get_unique_attribute_objects(graph, [focus_attr])[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get unknown attributes, randomly sample one + if implicit_attr is None: + unknown_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr not in graph['objects'][focus_id]] + + # TODO: select an object with some known objects + if len(unknown_attrs) == 0: + return [] + attr = random.choice(unknown_attrs) + else: + attr = implicit_attr + + # add the new attribute to object + new_obj = {'required': ['attribute'], 'optional': [], 'id': focus_id} + if 'sim' in template['label']: + new_obj['required'] = [] + new_obj[attr] = scene['objects'][focus_id][attr] + + graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)], + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 
'focus_desc': obj_desc} + graph_item = clean_graph_item(graph_item) + + # add attribute as argument + new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + return [] + + +def sample_from_hypotheses(caption_hypotheses, scene, cap_templates): + """Samples from caption hypotheses given the scene and caption templates. + Args: + caption_hypotheses: List of hypotheses for objects/object pairs + scene: CLEVR image scene graph + cap_templates: List of caption templates to sample captions + Returns: + obj_groups: List of object groups and corresponding sampled captions + """ + + obj_groups = [] + + # Caption Type 1: Extreme location. + hypotheses = caption_hypotheses['extreme-loc'] + if len(hypotheses) > 0: + # extreme location hypotheses + extreme_type, focus_obj = random.choice(hypotheses) + # sample optional attributes + obj_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj] + focus_attr = random.choice(obj_attrs) + optional_attrs = [ii for ii in obj_attrs if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['%s_count' % extreme_type] = 0 + graph_item['objects'][0]['%s_exist' % extreme_type] = False + graph_item['template'] = 'extreme-%s' % extreme_type + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 2: Unique object. + hypotheses = caption_hypotheses['unique-obj'] + if len(hypotheses) > 0: + # sample one at random, and create the graph item + focus_obj, focus_attr = random.choice(hypotheses) + # sample optional attributes + optional_attrs = [ii for ii in gvars.METAINFO['attributes'] + if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['unique'] = True + graph_item['template'] = 'unique-obj' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 3: Unique attribute count based caption. + hypotheses = caption_hypotheses['count-attr'] + if len(hypotheses) > 0: + # Randomly sample one hypothesis and one template. + (attr, value), count = random.choice(hypotheses) + # Segregate counting templates. 
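+ # (these are the last caption type sampled in this split: the
+ # relation-based Caption Type 4 further below is kept commented out)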
+ count_templates = [ii for ii in cap_templates if 'count' in ii['type']] + template = random.choice(count_templates) + obj_group = {'group_id': 0, 'count': count, attr: value, + 'optional': [], 'required': [], 'objects': []} + + # get a list of objects which are part of this collection + for ii, obj in enumerate(scene['objects']): + if obj[attr] == value: + new_obj = {'id': obj['id'], attr: value} + new_obj['required'] = [attr] + new_obj['optional'] = [] + obj_group['objects'].append(new_obj) + + if 'no' in template['label']: + # Count is not mentioned. + del obj_group['count'] + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = False + else: + # Count is mentioned. + for index, ii in enumerate(obj_group['objects']): + obj_group['objects'][index]['required'].append('count') + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = True + + # clean up graph item + graph_item['template'] = template['label'] + graph_item = clean_graph_item(graph_item) + obj_group['graph'] = graph_item + obj_group['use_plural'] = True + obj_groups.append([obj_group]) + + # Caption Type 4: Relation between two objects (one of them is unique). + # hypotheses = caption_hypotheses['obj-relation'] + # if len(hypotheses) > 0: + # (obj_id1, attr1), rel, (obj_id2, attr2) = random.choice(hypotheses) + # obj_group = {'group_id': 0, 'relation': rel} + + # # create object dictionaries + # obj1 = {'optional': [], 'required': [attr1], 'id': obj_id1, + # attr1: scene['objects'][obj_id1][attr1]} + # obj2 = {'optional': [], 'required': [attr2], 'id': obj_id2, + # attr2: scene['objects'][obj_id2][attr2]} + # obj_group['objects'] = [obj2, obj1] + + # # also create a clean graph object + # graph_item = copy.deepcopy(obj_group) + # graph_item = clean_graph_item(graph_item) + # graph_item['mergeable'] = True + # graph_item['template'] = 'obj-relation' + # obj_group['graph'] = graph_item + # obj_groups.append([obj_group]) + return obj_groups + + +def get_known_attributes(graph): + """Fetches a list of known attributes given the scene graph. + Args: + graph: Scene graph to check unique attributes from + Returns: + known_attrs: List of known attributes from the scene graph + """ + + known_attrs = [] + for obj_id, obj_info in graph['objects'].items(): + # The attribute is unique already. + # if obj_info.get('unique', False): continue + for attr in gvars.METAINFO['attributes']: + if attr in obj_info: + known_attrs.append((attr, obj_info[attr])) + + # also go over the groups + for ii in graph['history']: + # a group of objects, with unknown count + #if 'count' not in ii: continue + for attr in gvars.METAINFO['attributes']: + if attr in ii: + known_attrs.append((attr, ii[attr])) + known_attrs = list(set(known_attrs)) + return known_attrs + + +def get_known_attribute_counts(graph): + """Fetches a count of known attributes given the scene graph. + Calls get_known_attributes method internally. + Args: + graph: Scene graph to check unique attributes from + Returns: + counts: Count of known attributes from the scene graph + """ + + known_attrs = get_known_attributes(graph) + # Go through objects and count. + counts = {ii: 0 for ii in known_attrs} + for _, obj in graph['objects'].items(): + for attr, val in known_attrs: + if obj.get(attr, None) == val: + counts[(attr, val)] += 1 + return counts + + +def filter_attributes_with_known_counts(graph, known_attrs): + """Filters attributes whose counts are known, given the scene graph. 
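+  (Iterates over a reversed copy of known_attrs so that entries can be
+  removed from the list in place while scanning.)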
+  Args:
+    graph: Scene graph from the dialog generated so far
+    known_attrs: List of known attributes from the ground truth scene graph
+  Returns:
+    known_attrs: List of attributes with unknown counts removed in place
+  """
+
+  for attr, val in known_attrs[::-1]:
+    for ii in graph['history']:
+      # Skip groups whose count is unknown.
+      if 'count' not in ii:
+        continue
+      # Count is known for this attribute; drop it.
+      if ii.get(attr, None) == val:
+        known_attrs.remove((attr, val))
+  return known_attrs
+
+
+def clean_graph_item(graph_item):
+  """Cleans up graph item (remove 'required' and 'optional' tags).
+  Args:
+    graph_item: Input graph item to be cleaned.
+  Returns:
+    clean_graph_item: Copy of the graph item after cleaning.
+  """
+
+  clean_graph_item = copy.deepcopy(graph_item)
+  if 'optional' in clean_graph_item:
+    del clean_graph_item['optional']
+  if 'required' in clean_graph_item:
+    del clean_graph_item['required']
+
+  for index, ii in enumerate(clean_graph_item['objects']):
+    if 'optional' in ii:
+      del clean_graph_item['objects'][index]['optional']
+    if 'required' in ii:
+      del clean_graph_item['objects'][index]['required']
+  return clean_graph_item
+
+
+def get_attribute_counts_for_objects(scene, objects=None):
+  """Counts attributes for a given set of objects.
+  Args:
+    scene: Scene graph for the dialog generated so far
+    objects: List of objects. Default = None selects all objects
+  Returns:
+    counts: Counts for each (attribute, value) pair over the given objects
+  """
+
+  # Initialize the dictionary.
+  counts = {}
+  for attr, vals in gvars.METAINFO['values'].items():
+    for val in vals:
+      counts[(attr, val)] = 0
+
+  # Now count for each given object.
+  if objects is None:
+    objects = scene['objects']
+  for obj in objects:
+    for attr in gvars.METAINFO['attributes']:
+      key = (attr, scene['objects'][obj['id']][attr])
+      counts[key] = counts.get(key, 0) + 1
+  return counts
+
+
+def get_unique_attribute_objects(graph, uniq_attrs):
+  """Fetches objects from given scene graph with unique attributes.
+  Args:
+    graph: Scene graph constructed from the dialog generated so far
+    uniq_attrs: List of unique (attribute, value) pairs to look up
+  Returns:
+    obj_ids: Dict mapping each unique (attribute, value) pair to its object id
+  """
+
+  obj_ids = {}
+  for obj_id, obj in graph['objects'].items():
+    for attr, val in uniq_attrs:
+      if obj.get(attr, '') == val:
+        # At this point the key should not be present.
+        assert (attr, val) not in obj_ids, 'Attributes not unique!'
+        obj_ids[(attr, val)] = obj_id
+  return obj_ids
+
+
+def sample_optional_tags(optional, sample_probs):
+  """Samples additional tags depending on given sample probabilities.
+  Args:
+    optional: List of optional tags to sample from.
+    sample_probs: Probabilities of sampling 'n' tags.
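+      Only the first two entries are used: the probabilities of sampling
+      zero or one optional tag.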
+  Returns:
+    sampled: Sampled tags from the optional list
+  """
+
+  sampled = []
+  if len(optional) > 0:
+    n_sample = np.random.choice([0, 1], 1, p=sample_probs[:2])[0]
+    n_sample = min(n_sample, len(optional))
+    sampled = random.sample(optional, n_sample)
+  return sampled
diff --git a/constraints_splitB.py b/constraints_splitB.py
new file mode 100644
index 0000000..c4edf54
--- /dev/null
+++ b/constraints_splitB.py
@@ -0,0 +1,1055 @@
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+
+# --------------------------------------------------------
+# adapted from https://github.com/satwikkottur/clevr-dialog/blob/master/constraints.py
+# --------------------------------------------------------
+
+import copy
+import json
+import random
+import numpy as np
+
+import global_vars as gvars
+
+
+# Some quick methods.
+def apply_immediate(hist): return (len(hist['objects']) == 1 and
+                                   hist['mergeable'] and
+                                   'exist' not in hist['template'])
+
+
+def apply_group(hist): return (len(hist['objects']) >= 2 and
+                               hist['mergeable'] and
+                               'count' not in hist)
+
+
+def caption(scene, templates):
+  """Constraints for caption generation.
+  Args:
+    scene: CLEVR scene graph to generate captions with constraints
+    templates: List of caption templates
+  Returns:
+    sample_captions: Samples from caption hypotheses
+  """
+
+  caption_hypotheses = {}
+
+  # Sweep through all templates to extract 'interesting' captions.
+  n_objs = len(scene['objects'])
+  rels = scene['relationships']
+
+  # Caption Type 1: Extreme locations.
+  ext_loc_templates = [ii for ii in templates if ii['type'] == 'extreme-loc']
+  # objects in the scene
+  filter_objs = copy.deepcopy(scene['objects'])
+  attr_counts = get_attribute_counts_for_objects(scene, filter_objs)
+  hypotheses = []
+  for template in ext_loc_templates:
+    # absolute location based constraint
+    constraint = template['constraints'][0]
+    extreme_type = constraint['args'][0]
+
+    # check if there is an object that is at the center of the image
+    # roughly in the middle along front-back and right-left dim
+    if extreme_type == 'center':
+      for ii, obj in enumerate(filter_objs):
+        matches = np.sum([len(rels[kk][ii]) <= n_objs / 2
+                          for kk in ['front', 'behind', 'right', 'left']])
+        if matches == 4:
+          hypotheses.append((extreme_type, copy.deepcopy(obj)))
+    else:
+      for ii, obj in enumerate(filter_objs):
+        if len(rels[extreme_type][ii]) == 0:
+          hypotheses.append((extreme_type, copy.deepcopy(obj)))
+
+  # Remove attributes that would make the object uniquely identifiable
+  # otherwise; keep only hypotheses that stay informative.
+  for index, (_, hypothesis) in enumerate(hypotheses):
+    uniq_attr = [attr for attr in gvars.METAINFO['attributes']
+                 if attr_counts[(attr, hypothesis[attr])] == 1]
+
+    for attr in uniq_attr:
+      del hypotheses[index][1][attr]
+
+  hypotheses = [ii for ii in hypotheses if len(ii[1]) > 1]
+  caption_hypotheses['extreme-loc'] = hypotheses
+
+  # Caption Type 2: Unique object and attribute.
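+  # NOTE: unique-object caption hypotheses are left commented out below;
+  # this split relies on object-relation captions (Caption Type 4) instead.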
+#  filter_objs = copy.deepcopy(scene['objects'])
+#  # each hypothesis is (object, attribute) pair
+#  hypotheses = []
+#  for ii, obj in enumerate(filter_objs):
+#    # get unique set of attributes
+#    uniq_attrs = [ii for ii in gvars.METAINFO['attributes']
+#                  if attr_counts[(ii, obj[ii])] == 1]
+#    # for each, add it to hypothesis
+#    for attr in uniq_attrs:
+#      hypotheses.append((obj, attr))
+#  caption_hypotheses['unique-obj'] = hypotheses
+
+  # Caption Type 3: Unique attribute count based caption.
+  # count unique object based constraint
+  # Each hypothesis is an object collection.
+  caption_hypotheses['count-attr'] = [(attr_val, count)
+                                      for attr_val, count in attr_counts.items()
+                                      if count > 1]
+
+  # Caption Type 4: Relation between two objects.
+  # Out of the two, one has a unique attribute.
+  # find a pair of objects sharing a relation, one of them unique
+  filter_objs = copy.deepcopy(scene['objects'])
+  n_objs = len(filter_objs)
+
+  # get a dict of unique attributes for each object
+  uniq_attr = [[] for ii in range(n_objs)]
+  non_uniq_attr = [[] for ii in range(n_objs)]
+  for ind, obj in enumerate(filter_objs):
+    uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes']
+                      if attr_counts[(attr, obj[attr])] == 1]
+    non_uniq_attr[ind] = [attr for attr in gvars.METAINFO['attributes']
+                          if attr_counts[(attr, obj[attr])] > 1]
+  uniqueness = [len(ii) > 0 for ii in uniq_attr]
+
+  # Hypothesis is a unique object and a non-unique obj2 sharing relation R
+  # global ordering for uniqueness
+  hypotheses = []
+  for rel, order in scene['relationships'].items():
+    num_rel = [(ii, len(order[ii])) for ii in range(n_objs)]
+    num_rel = sorted(num_rel, key=lambda x: x[1], reverse=True)
+    # take only the ids
+    num_rel = [ii[0] for ii in num_rel]
+
+    for index, obj_id in enumerate(num_rel[:-1]):
+      next_obj_id = num_rel[index + 1]
+      # if unique, check if the next one has non-unique attributes
+      if uniqueness[obj_id]:
+        if len(non_uniq_attr[next_obj_id]) > 0:
+          obj1 = (obj_id, random.choice(uniq_attr[obj_id]))
+          obj2 = (next_obj_id, random.choice(non_uniq_attr[next_obj_id]))
+          hypotheses.append((obj1, rel, obj2))
+      # if not unique, check if the next one has unique attributes
+      else:
+        if len(uniq_attr[next_obj_id]) > 0:
+          obj1 = (obj_id, random.choice(non_uniq_attr[obj_id]))
+          obj2 = (next_obj_id, random.choice(uniq_attr[next_obj_id]))
+          hypotheses.append((obj1, rel, obj2))
+  caption_hypotheses['obj-relation'] = hypotheses
+  sample_captions = sample_from_hypotheses(
+      caption_hypotheses, scene, templates)
+  return sample_captions
+
+
+def question(scene, dialog, template):
+  """Constraints for question generation.
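+
+  The template's 'label' selects one of the branches below. Each branch
+  returns [] when its constraints cannot be met, or a singleton list holding
+  the answer, the objects involved, and a graph item that is merged into the
+  dialog scene graph.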
+  Args:
+    scene: Partial scene graph of the CLEVR image with the generated caption
+    dialog: Dialog generated so far
+    template: Question template to attempt
+  Returns:
+    List of object groups (empty if the constraints cannot be met)
+  """
+
+  ques_round = len(dialog['graph']['history']) - 1
+  graph = dialog['graph']
+
+  # check for constraints and answer question
+  if 'group' in template['label']:
+    groups = []
+    # Pick a group hypothesis
+    for ii in graph['history']:
+      if 'count' in ii or len(ii['objects']) == 0:
+        groups.append(ii)
+
+  if template['label'] == 'count-all':
+    # Preliminary checks:
+    # (A) count-all cannot follow count-all, count-other
+    for prev_history in graph['history'][1:]:
+      if prev_history['template'] in ['count-all', 'count-other']:
+        return []
+
+    # create object group
+    obj_group = []
+    new_obj = {'required': [], 'optional': []}
+    for obj_id, ii in enumerate(scene['objects']):
+      obj_copy = copy.deepcopy(new_obj)
+      obj_copy['id'] = ii['id']
+      obj_group.append(obj_copy)
+
+    # create graph item
+    graph_item = {'round': ques_round + 1,
+                  'objects': copy.deepcopy(obj_group),
+                  'template': template['label'],
+                  'mergeable': True, 'count': len(obj_group)}
+    # clean graph item
+    graph_item = clean_graph_item(graph_item)
+    # no constraints, count the number of objects in true scene
+    return [{'answer': len(obj_group), 'group_id': ques_round + 1,
+             'objects': [], 'graph': graph_item}]
+
+  elif (template['label'] == 'count-other' or
+        template['label'] == 'exist-other'):
+    # preliminary checks:
+    # (A) exist-other cannot follow exist-other, count-all, count-other
+    # (B) count-other cannot follow count-all, count-other
+    for prev_history in graph['history'][1:]:
+      if prev_history['template'] in ['count-all', 'count-other']:
+        return []
+
+      if (prev_history['template'] == 'exist-other' and
+          template['label'] == 'exist-other'):
+        return []
+
+    # get a list of all objects we know
+    known_ids = [jj['id'] for ii in graph['history'] for jj in ii['objects']]
+    known_ids = list(set(known_ids))
+    n_objs = len(scene['objects'])
+    difference = n_objs - len(known_ids)
+    diff_ids = [ii for ii in range(n_objs) if ii not in known_ids]
+
+    # create empty objects for these
+    obj_group = [{'id': ii} for ii in diff_ids]
+
+    # create graph item
+    graph_item = {'round': ques_round + 1, 'objects': obj_group,
+                  'template': template['label'], 'mergeable': False}
+
+    if 'count' in template['label']:
+      graph_item['count'] = difference
+      graph_item['mergeable'] = True  # merge if count is known
+      answer = difference
+    elif 'exist' in template['label']:
+      # If heads (> 0.5) -- difference > 0
+      if random.random() > 0.5:
+        if difference > 0:
+          answer = 'yes'
+        else:
+          return []
+      else:
+        if difference == 0:
+          answer = 'no'
+        else:
+          return []
+
+    # no constraints, count the number of objects in true scene
+    return [{'answer': answer, 'group_id': ques_round + 1,
+             'objects': [], 'graph': graph_item}]
+
+  elif template['label'] == 'count-all-group':
+    # we need a group in the previous round
+    prev_group = graph['history'][-1]
+    prev_label = prev_group['template']
+    if not (len(prev_group['objects']) > 1 and
+            'count' not in prev_group and
+            'obj-relation' not in prev_label):
+      return []
+
+    # check if count is not given before
+    attrs = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_group]
+    count = 0
+    for obj in prev_group['objects']:
+      count += all([obj[ii] == prev_group['objects'][0][ii] for ii in attrs])
+
+    # create object group
+    obj_group = []
+    new_obj = {'required': [], 'optional': []}
+    for obj_id, ii in enumerate(scene['objects']):
+      obj_copy = 
copy.deepcopy(new_obj)
+      obj_copy['id'] = ii['id']
+      obj_group.append(obj_copy)
+
+    # create graph item
+    graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group),
+                  'template': template['label'],
+                  'mergeable': True, 'count': count}
+    # clean graph item
+    graph_item = clean_graph_item(graph_item)
+    # no constraints, count the number of objects in true scene
+    return [{'answer': count, 'group_id': ques_round + 1,
+             'objects': [], 'graph': graph_item}]
+
+  elif ('count-obj-exclude' in template['label'] or
+        'exist-obj-exclude' in template['label']):
+    # placeholder for object description, see below
+    obj_desc = None
+    prev_history = graph['history'][-1]
+    scene_counts = get_attribute_counts_for_objects(scene)
+
+    if 'imm' in template['label']:
+      # we need an immediate group in the previous round
+      if apply_immediate(prev_history):
+        focus_id = prev_history['objects'][0]['id']
+      else:
+        return []
+
+    elif 'early' in template['label']:
+      # search through history for an object with unique attribute
+      attr_counts = get_known_attribute_counts(graph)
+      # get attributes with just one count
+      single_count = [ii for ii, count in attr_counts.items() if count == 1]
+      # remove attributes that point to objects in the previous round
+      # TODO: re-think this again
+      obj_ids = get_unique_attribute_objects(graph, single_count)
+      prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+      single_count = [ii for ii in single_count if
+                      obj_ids[ii] not in prev_history_obj_ids]
+
+      if len(single_count) == 0:
+        return []
+
+      # give preference to attributes with multiple counts in scene graph
+      #scene_counts = get_attribute_counts_for_objects(scene)
+      ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1]
+      if len(ambiguous_attrs) > 0:
+        focus_attr = random.choice(ambiguous_attrs)
+      else:
+        focus_attr = random.choice(single_count)
+      focus_id = obj_ids[focus_attr]
+
+      # unique object description
+      obj_desc = {'required': [focus_attr[0]], 'optional': [],
+                  focus_attr[0]: focus_attr[1]}
+
+    # get the known attributes for the current object
+    focus_obj = graph['objects'][focus_id]
+    known_attrs = [attr for attr in gvars.METAINFO['attributes']
+                   if attr in focus_obj and
+                   '%s_exclude_count' % attr not in focus_obj]
+
+    # for count: only ask if existence is True; otherwise the count is
+    # trivially zero
+    if 'count' in template['label']:
+      for attr in known_attrs[::-1]:
+        if not focus_obj.get('%s_exclude_exist' % attr, True):
+          known_attrs.remove(attr)
+    # for exist: keep attributes without a previous exist question
+    elif 'exist' in template['label']:
+      known_attrs = [attr for attr in known_attrs
+                     if '%s_exclude_exist' % attr not in focus_obj]
+
+    # select an attribute
+    if len(known_attrs) == 0:
+      return []
+
+    # split this into zero and non-zero
+    if 'exist' in template['label']:
+      focus_attrs = [(ii, scene['objects'][focus_id][ii])
+                     for ii in known_attrs]
+      zero_count = [ii for ii in focus_attrs if scene_counts[ii] == 1]
+      nonzero_count = [ii for ii in focus_attrs if scene_counts[ii] > 1]
+
+      if random.random() > 0.5:
+        if len(zero_count) > 0:
+          attr = random.choice(zero_count)[0]
+        else:
+          return []
+      else:
+        if len(nonzero_count) > 0:
+          attr = random.choice(nonzero_count)[0]
+        else:
+          return []
+    else:
+      attr = random.choice(known_attrs)
+
+    # create the object group
+    obj_group = []
+    new_obj = {'required': ['attribute'], 'optional': []}
+    for obj in scene['objects']:
+      # add if same attribute value and not focus object
+      if obj[attr] == focus_obj[attr] and obj['id'] != focus_id:
+        obj_copy = 
copy.deepcopy(new_obj)
+        obj_copy['id'] = obj['id']
+        obj_copy[attr] = focus_obj[attr]
+        obj_group.append(obj_copy)
+    answer = len(obj_group)
+
+    ref_obj = copy.deepcopy(new_obj)
+    ref_obj['id'] = focus_id
+    ref_obj['volatile'] = True
+    if 'exist' in template['label']:
+      answer = 'yes' if answer > 0 else 'no'
+      ref_obj['%s_exclude_exist' % attr] = answer
+    elif 'count' in template['label']:
+      ref_obj['%s_exclude_count' % attr] = answer
+    obj_group.append(ref_obj)
+
+    graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group),
+                  'template': template['label'], 'mergeable': True,
+                  'focus_id': focus_id, 'focus_desc': obj_desc}
+    if 'count' in template['label']:
+      graph_item['count'] = answer
+    graph_item = clean_graph_item(graph_item)
+
+    ref_obj['attribute'] = attr
+    return [{'answer': answer, 'group_id': ques_round + 1,
+             'required': [], 'optional': [],
+             'objects': [ref_obj, obj_desc], 'graph': graph_item}]
+
+  elif ('count-obj-rel' in template['label'] or
+        'exist-obj-rel' in template['label']):
+    # placeholder for object description, see below
+    obj_desc = None
+    prev_history = graph['history'][-1]
+
+    # we need a single object in the previous round
+    if 'imm2' in template['label']:
+      # we need an obj-rel-imm in the previous label, same as the current one
+      prev_label = prev_history['template']
+      cur_label = template['label']
+      if 'obj-rel-imm' not in prev_label or cur_label[:5] != prev_label[:5]:
+        return []
+      else:
+        focus_id = prev_history['focus_id']
+
+    elif 'imm' in template['label']:
+      # we need an immediate group in the previous round
+      if apply_immediate(prev_history):
+        focus_id = prev_history['objects'][0]['id']
+      else:
+        return []
+
+    elif 'early' in template['label']:
+      # search through history for an object with unique attribute
+      attr_counts = get_known_attribute_counts(graph)
+
+      # get attributes with just one count
+      single_count = [ii for ii, count in attr_counts.items() if count == 1]
+      # remove attributes that point to objects in the previous round
+      # TODO: re-think this again
+      obj_ids = get_unique_attribute_objects(graph, single_count)
+      prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+      single_count = [ii for ii in single_count if
+                      obj_ids[ii] not in prev_history_obj_ids]
+
+      if len(single_count) == 0:
+        return []
+      focus_attr = random.choice(single_count)
+      for focus_id, obj in graph['objects'].items():
+        if obj.get(focus_attr[0], None) == focus_attr[1]:
+          break
+
+      # unique object description
+      obj_desc = {'required': [focus_attr[0]], 'optional': [],
+                  focus_attr[0]: focus_attr[1]}
+
+    # get relations with unknown counts
+    unknown_rels = [rel for rel in gvars.METAINFO['relations']
+                    if '%s_count' % rel not in graph['objects'][focus_id]]
+    # for count: only ask if existence is True; otherwise the count is
+    # trivially zero
+    if 'count' in template['label']:
+      for ii in unknown_rels[::-1]:
+        if not graph['objects'][focus_id].get('%s_exist' % ii, True):
+          unknown_rels.remove(ii)
+
+    # for exist: get relations without exist before
+    elif 'exist' in template['label']:
+      unknown_rels = [rel for rel in unknown_rels
+                      if '%s_exist' % rel not in graph['objects'][focus_id]]
+
+    # bail out if no relation with an unknown count/existence is left
+    if len(unknown_rels) == 0:
+      return []
+
+    # pick between yes/no for exist questions, 50% of the time
+    if 'exist' in template['label']:
+      zero_count = [ii for ii in unknown_rels
+                    if len(scene['relationships'][ii][focus_id]) == 0]
+      nonzero_count = [ii for ii in unknown_rels
+                       if len(scene['relationships'][ii][focus_id]) > 0]
+
+      if 
random.random() > 0.5:
+        if len(zero_count) > 0:
+          rel = random.choice(zero_count)
+        else:
+          return []
+      else:
+        if len(nonzero_count) > 0:
+          rel = random.choice(nonzero_count)
+        else:
+          return []
+    else:
+      rel = random.choice(unknown_rels)
+
+    # create the object group
+    obj_group = []
+    new_obj = {'required': ['relation'], 'optional': []}
+    obj_pool = scene['relationships'][rel][focus_id]
+    for obj_id in obj_pool:
+      obj_copy = copy.deepcopy(new_obj)
+      obj_copy['id'] = obj_id
+      obj_group.append(obj_copy)
+    answer = len(obj_pool)
+
+    ref_obj = copy.deepcopy(new_obj)
+    ref_obj['id'] = focus_id
+    ref_obj['volatile'] = True
+    if 'exist' in template['label']:
+      answer = 'yes' if answer > 0 else 'no'
+      ref_obj['%s_exist' % rel] = answer
+    elif 'count' in template['label']:
+      ref_obj['%s_count' % rel] = answer
+    obj_group.append(ref_obj)
+
+    graph_item = {'round': ques_round+1, 'objects': copy.deepcopy(obj_group),
+                  'template': template['label'], 'mergeable': True,
+                  'focus_id': focus_id, 'focus_desc': obj_desc}
+    if 'count' in template['label']:
+      graph_item['count'] = answer
+    graph_item = clean_graph_item(graph_item)
+
+    #ref_obj['relation'] = rel
+    # add attribute as argument
+    return [{'answer': answer, 'group_id': ques_round + 1,
+             'required': [], 'optional': [], 'relation': rel,
+             'objects': [ref_obj, obj_desc], 'graph': graph_item}]
+
+  elif ('count-attribute' in template['label'] or
+        'exist-attribute' in template['label']):
+    if 'group' in template['label']:
+      # we need an immediate group in the previous round
+      prev_history = graph['history'][-1]
+      prev_label = prev_history['template']
+
+      # if exist: > 0 is good, else > 1 is needed
+      min_count = 0 if 'exist' in prev_label else 1
+      if (len(prev_history['objects']) > min_count and
+          prev_history['mergeable'] and
+          'obj-relation' not in prev_label):
+        obj_pool = graph['history'][-1]['objects']
+      else:
+        return []
+    else:
+      obj_pool = scene['objects']
+
+    # get counts for attributes, and sample evenly between 0 and other numbers
+    counts = get_attribute_counts_for_objects(scene, obj_pool)
+
+    # if exist, choose between zero and others with 0.5 probability
+    zero_prob = 0.5 if 'exist' in template['label'] else 0.7
+    if random.random() > zero_prob:
+      pool = [ii for ii in counts if counts[ii] == 0]
+    else:
+      pool = [ii for ii in counts if counts[ii] != 0]
+
+    # check if count is already known
+    attr_pool = filter_attributes_with_known_counts(graph, pool)
+
+    # for exist: get known attributes and remove them
+    if 'exist' in template['label']:
+      known_attr = get_known_attributes(graph)
+      attr_pool = [ii for ii in attr_pool if ii not in known_attr]
+
+    # if empty, nothing to sample
+    if len(attr_pool) == 0:
+      return []
+
+    attr, value = random.choice(attr_pool)
+    # add a hypothesis, and return the answer
+    count = 0
+    obj_group = []
+    new_obj = {attr: value, 'required': [attr], 'optional': []}
+    for index, obj in enumerate(obj_pool):
+      if scene['objects'][obj['id']][attr] == value:
+        obj_copy = copy.deepcopy(new_obj)
+        obj_copy['id'] = obj['id']
+        obj_group.append(obj_copy)
+        count += 1
+
+    graph_item = {'round': ques_round + 1, 'objects': copy.deepcopy(obj_group),
+                  'template': template['label'], 'mergeable': True, attr: value}
+
+    if 'count' in template['label']:
+      graph_item['count'] = count
+      answer = count
+    elif 'exist' in template['label']:
+      answer = 'yes' if count > 0 else 'no'
+    # Clean graph item.
+    graph_item = clean_graph_item(graph_item)
+    if count == 0:
+      # Fake object group, to serve for arguments.
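+      # (No scene object matched, so a dummy entry carrying only the queried
+      # attribute keeps the question-text arguments well-formed.)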
+      obj_group = [{attr: value, 'required': [attr], 'optional': []}]
+
+    return [{'answer': answer, 'group_id': ques_round + 1,
+             'required': [attr], 'optional': [],
+             'count': 9999, 'objects': obj_group, 'graph': graph_item}]
+
+  elif 'seek-attr-rel' in template['label']:
+    # Placeholder for object description, see below.
+    obj_desc = None
+    prev_history = graph['history'][-1]
+
+    if 'imm' in template['label']:
+      # we need an immediate group in the previous round
+      if apply_immediate(prev_history):
+        focus_id = prev_history['objects'][0]['id']
+      else:
+        return []
+
+    elif 'early' in template['label']:
+      # search through history for an object with unique attribute
+      attr_counts = get_known_attribute_counts(graph)
+
+      # get attributes with just one count
+      single_count = [ii for ii, count in attr_counts.items() if count == 1]
+      # remove attributes that point to objects in the previous round
+      # TODO: re-think this again
+      obj_ids = get_unique_attribute_objects(graph, single_count)
+      prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']]
+      single_count = [ii for ii in single_count if
+                      obj_ids[ii] not in prev_history_obj_ids]
+      if len(single_count) == 0:
+        return []
+
+      # give preference to attributes with multiple counts in scene graph
+      scene_counts = get_attribute_counts_for_objects(scene)
+      ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1]
+      if len(ambiguous_attrs) > 0:
+        focus_attr = random.choice(ambiguous_attrs)
+      else:
+        focus_attr = random.choice(single_count)
+      focus_id = obj_ids[focus_attr]
+
+      # unique object description
+      obj_desc = {'required': [focus_attr[0]], 'optional': [],
+                  focus_attr[0]: focus_attr[1]}
+
+    # for each relation, get the closest object and sample an unknown attribute
+    hypotheses = []
+    for rel in gvars.METAINFO['relations']:
+      gt_relations = scene['relationships'][rel]
+      objs = [(ii, len(gt_relations[ii])) for ii in gt_relations[focus_id]]
+      objs = sorted(objs, key=lambda x: x[1], reverse=True)
+      if len(objs) == 0:
+        # add a null hypothesis
+        # check if the object is known to be extreme
+        if ('%s_count' % rel not in graph['objects'][focus_id] and
+            '%s_exist' % rel not in graph['objects'][focus_id]):
+          random_attr = random.choice(gvars.METAINFO['attributes'])
+          hypotheses.append((None, rel, random_attr))
+        continue
+
+      closest_obj = objs[0][0]
+      # check what attributes are known/unknown
+      known_info = graph['objects'].get(closest_obj, {})
+      for attr in gvars.METAINFO['attributes']:
+        if attr not in known_info:
+          hypotheses.append((closest_obj, rel, attr))
+
+    if len(hypotheses) == 0:
+      return []
+    sample_id, rel, attr = random.choice(hypotheses)
+    # add the new attribute to object
+    new_obj = {'required': ['attribute', 'relation'],
+               'optional': [], 'id': sample_id}
+
+    if sample_id is not None:
+      answer = scene['objects'][sample_id][attr]
+    else:
+      answer = 'none'
+    new_obj[attr] = answer
+
+    graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)],
+                  'template': template['label'], 'mergeable': True,
+                  'focus_id': focus_id, 'focus_desc': obj_desc}
+    # remove objects if none
+    if sample_id is None:
+      graph_item['objects'] = []
+    graph_item = clean_graph_item(graph_item)
+
+    # Add attribute as argument.
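+    # ('attribute' is added only after new_obj was deep-copied into
+    # graph_item above, so it shows up in the question arguments but not in
+    # the stored dialog graph.)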
+ new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], 'relation': rel, + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + + elif 'seek-attr' in template['label']: + # placeholder for object description, see below + obj_desc = None + prev_history = graph['history'][-1] + prev_label = prev_history['template'] + implicit_attr = None + + # we need a single object in the previous round + if 'imm2' in template['label']: + # we need a seek-attr-imm/seek-attr-rel-imm in previous label + if ('seek-attr-imm' not in prev_label and + 'seek-attr-rel-imm' not in prev_label): + return [] + elif len(prev_history['objects']) == 0: + return [] + else: + focus_id = prev_history['objects'][0]['id'] + + elif 'imm' in template['label']: + # we need an immediate group in the previous round + if apply_immediate(prev_history): + focus_id = prev_history['objects'][0]['id'] + else: + return [] + + elif 'sim' in template['label']: + if 'seek-attr-imm' not in prev_label: + return[] + else: + prev_obj = prev_history['objects'][0] + focus_id = prev_obj['id'] + attr = [ii for ii in gvars.METAINFO['attributes'] if ii in prev_obj] + assert len(attr) == 1, 'Something wrong in previous history!' + implicit_attr = attr[0] + + if 'early' in template['label']: + # search through history for an object with unique attribute + attr_counts = get_known_attribute_counts(graph) + + # get attributes with just one count + single_count = [ii for ii, count in attr_counts.items() if count == 1] + # remove attributes that point to objects in the previous round + # TODO: re-think this again + obj_ids = get_unique_attribute_objects(graph, single_count) + prev_history_obj_ids = [ii['id'] for ii in prev_history['objects']] + single_count = [ii for ii in single_count if + obj_ids[ii] not in prev_history_obj_ids] + + # if there is an attribute, eliminate those options + if implicit_attr is not None: + single_count = [ii for ii in single_count if ii[0] != implicit_attr] + obj_ids = get_unique_attribute_objects(graph, single_count) + + # again rule out objects whose implicit_attr is known + single_count = [ii for ii in single_count + if implicit_attr not in graph['objects'][obj_ids[ii]]] + + if len(single_count) == 0: + return [] + + # give preference to attributes with multiple counts in scene graph + scene_counts = get_attribute_counts_for_objects(scene) + ambiguous_attrs = [ii for ii in single_count if scene_counts[ii] > 1] + if len(ambiguous_attrs) > 0: + focus_attr = random.choice(ambiguous_attrs) + else: + focus_attr = random.choice(single_count) + focus_id = get_unique_attribute_objects(graph, [focus_attr])[focus_attr] + + # unique object description + obj_desc = {'required': [focus_attr[0]], 'optional': [], + focus_attr[0]: focus_attr[1]} + + # get unknown attributes, randomly sample one + if implicit_attr is None: + unknown_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr not in graph['objects'][focus_id]] + + # TODO: select an object with some known objects + if len(unknown_attrs) == 0: + return [] + attr = random.choice(unknown_attrs) + else: + attr = implicit_attr + + # add the new attribute to object + new_obj = {'required': ['attribute'], 'optional': [], 'id': focus_id} + if 'sim' in template['label']: + new_obj['required'] = [] + new_obj[attr] = scene['objects'][focus_id][attr] + + graph_item = {'round': ques_round+1, 'objects': [copy.deepcopy(new_obj)], + 'template': template['label'], 'mergeable': True, + 'focus_id': focus_id, 
'focus_desc': obj_desc} + graph_item = clean_graph_item(graph_item) + + # add attribute as argument + new_obj['attribute'] = attr + return [{'answer': new_obj[attr], 'group_id': ques_round + 1, + 'required': [], 'optional': [], + 'objects': [new_obj, obj_desc], 'graph': graph_item}] + return [] + + +def sample_from_hypotheses(caption_hypotheses, scene, cap_templates): + """Samples from caption hypotheses given the scene and caption templates. + Args: + caption_hypotheses: List of hypotheses for objects/object pairs + scene: CLEVR image scene graph + cap_templates: List of caption templates to sample captions + Returns: + obj_groups: List of object groups and corresponding sampled captions + """ + + obj_groups = [] + + # Caption Type 1: Extreme location. + hypotheses = caption_hypotheses['extreme-loc'] + if len(hypotheses) > 0: + # extreme location hypotheses + extreme_type, focus_obj = random.choice(hypotheses) + # sample optional attributes + obj_attrs = [attr for attr in gvars.METAINFO['attributes'] + if attr in focus_obj] + focus_attr = random.choice(obj_attrs) + optional_attrs = [ii for ii in obj_attrs if ii != focus_attr] + sampled_attrs = sample_optional_tags(optional_attrs, + gvars.METAINFO['probabilities']) + + # add additional attributes + req_attrs = sampled_attrs + [focus_attr] + filter_obj = {attr: val for attr, val in focus_obj.items() + if attr in req_attrs} + filter_obj['required'] = req_attrs + filter_obj['optional'] = req_attrs + filter_obj['id'] = focus_obj['id'] + obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, + 'objects': [filter_obj]} + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['objects'][0]['%s_count' % extreme_type] = 0 + graph_item['objects'][0]['%s_exist' % extreme_type] = False + graph_item['template'] = 'extreme-%s' % extreme_type + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + + # Caption Type 2: Unique object. +# hypotheses = caption_hypotheses['unique-obj'] +# if len(hypotheses) > 0: +# # sample one at random, and create the graph item +# focus_obj, focus_attr = random.choice(hypotheses) +# # sample optional attributes +# optional_attrs = [ii for ii in gvars.METAINFO['attributes'] +# if ii != focus_attr] +# sampled_attrs = sample_optional_tags(optional_attrs, +# gvars.METAINFO['probabilities']) + +# # add additional attributes +# req_attrs = sampled_attrs + [focus_attr] +# filter_obj = {attr: val for attr, val in focus_obj.items() +# if attr in req_attrs} +# filter_obj['required'] = req_attrs +# filter_obj['optional'] = req_attrs +# filter_obj['id'] = focus_obj['id'] +# obj_group = {'required': req_attrs, 'optional': [], 'group_id': 0, +# 'objects': [filter_obj]} + +# # also create a clean graph object +# graph_item = copy.deepcopy(obj_group) +# graph_item = clean_graph_item(graph_item) +# graph_item['mergeable'] = True +# graph_item['objects'][0]['unique'] = True +# graph_item['template'] = 'unique-obj' +# obj_group['graph'] = graph_item +# obj_groups.append([obj_group]) + + # Caption Type 3: Unique attribute count based caption. + hypotheses = caption_hypotheses['count-attr'] + if len(hypotheses) > 0: + # Randomly sample one hypothesis and one template. + (attr, value), count = random.choice(hypotheses) + # Segregate counting templates. 
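+    # Each 'count-attr' hypothesis has the form ((attr, value), count),
+    # e.g. (('color', 'red'), 3) -- illustrative values.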
+ count_templates = [ii for ii in cap_templates if 'count' in ii['type']] + template = random.choice(count_templates) + obj_group = {'group_id': 0, 'count': count, attr: value, + 'optional': [], 'required': [], 'objects': []} + + # get a list of objects which are part of this collection + for ii, obj in enumerate(scene['objects']): + if obj[attr] == value: + new_obj = {'id': obj['id'], attr: value} + new_obj['required'] = [attr] + new_obj['optional'] = [] + obj_group['objects'].append(new_obj) + + if 'no' in template['label']: + # Count is not mentioned. + del obj_group['count'] + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = False + else: + # Count is mentioned. + for index, ii in enumerate(obj_group['objects']): + obj_group['objects'][index]['required'].append('count') + graph_item = copy.deepcopy(obj_group) + graph_item['mergeable'] = True + + # clean up graph item + graph_item['template'] = template['label'] + graph_item = clean_graph_item(graph_item) + obj_group['graph'] = graph_item + obj_group['use_plural'] = True + obj_groups.append([obj_group]) + + # Caption Type 4: Relation between two objects (one of them is unique). + hypotheses = caption_hypotheses['obj-relation'] + if len(hypotheses) > 0: + (obj_id1, attr1), rel, (obj_id2, attr2) = random.choice(hypotheses) + obj_group = {'group_id': 0, 'relation': rel} + + # create object dictionaries + obj1 = {'optional': [], 'required': [attr1], 'id': obj_id1, + attr1: scene['objects'][obj_id1][attr1]} + obj2 = {'optional': [], 'required': [attr2], 'id': obj_id2, + attr2: scene['objects'][obj_id2][attr2]} + obj_group['objects'] = [obj2, obj1] + + # also create a clean graph object + graph_item = copy.deepcopy(obj_group) + graph_item = clean_graph_item(graph_item) + graph_item['mergeable'] = True + graph_item['template'] = 'obj-relation' + obj_group['graph'] = graph_item + obj_groups.append([obj_group]) + return obj_groups + + +def get_known_attributes(graph): + """Fetches a list of known attributes given the scene graph. + Args: + graph: Scene graph to check unique attributes from + Returns: + known_attrs: List of known attributes from the scene graph + """ + + known_attrs = [] + for obj_id, obj_info in graph['objects'].items(): + # The attribute is unique already. + # if obj_info.get('unique', False): continue + for attr in gvars.METAINFO['attributes']: + if attr in obj_info: + known_attrs.append((attr, obj_info[attr])) + + # also go over the groups + for ii in graph['history']: + # a group of objects, with unknown count + #if 'count' not in ii: continue + for attr in gvars.METAINFO['attributes']: + if attr in ii: + known_attrs.append((attr, ii[attr])) + known_attrs = list(set(known_attrs)) + return known_attrs + + +def get_known_attribute_counts(graph): + """Fetches a count of known attributes given the scene graph. + Calls get_known_attributes method internally. + Args: + graph: Scene graph to check unique attributes from + Returns: + counts: Count of known attributes from the scene graph + """ + + known_attrs = get_known_attributes(graph) + # Go through objects and count. + counts = {ii: 0 for ii in known_attrs} + for _, obj in graph['objects'].items(): + for attr, val in known_attrs: + if obj.get(attr, None) == val: + counts[(attr, val)] += 1 + return counts + + +def filter_attributes_with_known_counts(graph, known_attrs): + """Filters attributes whose counts are known, given the scene graph. 
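+  (Iterates over a reversed copy of known_attrs so that entries can be
+  removed from the list in place while scanning.)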
+  Args:
+    graph: Scene graph from the dialog generated so far
+    known_attrs: List of known attributes from the ground truth scene graph
+  Returns:
+    known_attrs: List of attributes with unknown counts removed in place
+  """
+
+  for attr, val in known_attrs[::-1]:
+    for ii in graph['history']:
+      # Skip groups whose count is unknown.
+      if 'count' not in ii:
+        continue
+      # Count is known for this attribute; drop it.
+      if ii.get(attr, None) == val:
+        known_attrs.remove((attr, val))
+  return known_attrs
+
+
+def clean_graph_item(graph_item):
+  """Cleans up graph item (remove 'required' and 'optional' tags).
+  Args:
+    graph_item: Input graph item to be cleaned.
+  Returns:
+    clean_graph_item: Copy of the graph item after cleaning.
+  """
+
+  clean_graph_item = copy.deepcopy(graph_item)
+  if 'optional' in clean_graph_item:
+    del clean_graph_item['optional']
+  if 'required' in clean_graph_item:
+    del clean_graph_item['required']
+
+  for index, ii in enumerate(clean_graph_item['objects']):
+    if 'optional' in ii:
+      del clean_graph_item['objects'][index]['optional']
+    if 'required' in ii:
+      del clean_graph_item['objects'][index]['required']
+  return clean_graph_item
+
+
+def get_attribute_counts_for_objects(scene, objects=None):
+  """Counts attributes for a given set of objects.
+  Args:
+    scene: Scene graph for the dialog generated so far
+    objects: List of objects. Default = None selects all objects
+  Returns:
+    counts: Counts for each (attribute, value) pair over the given objects
+  """
+
+  # Initialize the dictionary.
+  counts = {}
+  for attr, vals in gvars.METAINFO['values'].items():
+    for val in vals:
+      counts[(attr, val)] = 0
+
+  # Now count for each given object.
+  if objects is None:
+    objects = scene['objects']
+  for obj in objects:
+    for attr in gvars.METAINFO['attributes']:
+      key = (attr, scene['objects'][obj['id']][attr])
+      counts[key] = counts.get(key, 0) + 1
+  return counts
+
+
+def get_unique_attribute_objects(graph, uniq_attrs):
+  """Fetches objects from given scene graph with unique attributes.
+  Args:
+    graph: Scene graph constructed from the dialog generated so far
+    uniq_attrs: List of unique (attribute, value) pairs to look up
+  Returns:
+    obj_ids: Dict mapping each unique (attribute, value) pair to its object id
+  """
+
+  obj_ids = {}
+  for obj_id, obj in graph['objects'].items():
+    for attr, val in uniq_attrs:
+      if obj.get(attr, '') == val:
+        # At this point the key should not be present.
+        assert (attr, val) not in obj_ids, 'Attributes not unique!'
+        obj_ids[(attr, val)] = obj_id
+  return obj_ids
+
+
+def sample_optional_tags(optional, sample_probs):
+  """Samples additional tags depending on given sample probabilities.
+  Args:
+    optional: List of optional tags to sample from.
+    sample_probs: Probabilities of sampling 'n' tags.
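+      Only the first two entries are used: the probabilities of sampling
+      zero or one optional tag.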
+ Returns: + sampled: Sampled tags from the optional list + """ + + sampled = [] + if len(optional) > 0: + n_sample = np.random.choice([0, 1], 1, p=sample_probs[:2])[0] + n_sample = min(n_sample, len(optional)) + sampled = random.sample(optional, n_sample) + return sampled diff --git a/executor/__init__.py b/executor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/executor/clevr_statics.py b/executor/clevr_statics.py new file mode 100644 index 0000000..b40a6a5 --- /dev/null +++ b/executor/clevr_statics.py @@ -0,0 +1,47 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +COLORS = ["blue", "brown", "cyan", "gray", "green", "purple", "red", "yellow"] +MATERIALS = ["rubber", "metal"] +SHAPES = ["cube", "cylinder", "sphere"] +SIZES = ["large", "small"] + +ATTRIBUTES_ALL = COLORS + MATERIALS + SHAPES + SIZES + +ANSWER_CANDIDATES = { + # Count questions + "count-all": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], + "count-other": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + "count-all-group": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], + "count-attribute": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], + "count-attribure-group": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"], + "count-obj-rel-imm": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + "count-obj-rel-imm2": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + "count-obj-rel-early": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + "count-obj-exclude-imm": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + "count-obj-exclude-early": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], + + # Existence questions + "exist-other": ["yes", "no"], + "exist-attribute": ["yes", "no"], + "exist-attribute-group": ["yes", "no"], + "exist-obj-rel-imm": ["yes", "no"], + "exist-obj-rel-imm2": ["yes", "no"], + "exist-obj-rel-early": ["yes", "no"], + "exist-obj-exclude-imm": ["yes", "no"], + "exist-obj-exclude-early": ["yes", "no"], + + # Seek questions + "seek-attr-imm": ATTRIBUTES_ALL, + "seek-attr-imm2": ATTRIBUTES_ALL, + "seek-attr-early": ATTRIBUTES_ALL, + "seek-attr-sim-early": ATTRIBUTES_ALL, + "seek-attr-rel-imm": ATTRIBUTES_ALL, + "seek-attr-rel-early": ATTRIBUTES_ALL, +} + + diff --git a/executor/minecraft_statics.py b/executor/minecraft_statics.py new file mode 100644 index 0000000..9d2b9be --- /dev/null +++ b/executor/minecraft_statics.py @@ -0,0 +1,44 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +CLASSES = ["pig", "cow", "sheep", "chicken", "wolf", "horse", "villager", "treeA", "treeB", "armorstand", "boat", "minecart"] +DIRECTIONS = ["facing_forward", "facing_backward", "facing_right", "facing_left"] +NATURES = ["animal", "human", "plant", "inanimated_object"] + +ATTRIBUTES_ALL = CLASSES + DIRECTIONS + NATURES + +ANSWER_CANDIDATES = { + # Count questions + "count-all": ["0", "1", "2", "3", "4", "5", "6"], + "count-other": ["0", "1", "2", "3", "4", "5", "6"], + "count-all-group": ["0", "1", "2", "3", "4", "5", "6"], + "count-attribute": ["0", "1", "2", "3", "4", "5", "6"], + "count-attribure-group": ["0", "1", "2", "3", "4", "5", "6"], + "count-obj-rel-imm": ["0", "1", "2", "3", "4", "5", "6"], + "count-obj-rel-imm2": ["0", "1", "2", "3", "4", "5", "6"], + "count-obj-rel-early": ["0", "1", "2", "3", "4", "5", "6"], + "count-obj-exclude-imm": ["0", "1", "2", "3", "4", "5", "6"], + "count-obj-exclude-early": ["0", 
"1", "2", "3", "4", "5", "6"], + + # Existence questions + "exist-other": ["yes", "no"], + "exist-attribute": ["yes", "no"], + "exist-attribute-group": ["yes", "no"], + "exist-obj-rel-imm": ["yes", "no"], + "exist-obj-rel-imm2": ["yes", "no"], + "exist-obj-rel-early": ["yes", "no"], + "exist-obj-exclude-imm": ["yes", "no"], + "exist-obj-exclude-early": ["yes", "no"], + + # Seek questions + "seek-attr-imm": ATTRIBUTES_ALL, + "seek-attr-imm2": ATTRIBUTES_ALL, + "seek-attr-early": ATTRIBUTES_ALL, + "seek-attr-sim-early": ATTRIBUTES_ALL, + "seek-attr-rel-imm": ATTRIBUTES_ALL, + "seek-attr-rel-early": ATTRIBUTES_ALL, +} diff --git a/executor/symbolic_executor.py b/executor/symbolic_executor.py new file mode 100644 index 0000000..21ee470 --- /dev/null +++ b/executor/symbolic_executor.py @@ -0,0 +1,1678 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +import json +import numpy as np +from copy import deepcopy + +from executor.clevr_statics import COLORS, MATERIALS, SHAPES, SIZES +from executor.clevr_statics import ANSWER_CANDIDATES as ANSWER_CANDIDATES_CLEVR +from executor.clevr_statics import ATTRIBUTES_ALL as ATTRIBUTES_ALL_CLEVR + +from executor.minecraft_statics import DIRECTIONS, NATURES, CLASSES +from executor.minecraft_statics import ANSWER_CANDIDATES as ANSWER_CANDIDATES_MINECRAFT +from executor.minecraft_statics import ATTRIBUTES_ALL as ATTRIBUTES_ALL_MINECRAFT + +from utils import load_clevr_scenes, load_minecraft_scenes + + +class SymbolicExecutorClevr(object): + """Symbolic executor for clevr-dialog + """ + def __init__(self, scenesPath): + super(SymbolicExecutorClevr, self).__init__() + self.functions = {} + self.registerFunctions() + self.uniqueObjFlag = False + self.colors = COLORS + self.materials = MATERIALS + self.shapes = SHAPES + self.sizes = SIZES + self.answer_candidates = ANSWER_CANDIDATES_CLEVR + self.attribute_all = ATTRIBUTES_ALL_CLEVR + self.scenes = load_clevr_scenes(scenesPath) + + def reset(self, sceneIdx): + """Resets the scene + + Args: + sceneIdx: The index of the new scene + """ + self.scene = self.scenes[sceneIdx] + for _obj in self.scene: + _obj["identifier"] = None + # store previous objects in a list to better answer + # xxx-imm, xxx-imm2, xxx-group and xxx-early questions. + self.objs = [] + self.groups = [] + self.visited = [] + self.currentObj = None + self.currentGrp = [] + self.uniqueObjFlag = False + + def registerFunctions(self): + """Registers the available functions of the executor. 
+ """ + # Captions - extreme location + self.functions["extreme-right"] = self.extremeRight + self.functions["extreme-left"] = self.extremeLeft + self.functions["extreme-behind"] = self.extremeBehind + self.functions["extreme-front"] = self.extremeFront + self.functions["extreme-center"] = self.extremeCenter + + # Captions - multiple objects + self.functions["count-att"] = self.countAttributeCaption + + # Captions - object relations + self.functions["obj-relation"] = self.objRelation + + # Captions - unique object + self.functions["unique-obj"] = self.uniqueObject + + # Questions - Count + self.functions["count-all"] = self.countAll + self.functions["count-other"] = self.countOther + self.functions["count-all-group"] = self.countAllGroup + self.functions["count-attribute"] = self.countAttribute + self.functions["count-attribute-group"] = self.countAttributeGroup + self.functions["count-obj-rel-imm"] = self.countObjRelImm + self.functions["count-obj-rel-imm2"] = self.countObjRelImm2 + self.functions["count-obj-rel-early"] = self.countObjRelEarly + self.functions["count-obj-exclude-imm"] = self.countObjExcludeImm + self.functions["count-obj-exclude-early"] = self.countObjExcludeEarly + + # Questions - Exist + self.functions["exist-other"] = self.existOther + self.functions["exist-attribute"] = self.existAttribute + self.functions["exist-attribute-group"] = self.existAttributeGroup + self.functions["exist-obj-rel-imm"] = self.existObjRelImm + self.functions["exist-obj-rel-imm2"] = self.existObjRelImm + self.functions["exist-obj-rel-early"] = self.existObjRelEarly + self.functions["exist-obj-exclude-imm"] = self.existObjExcludeImm + self.functions["exist-obj-exclude-early"] = self.existObjExcludeEarly + + # Questions - Seek + self.functions["seek-attr-imm"] = self.seekAttrImm + self.functions["seek-attr-imm2"] = self.seekAttrImm + self.functions["seek-attr-early"] = self.seekAttributeEarly + self.functions["seek-attr-rel-imm"] = self.seekAttributeRelImm + self.functions["seek-attr-rel-early"] = self.seekAttributeRelEarly + + + def getAttributeType(self, attribute): + assert attribute in self.attribute_all, "The attribute {} is unkown".format( + attribute) + if attribute in self.colors: + return "color" + elif attribute in self.materials: + return "material" + elif attribute in self.shapes: + return "shape" + elif attribute in self.sizes: + return "size" + + def execute(self, functionLabel, functionArgs): + assert functionLabel in self.functions, "{} is not a valid function".format( + functionLabel) + function = self.functions[functionLabel] + answer = function(*functionArgs) + return answer + + def updateCurrentObj(self, obj): + self.currentObj = obj + objsCopy = deepcopy(self.objs) + for i, _obj in enumerate(objsCopy): + if _obj["id"] == obj["id"]: + del self.objs[i] + # Current obj is always kept at the end of the visited objs + self.objs.append(obj) + + def updateVisited(self, obj): + if len(self.visited) == 0: + self.visited.append(obj) + else: + newObjFlag = True + for _obj in self.visited: + if _obj["id"] == obj["id"]: + newObjFlag = False + break + if newObjFlag: + self.visited.append(obj) + + def getOther(self): + others = [] + if len(self.visited) < len(self.scene): + for _obj in self.scene: + notExisting = True + for __obj in self.visited: + if __obj["id"] == _obj["id"]: + notExisting = False + break + if notExisting: + others.append(_obj) + return others + + def updateIdentifier(self, obj, attribute): + if obj["identifier"] is None: + obj["identifier"] = attribute + else: + 
identifiers = obj["identifier"].split("-") + if attribute not in identifiers: + identifiers.append(attribute) + obj["identifier"] = "-".join(identifiers) + + # Captions + def extremeRight(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + leftToRight = deepcopy(self.scene) + leftToRight.sort(key=lambda o: o["position"][0]) + extremeRightObj = leftToRight[-1] + for attributeType, attribute in zip(attributeTypes, attributes): + assert extremeRightObj[attributeType] == attribute + self.updateIdentifier(extremeRightObj, attribute) + + self.updateCurrentObj(extremeRightObj) + self.updateVisited(extremeRightObj) + del leftToRight + + def extremeLeft(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + leftToRight = deepcopy(self.scene) + leftToRight.sort(key=lambda o: o["position"][0]) + extremeLeftObj = leftToRight[0] + for attributeType, attribute in zip(attributeTypes, attributes): + assert extremeLeftObj[attributeType] == attribute + self.updateIdentifier(extremeLeftObj, attribute) + + self.updateCurrentObj(extremeLeftObj) + self.updateVisited(extremeLeftObj) + del leftToRight + + def extremeFront(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + backToFront = deepcopy(self.scene) + backToFront.sort(key=lambda o: o["position"][1]) + extremeFrontObj = backToFront[-1] + for attributeType, attribute in zip(attributeTypes, attributes): + assert extremeFrontObj[attributeType] == attribute + self.updateIdentifier(extremeFrontObj, attribute) + + self.updateCurrentObj(extremeFrontObj) + self.updateVisited(extremeFrontObj) + del backToFront + + def extremeBehind(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + backToFront = deepcopy(self.scene) + backToFront.sort(key=lambda o: o["position"][1]) + extremeBehindObj = backToFront[0] + for attributeType, attribute in zip(attributeTypes, attributes): + assert extremeBehindObj[attributeType] == attribute + self.updateIdentifier(extremeBehindObj, attribute) + + self.updateCurrentObj(extremeBehindObj) + self.updateVisited(extremeBehindObj) + del backToFront + + def extremeCenter(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + numObjs = len(self.scene) + + frontToBack = deepcopy(self.scene) + frontToBack.sort(key=lambda o: o["position"][1], reverse=True) + + rightToLeft = deepcopy(self.scene) + rightToLeft.sort(key=lambda o: o["position"][0], reverse=True) + + prelimenaryCandidates = [] + + for i, objFrontToBack in enumerate(frontToBack): + numObjsInFront = i + numObjsBehind = len(rightToLeft) - i - 1 + if numObjsInFront <= numObjs / 2 and numObjsBehind <= numObjs / 2: + prelimenaryCandidates.append(objFrontToBack) + foundCenter = False + for _obj in prelimenaryCandidates: + for i, objRightToLeft in enumerate(rightToLeft): + if _obj["id"] == objRightToLeft["id"]: + numObjsToTheRight = i + numObjsToTheLeft = len(frontToBack) - i - 1 + if numObjsToTheRight <= numObjs / 2 and numObjsToTheLeft <= numObjs / 2: + foundCenter = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + foundCenter = False + break + break + if foundCenter: + break + # assert foundCenter, 
"[ERROR] Failed to find center object ..." + for attributeType, attribute in zip(attributeTypes, attributes): + # assert _obj[attributeType] == attribute + self.updateIdentifier(_obj, attribute) + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + del rightToLeft, frontToBack + + def countAttributeCaption(self, attribute): + attributeType = self.getAttributeType(attribute) + objs = [] + for _obj in self.scene: + if _obj[attributeType] == attribute: + objs.append(deepcopy(_obj)) + for _obj in objs: + self.updateIdentifier(_obj, attribute) + # self.updateCurrentObj(_obj) + # update the current group + self.currentGrp = objs + + # update the visited objects list + for _obj in objs: + self.updateVisited(_obj) + + def getAnchorAttribute(self, attribute_1, attribute_2, scene): + # The anchor object is unique. If we filter the object list + # based on the attribute anchor, we must find only one object. + filterAttribute_1 = self.filterAttribute(scene, attribute_1) + if len(filterAttribute_1) == 1: + return attribute_1 + else: + return attribute_2 + + def objRelation(self, attribute, attributeAnchor, relation): + assert relation in ["left", "right", "front", "behind"] + # find the anchor object + if attributeAnchor != self.getAnchorAttribute(attribute, attributeAnchor, self.scene): + temp = deepcopy(attribute) + attribute = deepcopy(attributeAnchor) + attributeAnchor = temp + if relation == "left": + relation = "right" + elif relation == "right": + relation = "left" + elif relation == "behind": + relation = "front" + elif relation == "front": + relation = "behind" + + # Order the objects in the scene w.r.t. the relation + sceneCopy = deepcopy(self.scene) + + if relation in ["left", "right"]: + sceneCopy.sort(key=lambda o: o["position"][0]) + else: + sceneCopy.sort(key=lambda o: o["position"][1]) + + # get the anchor object + attributeTypeAnchor = self.getAttributeType(attributeAnchor) + for i, _obj in enumerate(sceneCopy): + if _obj[attributeTypeAnchor] == attributeAnchor: + break + # save the anchor object before the main object + anchorObj = _obj + self.updateIdentifier(anchorObj, attributeAnchor) + self.updateCurrentObj(anchorObj) + self.updateVisited(anchorObj) + + if relation in ["left", "behind"]: + sceneCopy = list(reversed(sceneCopy[:i])) + else: + sceneCopy = sceneCopy[i+1:] + + attributeType = self.getAttributeType(attribute) + # get the main object + for _obj in sceneCopy: + # and not equalDicts(_obj, anchorObj): + if _obj[attributeType] == attribute: + break + self.updateIdentifier(_obj, attribute) + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + del sceneCopy + + def uniqueObject(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + for _obj in self.scene: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + + if found: + break + for att in attributes: + self.updateIdentifier(_obj, att) + + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + + # Questions + def filterOutObj(self, scene, obj): + sceneCopy = deepcopy(scene) + for i, _obj in enumerate(scene): + if obj["id"] == _obj["id"]: + break + del sceneCopy[i] + return sceneCopy + + def filterAttribute(self, scene, attribute): + attributeType = self.getAttributeType(attribute) + filtered = [] + if len(scene) == 0: + return filtered + + for _obj in scene: + if _obj[attributeType] == attribute: + filtered.append(_obj) + return filtered + 
+    def excludeAttribute(self, scene, obj, attributeType):
+        filtered = []
+        if len(scene) == 0:
+            return filtered
+        for _obj in scene:
+            if _obj["id"] != obj["id"] and obj[attributeType] == _obj[attributeType]:
+                filtered.append(_obj)
+
+        # Update the visited objects list
+        if len(filtered) > 0:
+            for _obj in filtered:
+                self.updateVisited(_obj)
+        return filtered
+
+    def filterLeft(self, scene, obj):
+        filtered = []
+        if len(scene) == 0:
+            return filtered
+
+        for _obj in scene:
+            # If the x-coordinate of _obj is smaller than that of obj,
+            # then _obj is located to the left of obj
+            if _obj["position"][0] < obj["position"][0] and _obj["id"] != obj["id"]:
+                filtered.append(_obj)
+        return filtered
+
+    def filterRight(self, scene, obj):
+        filtered = []
+        if len(scene) == 0:
+            return filtered
+
+        for _obj in scene:
+            # If the x-coordinate of _obj is bigger than that of obj,
+            # then _obj is located to the right of obj
+            if _obj["position"][0] > obj["position"][0] and _obj["id"] != obj["id"]:
+                filtered.append(_obj)
+        return filtered
+
+    def filterFront(self, scene, obj):
+        filtered = []
+        if len(scene) == 0:
+            return filtered
+
+        for _obj in scene:
+            # If the y-coordinate of _obj is bigger than that of obj,
+            # then _obj is located in front of obj
+            if _obj["position"][1] > obj["position"][1] and _obj["id"] != obj["id"]:
+                filtered.append(_obj)
+        return filtered
+
+    def filterBehind(self, scene, obj):
+        # assert type(scene) == list, "Expected type list, got {} instead".format(type(scene))
+        filtered = []
+        if len(scene) == 0:
+            return filtered
+
+        for _obj in scene:
+            # If the y-coordinate of _obj is smaller than that of obj,
+            # then _obj is located behind obj
+            if _obj["position"][1] < obj["position"][1] and _obj["id"] != obj["id"]:
+                filtered.append(_obj)
+        return filtered
+
+    def filterPosition(self, scene, obj, pos):
+        # assert type(scene) == list, "Expected type list, got {} instead".format(type(scene))
+        assert pos in ["left", "right", "front", "behind"]
+        if pos == "left":
+            filtered = self.filterLeft(scene, obj)
+        elif pos == "right":
+            filtered = self.filterRight(scene, obj)
+        elif pos == "front":
+            filtered = self.filterFront(scene, obj)
+        elif pos == "behind":
+            filtered = self.filterBehind(scene, obj)
+
+        # Update the visited objects list
+        # for _obj in filtered:
+        #     self.updateVisited(_obj)
+        return filtered
+
+    ###########################################################################
+    #                           Counting questions                           #
+    ###########################################################################
+    def countAll(self):
+        self.currentGrp = deepcopy(self.scene)
+        self.groups.append(deepcopy(self.scene))
+        return len(self.scene)
+
+    def countOther(self):
+        others = self.getOther()
+        if len(others) > 0:
+            self.currentGrp = others
+            self.groups.append(others)
+            if len(others) == 1:
+                obj = others[0]
+                for _obj in self.objs:
+                    if _obj["id"] == obj["id"]:
+                        obj = _obj
+                        break
+                self.updateCurrentObj(obj)
+                self.updateVisited(obj)
+        return len(others)
+
+    def countAllGroup(self):
+        return len(self.currentGrp)
+
+    def countAttribute(self, attribute, updateCurrentObj=True):
+        filtered = self.filterAttribute(self.scene, attribute)
+        if len(filtered) == 0:
+            return 0
+        # Update the visited objects list
+        for _obj in filtered:
+            self.updateVisited(_obj)
+        if len(filtered) == 1:
+            obj = filtered[0]
+            new = True
+            for _obj in self.objs:
+                if _obj["id"] == obj["id"]:
+                    obj = _obj
+                    new =
False + break + self.updateIdentifier(obj, attribute) + self.updateVisited(obj) + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.groups.append(filtered) + self.currentGrp = filtered + return len(filtered) + + def countAttributeGroup(self, attribute, updateCurrentObj=True): + filtered = self.filterAttribute(self.currentGrp, attribute) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + self.updateIdentifier(obj, attribute) + self.updateVisited(obj) + + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.groups.append(filtered) + self.currentGrp = filtered + return len(filtered) + + def countObjRelImm(self, pos, updateCurrentObj=True): + filtered = self.filterPosition(self.scene, self.currentObj, pos) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + + self.currentGrp = filtered + self.groups.append(filtered) + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + self.uniqueObjFlag = True + else: + if new: + self.objs.append(obj) + return len(filtered) + + def countObjRelImm2(self, pos): + if self.uniqueObjFlag: + # del self.objs[-1] + self.updateCurrentObj(self.objs[-2]) + self.uniqueObjFlag = False + return self.countObjRelImm(pos) + + def countObjRelEarly(self, pos, earlyObjAttribute, updateCurrentObj=True): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + filtered = self.filterPosition(self.scene, objEarly, pos) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + else: + self.updateCurrentObj(objEarly) + + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + def countObjExcludeImm(self, attributeType, updateCurrentObj=True): + filtered = self.excludeAttribute( + self.scene, self.currentObj, attributeType) + if len(filtered) == 0: + return 0 + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + def countObjExcludeEarly(self, attributeType, earlyObjAttribute, updateCurrentObj=True): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.excludeAttribute(self.scene, objEarly, attributeType) + if len(filtered) == 0: + return 0 + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == 
obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + else: + self.updateCurrentObj(objEarly) + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + ########################################################################### + # Existence questions # + ########################################################################### + + def existOther(self): + others = self.getOther() + numOther = len(others) + if numOther > 0: + self.currentGrp = others + self.groups.append(others) + for _obj in others: + self.updateVisited(_obj) + return "yes" if numOther > 0 else "no" + + def existAttribute(self, attribute): + filtered = self.filterAttribute(self.scene, attribute) + numAttribute = len(filtered) + if numAttribute == 0: + return "no" + + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + self.updateIdentifier(_obj, attribute) + new = False + break + if new: + self.updateIdentifier(obj, attribute) + self.objs.append(obj) + # self.updateCurrentObj(obj) + + self.currentGrp = filtered + self.groups.append(filtered) + return "yes" + + def existAttributeGroup(self, attribute): + numAttributeGrp = self.countAttributeGroup( + attribute, updateCurrentObj=False) + return "yes" if numAttributeGrp > 0 else "no" + + def existObjRelImm(self, pos): + numObjs = self.countObjRelImm(pos, updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjRelEarly(self, pos, earlyObjAttribute): + numObjs = self.countObjRelEarly( + pos, earlyObjAttribute, updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjExcludeImm(self, attributeType): + numObjs = self.countObjExcludeImm( + attributeType, updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjExcludeEarly(self, attributeType, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.excludeAttribute(self.scene, objEarly, attributeType) + numObjs = len(filtered) + if numObjs == 0: + return "no" + self.currentGrp = filtered + self.groups.append(filtered) + return "yes" + + ########################################################################### + # Seek questions # + ########################################################################### + + def seekAttrImm(self, attributeType): + assert attributeType in self.currentObj, "Attributre <{}> is not valid" + self.updateIdentifier(self.currentObj, self.currentObj[attributeType]) + return self.currentObj[attributeType] + + def seekAttributeEarly(self, attributeType, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + self.updateIdentifier(objEarly, objEarly[attributeType]) + self.updateCurrentObj(objEarly) + self.updateVisited(objEarly) + return objEarly[attributeType] + + def seekAttributeRelImm(self, attributeType, pos): + filtered = self.filterPosition(self.scene, self.currentObj, pos) + if len(filtered) == 0: + return "none" + else: + # Get the closest object to slef.obj + if pos == "left": + filtered.sort(key=lambda x: x["position"][0]) + obj 
= filtered[-1] + elif pos == "right": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[0] + elif pos == "front": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[0] + elif pos == "behind": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[-1] + + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj["identifier"] = _obj["identifier"] + break + self.updateIdentifier(obj, obj[attributeType]) + self.updateCurrentObj(obj) + self.updateVisited(obj) + return obj[attributeType] + + def seekAttributeRelEarly(self, attributeType, pos, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.filterPosition(self.scene, objEarly, pos) + if len(filtered) == 0: + return "none" + else: + # Get the closest object to slef.obj + if pos == "left": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[-1] + elif pos == "right": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[0] + elif pos == "front": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[0] + elif pos == "behind": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[-1] + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj["identifier"] = _obj["identifier"] + break + self.updateIdentifier(obj, obj[attributeType]) + self.updateCurrentObj(obj) + self.updateVisited(obj) + return obj[attributeType] + + +class SymbolicExecutorMinecraft(object): + """Symbolic executor for minecraft-dialog + """ + def __init__(self, scenesPath): + super(SymbolicExecutorMinecraft, self).__init__() + self.functions = {} + self.registerFunctions() + self.uniqueObjFlag = False + self.classes = CLASSES + self.natures = NATURES + self.directions = DIRECTIONS + self.answer_candidates = ANSWER_CANDIDATES_MINECRAFT + self.attribute_all = ATTRIBUTES_ALL_MINECRAFT + self.scenes = load_minecraft_scenes(scenesPath) + + def reset(self, sceneIdx): + self.scene = self.scenes[sceneIdx] + for _obj in self.scene: + _obj["identifier"] = None + # store previous objects in a list to better answer + # xxx-imm, xxx-imm2, xxx-group and xxx-early questions. 
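+        # self.objs keeps every object referenced so far (most recent last),
+        # self.groups keeps the object sets returned by group questions, and
+        # self.visited tracks all mentioned objects so that "other" questions
+        # can be answered via getOther().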
+ self.objs = [] + self.groups = [] + self.visited = [] + self.currentObj = None + self.currentGrp = [] + self.uniqueObjFlag = False + + def registerFunctions(self): + # Captions - extreme location + self.functions["extreme-right"] = self.extremeRight + self.functions["extreme-left"] = self.extremeLeft + self.functions["extreme-behind"] = self.extremeBehind + self.functions["extreme-front"] = self.extremeFront + self.functions["extreme-center"] = self.extremeCenter + + # Captions - multiple objects + self.functions["count-att"] = self.countAttributeCaption + + # Captions - object relations + self.functions["obj-relation"] = self.objRelation + + # Captions - unique object + self.functions["unique-obj"] = self.uniqueObject + + # Questions - Count + self.functions["count-all"] = self.countAll + self.functions["count-other"] = self.countOther + self.functions["count-all-group"] = self.countAllGroup + self.functions["count-attribute"] = self.countAttribute + self.functions["count-attribute-group"] = self.countAttributeGroup + self.functions["count-obj-rel-imm"] = self.countObjRelImm + self.functions["count-obj-rel-imm2"] = self.countObjRelImm2 + self.functions["count-obj-rel-early"] = self.countObjRelEarly + self.functions["count-obj-exclude-imm"] = self.countObjExcludeImm + self.functions["count-obj-exclude-early"] = self.countObjExcludeEarly + + # Questions - Exist + self.functions["exist-other"] = self.existOther + self.functions["exist-attribute"] = self.existAttribute + self.functions["exist-attribute-group"] = self.existAttributeGroup + self.functions["exist-obj-rel-imm"] = self.existObjRelImm + self.functions["exist-obj-rel-imm2"] = self.existObjRelImm + self.functions["exist-obj-rel-early"] = self.existObjRelEarly + self.functions["exist-obj-exclude-imm"] = self.existObjExcludeImm + self.functions["exist-obj-exclude-early"] = self.existObjExcludeEarly + + # Questions - Seek + self.functions["seek-attr-imm"] = self.seekAttrImm + self.functions["seek-attr-imm2"] = self.seekAttrImm + self.functions["seek-attr-early"] = self.seekAttributeEarly + self.functions["seek-attr-rel-imm"] = self.seekAttributeRelImm + self.functions["seek-attr-rel-early"] = self.seekAttributeRelEarly + + def getAttributeType(self, attribute): + assert attribute in self.attribute_all, "The attribute {} is unkown".format( + attribute) + if attribute in self.classes: + return "class" + elif attribute in self.directions: + return "direction" + elif attribute in self.natures: + return "nature" + + def execute(self, functionLabel, functionArgs): + assert functionLabel in self.functions, "{} is not a valid function".format( + functionLabel) + function = self.functions[functionLabel] + answer = function(*functionArgs) + return answer + + def updateCurrentObj(self, obj): + self.currentObj = obj + objsCopy = deepcopy(self.objs) + for i, _obj in enumerate(objsCopy): + if _obj["id"] == obj["id"]: + del self.objs[i] + # Current obj is always kept at the end of the visited objs + self.objs.append(obj) + + def updateVisited(self, obj): + if len(self.visited) == 0: + self.visited.append(obj) + else: + newObjFlag = True + for _obj in self.visited: + if _obj["id"] == obj["id"]: + newObjFlag = False + break + if newObjFlag: + self.visited.append(obj) + + def getOther(self): + others = [] + if len(self.visited) < len(self.scene): + for _obj in self.scene: + notExisting = True + for __obj in self.visited: + if __obj["id"] == _obj["id"]: + notExisting = False + break + if notExisting: + others.append(_obj) + return others + + def 
updateIdentifier(self, obj, attribute): + if obj["identifier"] is None: + obj["identifier"] = attribute + else: + identifiers = obj["identifier"].split("-") + if attribute not in identifiers: + identifiers.append(attribute) + obj["identifier"] = "-".join(identifiers) + + # Captions + def extremeRight(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + rightToLeft = deepcopy(self.scene) + rightToLeft.sort(key=lambda o: o["position"][0], reverse=True) + + # Some objects in the minecraft dataset share the same coordinate + # values leading to nonuniqueness in init. the scene. To reduce the + # error risk, we choose the extreme obj with the correct attribute + for _obj in rightToLeft: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + if found: + break + extremeRightObj = _obj + assert extremeRightObj["position"][0] == rightToLeft[0]["position"][0] + for att in attributes: + self.updateIdentifier(extremeRightObj, att) + + self.updateCurrentObj(extremeRightObj) + self.updateVisited(extremeRightObj) + del rightToLeft + + def extremeLeft(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + leftToRight = deepcopy(self.scene) + leftToRight.sort(key=lambda o: o["position"][0]) + + # Some objects in the minecraft dataset share the same coordinate + # values leading to nonuniqueness in init. the scene. To reduce the + # error risk, we choose the extreme obj with the correct attribute + for _obj in leftToRight: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + if found: + break + extremeLeftObj = _obj + assert extremeLeftObj["position"][0] == leftToRight[0]["position"][0] + for att in attributes: + self.updateIdentifier(extremeLeftObj, att) + + self.updateCurrentObj(extremeLeftObj) + self.updateVisited(extremeLeftObj) + del leftToRight + + def extremeFront(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + frontToBack = deepcopy(self.scene) + frontToBack.sort(key=lambda o: o["position"][1]) + + # Some objects in the minecraft dataset share the same coordinate + # values leading to nonuniqueness in init. the scene. To reduce the + # error risk, we choose the extreme obj with the correct attribute + for _obj in frontToBack: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + if found: + break + extremeFrontObj = _obj + assert extremeFrontObj["position"][1] == frontToBack[0]["position"][1] + for att in attributes: + self.updateIdentifier(extremeFrontObj, att) + + self.updateCurrentObj(extremeFrontObj) + self.updateVisited(extremeFrontObj) + del frontToBack + + def extremeBehind(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + backToFront = deepcopy(self.scene) + backToFront.sort(key=lambda o: o["position"][1], reverse=True) + + # Some objects in the minecraft dataset share the same coordinate + # values leading to nonuniqueness in init. the scene. 
To reduce the + # error risk, we choose the extreme obj with the correct attribute + for _obj in backToFront: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + if found: + break + extremeRearObj = _obj + assert extremeRearObj["position"][1] == backToFront[0]["position"][1] + for att in attributes: + self.updateIdentifier(extremeRearObj, att) + + self.updateCurrentObj(extremeRearObj) + self.updateVisited(extremeRearObj) + del backToFront + + def extremeCenter(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + numObjs = len(self.scene) + + frontToBack = deepcopy(self.scene) + frontToBack.sort(key=lambda o: o["position"][1]) + + rightToLeft = deepcopy(self.scene) + rightToLeft.sort(key=lambda o: o["position"][0], reverse=True) + + prelimenaryCandidates = [] + + for i, objFrontToBack in enumerate(frontToBack): + numObjsInFront = i + numObjsBehind = len(rightToLeft) - i - 1 + if numObjsInFront <= numObjs / 2 and numObjsBehind <= numObjs / 2: + prelimenaryCandidates.append(objFrontToBack) + foundCenter = False + for _obj in prelimenaryCandidates: + for i, objRightToLeft in enumerate(rightToLeft): + if _obj["id"] == objRightToLeft["id"]: + numObjsToTheRight = i + numObjsToTheLeft = len(frontToBack) - i - 1 + if numObjsToTheRight <= numObjs / 2 and numObjsToTheLeft <= numObjs / 2: + foundCenter = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + foundCenter = False + break + break + if foundCenter: + break + for attributeType, attribute in zip(attributeTypes, attributes): + self.updateIdentifier(_obj, attribute) + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + del rightToLeft, frontToBack + + def countAttributeCaption(self, attribute): + attributeType = self.getAttributeType(attribute) + objs = [] + for _obj in self.scene: + if _obj[attributeType] == attribute: + objs.append(deepcopy(_obj)) + for _obj in objs: + self.updateIdentifier(_obj, attribute) + # update the current group + self.currentGrp = objs + + # update the visited objects list + for _obj in objs: + self.updateVisited(_obj) + + def getAnchorAttribute(self, attribute_1, attribute_2, scene): + # The anchor object is unique. If we filter the object list + # based on the attribute anchor, we must find only one object. + filterAttribute_1 = self.filterAttribute(scene, attribute_1) + if len(filterAttribute_1) == 1: + return attribute_1 + else: + return attribute_2 + + def objRelation(self, attribute, attributeAnchor, relation): + assert relation in ["left", "right", "front", "behind"] + # find the anchor object + if attributeAnchor != self.getAnchorAttribute(attribute, attributeAnchor, self.scene): + temp = deepcopy(attribute) + attribute = deepcopy(attributeAnchor) + attributeAnchor = temp + if relation == "left": + relation = "right" + elif relation == "right": + relation = "left" + elif relation == "behind": + relation = "front" + elif relation == "front": + relation = "behind" + + # Order the objects in the scene w.r.t. 
the relation + sceneCopy = deepcopy(self.scene) + + if relation in ["left", "right"]: + sceneCopy.sort(key=lambda o: o["position"][0]) + else: + sceneCopy.sort(key=lambda o: o["position"][1]) + + # get the anchor object + attributeTypeAnchor = self.getAttributeType(attributeAnchor) + for i, _obj in enumerate(sceneCopy): + if _obj[attributeTypeAnchor] == attributeAnchor: + break + # save the anchor object before the main object + anchorObj = _obj + self.updateIdentifier(anchorObj, attributeAnchor) + self.updateCurrentObj(anchorObj) + self.updateVisited(anchorObj) + + if relation in ["left", "front"]: + sceneCopy = list(reversed(sceneCopy[:i])) + else: + sceneCopy = sceneCopy[i+1:] + + attributeType = self.getAttributeType(attribute) + # get the main object + for _obj in sceneCopy: + # and not equalDicts(_obj, anchorObj): + if _obj[attributeType] == attribute: + break + self.updateIdentifier(_obj, attribute) + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + del sceneCopy + + def uniqueObject(self, *attributes): + attributes = list(attributes) + attributeTypes = list( + map(lambda att: self.getAttributeType(att), attributes)) + + for _obj in self.scene: + found = True + for attributeType, attribute in zip(attributeTypes, attributes): + if _obj[attributeType] != attribute: + found = False + break + + if found: + break + for att in attributes: + self.updateIdentifier(_obj, att) + + self.updateCurrentObj(_obj) + self.updateVisited(_obj) + + # Questions + def filterOutObj(self, scene, obj): + sceneCopy = deepcopy(scene) + for i, _obj in enumerate(scene): + if obj["id"] == _obj["id"]: + break + del sceneCopy[i] + return sceneCopy + + def filterAttribute(self, scene, attribute): + attributeType = self.getAttributeType(attribute) + filtered = [] + if len(scene) == 0: + return filtered + + for _obj in scene: + if _obj[attributeType] == attribute: + filtered.append(_obj) + return filtered + + def excludeAttribute(self, scene, obj, attributeType): + filtered = [] + if len(scene) == 0: + return filtered + for _obj in scene: + if _obj["id"] != obj["id"] and obj[attributeType] == _obj[attributeType]: + filtered.append(_obj) + + # Update the visited objects list + if len(filtered) > 0: + for _obj in filtered: + self.updateVisited(_obj) + return filtered + + def filterLeft(self, scene, obj): + filtered = [] + if len(scene) == 0: + return filtered + + for _obj in self.scene: + # if the x-coordinate of _obj is smaller than the x-coordinate of slef.currentObj, + # then _obj is located to the left of self.currentObj + if _obj["position"][0] < obj["position"][0] and _obj["id"] != obj["id"]: + filtered.append(_obj) + return filtered + + def filterRight(self, scene, obj): + filtered = [] + for _obj in self.scene: + # if the x-coordinate of _obj is bigger than the x-coordinate of slef.currentObj, + # then _obj is located to the right of self.currentObj + if _obj["position"][0] > obj["position"][0] and _obj["id"] != obj["id"]: + filtered.append(_obj) + return filtered + + def filterFront(self, scene, obj): + filtered = [] + if len(scene) == 0: + return filtered + + for _obj in self.scene: + # if the y-coordinate of _obj is smaller than the y-coordinate of slef.currentObj, + # then _obj is located in front of self.currentObj + if _obj["position"][1] < obj["position"][1] and _obj["id"] != obj["id"]: + filtered.append(_obj) + return filtered + + def filterBehind(self, scene, obj): + # assert type(scene) == list, "Excpected type list got {} instead".format(type(scene)) + filtered = [] + if len(scene) == 0: + 
return filtered + + for _obj in scene: + # if the y-coordinate of _obj is bigger than the y-coordinate of slef.currentObj, + # then _obj is located behind self.currentObj + if _obj["position"][1] > obj["position"][1] and _obj["id"] != obj["id"]: + filtered.append(_obj) + return filtered + + def filterPosition(self, scene, obj, pos): + # assert type(scene) == list, "Excpected type list got {} instead".format(type(scene)) + assert pos in ["left", "right", "front", "behind"] + if pos == "left": + filtered = self.filterLeft(scene, obj) + elif pos == "right": + filtered = self.filterRight(scene, obj) + elif pos == "front": + filtered = self.filterFront(scene, obj) + elif pos == "behind": + filtered = self.filterBehind(scene, obj) + + return filtered + + ########################################################################### + # Counting questions # + ########################################################################### + def countAll(self): + self.currentGrp = deepcopy(self.scene) + self.groups.append(deepcopy(self.scene)) + return len(self.scene) + + def countOther(self): + others = self.getOther() + if len(others) > 0: + self.currentGrp = others + self.groups.append(others) + if len(others) == 1: + obj = others[0] + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + break + self.updateCurrentObj(obj) + + self.updateVisited(obj) + return len(others) + + def countAllGroup(self): + return len(self.currentGrp) + + def countAttribute(self, attribute, updateCurrentObj=True): + filtered = self.filterAttribute(self.scene, attribute) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + self.updateIdentifier(obj, attribute) + self.updateVisited(obj) + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.groups.append(filtered) + self.currentGrp = filtered + return len(filtered) + + def countAttributeGroup(self, attribute, updateCurrentObj=True): + filtered = self.filterAttribute(self.currentGrp, attribute) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + self.updateIdentifier(obj, attribute) + self.updateVisited(obj) + + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.groups.append(filtered) + self.currentGrp = filtered + return len(filtered) + + def countObjRelImm(self, pos, updateCurrentObj=True): + filtered = self.filterPosition(self.scene, self.currentObj, pos) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + + self.currentGrp = filtered + self.groups.append(filtered) + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + self.uniqueObjFlag = True + else: + if new: + self.objs.append(obj) + return len(filtered) + + def countObjRelImm2(self, pos): + if self.uniqueObjFlag: + # del self.objs[-1] + self.updateCurrentObj(self.objs[-2]) + self.uniqueObjFlag = False + return self.countObjRelImm(pos) + + def 
countObjRelEarly(self, pos, earlyObjAttribute, updateCurrentObj=True): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + filtered = self.filterPosition(self.scene, objEarly, pos) + if len(filtered) == 0: + return 0 + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + else: + self.updateCurrentObj(objEarly) + + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + def countObjExcludeImm(self, attributeType, updateCurrentObj=True): + filtered = self.excludeAttribute( + self.scene, self.currentObj, attributeType) + if len(filtered) == 0: + return 0 + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + def countObjExcludeEarly(self, attributeType, earlyObjAttribute, updateCurrentObj=True): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.excludeAttribute(self.scene, objEarly, attributeType) + if len(filtered) == 0: + return 0 + + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj = _obj + new = False + break + if updateCurrentObj: + self.updateCurrentObj(obj) + else: + if new: + self.objs.append(obj) + else: + self.updateCurrentObj(objEarly) + self.currentGrp = filtered + self.groups.append(filtered) + return len(filtered) + + ########################################################################### + # Existence questions # + ########################################################################### + + def existOther(self): + others = self.getOther() + numOther = len(others) + if numOther > 0: + self.currentGrp = others + self.groups.append(others) + for _obj in others: + self.updateVisited(_obj) + return "yes" if numOther > 0 else "no" + + def existAttribute(self, attribute): + filtered = self.filterAttribute(self.scene, attribute) + numAttribute = len(filtered) + if numAttribute == 0: + return "no" + + # Update the visited objects list + for _obj in filtered: + self.updateVisited(_obj) + if len(filtered) == 1: + obj = filtered[0] + new = True + for _obj in self.objs: + if _obj["id"] == obj["id"]: + self.updateIdentifier(_obj, attribute) + new = False + break + if new: + self.updateIdentifier(obj, attribute) + self.objs.append(obj) + + self.currentGrp = filtered + self.groups.append(filtered) + return "yes" + + def existAttributeGroup(self, attribute): + numAttributeGrp = self.countAttributeGroup( + attribute, updateCurrentObj=False) + return "yes" if numAttributeGrp > 0 else "no" + + def existObjRelImm(self, pos): + numObjs = self.countObjRelImm(pos, updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjRelEarly(self, pos, earlyObjAttribute): + numObjs = self.countObjRelEarly( + pos, earlyObjAttribute, 
updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjExcludeImm(self, attributeType): + numObjs = self.countObjExcludeImm( + attributeType, updateCurrentObj=False) + return "yes" if numObjs > 0 else "no" + + def existObjExcludeEarly(self, attributeType, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.excludeAttribute(self.scene, objEarly, attributeType) + numObjs = len(filtered) + if numObjs == 0: + return "no" + self.currentGrp = filtered + self.groups.append(filtered) + return "yes" + + ########################################################################### + # Seek questions # + ########################################################################### + + def seekAttrImm(self, attributeType): + assert attributeType in self.currentObj, "Attributre <{}> is not valid" + self.updateIdentifier(self.currentObj, self.currentObj[attributeType]) + return self.currentObj[attributeType] + + def seekAttributeEarly(self, attributeType, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + self.updateIdentifier(objEarly, objEarly[attributeType]) + self.updateCurrentObj(objEarly) + self.updateVisited(objEarly) + return objEarly[attributeType] + + def seekAttributeRelImm(self, attributeType, pos): + filtered = self.filterPosition(self.scene, self.currentObj, pos) + if len(filtered) == 0: + return "none" + else: + # Get the closest object to slef.obj + if pos == "left": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[-1] + elif pos == "right": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[0] + elif pos == "front": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[-1] + elif pos == "behind": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[0] + + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj["identifier"] = _obj["identifier"] + break + self.updateIdentifier(obj, obj[attributeType]) + self.updateCurrentObj(obj) + self.updateVisited(obj) + return obj[attributeType] + + def seekAttributeRelEarly(self, attributeType, pos, earlyObjAttribute): + for objEarly in reversed(self.objs): + if objEarly["identifier"] is not None: + identifiers = objEarly["identifier"].split("-") + if earlyObjAttribute in identifiers: + break + else: + continue + + filtered = self.filterPosition(self.scene, objEarly, pos) + if len(filtered) == 0: + return "none" + else: + # Get the closest object to slef.obj + if pos == "left": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[-1] + elif pos == "right": + filtered.sort(key=lambda x: x["position"][0]) + obj = filtered[0] + elif pos == "front": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[-1] + elif pos == "behind": + filtered.sort(key=lambda x: x["position"][1]) + obj = filtered[0] + for _obj in self.objs: + if _obj["id"] == obj["id"]: + obj["identifier"] = _obj["identifier"] + break + self.updateIdentifier(obj, obj[attributeType]) + self.updateCurrentObj(obj) + self.updateVisited(obj) + return obj[attributeType] diff --git a/generate_dataset.py b/generate_dataset.py new file mode 100644 index 0000000..8e3c9a7 --- /dev/null +++ b/generate_dataset.py @@ -0,0 +1,952 @@ +r"""Generates CLEVR-Dialog 
dataset. + +Needs access to the following files: +synonyms: Contains several synonyms for each word in the question/caption. +caption templates: List of caption templates. +question templates: List of question templates. +metainfo: Meta-information related to attributes and values of CLEVR objects. + +Usage: + python -u generate_dataset.py \ + --scene_path="data/scenes/CLEVR_train_scenes.json" \ + --num_beams=100 \ + --num_workers=12 \ + --save_path="data/clevr_train_raw.json" + +Author: Satwik Kottur +""" + + +import copy +import collections +import json +import multiprocessing +import os +import random +import re +import time +from absl import flags +from absl import app +import numpy as np +from tqdm import tqdm as progressbar + +import clevr_utils as utils +import global_vars as gvars +# import constraints_splitB as constraints +import constraints + +FLAGS = flags.FLAGS +flags.DEFINE_string('synonym_path', '/projects/abdessaied/clevr-dialog/templates/synonyms.json', + 'Path to synonyms file') +flags.DEFINE_string('metainfo_path', '/projects/abdessaied/clevr-dialog/templates/metainfo.json', + 'Path to meta information file') +flags.DEFINE_string('caption_template_root', '/projects/abdessaied/clevr-dialog/templates/captions/', + 'Root to folder with caption templates') +flags.DEFINE_string('question_template_root', '/projects/abdessaied/clevr-dialog/templates/questions/', + 'Root to folder with question templates') +flags.DEFINE_string('scene_path', + # '/projects/abdessaied/clevr-dialog/output/result_clevr_oroginal_test.json', + '/projects/abdessaied/clevr-dataset-gen/output_finetune_20_objs_with_masks_many_attr/CLEVR_scenes.json', + 'Path to CLEVR scene path json file') +flags.DEFINE_string('scene_id_file', '', + 'Path to specific CLEVR scene ids to generate dialogs') +flags.DEFINE_string('save_path', '/projects/abdessaied/clevr-dialog/output/raw_data_modified/dialogs_finetune_20_objects_10_rounds.json', + 'Path to save the dataset json') +flags.DEFINE_integer('num_beams', 100, 'Number of beams in dialog search') +flags.DEFINE_integer('num_workers', 64, 'Number of workers to use in search') +flags.DEFINE_integer('captions_per_image', 5, 'Number of captions per image') +flags.DEFINE_integer('num_images', -1, + 'Number of images to generate dialogs. -1 for all.') +flags.DEFINE_integer('num_rounds', 10, 'Number of rounds in each dialog') + + +# Number of beams and distribution of question types. +# Start cutting down beams after 5th round. +# Heuristics (for round 4): +# A. count <= 2 1 <= seek <= 3 exist <= 2 +# B. count + exist <= 3 +# C. Independent questions <= 1 +# Heuristics (for round 5): +# A. count <= 2 2 <= seek <= 4 exist <= 2 +# B. count + exist <= 3 +# C. Independent questions <= 1 +ranges = {3: {'indep': [0, 1], 'seek': [1, 4], 'exist': [0, 1], + 'count': [0, 1], 'exist+count': [0, 2]}, + 4: {'indep': [0, 1], 'seek': [2, 4], 'exist': [0, 1], + 'count': [0, 1], 'exist+count': [0, 2]}, + 5: {'indep': [0, 1], 'seek': [2, 5], 'exist': [0, 2], + 'count': [0, 2], 'exist+count': [0, 3]}, + 6: {'indep': [0, 1], 'seek': [2, 5], 'exist': [0, 2], + 'count': [0, 2], 'exist+count': [0, 3]}, + 7: {'indep': [0, 2], 'seek': [3, 5], 'exist': [0, 2], + 'count': [0, 2], 'exist+count': [0, 3]}, + 8: {'indep': [0, 2], 'seek': [3, 6], 'exist': [0, 3], + 'count': [0, 3], 'exist+count': [0, 3]}, + 9: {'indep': [0, 2], 'seek': [3, 6], 'exist': [0, 3], + 'count': [0, 3], 'exist+count': [0, 4]}} + + +def mapping(tag): + """Maps tag to attribute. 
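+
+    For example (the tag name is assumed here from the CLEVR-Dialog metainfo
+    file): mapping('<C1>') would return 'color', since the group digit '1' is
+    stripped before the lookup in METAINFO['tag_map'].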
+
+    Args:
+        tag: An input tag
+
+    Returns:
+        tag_label: Label for the input tag
+    """
+
+    return gvars.METAINFO['tag_map'][tag.replace('1', '')]
+
+
+def inv_mapping(attribute, arg_id=0):
+    """Inverse maps attribute to tag.
+
+    Args:
+        attribute: Name of the attribute
+        arg_id: Argument id; appended to the tag if greater than 0
+
+    Returns:
+        base_tag: The string for the tag
+    """
+
+    base_tag = gvars.METAINFO['tag_inv_map'][attribute]
+    if arg_id > 0:
+        base_tag = base_tag[:-1] + str(arg_id) + base_tag[-1]
+
+    return base_tag
+
+
+def get_tag_group(tag):
+    """Gets the group id from tag string.
+
+    For example, the group id of tag <S> is 0, that of <S1> is 1.
+    Assumes single digit group id.
+
+    Args:
+        tag: Tag string
+
+    Returns:
+        group_id: Return extracted group id
+    """
+
+    group_id = 0 if len(tag) <= 3 else int(tag[-2])
+    return group_id
+
+
+def replace_attribute(text, tag, obj_group, eliminate=False):
+    """Replaces the attribute tags in text using available object properties.
+
+    NOTE: If shape is to be replaced, we use 'thing' in its place.
+
+    Args:
+        text: The text template to perform replacement
+        tag: The tags to replace in the text
+        obj_group: Available object properties to replace with
+        eliminate: Eliminate the remaining attribute tags
+
+    Returns:
+        replaced_text: The replaced text
+    """
+
+    group = get_tag_group(tag)
+    if mapping(tag) == 'relation':
+        # Actual relation tag (<R>), else position tag (<P>).
+        if tag == '<R>':
+            relation_list = gvars.METAINFO['relation_phrases'][obj_group['relation']]
+            relation_cand = random.choice(relation_list)
+        else:
+            relation_cand = obj_group['relation']
+
+        return text.replace(tag, relation_cand)
+
+    if mapping(tag) == 'shape':
+        if eliminate:
+            replacer = 'thing'
+        else:
+            replacer = str(obj_group['objects'][group][mapping(tag)])
+
+        # Plural forms for groups.
+        if obj_group.get('count', 1) > 1 or obj_group.get('use_plural', False):
+            replacer += 's'
+    elif mapping(tag) == 'count':
+        if eliminate:
+            replacer = ''
+        else:
+            replacer = str(obj_group['count'])
+    else:
+        if eliminate:
+            replacer = ''
+        else:
+            replacer = str(obj_group['objects'][group][mapping(tag)])
+    return text.replace(tag, replacer)
+
+
+def realize_text_and_extract_scene(scene, template, filter_objs):
+    """Samples attributes for template using filtered objects.
+
+    In addition, creates scene graph for the new information added.
+ + Args: + scene: Current scene graph + template: Text template to use to generate questions + filter_objs: Set of objects satisfying constraints of current template + + Returns: + sample: Contains the text realization and scene graph + """ + + def default_list(): return collections.defaultdict(list) + graph = {'relationships': collections.defaultdict(default_list), + 'counts': {}, 'exists': {}, 'history': [], 'objects': {}} + + # number of inputs + n_inputs = template.get('inputs', 1) + # sample a text template + text_sample = random.choice(template['text']) + text_sample_index = template['text'].index(text_sample) + + # extract attribute tags and get them into groups + tags = re.findall('(<[\d\w]*>)', text_sample) + + tag_groups = collections.defaultdict(list) + for tag in tags: + group_id = get_tag_group(tag) + tag_groups[group_id].append(tag) + + # sample a random element from filtered + arg_sample = random.choice(filter_objs) + # scene information obtained from the current round + graph_item = arg_sample['graph'] + + # remove tags from text not allowed by filter_objs + for arg_ind in range(n_inputs): + obj_sample = arg_sample['objects'][arg_ind] + avail_attrs = obj_sample['optional'] + obj_sample['required'] + + for ii in tag_groups[arg_ind][::-1]: + if mapping(ii) not in avail_attrs: + tag_groups[arg_ind].remove(ii) + text_sample = replace_attribute( + text_sample, ii, arg_sample, True) + + # assert that all required attributes are present as tags + for attribute in obj_sample['required']: + required_tag = inv_mapping(attribute, arg_ind) + if required_tag not in tag_groups[arg_ind]: + print("required_tag: {}".format(required_tag)) + print("template: {}".format(template)) + assert required_tag in tag_groups[arg_ind], \ + 'A required attribute is missing in template!' + + # start compiling tags to keep + tags_to_keep = [inv_mapping(ii, arg_ind) + for ii in obj_sample['required']] + + # filter out those not present in text template + optional_tags = [inv_mapping(ii, arg_ind) + for ii in obj_sample['optional']] + optional_tags = [ + ii for ii in optional_tags if ii in tag_groups[arg_ind]] + + # if tags_to_keep is empty, sample from optional with 1:70 2:25 3:5 + if len(optional_tags) > 0: + if len(tags_to_keep) > 0: + n_tags_sample = [0, 1, 2] + else: + n_tags_sample = [1, 2, 3] + n_sample = np.random.choice(n_tags_sample, 1, + p=gvars.METAINFO['probabilities'], + replace=False) + # lower cap at the length of optional + n_sample = min(n_sample[0], len(optional_tags)) + if n_sample > 0: + tags_to_keep += random.sample(optional_tags, n_sample) + + # now create a dictionary of placeholders with actual attribute values + for tag in tag_groups[arg_ind]: + remove = tag not in tags_to_keep + text_sample = replace_attribute( + text_sample, tag, arg_sample, remove) + + # remove attributes from objects not included in tags_to_keep + if 'objects' in graph_item: + for ii in gvars.METAINFO['attributes']: + if inv_mapping(ii, arg_ind) not in tags_to_keep: + if ii in graph_item['objects'][arg_ind]: + del graph_item['objects'][arg_ind][ii] + + # record the caption info + # Record info and merge scene graphs. 
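+    # The loop below collects the attribute values actually realised in the
+    # caption (used later as the ground-truth program arguments), skipping
+    # duplicates; for obj-relation captions the relation is appended as well.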
+    args = []
+    for obj in arg_sample['objects']:
+        if obj is None:
+            continue
+        else:
+            for k in obj['required']:
+                arg = obj.get(k, None)
+                if arg is not None:
+                    if arg not in args:
+                        args.append(arg)
+                else:
+                    arg = arg_sample.get(k, None)
+                    if arg is not None and arg not in args and type(arg) == str:
+                        args.append(arg)
+            arg = obj.get('attribute', None)
+            if arg is not None and arg not in args:
+                args.append(arg)
+    if template['label'] == 'obj-relation':
+        args.append(arg_sample['relation'])
+
+    if template['label'] == "count-att-no":
+        template['label'] = "count-att"
+
+    graph_item['round'] = 0
+    sample = {}
+    sample['template_info'] = [copy.deepcopy(template)]
+    sample['args'] = args
+    del sample['template_info'][-1]['text']
+    sample['template_info'][-1]['index'] = text_sample_index
+    sample['caption'] = text_sample
+    sample['template'] = template['label']
+
+    sample['dialog'] = []
+
+    # Append history, update the scene graph, and save the new scene graph.
+    graph['history'].append(graph_item)
+    sample['graph'] = utils.merge_update_scene_graph(graph, graph_item)
+    return sample
+
+
+def realize_question(dialog, template, filter_objs):
+    """Samples attributes for template using filtered objects.
+
+    In addition, creates scene graph for the new information added.
+
+    Args:
+        dialog: Dialog generated so far, including the partial scene graph
+        template: Text template to use to generate questions
+        filter_objs: Set of objects satisfying constraints of current template
+
+    Returns:
+        dialog: Dialog extended by the newly realized question
+    """
+
+    # Number of inputs.
+    n_inputs = template.get('inputs', 0)
+    # Sample a text template.
+    text_sample = random.choice(template['text'])
+    text_sample_index = template['text'].index(text_sample)
+
+    # Extract attribute tags and get them into groups.
+    tags = re.findall('(<[\d\w]*>)', text_sample)
+    tag_groups = collections.defaultdict(list)
+    for tag in tags:
+        group_id = get_tag_group(tag)
+        tag_groups[group_id].append(tag)
+
+    # Sample a random element from filtered.
+    arg_sample = random.choice(filter_objs)
+
+    # Remove tags from text not allowed by filter_objs.
+    for arg_ind in range(n_inputs):
+        obj_sample = arg_sample['objects'][arg_ind]
+        avail_attrs = obj_sample['optional'] + obj_sample['required']
+
+        for ii in tag_groups[arg_ind][::-1]:
+            if mapping(ii) not in avail_attrs:
+                tag_groups[arg_ind].remove(ii)
+                text_sample = replace_attribute(
+                    text_sample, ii, arg_sample, True)
+
+        # Assert that all required attributes are present as tags.
+        for attribute in obj_sample['required']:
+            required_tag = inv_mapping(attribute, arg_ind)
+            # Make an exception for <R> and <P>.
+            if required_tag == '<R>' and '<P>' in tag_groups[arg_ind]:
+                continue
+            assert required_tag in tag_groups[arg_ind], \
+                'A required attribute is missing in template!'
+
+        # Start compiling tags to keep.
+        tags_to_keep = [inv_mapping(ii, arg_ind)
+                        for ii in obj_sample['required']]
+        # Filter out those not present in text template.
+        optional_tags = [inv_mapping(ii, arg_ind)
+                         for ii in obj_sample['optional']]
+        optional_tags = [
+            ii for ii in optional_tags if ii in tag_groups[arg_ind]]
+
+        # If tags_to_keep is empty, sample from optional with (1:70, 2:25, 3:5).
+        if len(optional_tags) > 0:
+            if len(tags_to_keep) > 0:
+                n_tags_sample = [0, 1, 2]
+            else:
+                n_tags_sample = [1, 2, 3]
+            n_sample = np.random.choice(n_tags_sample, 1,
+                                        p=gvars.METAINFO['probabilities'],
+                                        replace=False)
+            # Lower cap at the length of optional.
+            n_sample = min(n_sample[0], len(optional_tags))
+            if n_sample > 0:
+                tags_to_keep += random.sample(optional_tags, n_sample)
+
+        # Now create a dictionary of placeholders with actual attribute values.
+        for tag in tag_groups[arg_ind]:
+            remove = tag not in tags_to_keep
+            text_sample = replace_attribute(
+                text_sample, tag, arg_sample, remove)
+
+    # Record info and merge scene graphs.
+    args = []
+    for obj in arg_sample['objects']:
+        if obj is None:
+            continue
+        else:
+            for k in obj['required']:
+                arg = obj.get(k, None)
+                if arg is not None:
+                    if arg not in args:
+                        args.append(arg)
+                else:
+                    arg = arg_sample.get(k, None)
+                    if arg is not None:
+                        args.append(arg)
+            arg = obj.get('attribute', None)
+            if arg is not None and arg not in args:
+                args.append(arg)
+
+    dialog_datum = {'question': text_sample, 'answer': arg_sample['answer'],
+                    'template': template['label'], 'args': args}
+    dialog['template_info'].append(template.copy())
+    del dialog['template_info'][-1]['text']
+    dialog['template_info'][-1]['index'] = text_sample_index
+    dialog['dialog'].append(dialog_datum)
+    graph_item = arg_sample['graph']
+
+    # If mergeable, add it to the objects list.
+    dialog['graph'] = utils.merge_update_scene_graph(
+        dialog['graph'], graph_item)
+
+    # If there are volatile objects in the graph item, remove them.
+    for obj in graph_item['objects'][::-1]:
+        if obj.get('volatile', False):
+            graph_item['objects'].remove(obj)
+    dialog['graph']['history'].append(graph_item)
+    return dialog
+
+
+def clean_text_subroutine(text, thing, suffix):
+    """Cleans the text and substitutes thing with object (subroutine).
+
+    Args:
+        text: Text string to be cleaned
+        thing: Whether to use 'thing' or 'object'
+        suffix: Either '?' (question) or '.' (caption)
+
+    Returns:
+        clean_text: Text string after cleaning procedure
+    """
+
+    # Synonyms + skipping optional parts of the sentence.
+    clean_text = skip_and_replace_phrases(text)
+
+    # Remove full stop and extra spaces, capitalize the first letter.
+    clean_text = re.sub(' +', ' ', clean_text.replace(suffix, '').strip(' '))
+    # First replace 'a thing' -> 'an object', then the remaining occurrences.
+    if thing == 'object':
+        clean_text = clean_text.replace('a thing', 'an object')
+    clean_text = clean_text.replace('thing', thing)
+    clean_text = clean_text[0].upper() + clean_text[1:] + suffix
+    return clean_text
+
+
+def clean_dialog_text(dialogs):
+    """Cleans the dialog texts.
+ + Args: + dialogs: Generated dialogs to perform text cleaning + + Returns: + dialogs: Return the dialogs after cleaning the text inplace + """ + + # Replace thing with object throughout with probability 0.5. + thing = 'thing' if random.random() > 0.5 else 'object' + for index, dialog_datum in enumerate(dialogs): + # Clean the caption. + text = dialog_datum['caption'] + dialogs[index]['caption'] = clean_text_subroutine(text, thing, '.') + + for r_id, dialog in enumerate(dialog_datum['dialog']): + # Clean the question. + text = dialog['question'] + text = clean_text_subroutine(text, thing, '?') + dialogs[index]['dialog'][r_id]['question'] = text + return dialogs + + +def skip_and_replace_phrases(text): + """Substitutes synonyms and skips optional parts stochastically. + + Args: + text: Text string + + Returns: + text: Text string with synonyms replaced and optional parts skipped + """ + + # For each text in [], replace it with '' with probability 0.5. + matches = re.findall('(\[[ \w]*\])', text) + for match in matches: + if random.uniform(0, 1) > 0.5: + text = text.replace(match, '') + else: + text = text.replace(match, match[1:-1]) + + # Remove empty spaces, if any. + text = re.sub(' +', ' ', text) + # Search for synonyms, replace at uniformly random. + text = text.lower() + for key, values in gvars.METAINFO['synonym_keys']: + if key in text: + text = text.replace(key, random.choice(values)) + return text + + +def generate_captions(scenes, templates): + """Wrapper generates captions. + + Args: + scenes: List of scene graphs for which to generate captions + templates: List of available caption templates + + Returns: + generated_content: Captions generated for the input scenes + """ + + template_dictionary = {ii['label']: ii for ii in templates} + generated_content = [] + for scene in scenes['scenes'][0:FLAGS.num_images]: + content = {} + # Copy over image_index, split, image_filename from scene. + for key in ['image_index', 'split', 'image_filename']: + content[key] = scene[key] + + content['dialogs'] = [] + # Filter objects based on constraints. + filter_objs = constraints.caption(scene, templates) + for filter_obj in filter_objs: + # Realize the text, and return the partial scene knowledge (q). + template = template_dictionary[filter_obj[0]['graph']['template']] + sample = realize_text_and_extract_scene( + scene, template, filter_obj) + # Add it to the list of dialogs. + content['dialogs'].append(sample) + generated_content.append(content) + return generated_content + + +def generate_questions(scenes, dialogs, templates, params): + """Wrapper generates questions. + + Args: + scenes: List of scene graphs to generate questions + dialogs: Contains already generated captions for scenes graphs + templates: List of available question templates + params: Beam search parameters for question generation + + Returns: + new_dialogs: Generated raw dialogs with captions and questions + """ + + new_dialogs = [] + for scene_id, dialog_datum in enumerate(dialogs): + image_dialogs = copy.deepcopy(dialog_datum) + image_dialogs['dialogs'] = [] + + for dialog in dialog_datum['dialogs']: + # Pick a template at random. + flag = False + iter_count = 0 + while not flag: + # Pick a template at random. + template = random.choice(templates) + + # Filter objects based on constraints. + filter_objs = constraints.question(scenes['scenes'][scene_id], + dialog, template) + flag = len(filter_objs) != 0 + + # Extreme case -- exit + iter_count += 1 + if iter_count > 10: + break + + # Realize q question. 
+ if flag: + deep_copy = copy.deepcopy(dialog) + gen_dialog = realize_question(deep_copy, template, filter_objs) + image_dialogs['dialogs'].append(copy.deepcopy(gen_dialog)) + new_dialogs.append(image_dialogs) + + return new_dialogs + + +def worker(scenes, cap_templates, ques_templates, worker_id, out_q): + """Worker method generates dialogs (caption + questions) for pool of scenes. + + Args: + scenes: List of CLEVR scenes to generate dialogs + cap_templates: Templates for caption generation + ques_templates: Templates for question generation + worker_id: Id for the current worker + out_q: Output queue to save generated dialogs from different sources + + Returns: + Adds dialogs against the worker id in the output queue. + """ + + dialogs = [] + for index, scene in enumerate(scenes): + cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime()) + print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % + (cur_time, worker_id, index, len(scenes), scene['image_index'])) + try: + gen_dialog = generate_dialog_bfs( + scene, cap_templates, ques_templates) + dialogs.append(json.loads(json.dumps(gen_dialog))) + except: + print('NOTE: Missing data for %d' % scene['image_index']) + out_q.put({worker_id: dialogs}) + + +def generate_dialog_bfs(scene, cap_templates, ques_templates): + """Perform approximate breadth-first-search (BFS) to generate dialogs. + + Args: + scene: Scene graph for the CLEVR image + cap_templates: List of caption templates + ques_templates: List of question templates + + Returns: + bundle: List of dialogs generated for the input scene graph + """ + + bundle = {} + # Generate captions for the scene. + # Copy over image_index, split, image_filename from scene. + for key in ['image_index', 'split', 'image_filename']: + bundle[key] = scene[key] + + template_dictionary = {ii['label']: ii for ii in cap_templates} + content = {} + + # Filter objects based on constraints on captions. + filter_objs = constraints.caption(scene, cap_templates) + + for filter_obj in filter_objs: + # Realize the text, and return the partial scene knowledge (q). + template = template_dictionary[filter_obj[0]['graph']['template']] + sample = realize_text_and_extract_scene(scene, template, filter_obj) + # Add it to the list of dialogs. + content[template['label']] = [sample] + + # Now generate questions. + # Group templates, exist/count of similar type together. + ques_groups = collections.defaultdict(list) + + labels = [ii['label'] for ii in ques_templates] + # print('\n'.join(labels)) + for index, ii in enumerate(ques_templates): + if 'exist' in ii['label'] or 'count' in ii['label']: + ques_groups[labels[index][4:]].append(ii) + else: + ques_groups[labels[index]].append(ii) + + for round_id in range(FLAGS.num_rounds): + new_content = {} + + # For each group. + for cap_label, cap_dialogs in content.items(): + cur_pool = [] + for dialog_datum in cap_dialogs: + for _, group in ques_groups.items(): + template = random.choice(group) + + # Make a copy. + datum_copy = copy.deepcopy(dialog_datum) + + # Filter objects based on constraints. + filter_objs = constraints.question( + scene, datum_copy, template) + + if len(filter_objs) == 0: + continue + + # Realize q question. 
+ gen_dialog = realize_question( + datum_copy, template, filter_objs) + cur_pool.append(gen_dialog) + + if round_id in ranges: + for d_id, dialog in enumerate(cur_pool): + n_types = {'indep': 0, 'seek': 0, 'exist': 0, 'count': 0} + keep_dialog = True + + labels = [ii['label'] + for ii in dialog['template_info'][1:]] + for label in labels: + if label in gvars.METAINFO['independent_questions']: + n_types['indep'] += 1 + + label_type = label.split('-')[0] + n_types[label_type] += 1 + + # Heuristic A, C + for q_type, count in n_types.items(): + limit = ranges[round_id][q_type] + if limit[0] > count or count > limit[1]: + keep_dialog = False + break + + # Heuristic B + limit = ranges[round_id]['exist+count'] + if n_types['count'] + n_types['exist'] > limit[1]: + keep_dialog = False + if not keep_dialog: + cur_pool[d_id] = None + cur_pool = [ii for ii in cur_pool if ii is not None] + + # Keep limited number of beams (for speed). + if len(cur_pool) > FLAGS.num_beams: + cur_pool = sample_beams(cur_pool)[:FLAGS.num_beams] + new_content[cap_label] = cur_pool + content = copy.deepcopy(new_content) + + # Get dialogs with sim, imm2, early questions. + for cap_label, cap_dialogs in content.items(): + # Sample beams. + content[cap_label] = sample_beams(cap_dialogs) + + # Remove keys that are empty. + empty_keys = [key for key, val in content.items() if len(val) == 0] + for key in empty_keys: + del content[key] + + # For each caption, sample one. + sampled_dialogs = [] + for cap_label, cap_dialogs in content.items(): + if len(cap_dialogs) > 0: + sampled_dialogs.append(cap_dialogs.pop()) + + # Get 5 per image, compensate by taking from other entries. + content_keys = [ii for ii in content.keys()] + while len(sampled_dialogs) < 5: + random_label = random.choice(content_keys) + sampled_dialogs.append(cap_dialogs.pop()) + + # Finally, make the dialog text readable. + sampled_dialogs = clean_dialog_text(sampled_dialogs) + + # Generate the coreference chain. + for dialog_id, dialog in enumerate(sampled_dialogs): + sampled_dialogs[dialog_id] = identify_coref_chains(dialog) + bundle['dialogs'] = sampled_dialogs + return bundle + + +def sample_beams(dialogs): + """Samples beams based on the number of constraints satisfied. + + Args: + dialogs: Generated dialogs to sample beams + + Returns: + sampled_dialogs: List of sampled dialogs based on the constraints + """ + + num_constraints = [] + for d_id, dialog in enumerate(dialogs): + satisfied = 0 + labels = [ii['label'] for ii in dialog['template_info'][1:]] + + # Have a imm2 for sure + satisfied += np.sum(['imm2' in ii for ii in labels]) + # Have a imm2 for sure + satisfied += np.sum(['sim' in ii for ii in labels]) + # Have 'early' + satisfied += min(4, np.sum(['early' in ii for ii in labels])) + + # Add it with the number of constraints it satisfies. + num_constraints.append((satisfied, d_id)) + + # Then order. + def sort_key(x): return (x[0], random.random()) + ids = sorted(num_constraints, key=sort_key, reverse=True) + sampled_dialogs = [dialogs[ii[1]] for ii in ids] + return sampled_dialogs + + +def identify_coref_chains(dialog): + """Identifies the coreference chains in generated dialog. 
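+
+  For each round that refers back to an earlier round, the 'dependence'
+  entry in dialog['graph']['history'] is set to the index of that earlier
+  round.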
+
+  Args:
+    dialog: Generated dialog for which coreference chains are to be identified
+
+  Returns:
+    dialog: A copy of the dialog, with coreference chains annotated
+  """
+
+  for r_id, datum in enumerate(dialog['dialog']):
+    label = datum['template']
+    if label in gvars.METAINFO['independent_questions']:
+      dialog['graph']['history'][r_id + 1]['dependence'] = None
+      continue
+
+    if (label == 'exist-attribute-group' or label == 'count-attribute-group' or
+            label == 'count-all-group'):
+      dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1
+      continue
+
+    if 'imm' in label:
+      dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1
+      continue
+
+    if 'early' in label:
+      # Go over previous history.
+      cur_history = dialog['graph']['history'][r_id + 1]
+      assert 'focus_id' in cur_history and 'focus_desc' in cur_history,\
+          'More focus objects than one, no focus objects!'
+      focus_id = cur_history['focus_id']
+      for attr in gvars.METAINFO['attributes']:
+        if attr in cur_history['focus_desc']:
+          break
+
+      history = dialog['graph']['history'][:r_id + 1]
+      for hist_id, hist_datum in enumerate(history):
+        for obj in hist_datum['objects']:
+          if obj['id'] == focus_id and attr in obj:
+            dialog['graph']['history'][r_id +
+                                       1]['dependence'] = hist_id - 1
+            break
+  return dialog
+
+
+def main(unused_argv):
+  """Main method generates the CLEVR-Dialog dataset.
+  """
+  # Read the scene file.
+  with open(FLAGS.scene_path, 'r') as file_id:
+    scenes = json.load(file_id)
+
+  # Read the synonyms file.
+  with open(FLAGS.synonym_path, 'r') as file_id:
+    synonyms = json.load(file_id)
+
+  def sorter(x): return len(x[0].split(' '))
+
+  # Read the metainformation file.
+  with open(FLAGS.metainfo_path, 'r') as file_id:
+    gvars.METAINFO = json.load(file_id)
+  tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()
+                 if tag != '<P>'}
+  gvars.METAINFO['tag_inv_map'] = tag_inv_map
+  gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),
+                                          key=sorter, reverse=True)
+
+  # Add ids to objects.
+  scenes = utils.add_object_ids(scenes)
+  scenes = utils.clean_object_attributes(scenes)
+
+  # Read the caption templates.
+  template_paths = os.listdir(FLAGS.caption_template_root)
+  cap_templates = []
+  for ii in template_paths:
+    with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:
+      cur_templates = json.load(file_id)
+    cap_templates.extend(cur_templates)
+  # utils.pretty_print_templates(cap_templates, 1)
+
+  # Read the question templates.
+  template_paths = os.listdir(FLAGS.question_template_root)
+  ques_templates = []
+  for ii in template_paths:
+    with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:
+      cur_templates = json.load(file_id)
+    ques_templates.extend(cur_templates)
+  # utils.pretty_print_templates(ques_templates, 1)
+
+  # 1. Check if there is a scene_id_file specified.
+  # 2. Check if num_images is -1.
+  if FLAGS.scene_id_file != '':
+    with open(FLAGS.scene_id_file, 'r') as file_id:
+      missing_ids = [int(ii.strip('\n')) for ii in file_id.readlines()]
+    print('Dialogs missing for scenes: %d' % len(missing_ids))
+
+    # Create an image_index -> scenes list index dictionary.
+    image_list_id_dict = {ii['image_index']: index
+                          for index, ii in enumerate(scenes['scenes'])}
+    scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]
+                     for scene_id in missing_ids]
+
+  elif FLAGS.num_images == -1:
+    scenes_subset = scenes['scenes']
+
+  else:
+    scenes_subset = scenes['scenes'][0: FLAGS.num_images]
+
+  # BFS for each scene.
+  if FLAGS.num_workers == 1:
+    # Single thread version.
+    dialogs = []
+    for index, scene in enumerate(scenes_subset):
+      cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
+      print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %
+            (cur_time, 0, index, len(scenes_subset), scene['image_index']))
+      gen_dialog = generate_dialog_bfs(
+          scene, cap_templates, ques_templates)
+      dialogs.append(gen_dialog)
+
+  else:
+    # Multithread version.
+    output_q = multiprocessing.Queue()
+    jobs = []
+    for worker_id in range(FLAGS.num_workers):
+      allotment = scenes_subset[worker_id::FLAGS.num_workers]
+      inputs = (allotment, cap_templates, ques_templates)
+      inputs += (worker_id, output_q)
+
+      process = multiprocessing.Process(target=worker, args=inputs)
+      jobs.append(process)
+      process.start()
+
+    # Wait for all the jobs to finish and collect the output.
+    final_results = {}
+    for _ in jobs:
+      final_results.update(output_q.get())
+    for job in jobs:
+      job.join()
+
+    # Flatten and sort.
+    final_results = [jj for _, ii in final_results.items() for jj in ii]
+    dialogs = sorted(final_results, key=lambda x: x['image_index'])
+    # utils.pretty_print_dialogs(dialogs)
+
+  # Save the dialogs.
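+  # (written as a single JSON list of per-image dialog bundles)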
+ print('Saving dialog at: %s' % FLAGS.save_path) + with open(FLAGS.save_path, 'w') as file_id: + json.dump(dialogs, file_id) + + +if __name__ == '__main__': + gvars.initialize() + app.run(main) diff --git a/generate_dataset_minecraft.py b/generate_dataset_minecraft.py new file mode 100644 index 0000000..19a86d0 --- /dev/null +++ b/generate_dataset_minecraft.py @@ -0,0 +1,1069 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +# -------------------------------------------------------- +# adapted from https://github.com/satwikkottur/clevr-dialog/blob/master/generate_dataset.py +# -------------------------------------------------------- + +import copy +import collections +import json +import multiprocessing +import os +import random +import re +import time +from tkinter.tix import Tree +from absl import flags +from absl import app +import numpy as np +from tqdm import tqdm as progressbar + +import minecraft_utils as utils +import global_vars as gvars +# import constraints_splitB as constraints +import constraints + +FLAGS = flags.FLAGS +flags.DEFINE_string('synonym_path', '/projects/abdessaied/clevr-dialog/templates/synonyms_minecraft.json', + 'Path to synonyms file') +flags.DEFINE_string('metainfo_path', '/projects/abdessaied/clevr-dialog/templates/metainfo_minecraft.json', + 'Path to meta information file') +flags.DEFINE_string('caption_template_root', '/projects/abdessaied/clevr-dialog/templates/captions_minecraft/', + 'Root to folder with caption templates') +flags.DEFINE_string('question_template_root', '/projects/abdessaied/clevr-dialog/templates/questions_minecraft/', + 'Root to folder with question templates') +flags.DEFINE_string('scene_path', + # '/projects/abdessaied/data/CLEVR/CLEVR_v1.0/scenes/CLEVR_val_scenes.json', + '/projects/abdessaied/data/minecraft/test_scenes.json', + 'Path to CLEVR scene path json file') +flags.DEFINE_string('scene_id_file', '', + 'Path to specific CLEVR scene ids to generate dialogs') +flags.DEFINE_string('save_path', '/projects/abdessaied/clevr-dialog/output_minecraft/raw_data/minecraft_test_dialogs.json', + 'Path to save the dataset json') +flags.DEFINE_integer('num_beams', 100, 'Number of beams in dialog search') +flags.DEFINE_integer('num_workers', 128, 'Number of workers to use in search') +flags.DEFINE_integer('captions_per_image', 1, 'Number of captions per image') +flags.DEFINE_integer('num_images', -1, + 'Number of images to generate dialogs. -1 for all.') +flags.DEFINE_integer('num_rounds', 10, 'Number of rounds in each dialog') + + +# Number of beams and distribution of question types. +# Start cutting down beams after 5th round. +# Heuristics (for round 4): +# A. count <= 2 1 <= seek <= 3 exist <= 2 +# B. count + exist <= 3 +# C. Independent questions <= 1 +# Heuristics (for round 5): +# A. count <= 2 2 <= seek <= 4 exist <= 2 +# B. count + exist <= 3 +# C. 
+#    Independent questions <= 1
+ranges = {3: {'indep': [0, 1], 'seek': [1, 4], 'exist': [0, 1],
+              'count': [0, 1], 'exist+count': [0, 2]},
+          4: {'indep': [0, 1], 'seek': [2, 4], 'exist': [0, 1],
+              'count': [0, 1], 'exist+count': [0, 2]},
+          5: {'indep': [0, 1], 'seek': [2, 5], 'exist': [0, 2],
+              'count': [0, 2], 'exist+count': [0, 3]},
+          6: {'indep': [0, 1], 'seek': [2, 5], 'exist': [0, 2],
+              'count': [0, 2], 'exist+count': [0, 3]},
+          7: {'indep': [0, 2], 'seek': [3, 5], 'exist': [0, 2],
+              'count': [0, 2], 'exist+count': [0, 3]},
+          8: {'indep': [0, 2], 'seek': [3, 6], 'exist': [0, 3],
+              'count': [0, 3], 'exist+count': [0, 3]},
+          9: {'indep': [0, 2], 'seek': [3, 6], 'exist': [0, 3],
+              'count': [0, 3], 'exist+count': [0, 4]}}
+
+
+def mapping(tag):
+  """Maps tag to attribute.
+
+  Args:
+    tag: An input tag
+
+  Returns:
+    tag_label: Label for the input tag
+  """
+
+  return gvars.METAINFO['tag_map'][tag.replace('1', '')]
+
+
+def inv_mapping(attribute, arg_id=0):
+  """Inverse maps attribute to tag.
+
+  Args:
+    attribute: Name of the attribute
+    arg_id: Argument id to use. Append 1 if arg_id is 1, else nothing
+
+  Returns:
+    base_tag: The string for the tag
+  """
+
+  base_tag = gvars.METAINFO['tag_inv_map'][attribute]
+  if arg_id > 0:
+    base_tag = base_tag[:-1] + str(arg_id) + base_tag[-1]
+
+  return base_tag
+
+
+def get_tag_group(tag):
+  """Gets the group id from tag string.
+
+  For example, tag string of <S> is 0, <S1> is 1.
+  Assumes single digit group id.
+
+  Args:
+    tag: Tag string
+
+  Returns:
+    group_id: Return extracted group id
+  """
+
+  group_id = 0 if len(tag) <= 3 else int(tag[-2])
+  return group_id
+
+
+def replace_attribute(text, tag, obj_group, eliminate=False):
+  """Replaces the attribute tags in text using available object properties.
+
+  NOTE: If shape is to be replaced, we use 'thing' in its place.
+
+  Args:
+    text: The text template to perform replacement
+    tag: The tags to replace in the text
+    obj_group: Available object properties to replace with
+    eliminate: Eliminate the remaining attribute tags
+
+  Returns:
+    replaced_text: The replaced text
+  """
+
+  group = get_tag_group(tag)
+  if mapping(tag) == 'relation':
+    # Actual relation tag, else position tag.
+    if tag == '<R>':
+      relation_list = gvars.METAINFO['relation_phrases'][obj_group['relation']]
+      relation_cand = random.choice(relation_list)
+    else:
+      relation_cand = obj_group['relation']
+
+    return text.replace(tag, relation_cand)
+
+  if mapping(tag) == 'shape':
+    if eliminate:
+      replacer = 'thing'
+    else:
+      replacer = str(obj_group['objects'][group][mapping(tag)])
+
+    # Plural forms for groups.
+    if obj_group.get('count', 1) > 1 or obj_group.get('use_plural', False):
+      replacer += 's'
+  elif mapping(tag) == 'count':
+    if eliminate:
+      replacer = ''
+    else:
+      replacer = str(obj_group['count'])
+  else:
+    if eliminate:
+      replacer = ''
+    else:
+      replacer = str(obj_group['objects'][group][mapping(tag)])
+  return text.replace(tag, replacer)
+
+
+def realize_text_and_extract_scene(scene, template, filter_objs):
+  """Samples attributes for template using filtered objects.
+
+  In addition, creates scene graph for the new information added.
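+  The returned sample keeps only the attributes that were actually realized
+  in the text, so the attached scene graph reflects partial knowledge.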
+ + Args: + scene: Current scene graph + template: Text template to use to generate questions + filter_objs: Set of objects satisfying constraints of current template + + Returns: + sample: Contains the text realization and scene graph + """ + + def default_list(): return collections.defaultdict(list) + graph = {'relationships': collections.defaultdict(default_list), + 'counts': {}, 'exists': {}, 'history': [], 'objects': {}} + + # number of inputs + n_inputs = template.get('inputs', 1) + # sample a text template + text_sample = random.choice(template['text']) + text_sample_copy = copy.deepcopy(text_sample) + # print("original -- {}".format(text_sample_copy)) + text_sample_index = template['text'].index(text_sample) + + # sample a random element from filtered + arg_sample = random.choice(filter_objs) + + # scene information obtained from the current round + graph_item = arg_sample['graph'] + # for i, o in enumerate(arg_sample["objects"]): + # print("Required for obj {}: {} ".format(i, o["required"])) + # n_and_c_in_req = "class" in arg_sample["objects"][0]["required"] and "nature" in arg_sample["objects"][0]["required"] + # # if n_and_c_in_req: + # # print("bla") + + # if len(arg_sample["objects"]) > 0: + # if not n_and_c_in_req: + # if "class" in arg_sample["objects"][0]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(graph_item["objects"][0]) + # # print(text_sample) + + # elif "nature" in arg_sample["objects"][0]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(graph_item["objects"][0]) + # # print(text_sample) + # if len(arg_sample["objects"]) == 2: + # type1_and_class1_in_req = "class" in arg_sample["objects"][0]["required"] and "nature" in arg_sample["objects"][0]["required"] + # if not type1_and_class1_in_req: + # if "class" in arg_sample["objects"][1]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(graph_item["objects"][0]) + # # print(text_sample) + + # elif "nature" in arg_sample["objects"][1]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(graph_item["objects"][0]) + # # print(text_sample) + + text_sample_index = template['text'].index(text_sample) + # text_sample_mod_copy = copy.deepcopy(text_sample) + # extract attribute tags and get them into groups + tags = re.findall('(<[\d\w]*>)', text_sample) + tag_groups = collections.defaultdict(list) + for tag in tags: + group_id = get_tag_group(tag) + tag_groups[group_id].append(tag) + + # remove tags from text not allowed by filter_objs + for arg_ind in range(n_inputs): + obj_sample = arg_sample['objects'][arg_ind] + avail_attrs = obj_sample['optional'] + obj_sample['required'] + + for ii in tag_groups[arg_ind][::-1]: + if mapping(ii) not in avail_attrs: + tag_groups[arg_ind].remove(ii) + text_sample = replace_attribute( + text_sample, ii, arg_sample, True) + + # if "class" in avail_attrs and ""]: + pos = text_sample.index("<") + text_sample = text_sample[:pos] + "object " + text_sample[pos:] + # filter out those not present in text template + optional_tags = [inv_mapping(ii, arg_ind) + for ii in obj_sample['optional']] + optional_tags = [ + ii for ii in optional_tags if ii in tag_groups[arg_ind]] + + # if tags_to_keep is empty, sample from optional with 1:70 2:25 3:5 + if len(optional_tags) > 0: + if len(tags_to_keep) > 0: 
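+        # (some required tags are already kept, so at most two optional
+        #  tags are added on top)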
+ n_tags_sample = [0, 1, 2] + else: + n_tags_sample = [1, 2, 3] + n_sample = np.random.choice(n_tags_sample, 1, + p=gvars.METAINFO['probabilities'], + replace=False) + # lower cap at the length of optional + n_sample = min(n_sample[0], len(optional_tags)) + if n_sample > 0: + tags_to_keep += random.sample(optional_tags, n_sample) + + # now create a dictionary of placeholders with actual attribute values + for tag in tag_groups[arg_ind]: + remove = tag not in tags_to_keep + text_sample = replace_attribute( + text_sample, tag, arg_sample, remove) + + # remove attributes from objects not included in tags_to_keep + if 'objects' in graph_item: + for ii in gvars.METAINFO['attributes']: + if inv_mapping(ii, arg_ind) not in tags_to_keep: + if ii in graph_item['objects'][arg_ind]: + del graph_item['objects'][arg_ind][ii] + + # record the caption info + # Record info and merge scene graphs. + args = [] + # if "unique-obj" == template['label']: + # print('yey') + for obj in arg_sample['objects']: + if obj is None: + continue + else: + for k in obj['required']: + arg = obj.get(k, None) + if arg is not None: + if arg not in args: # and type(arg) == str: + args.append(arg) + else: + arg = arg_sample.get(k, None) + if arg is not None and arg not in args and type(arg) == str: + args.append(arg) + arg = obj.get('attribute', None) + if arg is not None and arg not in args: + args.append(arg) + if template['label'] == 'obj-relation': + args.append(arg_sample['relation']) + + if template['label'] == "count-att-no": + template['label'] = "count-att" + + graph_item['round'] = 0 + sample = {} + sample['template_info'] = [copy.deepcopy(template)] + sample['args'] = args + del sample['template_info'][-1]['text'] + sample['template_info'][-1]['index'] = text_sample_index + sample['caption'] = text_sample + sample['template'] = template['label'] + + sample['dialog'] = [] + + # append history, update scene graph, and save the new scene graph + graph['history'].append(graph_item) + sample['graph'] = utils.merge_update_scene_graph(graph, graph_item) + return sample + + +def realize_question(dialog, template, filter_objs): + """Samples attributes for template using filtered objects. + + In addition, creates scene graph for the new information added. + + Args: + scene: Current scene graph + template: Text template to use to generate questions + filter_objs: Set of objects satisfying constraints of current template + + Returns: + sample: Contains the text realization and scene graph + """ + + # Number of inputs. + n_inputs = template.get('inputs', 0) + # if "early" in template["label"]: + # print("bla") + # Sample a text template. 
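+  # (template['text'] lists several paraphrases of the same question; one is
+  #  drawn uniformly and its index recorded alongside the template info)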
+ text_sample = random.choice(template['text']) + text_sample_index = template['text'].index(text_sample) + # text_sample_copy = copy.deepcopy(text_sample) + # print("original -- {}".format(text_sample_copy)) + # --------------------- + # sample a random element from filtered + arg_sample = random.choice(filter_objs) + # if template["label"] == "exist-obj-exclude-early": + # print("bla") + # scene information obtained from the current round + # graph_item = arg_sample['graph'] + # flag = "" in text_sample or "" in text_sample or "" in text_sample or "" in text_sample + # if flag: + # # if 0 < len(arg_sample["objects"]) <= 2: + # # for i, o in enumerate(arg_sample["objects"]): + # # print("Required for obj {}: {} ".format(i, o["required"])) + # if "class" in arg_sample["objects"][0]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][0]) + # # print(text_sample) + # elif "nature" in arg_sample["objects"][0]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][0]) + # # print(text_sample) + # if len(arg_sample["objects"]) > 1: + # if "class" in arg_sample["objects"][1]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][0]) + # # print(text_sample) + + # elif "nature" in arg_sample["objects"][1]["required"] and "" not in text_sample: + # # print("changing with ") + # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][0]) + # # print(text_sample) + # # if len(arg_sample["objects"]) == 2 and arg_sample["objects"][1] != "none": + # # if "class" in arg_sample["objects"][1] and "" not in text_sample: + # # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][1]) + # # print(text_sample) + + # # if "nature" in arg_sample["objects"][1] and "" not in text_sample: + # # text_sample = text_sample.replace("", "") + # # print(arg_sample["objects"][1]) + # # print(text_sample) + # text_sample_index = template['text'].index(text_sample) + # ------------------------ + # text_sample_mod_copy = copy.deepcopy(text_sample) + # Extract attribute tags and get them into groups. + tags = re.findall('(<[\d\w]*>)', text_sample) + tag_groups = collections.defaultdict(list) + for tag in tags: + group_id = get_tag_group(tag) + tag_groups[group_id].append(tag) + + # Sample a random element from filtered. + # arg_sample = random.choice(filter_objs) + + # Remove tags from text not allowed by filter_objs. + for arg_ind in range(n_inputs): + obj_sample = arg_sample['objects'][arg_ind] + avail_attrs = obj_sample['optional'] + obj_sample['required'] + + for ii in tag_groups[arg_ind][::-1]: + if mapping(ii) not in avail_attrs: + tag_groups[arg_ind].remove(ii) + text_sample = replace_attribute( + text_sample, ii, arg_sample, True) + + # Assert that all required attributes are present as tags. + for attribute in obj_sample['required']: + required_tag = inv_mapping(attribute, arg_ind) + # Make an exception for and
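+      # (the assert below guarantees that every required attribute of the
+      #  sampled object still appears as a tag in the chosen paraphrase)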

+      if required_tag == '' and '' in tag_groups[arg_ind]:
+        continue
+      assert required_tag in tag_groups[arg_ind], \
+          'A required attribute {} is missing in template {}!'.format(
+              required_tag, template['label'])
+
+    # Start compiling tags to keep.
+    tags_to_keep = [inv_mapping(ii, arg_ind)
+                    for ii in obj_sample['required']]
+    if tags_to_keep == [""] or tags_to_keep == [""]:
+      pos = text_sample.index("<")
+      text_sample = text_sample[:pos] + "object " + text_sample[pos:]
+    # elif tags_to_keep == [""]:
+
+    # Filter out those not present in text template.
+    optional_tags = [inv_mapping(ii, arg_ind)
+                     for ii in obj_sample['optional']]
+    optional_tags = [
+        ii for ii in optional_tags if ii in tag_groups[arg_ind]]
+
+    # If tags_to_keep is empty, sample from optional with (1:70, 2:25, 3:5).
+    if len(optional_tags) > 0:
+      if len(tags_to_keep) > 0:
+        n_tags_sample = [0, 1, 2]
+      else:
+        n_tags_sample = [1, 2, 3]
+      n_sample = np.random.choice(n_tags_sample, 1,
+                                  p=gvars.METAINFO['probabilities'],
+                                  replace=False)
+      # Lower cap at the length of optional.
+      n_sample = min(n_sample[0], len(optional_tags))
+      if n_sample > 0:
+        tags_to_keep += random.sample(optional_tags, n_sample)
+
+    # Now create a dictionary of placeholders with actual attribute values.
+    for tag in tag_groups[arg_ind]:
+      remove = tag not in tags_to_keep
+      text_sample = replace_attribute(
+          text_sample, tag, arg_sample, remove)
+
+  # Record info and merge scene graphs.
+  args = []
+  # if template['label'] == 'seek-attr-early':
+  #   print('yey')
+  for obj in arg_sample['objects']:
+    if obj is None:
+      continue
+    else:
+      for k in obj['required']:
+        arg = obj.get(k, None)
+        if arg is not None:
+          if arg not in args:
+            args.append(arg)
+        else:
+          arg = arg_sample.get(k, None)
+          if arg is not None:
+            args.append(arg)
+      arg = obj.get('attribute', None)
+      if arg is not None and arg not in args:
+        args.append(arg)
+
+  # req_att_keys = [k for obj in arg_sample['objects'] for k in obj['required'] if obj is not None]
+  dialog_datum = {'question': text_sample, 'answer': arg_sample['answer'],
+                  'template': template['label'], 'args': args}
+  dialog['template_info'].append(template.copy())
+  del dialog['template_info'][-1]['text']
+  dialog['template_info'][-1]['index'] = text_sample_index
+  # if 'unique' in template['label']:
+  #   print('voila')
+  dialog['dialog'].append(dialog_datum)
+  graph_item = arg_sample['graph']
+
+  # If mergeable, add it to the objects list.
+  dialog['graph'] = utils.merge_update_scene_graph(
+      dialog['graph'], graph_item)
+
+  # If there are volatile objects in the graph item, remove them.
+  for obj in graph_item['objects'][::-1]:
+    if obj.get('volatile', False):
+      graph_item['objects'].remove(obj)
+  dialog['graph']['history'].append(graph_item)
+  return dialog
+
+
+def clean_text_subroutine(text, thing, suffix):
+  """Cleans the text and substitutes thing with object (subroutine).
+
+  Args:
+    text: Text string to be cleaned
+    thing: Whether to use 'thing' or 'object'
+    suffix: Either '?' (question) or '.' (caption)
+
+  Returns:
+    clean_text: Text string after cleaning procedure
+  """
+
+  # Synonyms + skipping optional parts of the sentence.
+  clean_text = skip_and_replace_phrases(text)
+
+  # Remove full stop, empty spaces, capitalize the start letter.
+  clean_text = re.sub(' +', ' ', clean_text.replace(suffix, '').strip(' '))
+  # First replace 'a thing' -> 'an object'.
+  # Then perform remaining actions.
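+  # e.g. with thing == 'object': 'a thing stands there' -> 'An object stands there.'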
+ if thing == 'object': + clean_text = clean_text.replace('a thing', 'an object') + clean_text = clean_text.replace('thing', thing) + clean_text = clean_text[0].upper() + clean_text[1:] + suffix + return clean_text + + +def clean_dialog_text(dialogs): + """Cleans the dialog texts. + + Args: + dialogs: Generated dialogs to perform text cleaning + + Returns: + dialogs: Return the dialogs after cleaning the text inplace + """ + + # Replace thing with object throughout with probability 0.5. + thing = 'thing' if random.random() > 0.5 else 'object' + for index, dialog_datum in enumerate(dialogs): + # Clean the caption. + text = dialog_datum['caption'] + dialogs[index]['caption'] = clean_text_subroutine(text, thing, '.') + + for r_id, dialog in enumerate(dialog_datum['dialog']): + # Clean the question. + text = dialog['question'] + text = clean_text_subroutine(text, thing, '?') + dialogs[index]['dialog'][r_id]['question'] = text + return dialogs + + +def skip_and_replace_phrases(text): + """Substitutes synonyms and skips optional parts stochastically. + + Args: + text: Text string + + Returns: + text: Text string with synonyms replaced and optional parts skipped + """ + + # For each text in [], replace it with '' with probability 0.5. + matches = re.findall('(\[[ \w]*\])', text) + for match in matches: + if random.uniform(0, 1) > 0.5: + text = text.replace(match, '') + else: + text = text.replace(match, match[1:-1]) + + # Remove empty spaces, if any. + text = re.sub(' +', ' ', text) + # Search for synonyms, replace at uniformly random. + text = text.lower() + for key, values in gvars.METAINFO['synonym_keys']: + if key in text: + text = text.replace(key, random.choice(values)) + return text + + +def generate_captions(scenes, templates): + """Wrapper generates captions. + + Args: + scenes: List of scene graphs for which to generate captions + templates: List of available caption templates + + Returns: + generated_content: Captions generated for the input scenes + """ + + template_dictionary = {ii['label']: ii for ii in templates} + generated_content = [] + for scene in scenes['scenes'][0:FLAGS.num_images]: + content = {} + # Copy over image_index, split, image_filename from scene. + for key in ['image_index', 'split', 'image_filename']: + content[key] = scene[key] + + content['dialogs'] = [] + # Filter objects based on constraints. + filter_objs = constraints.caption(scene, templates) + for filter_obj in filter_objs: + # Realize the text, and return the partial scene knowledge (q). + template = template_dictionary[filter_obj[0]['graph']['template']] + sample = realize_text_and_extract_scene( + scene, template, filter_obj) + # Add it to the list of dialogs. + content['dialogs'].append(sample) + generated_content.append(content) + return generated_content + + +def generate_questions(scenes, dialogs, templates, params): + """Wrapper generates questions. + + Args: + scenes: List of scene graphs to generate questions + dialogs: Contains already generated captions for scenes graphs + templates: List of available question templates + params: Beam search parameters for question generation + + Returns: + new_dialogs: Generated raw dialogs with captions and questions + """ + + new_dialogs = [] + for scene_id, dialog_datum in enumerate(dialogs): + image_dialogs = copy.deepcopy(dialog_datum) + image_dialogs['dialogs'] = [] + + for dialog in dialog_datum['dialogs']: + # Pick a template at random. + flag = False + iter_count = 0 + while not flag: + # Pick a template at random. 
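+        # (up to 10 draws; a draw succeeds when constraints.question finds at
+        #  least one object grouping that satisfies the template)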
+ template = random.choice(templates) + + # Filter objects based on constraints. + filter_objs = constraints.question(scenes['scenes'][scene_id], + dialog, template) + flag = len(filter_objs) != 0 + + # Extreme case -- exit + iter_count += 1 + if iter_count > 10: + break + + # Realize q question. + if flag: + deep_copy = copy.deepcopy(dialog) + gen_dialog = realize_question(deep_copy, template, filter_objs) + image_dialogs['dialogs'].append(copy.deepcopy(gen_dialog)) + new_dialogs.append(image_dialogs) + + return new_dialogs + + +def worker(scenes, cap_templates, ques_templates, worker_id, out_q): + """Worker method generates dialogs (caption + questions) for pool of scenes. + + Args: + scenes: List of CLEVR scenes to generate dialogs + cap_templates: Templates for caption generation + ques_templates: Templates for question generation + worker_id: Id for the current worker + out_q: Output queue to save generated dialogs from different sources + + Returns: + Adds dialogs against the worker id in the output queue. + """ + + dialogs = [] + for index, scene in enumerate(scenes): + cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime()) + print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % + (cur_time, worker_id, index, len(scenes), scene['image_index'])) + try: + gen_dialog = generate_dialog_bfs( + scene, cap_templates, ques_templates) + dialogs.append(json.loads(json.dumps(gen_dialog))) + except: + print('NOTE: Missing data for %d' % scene['image_index']) + out_q.put({worker_id: dialogs}) + + +def generate_dialog_bfs(scene, cap_templates, ques_templates): + """Perform approximate breadth-first-search (BFS) to generate dialogs. + + Args: + scene: Scene graph for the CLEVR image + cap_templates: List of caption templates + ques_templates: List of question templates + + Returns: + bundle: List of dialogs generated for the input scene graph + """ + + bundle = {} + # Generate captions for the scene. + # Copy over image_index, split, image_filename from scene. + for key in ['image_index', 'image_filename']: + bundle[key] = scene[key] + + template_dictionary = {ii['label']: ii for ii in cap_templates} + content = {} + + # Filter objects based on constraints on captions. + filter_objs = constraints.caption(scene, cap_templates) + for filter_obj in filter_objs: + for f_obj in filter_obj: + for obj in f_obj["objects"]: + # obj["required"] = ["None"] + if "class" in obj["required"] and "nature" in obj["required"]: + if np.random.rand() <= 0.5: + obj["required"].remove("class") + else: + obj["required"].remove("nature") + obj["optional"] = obj["required"] + + for filter_obj in filter_objs: + # Realize the text, and return the partial scene knowledge (q). + template = template_dictionary[filter_obj[0]['graph']['template']] + sample = realize_text_and_extract_scene(scene, template, filter_obj) + # Add it to the list of dialogs. + content[template['label']] = [sample] + + # Now generate questions. + # Group templates, exist/count of similar type together. + ques_groups = collections.defaultdict(list) + + labels = [ii['label'] for ii in ques_templates] + # print('\n'.join(labels)) + for index, ii in enumerate(ques_templates): + if 'exist' in ii['label'] or 'count' in ii['label']: + ques_groups[labels[index][4:]].append(ii) + else: + ques_groups[labels[index]].append(ii) + + for round_id in range(FLAGS.num_rounds): + new_content = {} + + # For each group. 
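+    # (content maps caption label -> beam pool; every beam is extended by one
+    #  question per template group below, then pruned by the round heuristics)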
+ for cap_label, cap_dialogs in content.items(): + cur_pool = [] + for dialog_datum in cap_dialogs: + for _, group in ques_groups.items(): + template = random.choice(group) + + # Make a copy. + datum_copy = copy.deepcopy(dialog_datum) + + # Filter objects based on constraints. + filter_objs = constraints.question( + scene, datum_copy, template) + + if len(filter_objs) == 0: + continue + else: + for filter_obj in filter_objs: + for obj in filter_obj["objects"]: + # obj["required"] = ["None"] + if obj is not None: + if "class" in obj["required"] and "nature" in obj["required"]: + if np.random.rand() <= 0.5: + obj["required"].remove("class") + else: + obj["required"].remove("nature") + # obj["optional"] = obj["required"] + # Realize q question. + gen_dialog = realize_question( + datum_copy, template, filter_objs) + cur_pool.append(gen_dialog) + + if round_id in ranges: + for d_id, dialog in enumerate(cur_pool): + n_types = {'indep': 0, 'seek': 0, 'exist': 0, 'count': 0} + keep_dialog = True + + labels = [ii['label'] + for ii in dialog['template_info'][1:]] + for label in labels: + if label in gvars.METAINFO['independent_questions']: + n_types['indep'] += 1 + + label_type = label.split('-')[0] + n_types[label_type] += 1 + + # Heuristic A, C + for q_type, count in n_types.items(): + limit = ranges[round_id][q_type] + if limit[0] > count or count > limit[1]: + keep_dialog = False + break + + # Heuristic B + limit = ranges[round_id]['exist+count'] + if n_types['count'] + n_types['exist'] > limit[1]: + keep_dialog = False + if not keep_dialog: + cur_pool[d_id] = None + cur_pool = [ii for ii in cur_pool if ii is not None] + + # Keep limited number of beams (for speed). + if len(cur_pool) > FLAGS.num_beams: + cur_pool = sample_beams(cur_pool)[:FLAGS.num_beams] + new_content[cap_label] = cur_pool + content = copy.deepcopy(new_content) + + # Get dialogs with sim, imm2, early questions. + for cap_label, cap_dialogs in content.items(): + # Sample beams. + content[cap_label] = sample_beams(cap_dialogs) + + # Remove keys that are empty. + empty_keys = [key for key, val in content.items() if len(val) == 0] + for key in empty_keys: + del content[key] + + # For each caption, sample one. + sampled_dialogs = [] + for cap_label, cap_dialogs in content.items(): + if len(cap_dialogs) > 0: + sampled_dialogs.append(cap_dialogs.pop()) + + # Get 5 per image, compensate by taking from other entries. + content_keys = [ii for ii in content.keys()] + while len(sampled_dialogs) < 5: + random_label = random.choice(content_keys) + sampled_dialogs.append(cap_dialogs.pop()) + + # Finally, make the dialog text readable. + sampled_dialogs = clean_dialog_text(sampled_dialogs) + + # Generate the coreference chain. + for dialog_id, dialog in enumerate(sampled_dialogs): + sampled_dialogs[dialog_id] = identify_coref_chains(dialog) + bundle['dialogs'] = sampled_dialogs + return bundle + + +def sample_beams(dialogs): + """Samples beams based on the number of constraints satisfied. 
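+
+  Dialogs that satisfy more of the 'imm2', 'sim', and 'early' constraints
+  rank first; ties are broken uniformly at random.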
+
+  Args:
+    dialogs: Generated dialogs to sample beams
+
+  Returns:
+    sampled_dialogs: List of sampled dialogs based on the constraints
+  """
+
+  num_constraints = []
+  for d_id, dialog in enumerate(dialogs):
+    satisfied = 0
+    labels = [ii['label'] for ii in dialog['template_info'][1:]]
+
+    # Have an imm2 for sure.
+    satisfied += np.sum(['imm2' in ii for ii in labels])
+    # Have a sim for sure.
+    satisfied += np.sum(['sim' in ii for ii in labels])
+    # Have 'early'.
+    satisfied += min(4, np.sum(['early' in ii for ii in labels]))
+
+    # Add it with the number of constraints it satisfies.
+    num_constraints.append((satisfied, d_id))
+
+  # Then order.
+  def sort_key(x): return (x[0], random.random())
+  ids = sorted(num_constraints, key=sort_key, reverse=True)
+  sampled_dialogs = [dialogs[ii[1]] for ii in ids]
+  return sampled_dialogs
+
+
+def identify_coref_chains(dialog):
+  """Identifies the coreference chains in generated dialog.
+
+  Args:
+    dialog: Generated dialog for which coreference chains are to be identified
+
+  Returns:
+    dialog: A copy of the dialog, with coreference chains annotated
+  """
+
+  for r_id, datum in enumerate(dialog['dialog']):
+    label = datum['template']
+    if label in gvars.METAINFO['independent_questions']:
+      dialog['graph']['history'][r_id + 1]['dependence'] = None
+      continue
+
+    if (label == 'exist-attribute-group' or label == 'count-attribute-group' or
+            label == 'count-all-group'):
+      dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1
+      continue
+
+    if 'imm' in label:
+      dialog['graph']['history'][r_id + 1]['dependence'] = r_id - 1
+      continue
+
+    if 'early' in label:
+      # Go over previous history.
+      cur_history = dialog['graph']['history'][r_id + 1]
+      assert 'focus_id' in cur_history and 'focus_desc' in cur_history,\
+          'More focus objects than one, no focus objects!'
+      focus_id = cur_history['focus_id']
+      for attr in gvars.METAINFO['attributes']:
+        if attr in cur_history['focus_desc']:
+          break
+
+      history = dialog['graph']['history'][:r_id + 1]
+      for hist_id, hist_datum in enumerate(history):
+        for obj in hist_datum['objects']:
+          if obj['id'] == focus_id and attr in obj:
+            dialog['graph']['history'][r_id +
+                                       1]['dependence'] = hist_id - 1
+            break
+  return dialog
+
+
+def main(unused_argv):
+  """Main method generates the Minecraft dialog dataset.
+  """
+  # Read the scene file.
+  with open(FLAGS.scene_path, 'r') as file_id:
+    scenes = json.load(file_id)
+
+  # Read the synonyms file.
+  with open(FLAGS.synonym_path, 'r') as file_id:
+    synonyms = json.load(file_id)
+
+  def sorter(x): return len(x[0].split(' '))
+
+  # Read the metainformation file.
+  with open(FLAGS.metainfo_path, 'r') as file_id:
+    gvars.METAINFO = json.load(file_id)
+  tag_inv_map = {attr: tag for tag, attr in gvars.METAINFO['tag_map'].items()
+                 if tag != '<P>'}
+  gvars.METAINFO['tag_inv_map'] = tag_inv_map
+  gvars.METAINFO['synonym_keys'] = sorted(synonyms.items(),
+                                          key=sorter, reverse=True)
+
+  # Add ids to objects.
+  scenes = utils.add_object_ids(scenes)
+  scenes = utils.clean_object_attributes(scenes)
+
+  # Read the caption templates.
+  template_paths = os.listdir(FLAGS.caption_template_root)
+  cap_templates = []
+  for ii in template_paths:
+    with open(os.path.join(FLAGS.caption_template_root, ii), 'r') as file_id:
+      cur_templates = json.load(file_id)
+    cap_templates.extend(cur_templates)
+  # utils.pretty_print_templates(cap_templates, 1)
+
+  # Read the question templates.
+  template_paths = os.listdir(FLAGS.question_template_root)
+  ques_templates = []
+  for ii in template_paths:
+    with open(os.path.join(FLAGS.question_template_root, ii), 'r') as file_id:
+      cur_templates = json.load(file_id)
+    ques_templates.extend(cur_templates)
+  # utils.pretty_print_templates(ques_templates, 1)
+
+  # 1. Check if there is a scene_id_file specified.
+  # 2. Check if num_images is -1.
+  if FLAGS.scene_id_file != '':
+    with open(FLAGS.scene_id_file, 'r') as file_id:
+      missing_ids = [int(ii.strip('\n')) for ii in file_id.readlines()]
+    print('Dialogs missing for scenes: %d' % len(missing_ids))
+
+    # Create an image_index -> scenes list index dictionary.
+    image_list_id_dict = {ii['image_index']: index
+                          for index, ii in enumerate(scenes['scenes'])}
+    scenes_subset = [scenes['scenes'][image_list_id_dict[scene_id]]
+                     for scene_id in missing_ids]
+
+  elif FLAGS.num_images == -1:
+    scenes_subset = scenes['scenes']
+
+  else:
+    scenes_subset = scenes['scenes'][0: FLAGS.num_images]
+
+  # BFS for each scene.
+  if FLAGS.num_workers == 1:
+    # Single thread version.
+    dialogs = []
+    for index, scene in enumerate(scenes_subset):
+      cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
+      print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' %
+            (cur_time, 0, index, len(scenes_subset), scene['image_index']))
+      gen_dialog = generate_dialog_bfs(
+          scene, cap_templates, ques_templates)
+      dialogs.append(gen_dialog)
+
+  else:
+    # Multithread version.
+    output_q = multiprocessing.Queue()
+    jobs = []
+    for worker_id in range(FLAGS.num_workers):
+      allotment = scenes_subset[worker_id::FLAGS.num_workers]
+      inputs = (allotment, cap_templates, ques_templates)
+      inputs += (worker_id, output_q)
+
+      process = multiprocessing.Process(target=worker, args=inputs)
+      jobs.append(process)
+      process.start()
+
+    # Wait for all the jobs to finish and collect the output.
+    final_results = {}
+    for _ in jobs:
+      final_results.update(output_q.get())
+    for job in jobs:
+      job.join()
+
+    # Flatten and sort.
+    final_results = [jj for _, ii in final_results.items() for jj in ii]
+    dialogs = sorted(final_results, key=lambda x: x['image_index'])
+    # utils.pretty_print_dialogs(dialogs)
+
+  # Save the dialogs.
+  print('Saving dialog at: %s' % FLAGS.save_path)
+  with open(FLAGS.save_path, 'w') as file_id:
+    json.dump(dialogs, file_id)
+
+
+if __name__ == '__main__':
+  gvars.initialize()
+  app.run(main)
diff --git a/global_vars.py b/global_vars.py
new file mode 100644
index 0000000..41f688e
--- /dev/null
+++ b/global_vars.py
@@ -0,0 +1,10 @@
+"""Global variables (avoid as much as possible).
+Author: Satwik Kottur
+"""
+
+def initialize():
+  """Sets up global variables.
+  """
+
+  global METAINFO
+  METAINFO = {}
\ No newline at end of file
diff --git a/minecraft_utils.py b/minecraft_utils.py
new file mode 100644
index 0000000..4049f1d
--- /dev/null
+++ b/minecraft_utils.py
@@ -0,0 +1,224 @@
+"""Utilities for Minecraft dialog dataset generation.
+
+Author: Satwik Kottur
+"""
+
+import copy
+# Used by merge_update_scene_graph and the pretty-print helpers below.
+import pdb
+
+import colorama
+
+
+def pretty_print_templates(templates, verbosity=1):
+  """Pretty prints templates.
+
+  Args:
+    templates: Templates to print
+    verbosity: 1 to print name and type of the templates
+  """
+
+  # Verbosity 1: Name and type.
+  print('-'*70)
+  for ii in templates:
+    print('[Name: %s] [Type: %s]' % (ii['name'], ii['type']))
+  print('-'*70)
+  print('Total of %s templates..' % len(templates))
+  print('-'*70)
+
+
+def pretty_print_scene_objects(scene):
+  """Pretty prints scene objects.
+
+  Args:
+    scene: Scene graph containing list of objects
+  """
+
+  for index, ii in enumerate(scene['objects']):
+    print_args = (index, ii['shape'], ii['color'],
+                  ii['size'], ii['material'])
+    print('\t%d : %s-%s-%s-%s' % print_args)
+
+
+def pretty_print_dialogs(dialogs):
+  """Pretty prints generated dialogs.
+
+  Args:
+    dialogs: Generated dialogs to print
+  """
+
+  for scene_id, dialog_datum in enumerate(dialogs):
+    for dialog in dialog_datum['dialogs']:
+      print(dialog['caption'])
+      for round_id, ii in enumerate(dialog['dialog']):
+        coref_id = dialog['graph']['history'][round_id+1]['dependence']
+        in_tuple = (round_id, ii['question'], str(ii['answer']),
+                    ii['template'], str(coref_id))
+        print('\t[Q-%d: %s] [A: %s] [%s] [%s]' % in_tuple)
+
+
+def merge_update_scene_graph(orig_graph, graph_item):
+  """Merges two scene graphs into one.
+
+  Args:
+    orig_graph: Original scene graph
+    graph_item: New graph item to add to the scene graph
+
+  Returns:
+    graph: Deep copy of the original scene graph after merging
+  """
+
+  graph = copy.deepcopy(orig_graph)
+  # Local alias.
+  objects = graph['objects']
+
+  # If not mergeable, return the same scene graph.
+  if not graph_item['mergeable']:
+    return graph
+
+  # 1. Go through each new object
+  # 2. Find its batch in objects
+  #   a. If found, assert for a clash of attributes, update
+  #   b. If novel, just add the object as is
+  for new_obj in graph_item['objects']:
+    match_found = False
+    obj = objects.get(new_obj['id'], None)
+
+    if obj:
+      # Assert for existing entries.
+      for attr in new_obj:
+        try:
+          assert new_obj[attr] == obj.get(attr, new_obj[attr]),\
+              'Some of the attributes do not match!'
+        except:
+          pdb.set_trace()
+
+      # Add additional keys.
+      objects[new_obj['id']].update(new_obj)
+    else:
+      # Add the new object.
+      objects[new_obj['id']] = new_obj
+
+  # If a relation, update it.
+  if 'relation' in graph_item:
+    rel = graph_item['relation']
+    # Update it with object 2 id.
+    id1 = graph_item['objects'][0]['id']
+    id2 = graph_item['objects'][1]['id']
+    rel_objs = graph['relationships'][rel][id1]
+    rel_objs.append(id2)
+    graph['relationships'][rel][id1] = rel_objs
+
+  # Update objects in graph.
+  graph['objects'] = objects
+  return graph
+
+
+def add_object_ids(scenes):
+  """Adds object ids field for input scenes.
+
+  Args:
+    scenes: List of CLEVR scene graphs
+
+  Returns:
+    scenes: Adds object_id field for the objects in the scene graph inplace
+  """
+
+  for scene_id, scene in enumerate(scenes['scenes']):
+    for obj_id, _ in enumerate(scene['objects']):
+      scenes['scenes'][scene_id]['objects'][obj_id]['id'] = obj_id
+  return scenes
+
+
+def clean_object_attributes(scenes):
+  """Cleans attributes for objects, keeping only attributes and id.
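+
+  For the Minecraft scenes, the kept keys are 'class', 'direction',
+  'nature', and 'id'.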
+ + Args: + scenes: Scene graph to clean + + Returns: + scenes: Cleaned up scene graphs inplace + """ + + keys = ['class', 'direction', 'nature', 'id'] + for scene_id, scene in enumerate(scenes['scenes']): + for obj_id, obj in enumerate(scene['objects']): + new_obj = {key: obj[key] for key in keys} + scenes['scenes'][scene_id]['objects'][obj_id] = new_obj + return scenes + + +def pretty_print_corefs(dialog, coref_groups): + """Prints coreferences for a dialog, higlighting different groups in colors. + + Args: + dialog: Generated dialogs to print + coref_groups: Coreference groups for dialogs + """ + + colorama.init() + # Mapping of group_id -> color_ids for (foreground, background) + color_map = {} + groups = coref_groups.get(0, []) + colored, color_map = pretty_print_coref_sentence(dialog['caption'], groups, + color_map) + print('\n\nC: %s' % colored) + for round_id, round_datum in enumerate(dialog['dialog']): + question = round_datum['question'] + groups = coref_groups.get(round_id + 1, []) + colored, color_map = pretty_print_coref_sentence(question, groups, + color_map) + print('%d: %s' % (round_id, colored)) + + +def pretty_print_coref_sentence(sentence, groups, color_map): + """Prints a sentence containing difference coreference groups. + + Args: + sentence: Text sentence + groups: List of coreference groups with spans + color_map: List of groups and associated color maps + + Returns: + sentence: Text sentence with colors inserted + color_map: Updated, if new groups in the current sentence + """ + + fore_colors = ['RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA'] + back_colors = ['BLACK', 'YELLOW', 'CYAN'] + insertions = [] + for group in groups: + group_id = group['group_id'] + if group_id in color_map: + forecolor_id, backcolor_id = color_map[group_id] + else: + num_groups = len(color_map) + forecolor_id = num_groups % len(fore_colors) + backcolor_id = num_groups // len(fore_colors) + color_map[group_id] = (forecolor_id, backcolor_id) + + forecolor = fore_colors[forecolor_id] + backcolor = back_colors[backcolor_id] + insertions.append( + (group['span'][0], getattr(colorama.Fore, forecolor))) + insertions.append( + (group['span'][0], getattr(colorama.Back, backcolor))) + insertions.append((group['span'][1], + getattr(colorama.Style, 'RESET_ALL'))) + + # Perform insertions. + sentence = insert_into_sentence(sentence, insertions) + return sentence, color_map + + +def insert_into_sentence(sentence, insertions): + """Sorts and performs insertions from right. 
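+
+  Applying insertions from the rightmost position first keeps the earlier
+  (smaller) offsets valid as the sentence grows.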
+
+  Args:
+    sentence: Sentence to perform insertions into
+    insertions: List of insertions, format: (position, text_insert)
+
+  Returns:
+    sentence: Inplace inserted sentence
+  """
+
+  insertions = sorted(insertions, key=lambda x: x[0], reverse=True)
+  for position, text in insertions:
+    sentence = sentence[:position] + text + sentence[position:]
+  return sentence
diff --git a/misc/method_overview.png b/misc/method_overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..2f2191783d3dcbf4070bff45d576c0f3e85f9484
GIT binary patch
literal 102188
zUVhy*$v~#~GFY40+Ap?qr|nEw;G@dT0LRZ>ahsNS=Aye(>d(-k6K3;r6q#qUXUl$= zHcTk6IGr6ybU#jWJy7nT*Esf^`SA96hol1gOCkTD0&6|X4W6z5PC(MYb>Y>YJ^x{2 zv|z+=JX`vW!0n!R*x;EavjWBJgGFY?q5~-LznQ$I9m}(J1U$SP)!4uXGMHL9|)KakMg9(w+3_O89hoau*DkXX@uTIzSNMnOcv|G<2 zt!jMhC){Fx)mOO<>f^E=zUBi;z`M3&n|`Hge>qJ}l3n*1ysbaf6(xamTNg1Jd%&x} z3Ve8Hsr|V5xT(@dV|i%i-+oi`v$pK9F1WqWR}O%WazL{@`;#ic{I2gmcThC`0Imu; zC7mr}-BPn?to589v?02+_KYC>IVz>r2I~kkN3~F7*8NR*3RH`!ZRdSZM0-HNi-_=# zR=1-kiL>v$znjMWHRH@D4_mSLg3q@1$nDPJte~81Ym!dSp8A5-K%VRVT6I*>p#H~r z83j>Ip30Eu?jh{egG$`atDOMLI;`ToyIAfkGgH~id{sW$MfWHfrPIEIRhD3l*rO4; zGPDnL6gdhp1yt>pG!Uj&WLR-ue5eg1eJ<&C@m}_hv`BxU;?x`SgWd)1fxP*5csg1QF`vMLr{FZeP)s0^OmP$r4h~@9HFpBks2$YVXg{$}6`l6y!HFYL*9;EVyZCR)xSd1I z^&Thj1^^G|4NORe0btc6ay;vB;?&mwt@b;5$#nQB&*|U0LPM5uz}KqeaHzZQyuTPF zsq&hDRtT_Q$8f}ckYTFav*FKLtI6qGtPevRS6cCJgsGuNok zJ8GXd9R0E$2(5Tu`fB#BhPbeo&2Yt!4{ZgX%yV0`EMp5BHDV+XOkgP>sdT1Gp;6!&VY#ONtbUE7T*-s-kB7cr*f0Gjp)w z?fp-&jmdK^6L17H(A%AVV+~`H?m8zOB|QvIY*NL#8BBpKzhxFx6^%{$B3n96v3~uM zG(<6mlHk3+T+csCeDI|`KKrr#hhpU+-kt#h(%A=yg`$4*#@m90`vfA z1*i*ZgXtEQLm7J5zV zN{O&eK7I@&Q4tCN;@ozX#R||8A1HPsLy3tZYUCmM3jg7tz7u$7EOPF9!KKjj7WsD( z+8#JT%NsjSg6II!b9kgz{#?Od8MFW(V(x`AQVV_vwDB6m;bk*1xN+In=VxRzzqRrX z;a$KJgn0y@93;_6UAXfO~pYzkB(l=)*C9 zVca*$I?=8OBu#DkbZ@68(O5; z9u$keYc}5uPf+HUGf7)1?8tt8^`^qiBJTHZ!c7jhmK(bv;|U|Vn(VhCH|Ktzq9*0# zJUlneAuZCCJn=kqZJvFm>hgrh$5UJ5*>kM^p{)8Odp4?zNIC9mJL??lWj-FOsn{|v zeYn&g!&nDh{5`izUdRo}TZ{agc+dY1E7ct+<}EHg#f;}nXa^{sjv-WUQ-nM3VXClc+?J!Pp-3+n}W$A=1F}h4M^&zgWs;G2}U1< z*-_>Nw708;QtbQ~9gAO2-nR;4tEAkt@<}zwsFYFNP(8~j`U9Wi7howX>S12Gh3K?k z_do+pnwFN<;NV~@m<^Lcsb>@Zn}xwV?7rw0ykP|1#Q?&A>V8+E)ATMfhX7OAly~GK zIuB-W6`6zJ3h_&*q=^i{l~U;d5cK_z#Gr5Z?tt zOPy_So00%6E#fOfCUtO1HJ?0@1tU{qrc>U2Wfjn0?)scL|3|59I?}gm+?Bfocq%g1 zX!*nwtYz1%)G&SElGG6@x%N`!uoqLO;=KR4G!fBJ$nAfFlum52b&41op zE?PF_5bfiWuY%K*OPUa$*=$9TQTPu?Zi2kry!SNf-F@2SwkfLjqpoWx)R8;BaQ^ok zrHwoLiK4LO%pEg->8EH!_hYo*6i3K9bFRrJ2HlV&mmO^1$wZm;$UjZFX`-DdaYo>Vit=1p+b5sPTFIwxp4>-IR>2=JMv>9W4%jG9&-FYg}R%ZT)+8_j((9=i%?f@f-Q& zVl7hNUT64x_3Lxbcy<5YV3QKnVT6P0&+oOrHUehs{}OJ~Bu0q7Tfco{_bBDOO{Dm8 zK-l}=*>{NKg!AS^Mz&^WH*sY3&iU4=e@^b8$mi=C5#GTanV1NUi{0zta~<2NG$9() z($*<=npP>S&CaiE%F9w=6&G~QtQblucl|5yE%{A#xW=rS^2a-J!Mx&L2M1FTaT*?& z>U^tdqqcTRfmcFi1ELv>4*mbYZudoSck6 zmNXO*J1v>cvo_crq`OI#v$TDln$m~fSXEhBIUGI1i<9jL$iV*zU*WceHoX&hkzgj> zwfDCdL|9fIvg%UyFTo7%0mV&{gpUYIoBEwzqiwMfHL!jucI;Ev zUzFyhRIHvS@~`09-qkF1h)628}T_u@<2nUWO|Km?SUR)zm3^5W6 zJUm`emC$UoD88;Z)xe22d-&r5;T{!E8s{;C75V z=lu%*5Z~d|Qv;uO!u#4I4PM_RbcDWQxtTAdB<@V!Bys;YH}vD`-_xxRhYE)0cTP0=$(*>X?C20*m*6B zJizUY%o0FB_y@K@^6TtB3Uy+)O=Bq}f+C09xdqDONwfM^3u&@_4w6M=U;kn}yj6)$ zy0$`R&XFvwET413M1Zn!hclu4jH5m*o#vSjc2oG-&;wWV^#;)uz2QW?-k}(Ti!gxcNtBvv(Q}C9{X90&b?6=n=)KgH&1*eNMV%2{j?1?wnf4!EPcZ0 zK;fx8jKOPW9PuI|BH-b?h42A@aO?yfHf-2B1jTh==!1O0kL%PwfL2oxu~<^wx1)iN zXl`X?oX7rB)x^RpJmjZ9&G#D)tmH4+OL0aGMR2nujx4yjtitXm_2-8Hx5wt@ERSd$ zFu+bawfKYNL(b>VZ=r@y1!GI~TlJ51r`zl6qG~Q3ZVw2)xk&7OBiSD(t9v*7F48@L zZa3k1Z!$iwP=B*@R*1Bc?UfGm`F2SESnvA4ozuMlro*Xn(Z1pbbSO*Kx%`FV$&Jit zRd~{@l~TA0T+|kw{b{2=?`3k4UAL5~5W>~-GUbn(6ntdG+9()U9S`71reFr@h~bHe zE^vI;g4BI=tmr1VPDPNB>8^X@_s-4fhS*D=&nN3MPv)vy8k^oeJkuFzcr>_b)acDw zBPo!<`RYtm=W3beawk9J+iwNUC!JnD?wt1s( zx#`CE-+QVfan~g(N#;eH?`PcU8SrmB9*Uw;8;Q(NzDaIrB=qT(=vK?U?jo-Zb%T}p zhPRK>pABx@eah#m+5`QnEb!!@<^eAAOKWQnc=v>kCd`CSeg%Di36_fQ=+fDeX3MM9 z^OO<^ytS5tRda{ zwn?{6$Gf$GjH|QwF4I+s^nFUNyt~&9MN`CGl{&T*6gEZQeK!>l5GeINb@V-|$Qb&f!-EXv zFOU7M5-kOi6YuoA9_G&&m^?ZbJ$+!Tnda(Osg3&FGv=*4eJj0uAn;oUvGjAsj&s(` z`&8nCbhy~q=qvs7i!1;BFTo(I$VPRvyx0!&X5r7m!t>b8U0Z=C+c^aW zwCn5IyPHQP(}TjM&l4e$QkmY|*G%toaPS43Z(21SY%_8HpRqb5qKE%b_ik= 
zc~oI%hI>MS$u5r^1avt^w9|XnDigaI`pHPU{xwM}hLzL2p3SOooK@%ENi}S-E|%TO z46yz#WUw26Zi$*utu~d_LEJ6jEn3Q;M!;7+T{BNyyu!d!<>%!XI`tu2EB1FRv7%Bh zku+gcr`iBb{g{_= z862+K`|65cZp{98Theqh)PR}mdpTLvJBfnOn&l$=cPZZLrIJovL}|i4?cp}FWAZ)^ z1C^iNto7X+s{B$ZEb%B~^!Dbn44%1f_gcJDlm;Z@dhb}Km$;84V5NJRQzhi;)!N`~ z`;`ZH`3`xr_uo%h8>IGh{5btt8CjgT(bN+y&IlIyFXpAuVrR|8k{egYPos++BF2&RV1{u>?~sG7MpajFWGl zXDSeCuqp}#NO7l`>B&5~m6sr|WQv37pj<}uv9chkXOqmCoOX41@I$nTe{^s-Q}8XK z>sAbv9mF^}jzkyySFQpSD0WsYeh>8T{Lhhq|Al42H5-P`i!@Id4Z6dRS^(C3xM!`s zfXeybK?tNtQt@Nu$hB$u-ADDjPxFjQaJ-UKP%VDg)#{5p^&#KsZAH!oRUK@u!XGL* z%O}@(`@1w6yPNt6H6ngUZ zt<9tJH@6~xfthtF^{z$vS@!zAh@GrQ63Of1KLdW}$?h}1;}$(Fe&lUWkp2CTm{4lJ zN?vXIYH!TxqQ>PqwVHB9di*obi<_wqj`&yGD|R%#@~@?MPLQIU-blZlr}$<+)$Q>= z_iAiKlijW7sFO{}!xQXG4itPSPwAUz9yX?{URK>D3|^Tk5U@9j>zQAu+j^T#<#)Kw zb?RuH64`WCZX$Ma@=wr@&aQrywWLN;`R)f+Fd1s;=r}=8hXV{&;b14nEzh;hTLxun zu6bpq8fV(q3ad4{$)3!c(^tPv%da64m3`3^*W8ECR9mm*A?4}X^nN=I^&Ro@tAbej zT>b1aF@LEBpH|ns_(8U!p#4ER=7*Uf(w0^=>Zt9}aiocatnX|H4gCe_!Y4cNU&lx@ zRLPvuT-y93_rR2|_?Ji1#UOniUMYw4!<8}_Fpb$5KX!tbf=jQQxn#ZoMs)XIJA`aiqG#{h^cMr)Xgv zq-b$yjGKmjTr&O6Eg8T28GnwrVhz}pxt!T{`@jDiaQ@fRm@oeDMT)9o$3Wkf)dSsR znYW5+q2KJImKjWL!lJaMU(fk$aZteGNjp%;{9~}2nw4TUNx*fGSTfTDgFcjLn_wg>?NSxUD zFRI3a4xJz8mf~0Q6?m(YRjH4*#Udj6br~@nl!zi&_R`1OE~fU+r#JbI;;r567b?fy zmvUL2d2cZmUR6h4`S{e`uU5*}7+Bq&%e~Hy`DJYLxlB4Y#%tP{fcH`B-``1k_sA~O z48Jftj%x(iv{l=uJx5UzVVG&Ns;1NYiZQHsVVk_g5dCCeYw-RfvP9>%aV5!8w@T-Q zb7--5L{$U>oA)K~Ohjk&|KzYMrQ|2>Qp|Z&Ek;@6OTP?`CP)u^{N?4~ln(Lszl?D7 zj}BLudU7*KA*>Dh{K3q`xVN}hX?g>ApWt%Flv>Fuk9>LA6*}6|AUPT$aaA2Nw(>q< zCG6rsMrDY=y?fAxzisr(KRSA~8bFQ0iLr&dzPsH_ccp@T5>mI_(uV5<8*FcomI=|=uHm&x&}fV<|oCamvhzXg{CP1JFA+{kZk z4=>?aenc_-H-vB7g4o{Zaw`94`c9gm51rez zX%0443U96`ha)5}<~8on@DqN;o*UPl!L>JQaP0dVB)q&Q$)A~LP_b{-JyD$ifS7Xo z1!=F@-X3{o;#9{T_Qz==@xXX6Nq$k$*8W^BHI=|G@0j7KRoz%Jj1NVmJd!a61gx6xt@iGzx`DBnSq!<{W+!Jt=D4+JXh_L$3Lvfj$vF*lNkALZG)>Q z9{SS{RAPiP7er1KIHWWJW2eH!U#kAZ2tw_7eb4wlt`JQgLFs3ProG;gtiFG()iRxb zo*o%R|I4j9%VF^}HClUL9Scr=g0H3HCaV8b$Z{C|s|LzTzNJb^>P!~fso(IZZZYVb zyGPA3g;fw6TT5>9($d(#mb(fcf0Tuoxzh1uqpHuLgjAXxLyeSRsH`%snO0=D?jxF} z<#onJKD`eczuW?G1tt84lVzl{h{<;?-{~?f91MJBPHwgqur|5bATd?6ap1c8K|Bp@ zkgV&4$WJ`GY9ngK$5y8DCN)#I4LX8UQoGZm?RatSQW$+QA^IJh>71Aoo>rEYzmN$u zjOh`C26D%Os&RRl@GrXtuAyJgovI%)1KQfu?YtQDJQMO!#eYYCcGh~0bW^3I*Rb()}^QbH}77 zc!ryFB%kK1tWHZuM9YU>d>w+UN8^rMO8adV^E*2v8T&ubEuLTJdM8{Y>36n2+!#oB z8fj~d2=m^^Wz#r(BSH}E^p`uY0X6kE+X^kxCUiVgFYS+8lELktPO0&7)L1@ehvtjb zn_Q=B)-TuBCx<59#G;u{_KR|>C4{I^H~EOh)Ej7#QVf-N-N1rDd7gtn-!E zZ|;oLY=p*HGOEAf%Cuiq@v1hKQaZoF8}_t{_^x`CJ%Rj!O7Opd_f@T|D2F1 zjRY>ecE;C#bWAL&zjgKGyNP8#%u93hsW}=&{ryFUT&ZQY_4AazseWc|06ErFv@QoK z$7;~6&ACeh?NkHN>I&aw!85m&Zwum{YK53RgxI+;?RSEXZ}-G$Fmr{w&q#kKk;zv6 z_S<3l^%38N>yZI25%b)zJ4;nv8cv!|L~w?Jl+aSuY^^ahV}r$fo50`%OJk@2Zy(_> z?|YV30tpn~DIV7-4chg+Y`OcbnxuXU>tt|3l#KnhJL~f2ZyuEFa+HVUz?7J`?4GkJ5>WY=Y z;v|Jdg1-9sN~Or(hc()N?qwvrvsFUW5>TAq2dR;DH_6)9xxpe|l?n|*kq5!G@x}{# zCAy`1*1t8R*)U{@u2~MqjgRASQhDRO4r7|2j;JtS)oHBc{dXL6;5d8}P)0n%D&mKu;%A~yqBlfr z0M7@Ty+{~Ai=i`%2AH6hGNzl@YX(mzs(paA`wky$hU z;ny#}e`a(rM4>BSpC6+(w$q-Dbq#%rau+}RzOC_U=qrUJRz-#BGBUwuEzx)Ewc76} ze0NNJ7Uo>OfQ1w4yxrfbVjvY*8n=0Ho9DU`r!&(7u^5H~`RF$t6M8l8i}0sU@=U}d zKGJb>2jj>`6Wq@vN7GciQOeWRN$X6hahE`hP|~d_ZseMc@Z>$E%y@=4wvQGbuj=F7 zSZqQEH5e&9AG=FM-e+g&Zx;vHc}8!7$od{|I&tGJ%>W{tpL zm8O;dm#O%Ol$#Sz=(EjS?#BO(xme1bYv%7+D#EWU;rLi@use4z^na$gKFwGFM?wXO zw91@k!FQExe_eroQF>qaHZM8oisD|; zMUE8HCXrwo4?c9xcWS)(_w&2YQ@Dd@UwL_H)1D{2@ZW<`eC)Pco%2taB6JQt3?=nl zaOzsaJT9~D4ZP6LkiYdK^F7^#N|Be6i?WZ8IM5*TV8Dfxkw+d}y`~^e^M;ihl)U_w zgPBcBCpjV=A87tAjRCk%7%AQ$01DEXzCtZG(d;9NXo?Y9o#DO^z~B+aDp<3z|22sY 
zYj;j=&gYRr{1l#xiwpdEUP40Y5f#=Hki3u!GkRdWZv6CWsTt4aPEWab@+hYVh6zYW zlhB{&hoEC~3qMk3CPvOA#2KKt907g?_I)^cR*q2DXzJHRx;cfu zp~TE(it~MH%2tNx8wqaiZ(Z$o!DP3H!ilY4G7+f~5`?ftl9Ggxn#eFEWnqW%-i;uf z5}4Gl2?{PJ`dr1M&h5Ql&3}Odt7cfe^eSSMo9IX9EWOOD7JsjvJg&TZkxNS`8PFQd zYIie$u)2aCH-uL@%l~V}fbzg`gP+CL`d&Hj#krQb^9ibm8&{TJ#^;Nkg=?Qazv0cy zn>D;Ycr4jnn5ZE<=cImMUTBI&tR0vXZ>(v{t1>)bBQj%UuIwo9qa6-pL$E2s7P zYuI?{P{PqHzqj84`5oq56dI-i<#6$;8LXep8XeE z27wF}pcX(Vd5E?Vm?>kQg~dSzJOeZT`N`##`t{=Myu45d7U9@z05qZ+95?vY_a)|3 z{mwLzu!Y&?V=v94Z%!4GP`e_~85*Y_cW0bOl#vn(tPQ!cohN^J{2++{AGT`mYc?xK zRI))DfN+TrS`m`p2FU`OPCkhP5^!KM^YU1CUI7)s%O_%A?<>eE9AFLMIIM6R!2=^> zBs$NPET1E>Zg1MI(ehrcTcYpFHW^Lv8f<8lvKDbn!-RHq->EsUClFhjXE~!4EwXWj zYTyJD2ge1FVkMx;O)!&2GCVYSu8#49OM#L+g2N;+SHx-Gt?XsdP=Yk)U-Ozc^o0^g zf%x06`H7jjc3Wg)@6veEW?!zR9#`YNO82ex~wqPqt z4`KcfTbk8;Q?ocU@U)-DV;?_J=D9V{aVNHFoTju818ND*^i~aquZ)%PDER{hoW1BH z&WAy+{jZW(hvT}`-l#F7n>GjCZ4uG(7r$zpga` z{KV>99H>C4FdRW%E@Xg86?FkZ1bgT`5Hc|mpJD7}KY#>!*mqD_*u#CdUFHHoEhvcb z0&*|*sy6{u3lzvONXV#M(Y=B{#R2|v11Zg+0wOhHEPLoo#9v8~Emn!DL$HKx=XwZ-VCXk)~MV;=zDAxMQ z;jDt{*<@Ts_Gg zVZrT5CSUva&QmXJT5~5ok1veFPO*~kP`d)XDgGkLZ$P~Ry|7R@a(s038?pca#R%IS z6>VEc&=iWWZQYKJgh&^}S^@lN`l5S?P6XN3f>@jpB4T%sx;xpYXeR;x4qN)gx17aY zpYdRsh;XXTL`lXPLdDEy9a=66=6b&gnZt!9&=x=gA;w4adk@H zFL~t+@So<+^js*%9AGW72MR5Z>C1zB$-6)eArM)~Yz!h(72CytJS@yt0JZ|qtO+E( z8m=CsH(#Bf)ty4PBqA(08H)1#hA@%gk47cw>-|gjR7})MBi;Qs4w3}}_7ha0g^*GJBsciXR}YU>#}}$ovCg7eDa`a2RV>^HNH=z9^S`xp#JF-X|xHl#I;diYcbc z&}eaKtQ6+g5WN$QXb|T3-9RwR2NG!z&@`#e(e6W>u2Y4aiNxgJXVW1M#fmVP2X*g! zietplNzf$Yo2d5S$9HMqt!&2*A!IbUnIbDIUsRU-@f(TyyN}qSu4CnAy|R{H=ti7r zJjT0UGaHi;b8nV=SW#xi4d~e$7p6%`@ma5A{lTnlxbQjD=&AfAS3i>RPQo(rWfX&L z79;*!CfTGc!gfOcKDErsze@exg=AO(zZ4S~lv!Uhj`;M;9_vI0Zn(up5LA3{W0>YI zRv5OVW+b5^(45LjwlqT@su$4`2qkYP=n456toRmL?tCBQcp#7W^1koG*~kdy-^#(I z{7Zr?uN6gW2+>UZ)zD0EZZHpg5tPnfVVn)jRp>i9zm=uuEJPe4_cBcV`Jj~#Md1U7 zAHi1FB&r(ZdWMwSe0@{K?z#C3=x}N=zBh^vtb7J6G9=y^nYVY{ihBO5H9)Vk8K%L7 zU7g@rCldH1( z-*};3(3r^W%=HJ?Xuq-Nr?w-(VZu(VDBv|iQ)Gvu_pfo_CuC>xoJ&Fi)STay4?X)*Z;J0RafobzL-=rq}wKW2-iyZVgl!ru94$Iu6ewdP^n4N3Bzk{1`u}=MS?(kH7 z*GZK0!=(y!hlGlKe9p1ci`R_!Obey6cwDhGlM2smH*B)J2v8XJmrDBM0(z~*Fwq-9 z`aGbzM855x#iHTbl{L=XUCuz!OV9r}r!g>gm~Xm(%s)D#5$ zH*K1jS)`{nrKR<;xkm;pQ$+iioPd%&qFs`o@ zQYuaQinHx{5Z^tynbK7qEy4=kNqKX6A*v+8xg?u_#HFR=Lu%ZY1q_%9?-5lV@-=-( zARG*+e$AOm+VqhE%rDWhTI` zI2|gXeSnMAOzh65-Ltd9pRTe-OqU*@;6lW$;syvOnw(GPZvwDCm$wgUzI6}CJWVX% zU?{Q}E4%hJA&i(@@##0zuM1g;k&8t+GBR%oLuHb&^l_=p)GOmYF`;w5$f}g);0+EV zWxURBlf8&X$W|#3-6NNiARWX=m}E(sV1A86X2t|}XJtQ@1JWm z81915sy_C#ing7VWbpIgXd*e>koFhX)rO1p?a?uqSR6m5EU*r^(BtV<2YTzs)K~gY zrJJi>Tm2=PS`?_jL^Ldgi??a&<^Ik;q39{iJN#f2_R_eZIUW{$_TU6d(stC=k*(hK zl5IDmXN=MEgse$uxukixCf~RliaMRXq$RM>$LC$?Kf}+PMR_1N%N*!iab!EHf2)#q zBbwlC%Qps_(6Y)7z8PyJw_jB}@~cH9!*nWv8K?qpI5J0^HD99KdvC{qaSKl)+F2NK zB=nj*@9i&57AhgaQA8;LO~h?*V44|kz;(HOk#9Rn{u@^-D&mUfa)+FyBvJcPy=L90v z@Y>qT;`%j?Z8~VQ1?>G&yj)#%kdqD%_DyVF$h+dqkJ!?tu^{O!uiYr`{oi;^ul{{l z<3O*CENjYt>u&VV90wy7Pph^xumL3_e}8WK_>xNJh<$^v;8jC>APA}$svzjYC{{oM z`Q<592;J2N}=Eo0~+Xq56UvZWeTvHy& zTXs6_Z0eLRHwb7Wj-nrCLh4?G&6A46Dx$n9O+jHVtG&J=^yi-fzDr zr3^jrT*=K(oN+ukh(+B_W#SbM=8dML)Nm)wo_bXFdZ*@w!gU-*Y1oG_KW=dB#(#vn znx>kwLUNr@y26tRkZ>h50xKa8?|0_~wgPQigTqbbL8@exGB3~6jJnAZ~AJ0Nv zoK#Hn%04vjd+UwYCJ+~-HfyJuW{MiC>ra20)bRQ)h=wK;g^?ly!$Gk59OW~}3mQO8xaaOpF zVFuj_iQc6OPCsfpwn?ZW?>ixEt83(%`i?VhKhr;=G6m<7zF>;M*e8j!W12j=RfYXx z)4+x_P2!$pM_Ah2xYe9rY1$+A%=}ORY%0d>iXQu)R`m~WqceBchVqC}1+!ZUmTC?v z!7gVV$OE!9NnWx914uHywORmm>;_iU5K0$a_^2u^)U;xUHoaWXVKaXE%p549gF@|lTrlk>P zCHF7g2a7g&W;_~e!JhJ~0AT`(=Cz(h1DlxV-=n)SgZKiXFlc?Zr(D*Oad;FnLt1$s zKmJ!#x9)r3LL`Io8Il&+; 
zLRPdMFdtm+fEHC5G?O5pwcYnXP~uRO4MK=HWHog`vK-0E?llB&@%8nM<I={}I~@+aIn4H(X92^9hbS&wXRM&uXlh>$gD!B{K#St zsj%P!^{S-kZuB_y9q_xtdzM}G0%2PNIR(kR23`5)`{jbk^vf}CiW9^9DGxMuj9r=Q zw(OORjpa`lyz?Erl+$8e7-OWTCyorSK7A8VvLO_ko{__CNTuHx^Wer9wrh}8h;G-8 zUQok$n1p@3Gfd^hNw67de!@!r3!>TWcnzwGmGqe63-VgCT75x9S0S2e+L-4)Mf}YC z8ScqG+g}oYvLCrPUTshf+4O@3(2+stlKWJG;%C~X@pM~FZM=O?@r-|td_wVi0tPn1 z05oq5*Yy}ey@#KVXlS(jaN1+#9}@H7D^kCekF#o@#z@GHsw|n3XM1Y&@;+~j26sF$ zy>lyyEcIAGwUu;Z0)v{oIa>0Y>)AP)L<`y^nfzu!*c9$+%$y_`3I>S}YAhFi6aBFh z=2*!M^dA3%SPBjpK%f27e>PhLDxHLz_Ice9S%1ypv0gfTA%)%R*$;Zh8?6<_nk;IqXR}71( zn(P(-3oqBLwxI%oKI$J|9YrOj58_^eFu{=o$6+2wZvh17B7>&SUy8wN zZ>~oJtn`1Iul%f^JdsYB=m(c?-O=&$+V|eDe&gGU<|#GEeEqDxpzAx`1-fZMLB;A3+{~)bU?!p{6Bbjpcqz^hnc;@U;OuD^#vJaP;maH{MN14p zp?F8NeUc%{GWR%_r{iS@DJ(J(ayOqvCv`rs9kEiFU;Ys(Q!U}q@L18aRogB%aFDo2 zikVrqKJK9>t@Jj551~j!Knl-}A%jpRzindQ)^Z~3%h3oVf4}kh{3D)hG2E?~OE>I1 zAZfu7i|(pbBBK+U?PzVqv1fXI6#-~QF5X;p7W+#@|Mo{zKUwR95ImB48ese(ns`$S z3!8CJG3z%D7w1CbaSwr^-E7BcwrVzq#)jG2Ln_mpkCV&SST@_PEKsPGMRuwU%W1t8lIG%lgnjq@N(*{lw5z{&ou1Ao zBd)|C75J19>?2MPVV(EuMKwpc(*SMnuk6>PhFvt}o^3uGQ{~1j>Dm8db=V%Q-!5W! zQoS#etqxD>BQA0{Gjwp1)l3RQ!Z)4$+oSJi-XJGO*y4{fB+{UjcneDi^7KUQE|B~4 zVR0TET+HjNmr07>Z}5t!s;WZxT40|*Y5n7N>e0snYq}*EGv_{hO+ZJ`xz+EZTC@w5 zmu`(6E$qKuk?T6BmNcBcmi!l>l|OGZmSII)#~NF-K}#{DH{CUDX`5|$YgQ|zc=Rx8%eZSxA1!O6|mHw_kYS)scQW`9%$?jA=h8uB2bM;5aZH~!;Fh5$4 z6AJK2=l5}ZB?-5-Q72JsEPP*j5Lvr^w{yp56}XIxIGH z+}){iqG_m?Qm2j_e5E8w?xq)fXiF2YH{|Zg8SQF7x0rupVIFhRsgonE2(y*@6<_9; zoP8bIGU>N0~kIp9UDT zARbJgx-Hd0;{oV?j^2j09Kcmc0{qpqcndGbzW2DQ z&)tHavLKBCg}R71^IpQJU~Xk}z_+;EdkN)gJ&u-D7_xlrt?AdyzAxHLR)y8R3Cyy& zHN*bz_IKSH9}G+pDtmh+-FUOq>DvjHV|oL%V=AsnYzpQInBQZt6XHv$unF1nHQHQ+ zhTqxTaOZ45-8wv2w?RdVsi3F_`*1Y#d#5Ds{SsMRlhoH7+YGBWu@>Yf?i?nz<7ly> zqa1N=58CH7K2?bJ3u9!#R@0AB!BXkWdGI!sPj zz>oT`AlAgDDr&ybNih!&%BP@8dRPCi>Xx_x1sjIGU!D!xnNb;hq#e`{p}yZ89vij4 ziOI>2rTFYI$^3J9Ocrjj5k#L9h-lc!$+H+jmh%`eyBR`F{`6r*Q+WC{&$HNCEj&ut zuh~bZ=6|lCum5(2P8QGBVw*ceBf$WNfra2>LeTSw*-j2rs|i%p0`*9TE%P5hb4Y!O zLUU_mX~ea{%GI^MznG$zm|)ut!>Hscw(99PN&b)>4k21r?j!!c+H?~|=n9^N&j*^1 zT2F0mO&1#zDl)`f6tT?gOFMjelT3Gyk||h2K~XYg{^@xj#_Aih)93$2&LW!}nAXLw zAFYxbehj3cjPxzu*636)%DjXA@OJ1!BNn1+0wOfq2eD)5JU7#D^6$v!t!km8NI$Lp zV6*L`DJUA2%N=K4@%EQLHudnY(vAQ&Dl>&dbAbZ6Z)0qhFlr$d2{0Ky@E`1VvD&T| z6F}p}ZoTl@YMHh(CmP1#`Wno{#*X7K`q?Z@=B+wY>1ijck4wD7?yTtDIY#Mwta1g{n#WkhV3>bs88j;77&yU4x^?yqLjvIH2Y~s$4EVt zkrArxWUMUNXia{lQ(9yqI#tS$qo(2U0OfCd0Wn)fjgDCNDmRVU)2t!stl*GbGL4iZ z?WDEXmuGIYQVO3i;?gj*6mQWQzS_hf6sViNU9OIewG>~ioLSXnDM6w54?N8HShd?v z93Q{H)nLScdU@o*t{biQ4Tbp#^D{KR;^K+`l+ArM$(JnxwE2>sB)M-B7F33eWyXnj zNSmx;=q5iLri+9>LnR? zq~|RBCI1uKAs&oB4Y&G4E>07?EqXCdd&P4JbuGDaqp9CMMmPCh7Jivl>Tj>O8iH@i zQq`)md$Zca+F#AJq<~RC7N%XI{I|sv*%Dn>Ap4aZAsTr%HxRLH9#k2*--y?EB&;v(Z!b z*LRtFT3kQl#uI|Fg>24f=N*o!1Vh=>K8u&Ev7$+ph6Tijt`^M#jt;qbRdd z=2^&)khu_LC^9CI5``pXCL$3+NTxzWC36Uop%f9l>um4)dEV#!z3+d&`?L4`sg28Z zo#*#FhPBqQj=rE2cD}8&K`J#y?SaaF?9*;+9X>^BeXK96;;FH%&rVW8ZJ*P>;$vN*}FDwr$rBu(0~eWWve%EiY~ z>*1|44{6-y9^aiT^tZR%UAGd)wMlUw-Hq43UFqwUg~GS&j^Z0Pl}tFEKV>?Ybo!3J zzNxVrdF%mqqZTplfyXD(*;s<3MKzPm4tYl`2%VoEnyny=ztDc1m4%1FFM_B-okk^U zMw!ELfWE^h@w}}9)wDr*V}1Xhwv_eP>c_+;-W+*Rw3#G@yCk% z-tRjHqmV?i`ft>Zxle9xI_AWEbMlXB8jZvoY#PWbXPB}tq!xXndO-In<5<@@EiWs5 zm3udYwxv<;=JOU6e_-Swu=8Zh%*vnMgLCDAg|mNxPMw=_$lYL2`Jpz1KSxO^^yJupZwWzO+I678alo642r@HY6(ae~apRkzJB6Lju}XSGhiY0CUPUtpDGrIVm_XqCz_5w0WU{+K4~@NJ%PJ8{M? 
z^!EdvoFr;Z508m~g;sOU@`c&qxmRi4dz$Wkx*&V-0MGOCjX%r07{-{=q@QnoY{)Tt zdY1n(WxK?#v~}@~*+Yvd26;+r3hNQ-8?#DFzBInLeFF=@VMt32QfpV>?}a0>va%-t zf3RR{(3f`wTA|7`=?Bu6l;}ofM1}POs$PFxkjD~~wU?5n&CFt!g7?d(7tK$pcQPNT zWX|%X5SnF?uKQ&yEW={toIaM1B?Fh0b%Up0h6|eHlTw|nOvqA4e=xPP`eto$%6x2l zLVF7{BmM9s)fq~G`8=}3%wY+^%LSLHCp^fY1-dp?}G;`d?cBEoZ;W7%_4^_FK*=I{Ae1Lhp8lR|$gG z8C6YM(+oxG`dalZEU1nhVgBtm%<|=M#EY*{Y%>Eh1_Ez5<(9nek0l5;w~;rsoN%&>w;jiN-To`BN%P;2#SsiY09ch03T75K! zzgfH^yy`X6Ibk-2&$oGHen?mP%~*q>@zjT)ORfB*@oS zmWPMO`;_8Sn;!_NP10iO-jJy9OG)}kCRibg&m9Dj*aBPF~K38!a06c@+6(lWQ{Dc*3;V` zS~SDF$)OU9(9t@y`1vx7I22xD;Gf zpu1)iJlcJtRZ8|_-|!!qysO9QzI%+X_%PfODSdZThQ;^3m`g(A&6al4YyNe0^T*0- zo7%{o-y7XGzUA`xQjUzdQn!n8rfKk7>)W|93yRXgs;r4|mqTL|EEQ&kKh1_bV@>iX zdwSVXRDA67*oJYJjmz&MZeQK1>348BY^ApG9W4jwMW{UxlBCIFh*7H<0n0tqVRcZSh}8OT2(X{x9hyGBTs-2tB)brYqVuQC$ zerx+x49Of^bg1<1UadLhW=O70n?@b9?zt(5-zMC`WOMT_0}E5CtoZwuKS< zjBbaL)3F`jFPN}Nztv&+pw5-=FP)1LD%1*uL#os1W_HQzn_}*m&tA$gr<*!jVMCK! zn(=u3;5)j*nRFv=oKaFH9Nn56Db>`64<94*;oq>f=3#Zt%Juq2`=TSJF8f$I^b?Ip zjh_0qa!>G%rrDE>BqcJbPG6&97;3xx^eUxPVZWNWv!raJYsg~nN3I`kd5wa(_BlIP zF8yBh)6eY@to*Z?FO#omeEfR;Sh@Gu_I`#!{Y|;Gq5E@Fl**=4R3r|osa={H+IeCr z_E~4u%lt(iC)kj0Bw?{Iw~){l6%`fPC$3U5)Z9D=53+6Aw5k5!_|()EM@Pr>@8JD6 z)6vmEMVS3@`k`U2$4LDqdwcuZw{LgE?34+)dGoDZLNQRCjT<*!myG6$WKM=>sbWFb zBZG4_bHMWVfaB?TPbb++j5mWEuYL>gf zytFeQlf{37v76F%%6#L!ZJN*4%QGa~B|YTn9G~frF<6E^Ghzcv`{G3l^LUv{2@tL~5@R{Jy4ccEfBvmoRkGBhk)3ml;kPhBk7dpXC z_L=h9t0O9%G&%8wg2Kh*MtQc5Hf-aq0s5bB)ejbmRoUc*{@nG&MV)bAIYZ{CMahpN z6C(^}`MTkwbon2o_&6>%sH@$ryFTI9{Di{7Bg=><`6tu%Se=!^BguVlw#w%09WIJ` z%@@IH$YK|H_3Dd|O`J8x3V|c_;e_RotrVqr$9E4euaLWUcQr<{hQj-r{IF7|gux4# z(>B1pi2L@d&T>yXL53*dYHL;14FnI_Sv)`Q0eu`3oE~;u!~bo?x#%fegw-Jhg8U(D zz}m23!}X-3EB*bJgdTubJ-Vne1?JiJA^Q~1=z6qw%-7$){^iTVi&lYwiX8I3Bpe}M zKR**^1(&${M~BKC?Ci*GY;2nCA2WaE5Vxuf16Oh%&i9+Csh?%Hw&L88kDu0~d<8dUdP3f`Ujvf2*6F>$CxSU941p zkdw2SW~a0={+C($=-$11Jrm!$ZmO${Q$+tfmi$Kc{=0Q&%O#nqg>H4Rr`jFWrWs;; zWztN8bGwI)5uYi#UFKb6?4wmEYEAh=@5YF_`NkNwO=msSPH9TA(|+?^PKG?Ytm>TA zw(;XnY{}%aTSP@0W{vh%fAblnVwWwQ{qt^9YM0P*%7|mAQtE_EUHu995vDcP{>w5; zE?+%Ea=3+`^Ydj{&-$(@tTj@Ig&IUT+ePIhoY3B@IQ4kiGx<>3bx$eTim?|R*-R-Z zuZBkqNX$>E1^YQ_`>uvwKEiEjIs4)TFLPm(x9mZO5k?!za{s~b`gE2->vK8dqZ`D< z9m^{fLh@br^$&0=kKEcK9(%I9d@%S<08@&k*YNn%rt|xcRDE*XdOmi;hYwc4&jPkE z%=UVCRgg0?^VwIKF5GJ28kMFhPq|&E&VM=jwhI5vEp}nE@~B+wC@GUnohM2kOR>_i zW(%pczY3dksFbo#R!WMdd{n(pApV%=E;aJZbw}RO%=?`tjvIT{^R;A}r?SP#{MgBA zVV4)p5Xj8TjA}A1=+8Q_6dGHQ7vLDmwP#N>#g&~mdm8%wqXp2#_P~*eiOY}*kb8Q1 z&Mz%B!6zdBcO|NOrjslEH5b|L-@hLj8_OJ%1PHGQ%ir^=W1#1|247?Dy?b|{5UpKX zUGl`T^03R7jVQbJj%}o*w5t!>qCOPEwx71Ht`7EW5k2GZ;N1#!^Tiqe8Ju@4F3&M= z-_Fh+4cxqTaF7G9(FBg>US9u~nj;CC;dP;uqo71y!aaT)6H^2j*u;e1in9XsFJ6p% z`4Tled;dhSR5Y6kbaPtzRJC%+HzL1dG!S zPZmbIM*&BxC@YiTL>VG!*T}Vd_eMe=Ub-l7B_VM1VtiKrZ<^R?%Vn9j`}jZJ%spE@ zUTP#apqXHCTT}NbkJzEgQx7hFOWqgqc(wi`zlO=*)d4Bbz^v<}+T(Vdrk-|cXv(kMgF=Ry_wj_Pff^36o| z{U~IqF4u!Pqj_fL!L5?TeeZ9P2@i7MdOd|q@Cmf9cQn5 z?i+UNzOF_g!6BUUWR#Uom$_E2AVSlMmV&Wcm_yia0^(;YlFbE&#of%pj!CXwx{&=?i_eLscMPWTe3 zVv9sHwJV&m)DT2>N;_S`YI>gCyAPGCdw57-;X@JJ+@+nnHeR`Mg=su>ldY}o2b?Ns zPIw>woERq}M zM;s!kuvEOgr5~iHcf$1GrW9592KY29A3C%VJfzdN6lk{Cu3fu^m8_TXd9f_>3S3>% zA)9i7fmdetK(hbQ8*y`f%O3`oCpm0>iPWie8jV6Byu@y)VQcwhMQtP7)W>CP03XCLWwl$GS3 zsQmO6S}u>0w`9*YPneL?eP+S8qae^TbhGKx;38}ud>|Y z{!uLxT=bgNhFXCR+;N z`(+C@XHLx+-Z+a*IdaMc-Pb(jWt;213t#G0ZX?Nb57)WM2c>N=dL`4krz3cDd{~xE zhoq-Vk@`O8d(~?S_D5Gf)9KPQipY#+>o?0@ zdx2PT-!K;opTE8SGC$eLdjVu#&ap?S&1(T*v|td^aoxt*;^ujzWyr6S;Tv8DXHy{FKjO){OsXi&xohu2+nOmTmX}_OZVoHkrBuT z?o0;q^70xyvQ|j{PPgko7`Ivk*MS2&u`II*TQkT=uz;%vwO$nTCome(78_91PM;Pd 
z4R_}n5i0%4b6=w%_2QM3G}z2aoRbxEqPz4fl^VY2|K$?p5Y_==p zFa(!m*PUYk1zAPQjI|UWag>rmevgd^W=u$2uGA z+ZX$fyL)YD{(j&v8>41*@sqUXv)U!MQ{NUgEW9btZgtqXWMo_4F0Ha4o%>t*)>dnA zKF+2E_1Oh4R=Nsp(~(9hYrDM~ud&ffBBeZCkziN88@pA(i8CE~t zs+o}G$k~$bV(-WGQ2M}^M;vUYo?rJrCPSh6RklvJktZwW?fuo;?+cZtz4fgw>9IDc zw4W`y;W;cPXRj?9eLVRRk4u51Az$>3ly}*8N4YF*_(!=$$1U{u+2+~lIK1Xlv_}~; zIq4ccCE7ia)A%?(cp#t3x2FDrYvRwhmaEC~*P_0z-J(ty)?p87p$Ny!#xS1tdgMq6 z*ymM24!!v4`3cLK3#=r4oD1RVc4YG5nHg#nc7hC4y9EU7aE9XSRN>nWr^9I64qM2r z;N8rH{KhL7YSIEBr{{PpqqN%x`kI;=co$G@0b;|v?R8Wy$@IvRmx82WDGjY@Iy&J#YYJMqy#0DVraBcZ0D>fv`wpfybxf@-TUd;>F63 zUmls}d(QUDRjuU7Uiymd9xKKX^`AbS%;|ASA5PTO)$Q?ren9cp*p7`96rZq0+xK*z zSrbC+Ha51aP)FFo+?o)Ei&>WIySt0rgVb#M_U+obZ@e|j&egb%l{uegWK({bq0=^1 zL2DxG8MUFsnOuA7n#s(ro`e_C)YO%W{tw~U7Gi zt=ZjGox?--AV+ajJ56bwtbMyg_Vh%hr$#|x^SAp8od%DLX&6l(jVg|8Qps4J_2Q#j7(&M)~n{_4jizM{%kI?w1)-y`7FJA*Sfon5no0T!-*r0U9+MqI&WBo z`1Rh|FQGndLcv-Dr(R+idXCkzeW(*-s3{_Zsu77}aU8J}$h1#0Qw{@8p%|drfaXR( zH>DPK$NpY!LB zoH)T1xcR)^~o^FjTJcRR(SA7m#Q=lIy|*qeLejub_*l%+)ISNhM5B6ZW0 zSx=>tgzt^Ya=YgS%9CX+rEF9aIW|(4<|<xA&fi z)EtpS}imuR(P7k<+Z)dg(nL7OKK7Kssol9VOa|BHwtTyub7tIdv|nUenjn| zMJW|=E|84FJ^vuJ$rw)It}B944@bDcUYiX>gN{Y)CSN_if7U^1n~u)9{ybLfO0nua zkgD*RX1c~M$0PNSMg83V9|4b!*IqD0RO7a9*-T4|(07oYo}Q~b?U?BRCnfqE;02=) zz$pU+xaT~S8@2CU!syIQG=9GtY6Na>GUV(;>_wvGkT(4Jyl`VTl0Bo)Wdl0 zT1zpsvo+a( z>#YZmmQ1N;f3Y7TThm@Tud5fl@4~Spvtou@cWt}2UiQrTVsvBpZS7l*h*gutn6#Ck zN`I$7x5O(U-Hgr#JuWvzzE*OI2zBhy8VzorzC7_GA?T<-@0RzrmwZDRvK`8yw)@3j?aM#)ak$QX{iN6fG> z^>HgH*>qhuPC#p<7GL*bXvjuC#V5(a+vj~y77mSn-ZbVPm>oM(U+O8Xg#qt@MKw9P z6aKd^6g39!g<;Tg(?rL;BUnp??t`3E9=LV@eT;--J3F*oq@O>3CaeY+hr7T2E?-q$ zjqTdUJIfh&?Xt>EG6Y5E_@ly0ONqKZjJEO0D$9vZcdPxe`5v76^`+#rZt90RXBdc` zUelDks=m1~aqLT#XEm$#reZ1M&pJZOx2Sh##b3vBI%)9jW)zw*VhzvA~9`N}`c&chw&OgEo@er`U0KQ+7fBfp-v)0wZsvSS-WS)&EC zrPYJU6O*wk+BIV50!w4kE+w6&={Ltd{`f;zVld-38Nivea;(*AXgg!$QR|L^dqI2D zGWb`i)WRKA!@r5MIC%G!Cweyo>iQO@$U5b3@fNCqUXAzROlC#~i;lH*{N|p|Md!C% zx*4Fx@Y%BP#*dr5vM&Z6ebp+Mr{h#)7tl*3PRBj-lnBg5^;h}rxYSr3)RSkTpKClh zIDnOVy}hi&U8&+q&6?ElR)Q)777E4BBaKbwSRRM^9_h z<*tss?k;+T#?sD4L#%_@GC@@aHLmS~6QvbvYbn;()m|j_S0z<1fBv-TD4V%U>+a4? zWIFu3i>1HjbKbK4Y+oPtWixEeKPhp&x2{fx#X5Lk+dD6Am;LNC)~Al26?U4xHrdG2 znV50DKf^%(;4PM{8HQG-PiA>P8uzSaeDimhT<=KYxT77jZDsD+$RC;}zPq2?Phap) z@oefU&s7pjlG4S4kQOrAL zoxuF}?`lp?f<*mOMoJuo3iS}WO2j=fz`^B}mBeJ)Q7x_f?G?gFye;-PYA?~&YFF}3 zvk5Kk<-svEk96atr@w@rkXJ-xD>#Z1(JJ@#mBnt4&fB&zy{f8`2T46z;Xee&G3wL_ z_-<=n{W|>lb3}PLHsus4oBgn<^cFfjRAW`JG!J-Y6w^E}pxGuP0lcY2D=Bg6H47=Z z_BJvpL(632Ex>|gZyFAidyPjq$*khq(RAiIS+A)szTc}ue**bcsC2Fs zpXyN(vF!bkeUkcf8JWb^vwqW-odv}oS#+aljxEs~>s@@#JXBIwV{_%=Pd&pU3pM6u z{jRShi$B?kCEi~DzV`=v=IC~__7N*9+46|mx>6p>LJKR3EHMF{>c7X-T61$hh zeoB2>jAp6wzQF9xsy)%-z*4T)rKQ3C+C=I@&LCY_cVhm%Px)wx9$b92!lIr-A-$UL zBi(3XVuG+sm&I%#Hkfj=Y$dn{3?`&u%(S_^y%8R|(Sg6ohJi(>ySeSDJU=H^zB-2{ z5EyF{7$%~v!-gjwfDlA-0U}CtUT~HFgh>n{ED)U+7y&Lp!O51OZgqc~7>6!=kD0G{_>daJ28-ea z-yYToH+KRl0mn&z%E+ZTM%mY{Umm2#u|&O00U$c`?%lhS)xFPyo_GF=m}R&d=@As+;>YND(sICGxi@$qnIwmWua0 zy|8O~!)MmQBJ)83*V$ylCFc({5lb#J zKW%Iir$K;j1jlWiojf|xTCc-K zOrKNv*Db$}(O@)XxiYutV0JiTVDXm2fsNwkeNz~73x+{59R0QI6yxU90ei=;Z?!k5qsr|O1^2qoAe`e#2^_0RTJ96 z@BnaI4am1)PMstIH3bGn31$0boMwy|aUHta-rA~)&J9KQ>YW-PZt(2i&kSZv{ltm1 zjD>MOSGu!bYgt|ZW*8YB*3Z4MdhaI(Hi<+YJU`1ND_h?6iF#~oY#3GE{QSHGo9v^? 
zmFbexOLipaMr@Nc>-c_DY5OTgo|@mdz)trEH(%tr9+rKWQ z_|A+Rp4%?)?wntH?ax{bYNv?S{oXgUMY`N~`mRnIQbn0|B<=gYCEv`MIfUNpA9C24xs*Vi{u`_V7y@@LOp3=G`0juv*a z+a)F@*0X(M&(W!;>Pb(kc0RhZd2hOMz{<@N?QUBdcc|H25jdaCnE8?^Ywh7D8-w3u z)!l{>P1^mHeETizMz7d6uo@1Quyv8rMTugFr+qls69pl%EnO$eb4{2%Mmp0zr zrqeHC_w9^y`2HPt@9j;ZJZ|ZjyO#60HK@wp)D32y4B`*fG{@>ArW8sHk4t9EzBR5C zzcyOiQ$*6=X#2%Vsx;{NC)uTjb`ir|F_Dvr9x!a17v*)f-1c=YbEVE#hVqn>w} zG_fo9HqnuE^B1RCnG(x1WN#c-5;*s=Yd@8lT%juSIA_=K%dbmwGmBR=Vyd3vC7*pCD26HX1tr>CbEV`a?Z^m{s>`H7ftA$5On>Lm0|Dpf4l7xrT+eqBAL zzBJcfdoCtnQ1aEoeJtH(+S*bDcg|^kHIhhYmp|7lq3!3s+?ls3Wx*&T`@{LH(e^_o zu%1;zRUhSFe19lxx`;5d+nm{Mr&NTI{)q4 zz5_pHg=!XgjSn6;P~X#I5~;gu`}UIzcR#kYY(es9MAo9`;J60%OE5u`0LhRB8G48b zv!j}ts$dih3=Kn&xy=(kYB7r*5EiDwQIz4akS0D3y*LMdLSVS6N|j)GGlfy=ZEp^z-ur3cGj8 z0!cN1SW$Zz2?SeT7J$Sl-R^vFMIbalUH$N^oR!&#>^H8ApS~4+qkc6<@>aKYQL?&! z;)kp#)$XCrf&-Q710&?YQ6N@8OD^*0<$Q_6^N4raAtIbWH8?n!h>aG6g@()ZR{a&M zO#Sm1$ezm<=#u{V=Yd5p8X-*8g&(u_+XM{z0Y&b{WihKteG%ei{`se;9buJVVeZ_y z3VoA7sBuJ>o{sW=eS`6SAdK~(7)eeWey&g3{CS{$;{>M9P4DHmJvr33@?XC$^2;b- z;Ccjk07-Ntk8|f5QP7-lb=8Wo%V}0p_qVf{68QI>g>dEF1&D2_zf5*%!f$5a+3lU5 zg}5{C{pX#BAU-@0&U!4848d~io3p1;PM4Hh z5T(iQ<Z{ux;C=40@1rBf!NHOrJuAVGBTL@jCJT>o;Ik<`E5zE1jN6+7XyG zGbC4t=?Yr^?dP%yU;b1vhTaq-?aWxO6Z!Dro|J>@dcgQrU(#9mcx8teoIo@*ge0C|##@;6=Ud|ShJO-#b- z8yklq(O$hKlAwu(gAPeLk+Crp7kKCBqDPQFfYeDedO*{HcN)7>>(yh2R}xrEnBShM z@b?p))zHrL!%oPxAdpc9S>ySoTL{3g>V5s%jt)AAZMHM~ zfzd%4%3=^=#FA-!Q_~1QKK{fRYS6j@U^Hm`J13!8fF)tM?*b2r3o=mfCNI#m(_9-Z za(Kn$vPa>e$uikLr#saMI7lc+^55|N0H%=NxrEZ21`vJ(2z4F_33^FM$#TE%{Ds!l zR7u(tBwuXa_4xJ8fq?2sm++kNpg@Mj4v8dz#LW^3w*+E}U$qxEhp4xSH3i^tY%w}$ zR&*lRU>AT6+z5Jy>>V8H`ugs0@DPtd*9#UkL&NR(rtl%h7wn7s&kP>2 zw&qCQ=dMZfb7OPv7M$qFiEYJbChD*p_fzp&{5Xve>Emt^7AB zK(T$tj_}ccSOIx4o0{mE88>1+&9p}jpN(h%@hlPLLqkJD8VIZh2Z)2w@akv4V2Pt% z$;8G@n+((xHc7ww`y6Vis;c6adHncda&yS$**O|e^5~a$Tm##2rQP$~Idu(Ze5(%=J{4Z{HcA-Rlh!w^vUS3kzU>MD>lg!1-8_FU0 zH_VxY$U_-}y0-i8n>iZ}2^H~O1n+>AN`+3Fp5v@z*|{=~75D99vZYuFrD5Fo@+$Ml z+#DU~I*k__nmmd2AKwsDBMt#!$M>Te8Y-wb7h)e;im_056P)1g-CRJ3@#bNrvI&#b zu>cJ^)%b|0@t|wu;>C-(jDN3p94KNQ1%+J%#04!7841)&9Y73yOUr2R5m(xWAaej$ zxRr!*^hFe-h=l*5Q_D961qDfv;6!T#v|`&e8$ehHkKs@-sK;`|faDBgHkzDA=?s?)>A z=P(`;0C4;)96&w{o(EpUp{+yG-n{v{qN_&*V>YFJoU?a&aXfq{W9 zgaA{v#Le1hE3;A@v8RqKjkHc2q_cXEk<1P~r$dY?>3hdwSAOp6PC#VQ8_0d;26ut| zX#!pmGY0!la9|Ys_wPqSY7$(wM&z0utlo;|wmZ+{m75hdS;pW=CwN}80cn?y5TAcV zGf!nP@YB@C$oN1alxJo`m`BhLY-g&1fy$f&($pYKU+K1#|HKA$Erml#1j&L z`H>fB!!*J3B3o6s8N9__M@$BNqdhNo`%O= zoY(O(w;S=(2wEBaa0}ua|NZ;-?s0(fz56cC4+m$sf#cyDN5I@Iv>z618WlteH=IS6 z93Kz2g_Mkb0Ni}zcqv|?B@9w)wF)s%m57%CMaXdT_pi*b1bDW4JT5M7_c#K)kkAsS zNzGPK^T&q_XWRA<#mDzOJO3!d(%wEJqaXC)?r}Fax7TI4&z?QYm_av(mo9Y`5PE9Y zM5|E&Y~^UHD9|m#(@?U-d7Y_Rh{)*2H4zg!hSR1|k&?n*GUwsvSCQd{)Xg_;XlS^5 z9C46u9Oeakg!W@Gq(%i2yGE;ff4PBSAs(bSuWY@0x8lDqE!~b65fzo`%R)I9_ZXU_ zIIor#TxSRbDl__FIm<1yAMy!}3P?xr^L=GHhJ_fijq|!KZiAm+T$Em=ePu7D(Hk9& zUjklBvlZeEJ;T!|oZ`HI%-oFE($>aRQ}BP1(GQ+sx6sl@*GC$ySPQ7pDkm$O=x&72 zrqK(MRigsWl~yZ0gO=eeLfnn`Ra7~chAOb&6WR}FOpOW@IGU~DmoICz4m=MUnmOp! 
z{_0&*(-Fg2Ebxr;Lc)pj!s3#G3`|eYUAAn@HdmP*ypV0eCA6JbE;`7Pj zla`D=2n`L*aDy9KMn7CK`GuC=>M13~OJ;A~S#Q9ggop8;e;#Bh2mcmk_L}?eQgXB5 ze;1+nuK%ryn}$z<2aRm(UhXMw^vY5 zP~yPpGiT_q3e+WFbH|tU|E_ayZ?(<*1O1p9_H+?0G$*mso$xTeaN)x1EJ^?EFbW|2 zj-v=2@hdm~%dI@f36Nc?2Yv1`Q`nl?v-Wpjpf%t=Xyj6s14oeZfvAL(7oPbFK%^^ zpf!NbLGF=r{HYjP^xU)hHu;ZN-)C1UJiWIyGYF8kQ-8T6aeNGGu-Y^PW$u=HdUp?m z6Jj?U;E20-ci|Zh1!P&@-cCy@0-lVtJp$blLDB&YBTNB7s*sb4pii=_^xm>GJMaRE z0TKy3*)j^5k*O&{Pq+2`M-ik95{|6Fxr@-17oEW+h9+fuOAAqd0Qt4^Dz*Ca0mU2vgBII0@hB>C5JP3)c7!iHckJM(TAh7{YNj3+ zjc8c`)YthaNJ}$7rWhC)NC&<>$fK}XAD z>4>|9Uv`jSj-bN@noNoIU2lNvU3@ie5d23rrKYA5ZYV_2gC@`1WQpL#@MDvm>68)F ztRV!Oh1kc`WBdQq#eI?epSrlI_qYB&ln-`JVOt}MouG)w3nYYSnA?rQ%_CrBZk6bR zpkyI_E6PO6YJbMu+}!fB6SOElqn<;qR#6^{)Wt0$ld$sftTObPNK=P{WSsN8V`jAA zfMHAefJ52JaT{@h;u2mtR=vikb9_Ng;x#QUFH6r5ULU$3wJG6@eQJVJ5$ zO_(3ZGRBE!hY_zYa2)Hp0mm_ zt;hKeD%D%puh-%bhs|}n$Q;M8@HDRLGy=;iR?&Vy>Vt!9U)b4psS71%?mXotI_?z4ht1CP^y8DY)@yNT%5Ct1-=&an{2NujTN_r%#~-?ju%Y!l==UwdV~@CatWL7@m0faP z4S9+gtAUUtKSX!;FzCVi)h2}LwBgx9dl=i zMhP_|I(GgGHucV?h6d~wm6kPeJJwm}Du+;?@4Wrr~BhJWt-f-fcow;tDA zn&}P2nI)8eP>x(i3r&i-3N_f)s)NjRah24^*zG>>A-exei#U zZUsRv+`ZV)VLD*)^Z(?Rnh7+Xuofh=f*BdR0mU&QU>=4R5&NctiNO@WUW7COLO-1^ z6tIAytXcUY(sJ%i>|LT~#vV&{zz0VzT#$#7B@zv5ic$~<85tQdo^X1w>I%9AJKQIt z$$|FPV`b6Vw#H3|HMWlSDoxl{=X=KXGDjG$bp!?__gALp;UK^;Uejo89YMl`dyx>y_$j_sBw5zdjYz&fORwBgf8JG~)6 z92s}9#jx*A!0Ciwd0AN&sU&JpQff#wZK>>RY+IrK!YiqKNaujuqr8w@LUUmPeE{?X zkfq&W-=pngVKiP|Ud|;TaAbJ~aZx(W%&r{SUiHwSu->z;6AHGf=4?H;EbDW7Ww5u` zd^yO}l!??bQ0$WG{WKe+zhy_{&Wtf=6se$STE@=tVq8FCQPKmsNkqIo zdp-hhgt;gup;trnB_l!2q^_?|kHYOTqW?*($snAD2K}c+Ny-okps#oG3e30JN8&v{ zhuqGK$Pa|5{^Z@KPxs>?ryvn|3Ax*I`kg5EBpRD)sE>modq~5bRe!Ier$=c=4Kn7z z==0y-u1cK$xwLcy2u2-?8U~sO)unc-JSz#Z;#i#Y6ZrLnbdW@vot-5Fr6dwTI^vB% zPO8KuCDrvsisPvQQuS2?8D7Yyf#NW<7cWM8`$+gyf0=RZsC*8}&V0r-wr}XQ=lrlr zI*U88iWst2Y7*qw+<|}A&ci-(C)(yZ1nel>ei+=E&8UIyX#xX8xXamrDvDim)BeHd zx@utZldl3tg_!z)JM{^G8P{KpKiB~iCR<0x%m1n_R4>tzWCMOaGzccd6&GtzxBXsQ z+Z1jicAM?+-1{TPexgC&2K5%ZQqa0Q|8M&5ctzn^2kd!PE+DE+QoF(_%?ivNQVG@Pxq30ezBMT>V%w4wJdeA@!fQuxA| z$xP=KwI@ixD0;SGJZATWIou7Dd8}4_@sv3-9F?f_cC0}b2BUqRGWaN(Q+zALwkiH%ofFo`X=cT`6 zHOAOTX}zZKFC@QdYWi>4?>`04t)Fv4;=iiz-_GKr>wNrJN=2^g$ZeTdmc5sJI4UFZ zGqWCBCVkJM!7cCjzKpzbnBzqyR}b2sdP&!6^ESVJ#h4=xWxm=*mU1+Ad?bHwXcO>m zt?gsk#gFS*k7CWg^_P{Fh8Glw`ld3AHd=g9`&?C-`G!?4wT#om!6uGeIbA)5n7$#N zE!3clmfryANW!!Cz9C%R>rm_vl6s)6n^6U;?>Y^`i9|Sx5##{!6|qS|m6ap`@PmlG zpy36@#MIFh8)OpYCFJL|@82^+bWPMzM2Gn>LWu^cS`AJ2;U<^&g`$R4qQ%}*hS89& z0B0kLVt^d<%*>bZD7K#*IEGh2x_*FH1NN?NY#i?tt3)FFqyDdy{6A%Xp8l{5$_iMNzz|$SYUs5F8{uKOHLh^-1K#ogW&nin0{6 z{dnJd(~Tv+8A>&=2|%wRLjavPEnD5SN`$rc+B)%xrXdz zm0`2$_vnX#<@{;ty6TSj&^F9>0D?K~pag_6RTPOsUw(>~xih`JmwSUqYHfQ)AFW~QQ|+H=dj;PPn)BKJLU z>7gR726hL4W}qOI`~MIoJ}6=dDTn>cI|0!b$v-8=V=Q|(n#EOv0&ZQ@xy;w`6-eL+ z+y;mbjOTcLd6cdSHiOIH6(9aRoX4Ly1%rM#iHRiuZZGpp^5=oN-1(|iSsT*R8|!N% zZiK3iZ>bEan45>E{^2X~CeIw3=QTI#|C9sQOUIOq`e{y?O(>Evf^|rLxuD=+lP45z z!5ZO1^L#GooG|u!=!V`u&2QwyJ*X(r$&%qV5YYf-_92c3i1A0j6Kq4sKm$t@WbPlI z6i;q=ICp<~ZY~xRfW%~qi5DG$0>o8~NIrzXSoYw-7vK<37^=W$f!N8RC%g3D>L_mC zmt~lFK`U!&GpEst!$t^5pjJUET8BB?NZu1utkpAXU(J8Y>^jilzNRz~^$X(N1vr6@ z;D!Vvm7?}3MtzN~$>OT)B&~~1h&Tj=fa(MK1sJ~avfpr5 zp6trJ`m)A&U@g+_);?}|3KXX5_=JaNfBy%WJIXuG?Q+!IoHZmQWF9EiNdP-cuiDz$ z82u(qWuOQo+SZsD8uW@fvy_yVpwYTH_%Qax&G-fBueM+J`kSvFcUY3SHmZNc<;#vA z+(y;|mrkwLqE_4tDqmWwhh|DutYwB{o4O`wz+ z4>9)qFLH@FD`BChc-}QC@M;?Wf}Hq?I4__!aq&c7l95s&9gn)^RG%~?!G`;!rHu_| z5yj)Y(3|`Vr}o1Lf^QtUx;;WmDEzcqeZF*S7|vqwCe8~C?5QE!v+l`BNdQ!#`_rgE zt*_Ne!zy*iQ1^CwBLm>hxOld`r=m0Zp;r+UdXGe?QGx7#v=tmdlDm>-tKX4zjPf?vuDb5uKxcA26HO@Zm^SV%XU_D 
z7wwMC;WSZKlInfxr1zhrEB`Vq`6uTNd`Z+r;vcyDAD}y$Ku%;fENHK8`!AHd!ALnj z?vTSG^OqytOeQ_*UM^xGzHoUkMi)|tYdZqvA<8cE{;OJ5i4u(jz#pz^>p5iYLy*4^ zo1_7&G_g=eXMq45eeF5GeP~uPuc^|9ZNtf_MQxqKy*b49^e_^W59nIoEz8&tK%h}5 z{9dOjumK8CMoPiNz&EpTSZ5O>(gkq`OdWc|67ddr(TJe9FI$n5e>G9!`d*A()+4R; z)zPCzl|4L)I)~6@=jiR&Xd8#DWyA|S|ANpOG167KL8WS#{opxjoW6Qu)&e?J0{H+f z1x`;D?Y0N_{8I99SpJ^b(=p^4Ynzyovb-iuZs4G9aa{6CB>f zWUkHm&X@z1*`Tr(nNxf{KuM))Bk{fwSL=xRFgs*2vyxLZsGVYw9c+#Ym0?XOj7%xe z^)s|v;Y$E6_4V~Hp`2hUY$Gv= z0c6H*{r9hB=pAC<&b#q1FnF`W7R_25b~XLq@VWXQfMTwoEdw=m0>PDl^RN(xPm`ts zmKttDHM{V}0g66s@jqU9O~xTHWWw)5U!PK^%>NUr)O{>^?2)zTFAk8`S3m9%5TFDo zbQsDc$O@f`bqR$;5eg_Gy@MHd>dx7QN`(@Vk6rh;@~)#B#vSX9joi8Q@SH#-Gpbp3 z4Ef!89WHkJ6|n&d^X+tyNs^>bU&W9;{oUJ8Pr~Do`~QUP_+|*MlWy?hlGXn~TUW1W zyy6$W4bX}SSTj*mfpRVay`5t$OuBx7*02dY;1!~Xg9zu(y)^TaJO85vsQykHn`g$h zd?sMu9i2xg=2ho5PSIB~+_;9BX70rYg8Pga6!$PM7#c@0&kOsDn$h1F%s3YOMpf@j zhQJ-SvI{|nm{hkP?cjes=hsdy7tsD1Yt8mL4kn!+Bzl5odnt>jr*(XN%lV!U1^u~; zq1q(Qpx-xee=s5Y3V|Cmk>jPRwa-x6I}qE?5h(pY2T*xm;7HK(>=G1u=~spm1sP^5 z1oNzl7i0;*6U72u!uX59&{cExw~S5+xevBzMqQfWE~{6<=zk3!)}y z+kT~C&&`%SM=-K{b%)Qu?^%bw)(twDgB6-e~mkEm)=pbih;C9MJylX+eQ|@jX!?p({|;p9N11K55VK3B2YGI=Wb? z9uw$|zE$s!6~2kO91P`dLBU8kL%ex*{tPA+ZN%ovl)30Px6*ApTFr%Lb^;t1D*h6X zf+Q`Q@!6+-XdG4bGrM_h7xAIl0a@Bhv4SqY543SpARPvz-@RiJ7k|X_R&bUhZD$Ps z(9pSa=Yad&ymRMxMt;qQ&{D5oSSuBZQ7&jXenqzcj<}ha7@42%TKqODHkR@m#jVFB zC885h&qrowJJz`NxcJmO&omTLPrtFaxX9y^UH*$fAm*|yv`^?6zui5Ewy8L^08((Y zy@c@P)h|TN@n_kx3_m&K%W3}@c%k?~dWc42SK{8cj}UtU>VUt-gxb)VU}b$Nq7n$> zqL1H;%jkfDKYX1(L6h}>w^MLUSlV*)OH0qw&410i=2fqA7t5 zSc*Mm*$II6R~I-33KW{{uF_*(+WbcM{S?aEdeQE|Xr`Fuw}ZxU{c~Pt&-Rvk@}hen zED?SM9xpmUhBCz-;|z!2y$GE^qjclwZa~}VZwmfIso^H$ua;H5ohK8MwfP|n(Z*vH zm7u4o*~qrY%er}XRwYADMzrSe> z2CljWC^%6qNH&fnwtgAkFxkF`)u%>{|pV)Q^tzaYQWYOpEbB!+JAtESyb#?U` z)>eGjVqAU_=)pE((jLnt!o7$KLI7Ogt(S*G=`hBxS!0LK&cysrP-t&&2P1O}8oO|` zLdU;V0Br)YzQ;5PD9Yb$Q%rk`UVjKe^Z|%$Q3z84KPMs*WXKD5JfkSXIfE#Rl#!K% za^=z3|Dx+H!>U}n^>07~3`7xWL_$hBB?Uw}B&55fk&-SE5GiSC5Rj6RE&&M%NkO_n zO1k?$?zNu1pZ$J#k7I@6bk2F#b&WXB-;l{>)eu@0q_d8PO`sJv1IB$5_IOs}(8s*I z*A@w|7(Tvxhed(z+n2!(PmWSfT#9toC)m41MLG@lZrs5_=Sqbbsl;|#===BYpql&+ z`5M%e;oF1yL--q0yRPT{i=U<(u)Uj=Ya+cXsP8g1H0%w?AhKxCb*tt-WMJsr8HZU5 z$aElhk58~FS z2&CHbzWYx89e))!Ma;XRBO|{9wWheV6#RN{Fyv5+P|K{%6n;SW1iG(O@MO5Dh(n|T zvEp$LeT+0%1i1lA@eTs8X{u*1*5j`|P+ zl=k3S2Pmx*b6e5ji4mTo`KIMnTkf%FmPukgg;fsyyZT+M_lR-f3pCM}6)7MJM3U^Z zUn?kPU_FHru&B9onuI^a88teZNHv&#R3D60Aqylrn(qUIWREXO_Xbk%uCL0W;>{5D zy?x#(65b-)@SoQRKbJ)$q{!fnGcpUdr;q4RpREMIQpTC;+3JG!GPE1215_IEK%8iB__L^Tq9R?N(+vq==IPy}w>|O2lzX{S= zU(xU1#gfi=DjL3X7Z(?qr3C|fD{qTm+n0nRZ!+?kGMke%-8?KtAJ$y|}_>}l0!&xn??Q+t%VX(pq^%RtXx`U)X#Ly#^NFBt%@=yXJM^q8&vpqNexjVE^gc_ z)fhibiE-!?7S&i$%1mree-_Z$=+udbst9PTX!sD3 zXmRzVFduPlwN@(XWU?zZ4Bd(9Z&~l+3BszCQ~FExwbaoEU8{h}rhIbSaxOehZ@uom z-Bri30^g5X{o0T1!!k$2WNCgG8DR0&@b)hUeLmvO(^?PZvZUZUcmA&2@bIbkdTNn< zxA=QB%tZVne*c-wTX^DddlW}{D*R1!jgO3L25P%vn%{T&{{HOf_B>w%JoN`v-d3EGS(QwBYBcOTpwmT>0&vd6PS! 
z^>*^{t4aG3MOKE!sDt~5Xwt;prd|y(jj5G)JJ^J@D>Z8@h{hXlgr8!>&N;eq>juqE zB<}@ISyDv}^bW^}c-cI=C4luMJZCN9@1-kGkm5E@eNRtQSeSxS#`aw~|GvCCaq`+T zTd`M+Kbs2d@ePk{pkuE+TX$<67b@i=vI?v`YwggHAlD)@+Y?iE3txHZ27y_v-$A}l=?&NVQo ze#|S0MDiScu;$#}UP&YkmT6TF33x>FD3Gdzp3|wKSuyK&jO}8J>0_J>j!y%Wv-j^urv2$pBt1S~xrgHA< z9%-0VdU_PbOOmU@N~K3zgL0G{rR1(fO;&&G?P(0?HOh8h-45{Pn)%+pYc;t=qG28% zUZFA4ZWE zwZ@M0uMLxI!}HoyX~3q<(x%^hp1DM>fwB7AtRDk(|hlY8k+an%$YS{+;c#p73BZ?@r1dh_x|r53gZ zbynJMv`~LN{6Y3nW+Wi#e2r60UI!1Y%$_;5uD#mtrL<#shp}NV4u93%D0Pl8y{Pz% z@mtqSP~&De$L)@=2dQVJu zV(p`2tG*;jv<12lnsjK{az-Rj@k#xbwJ?-k*{qMEL^oT^j7bku!o4r3UIp&I{5TQS z8rCC!flm1_MVG=!g$jy_6XJ6qKjCrF-BvN`B*tpf{&ObdZL3kg>+js)WRxd5`Nd+A zog%vQb48+Q|CYM{=zl(;RsU#fRul_%e>bA@UV+_^4{!eGlabHj z>R%?H{P&B%Z>1H~)j!^TBfK0Tj06n-zEDIi6QpeL7U>dbNZ#{5KX01vX^)_SPmRfw z_`Xc=zn}OUV$)EZB1sx_}yBltF<4 zH0Z^02NwYr|Q}tRZVQCW)abivX=S=cBK)($N-8~>IWmwrVR>4IS0NK#o($Wtox`P)J9j5wFKwn3%1~=#} zbgN(OEJz_52MD`C*mJyL4rTE4UVHEsfb2E^v_TNAA+Q}F;ITqOFjHw3UDqh|Vn@2& zDuBdXLtCz!lAS8>rk}5s-%827F9q5G$)W+3$=KMW*4w}G3ce=AYw#J)M@h;`KZ{F# z8YZMxg)fK(liHLhB&;*Ow~nGJ@VqR0T_MUEPEn0PWp_=gg_b5NB9}^{=!e;tn%4>p zF`tweiht$UIoP%Qnk32+rC?ZPYZ)7R?XMjsoz>Vsb<0_ec0!vJQnpW@q^0aF`0TEA zP(RF$C>YKdUQ#u8jh4R3E&Z@Z{=lV*cXlDo^Y3aV)GEJUNh7ucRVO5q(VIz-BM1s6 zL&p|k>L!@SDhd8iFo+m~)&NSPK2ZKdhK+K7JT*j|3Um<*pv?UPH3U$U18lD&fy>h6 ze1G^DUXu%OAn2bOKn(@CazKkrfV3B%P667`kkKK~xCkl<+&g!g00r0%mypTt?qT-Y z!a@sFq>ut7(p=Bz2cwZ?wM703`r6qZ;M%*RW;cn z2*xf9grGkFZv&L|P@tUjVTs<2rvWog4HE-H(-1vp$aSjey|=*rb(u=Y`H)f; zqD@mDP@GtrR2wXv&`bH=Ns}WKZ5;0vZ+dbl{?P8{^^lR(H={RCKSh2(fuJuh^JeZ! ze9~iwYZ)EX44HMIB1S1}BNZl8+DU)?g)JV4M+M#W)}!PSEY*zRXQGON-+X$iuZ(A3 zozqS%>0~`_E74}iooGJLsGu_U^xb>8nX@dCp}e0oQ7ftJGCAuj3=E3;FChO50Hr}6 zSaG1S3$4(X(A0BW?n6KWU%fBAkjf261W}MO9*61A2b`SI&@)JdE}R1_ZvfY6gyInj z(jkD({0vmy!1xdb%PB5CzN*;;bOBf@B6QSipmL8CVn_wuqQPR};pur}0?q=K#jS_^ zR6*6z&3pt=93vjeAv3WULFg&hw!Z8+U7ToN2~SIBB|)(>`QAwl;?i??%mM;vAZBr{26j%gJ}w zv@{-YesE7Qb*N^uF zKiYsm0~}I_3V_pgDhZZF{HmpHDG$5Gpa(r$qQT1+GTE3jyDB?=X~MG@VVgqV;>0_mx4A;1%R(%y zx2{l6NKptWmQ8YnCiGS4Ro@S5(#MO@z=|uHDy+bGve12`SEE__`#Km9gy~GNMPPjU z#zqRuM)zczi?f}*Jtb&~$~Dg={m6GFwjWVGU-4Uk8gn9YTL2;umJHoxK^&prw6km1 zug`%@u;j$P6O^<{a}gb-&I3}FJLO+~_ay(-{;u@ZY(a&WY8vhj7;7TB6(c6AO^NDN z{!G7txMg2Uk?Rky*j8D>Q#teT{b~%#PxHB2KV(!^&prJYI3tyR<)1rHXMc{0P6{Yh zKcUG9D6<-kg!-SRPPGtddKGzJ{Dj^p^!sl>#jeGSlQOCUM3Zj5*2JOWRG`SDfj|QS zt8~@;6|iqwlqW!g?<+W>5tE;8Q8u(5fT8CP&BPek7PnxYH<$=gZ28+j`VG--*7XJF zhrG&z_Po#W<5nXhKYY|KczHdK#FV4u;X9(Cgf~t43-b2e@AM)QQi7QIB~&c1(`m}r zVN3rcYFR96c-Qjvxy=~0_R3Y!@cTT_+xP)X@5*E?4^*Tvciu2D!&FR|m6;6n9YpF2 zn<~;R1RonvLf(M}4+IoF5GEm|T+m+p<3`R>LL@R^jcfr5Q5Z<@z-m}*HTxK3sUUS# z&HEZVvdojvEoK_45ZG_Ueb-qqq`3Js=o{&K-MAO@_Z(LbQ>)6n)nXoXt}7M3V8V9J z6DgZc5Rd%e8k#2)mHS7mTuvIlMngGA%G%w+4Fe6zj)%Z!ycezkKgXudcO(-Dwb4h;@p=hxHOlSJW54ue)}6tqYi8lFHGyY%e!C3h&| zUxxM`K*$1M^MaP1B*-F1>)m-^=UCg{%7&ngJjKN5uMYSrNZMWBoc| zaA`{>t2AjFFUPPtvt@q~`IDb4!PAsn%oy34LJP(wk;y6Iff2m}-Un-osubSLr?<1m zgd7R)Sn)dBP0J_M)tQqH*20XHF8vmLcB(jO1_73m*oK`B{BiZ)Ni|;#HpAu1FhzL^ zZkH0-+wZko4}CJUoT+6PBU_tUx{}E2dK|K7dkt5cOvh|IhfDBEZASE?yfTlaBE81Z zsUXt}ISWTY;LJ+W8F4Ma$m35SF-sC~#r1(}4G|7m+6;Fn=xPk0V+0nDc<6{Ab4kEk z*T1%psLg>k1W%w%5F6qUgt(W{72+S3>DM7y{Ta9<=NgN*&1lai%zyWKq`Yqav9l1O zra1biz%m?qVPtxYOvBfM>fMNTuM~zGJvsRzie?r4d#h3{_6soxND#B%4z5*wOn%W3 z;mx3H+k~Ow)VS9)3j|j&(E>f{4Vb9JuM#}=zC6DbChyO{B|MNUW;0zA)F$ueU4PPU zY(F<|c%xnwSt4eo3`GT$a`9fqo!yz5pEp#@Q+dp%Ix+SqX(BQ_C9&HSYSv~W#T0Me zBdEPx74QSdY)_|Ne`m0hCe%f~Z1#|L64*zE*+a4JQzuHxrWO_qK(KIe4dNQU=U-m7 z{%mDxlKTP%L{%y)7uMwRq|qDv%!M{E-2Von<70f7oYGSjiLx|Z-^Lb?L%#*f(lifs z*Ed$OQJ&%3Y}`K@+ttCu~{(kg}}2Flh~d_Zgn?qIp}ZSvoRI+hxx6CpEwvDKv*aj+= 
zl%=2K4If_Z60>LFPODn4upeAqxU>p-wOqUZ*wr=4i`KVq(wkB(Z<%#RgZTBvt;Q9m z#CNrJF?;;i&T3qQP|i(z=Kmf{mpD0oa@|CYt?OAo^J};~3;i47Xu)MA-M1UnUsI*A zV^_HNG>kczL(5CN@at?4MTXg6`O8s$w5+O^z<7n=@h&N;tfV9s2-TRyh3;)82)sNA zo{*}m3^iJ7q;)y14`&p=h}H9+%gqkFXTH}Pe?e(mSO4yF_l<9i4VNc(^$In^Y6A&? z`ML0CRpW4)+|}!EpXe_oIy|eX3j&AF4U(#=G*8ZICJj|4YwVJ9@A|EbRFiBno>if_ zyFP4q$ix&1A=&KTzdwF!0>rF)@)03~l{#Qy?K#Fyaf44*K~s;e|$kWFp4 zn6@?hTNc~1y>QLQY%r5()4O<(!O;PhB5;BuVIuHPdtj6{qdsA69PL zS0~M8ym9rK8?;yLj(pM!O*mwM0u_f5*fhb1(`tcn&^1sIcZl%>$17t}P~dpAR*z}_ z+gTLONV(CYa*NXs36UYS*|fU^9zNM>>kQWJuqum#%0jnosqBBW001(d@h8wtlZwhq zMXjhTI`Vl7L7O4+5vzki&^d-B;DbH+NThV;h~;1N;m05NHl{qvwv}(CK9PQ&&m3!6 z_)K@{)iN6_z}v6vGt~5*wMmn9&%GAD+eQ9T?jIZLXGr5MAX(U-@6i_Q>game-qX=G zUe8@W_WH*Z_oe>aY@gAY=RWE6>NoP;V+3^b89URLha%d4lRG;5?P&fomTLL2vtg`$9Mb*7uS#y#m!;pd3dfC54ElH}>=}>m?jIfNdP1zNWbTAp&{# z{flp)301RrwmhIz?RkKwaMSa1U--`XQsMpc8=+Bm*8{?{G>W{ECM)7MmwKytYV7oR z5})Wc#ymYWdm=y>?uLD|DHB!YHHU@LGxcp`#Js*J)v$b3S~MhetVyB@6@lE z4tv?5XGcBS9tjnh=fx#rx{te73B(QimehBSjaDdoHYcR>FZ}EZ*So#-62$gmZoK&P z6QmSupmP9ZMI@}JNNE9_9%7D;jxh~pGO-1JdIcqpSG^k|6C%I=(eti5#J#AxO1l&p z&iXk!yW}9)gz;kd`fsm&K|p+jR!-@JXlBe-T4NqrkE6=`Sj?Vw-X5>GVl`&Jx!7;9 z&g=e1*OQ#r1H~CfA>S&R4En45&dgwXLtG`CHWMGA#|jO71vj_-@EI#?L@NZHA^2Jd zDgq4I0Nag#ip&e?i)SmPXF@n}eGV^Q*}taPn;>%JOMcOA4WQ|xlN0;DBWwr;I5Cj~ z$uMDCqLehlwiqYRa)%=_UE648JaHjP^g4ZC{-qFoyu#Hn8m*!}9r6B4{m&j-Yf zzgqoJY`vK$>UbPn4m9cVy|n`K{T@wfz7}PO+1akVq~R63>N6iFzdg!g**{{2Wib?w zA%yz&$1}87TN8J_ja~?TyD#KuG3C2eg%x|g#arFxJ|%V9bJog&>6IO%M}zQ;DV z;48W-dMV)wrK;K^r5V!EpFvImdMqPWLYS?#096NMCIW)@fKpv4sPVpj{hFszGC3*e z`pJ{l&}hTj;b6%7+k;2>3;g(E63;&Mk1Xz@V0nDH!VrTE>}Sx~XyB$jU$`VmdzxKH zDtI<5m~q*M%ICOhQI5R}7LpIrS)kF#__czR?qH$#4)}ufr-5G~1h;oO_LzX)Ff!x> zgiK&*0PBT5th#`VC9$=&MLN464)kBVWcB>kHS2eJZFMvdBQ(K&=g1)c0|Wy`AP@qD zY-CDGn0&So7-^7ViL5wX%iJOv8`kQyKWa)ELs*q;#6(An8;@4~`h9hnp;z>%YFYYTwSf|HBT zK7J@G2);15qGc0Si0?NR5}%4=^(VLIoT#sah3QHH$EVZVtEYoThq;~~Wof+5EpK?H zF1Hq^Wt$WACG~XkvsSGM%hKG{3g4K?l|Nq|FC_5A)?zbh7jxw0gPkH__>wz*#Lf`L zJKh&Q{H^EZGw~P&@-k$q#P-&fK13f@P$r>P#oJ{WExi{Y&8x#AzaHwm9 zb?vGU3C~Acy+l-DrNR2yM4tnl57L_jkE}-M-{r`px8A=h7Zyax@o@AjHHxsgW$@)) zvUhbZQ}#VYa7UH`GRqr#}70$9%s<9kCr-Ug&h zx4r9J5G^tl1)c)B;kw|AUJ0DbXMtfl%YC_zByQa=CI%+DKCho}xQpcsSZ&;aN* z0O|*6kP)adrA%<>hS9&ITeAdhWKq**z|e^6qx+lFPo@(#K@mL{~_(z@y?Y}$vQ6nSWRr|SQv;HxGrWU_N z?J%?I#i8^b7XrPcKEEIPYx_u;hn<7}>H@kMhTYQQbe&6wdakVaNz>27){gZKC+N3Vb% zB;oL6&p|aU#&(Z?=s<)aCW6pwH)6Ojbz9(0y7w=bw z7(^WU41XkaQsc7+rJXLqf;&~8%Kxd?F!%5L&i|%9i|uqZY?+EP#>6XHNUwo700O=zX{cm; zPK!q+&5Jgf9u=P)*X03~5$s4(%YVZeyt$w$LKxrawBb)GYL2%*`41IZvxIGs@jCi2 zHCWI2`R{i0{O(V8IIhSh_dF8GT{wUdNO-(h_QS#eFnRD(~}Ivxk-lBM!h;0J9rOPf`!`BSm0k|LsSIjt2|kg zI=6j`+|DXDr4I-(PSuz&dj1dGqY)AWtW)lSjQ*v4l9#QAH zpM{16;O##X3%o&FQ&;5(l2J`xk5E!xuI&l)>99vdk|g6S_@7=6u?x zfm5z22SlIEGC!*pj4f(z z1g`APK0P%bPUk>L(Jco%*8JHh$6~_eUVN6NGwkVoLdHC(1**Xyr$Q8*5Y=&Fd(4## zb-V6P7rwT3w{v9KJDVmID2|(#xW#6__qc3q2ixpmY|S=W+6Fnt_B=>AL^Zz^R!^kn zN5WgrVB9vt4rrH9s4r9y&K;RUO z6Tq1Y+DYI-A$rMF&jXG4PIV|(E5(%LY|3ebHC$ehU}6RaR#Zc&6Uxh9!ETIp|IS0= zz2KUrf{){6*V_}To$X$Uym@;vwZA-_uJA3xVfR)QZ`Fec$#?Hr9)ED+>C^q0JVJ?i z53<3C2Nc}&LaFJvaY@g=oXKg|sh5O%XZ6!2 z>>o?1d$BK{Tw*E{_>MoB%xNL4x_e_gVrwn#@K*h;@68WZ#iww6Z+UiBu1kgV$W(TF z()fg0>w3IdJLFa6vc|+AxrCu)MMEq7P=UQ(FnroT4;A^((gzEKM+1o-++K9k^=wfW zrFuWAUz5p-Qw@ia8y>Ej$W9htBPqbom2K-*{$W?B69`3)iVJr;-?Bn7e#w#9bIxTv zF=<)6z;8Md<4sgd4g7AuDOZ)~h@_mKkLBEO!N}o!&ABP)qmwVxnyn#YMV9pVcsyK8 z?^}EOVXV4!;8#`xZm{`7rU0-&}9Qs+={qo6a7%H15E_YW){E+Wml-{Rx2f3U|R zm|sLfotvE^n$?-uRP~zU>qOV{Xd!amDsn%HI3C^j<6cHp28mni+2!{L`(Wv2WF>JK z-Y)}g5>UvfQWHm;J{qT64doyUv0MUs@aJwVdk6t^lLh^IbZQAk4hk4XK(+0=$gWv{ 
z@7L6bI0-;qz5byVxq^XcW-eszj**4TTcquTly z{I-_*OrS0VhX4l%siDfPIzQ$INI(2O%W}=oQLE1o&d8<;A?bjjrPfyyZj20HC@;g= zUa7gsJbep^fH`+2!ep65!$!S`;b4%I&*%U>L&zdLy`R9j|oBFm->3dtc>ucTR>eomhuEw2A=yWOtDUVCKIOR9Uwd;!dix+3h!5Kt3GQw~;xtjeYP#>LHVkhg?VUwtlf^&u+6@S1caF-S^dV&Gd7(TUMt@CHUGQ7la-zIQoHN3??lsz%>ZKJ|bG3 zXE&~^GIt2K0`GYG{@4mIVuRRfFh%wApQXVhBr>en8*O_nha66Liojt2MBoI1t zqbDwBa(l{bAD?6}nbOt}Oa8P*sI`*cd*<@n2@=*io>(QE@|bvBmfYMF`iQO7U~n;Z zSfV(l&q+);F@4 zC@b>nakha9#Ene0qdnfxuSh*z+)Qm-h(wTt`V-?~;qaov(3*Lm2ryAD#Hf04;|BJt-J?iPU`orn7T(=T3AetU91i@hvSa zH3`UJKlJ?OwCfge;<3=@rCD*^cqYx@P$go*E7Jbt)!g=irVFzTz+5Uw?k~^KI7_<4B1B}d28bHcM94~v2X9-8gRD~!e^1ju z+*if1t?!LX?Sm$f3Go5HRf7*wbCuk+m`cTEA3hZH|B_@ik)*BIzLQ86rYZ5~YJ-@E zJ=*nZw3h|SK|7cqg&4&=1Q@C}qcZ$*5^*Z1Tt)U!ymW6w@XY8X?wIy8Uj%U7&_8Gp zUc=JC_){=`xaU81`NidZ4eAQLtL2XSt?uBYO)?bSZ?0?Gw%YACI+v!=Z(YU{pgb?| zeZnL9^9p}(jayS_@P~xx$`rjUjB+wZ4_q(x_(}nhH7s{;-^4imletx$N@#^GwG!wp zfE>tcndcdEREIR)o%%XC8ZAs(E3)@frG4U-;oVffZ%R|+PI#PmX+z(Tg;b#K%Qt)Q z03m86#1uaivwk_{eitRQEeyZlO1x05acLC!<#g@wh3;9=y}`WoZoSjjyF=cp=}My? z67!c<(y5>qVQjfu(0XOg04h^}&h3m+5DKcT<$tq-jKPA*yTLFu6>oYigA_A(&|;i& z-*rkx`Pyp#lJA!+f&N0OK%Dg=rHlr!R{$$5o;KA3DrNL|xdG+t61T|cN;b-^?!Bw8 zzv8_<{uE4ds__$mn?TGzV7w|f8<*)Qjxg$Ims~23%ch9w>|1kifodE^>{HgMuk4^r z1OAHA!pU47tiNh52>s^_I81=>=b0{~f8ncX;-~q-@lrkGcbX!>M)-P;Q;Xs$YptlE z-6V_k#hdG^r+6YG8ZPF~FC2O{iY;)og|HJY<`am z*58<^GrK&o`tWG>cz7q|VoBG(cd0okyRqpZ%gEGS!CegOl#~yZWYfVqJFz%r>$jNr z7FoI)mv%nwDQsRjAw4qWsd8DkigD)1{1$x?8;#K3V4!wJr^s!19g2^^*3F4BUq4gx z(#8!DP!`NlTps?hvf#2v=uK{@*|wL_i~joFndiyt6UvUi~HN!du)VgXXJ->2L6fllz zgj!VNqPr5R$0JI#YuswvmK_8e#s-VlbY_o9L!|z61!qkkr-l6v*xDOdi7k~DQIfaG z-L+ZBnYAaH3VxVOGGn2IgB zq;^7^b-Q6hbuyLAQ~J9Ja^J_!12?GLetinwzj+gX=t&8;U`^x^OHXTe{)e(s_6HQi zoMQqR+B-hH(J42juDM#ClWjP-aU?zvVz5nXY|U6Wo(k$7m+#BJMW^y`kgB1+%DhIa z{{B7jpuk(}c=q|Oef`04FV-+5>0?yiH{qly(dTS{U1@${LEE}BAGb7}E8u-jSKi>u z7$pjZ^3Thkr75P$i6;-}Kq9B2>19z)+)%U6@K?iv{kL(>wYT?QU|Ov=9#Vu^Yi8GF z_@qCouZ&LPtaAB77Ebc8;_*EtFlB5jGUHf`<}xs`NVxM;Y)QBEi?nMGW|a$ttEyPY z<#rJdR*dIaKiBMUd6T2pA9fub943}~R|cJn??pbsWKa&d+%^YvnTF%VU(;f{%>HP* zW=vIBkZd;@5*;u}%a5aC&{*6cm`+OxAKFyZyZb?snlo?n5xQdY zmZNi1dIHXZnU(cc?!%k;3F2~QJ3Am+6&n>W>4IUXC$Gyh#&nB#MObK>Zo#s%>^F|>jN zjI=MGn{pP|i3$?Agc<6}EJ%myt)1=rlo|@dbJ{7l?48rYocGDT?Cu|cw6t1$C@4I< zPYwKu-638qMFZ7t($uW&z1U9*G$l)@G14;vvd<>+7%w$b3ONfmgT*NMF7Ao==vkk= zJ#`F)v+>EgD_|riPmNXCUsOI3AG`S1yMvm~N;>1OUrI1pCj>X;Nc2B$B(PHIPxk@$`H$Ut9It zR7#n3f3f(NyJh%RD$<%fmcqh1wiF@6VfD!YVb+N+5}gK_S`eE&MU>%4L#zq2%`E`OLtBGae;{UWL|@eQO( zL2qr!lgt+L79B;2!7M?&xNyDW!mn0S%2PrMZSrqPjFv**ErOSrH5IlZDpqK0c2d+? 
znFid6&-`PG@BUfP4-8D?5GtQ|R`>P?R*aHJ;vZ~Dc{y5kzLX<=`h`E53e>vQ+5w5B zROR&(ubl9=)$Y@Nskd$*ap@Z#zfvBm-mItOVNpB3GAk;kLJAt9%+D`DTSHowP-kms zje}=@Cr1|K<6uegCUDP6+9*$6(H$pBZjux)jF9!)-sZ$W4HX3`a#-k3H*68m8&}5h zPw8$D%F%$D1)PC~?1?Rj+s~_|6zT7cWPw-+kl~rNN3RpylJ+ETlEE7CAH6;` zD{JxhU6`Zv7!dfBY-|zG1Q-CIEd&Z}psSJxx=9YYND$VwYcO6t3Jyi+WSNSX$3|9P z{{d7IX$1v!u$I|`$AAYyarHkEe%~}gm|0QteHR}4f19$PFr$UT1@$y5$g_aVBnPD$ zn0Po;Uy=GhS^$pfYRa(KnSPk72%;Zw5&!=*$zF$uXWUkm<2vb0=BLd>Tm}CeOLQbu zd1BoYAV88}?rp&3;oyh@Eiob-1EuFHJ3C~gG02dVRNnMu1P(x2#z`#q&4GxB2tH>Q zKx4=(M$j^X6#EvK5kIk85E`bE{_hdpL=@dudtNvS4*`%71w<}g;Pvl-9tj{$V{$^I z(*#y4ayauSZx@YPRV@Dkhv+eMysCleJ@US$kp3Ole;p22iIlSh5$KKI)7kOD&P97p zVr&SI5e^^1X7pOAGXRIZ94*sq}*q5)K|;kITlD9D-4Uv7 zP?!e!RBXdTM9qXBBqsWAWQ-f&#kB~#XyKl~;pVi0si`TdXJpr6-PZRT$iL7;S67k8 z9{R^&{-0Yy^(`c?&_x071klJ?32*ML4h6$;?m$ZrUww@wM_1+b1E*y9AIP}!CVk=j zA<*(5gL9E|5zy0;S>KOqKd(iYvHj0Y_Qr4ra4;$^_d#9A1@a~6q`ZNIbQgLCcq<7^ zw*>$TuL?{cQBaM7Hx>g9_0jD?h3BOafHv2W6#1WPOosvFlbkI9uq^a~Z5PB)g}1BH zenLw%0Hlv|>+4K!w?9(C4N&x7*U58fz=h})uDH6mVEO4SD^}0W&r{vJ_%ZqaHKG9C z!~b?3jA0}~hyb`!4Rv4_Vfc0b5R|~Q@Kitp)Se~r|JYPurMIqELM#f(&R8 z`W`0In}AIO%|4zF3@wJ44Jx1afTqy+sHjd9P!XV^smx&&seXWVeC>bF4skEQo_L`b z4A64a8Mtu}-2z~jalykGWZxXWlz+qks7`doctn z=;kfb$0&`^ya(t2t)9pBRUkqCfd~8{CFL9NWxs-sAt(}{7~|y2ZfarznQs%|CW1hr z3BN=hLKp%Z4n-L>bMOVlASVZ~(ZCBZ>Hy0=;Fvn$hC2cf7xMP7QizF(AuJX|=m3C1 zXqT9R5*-ZJQ!gdoho^Iug5=Rq?-AmFO8}d%VwcP%13&#elg?=o4YxoF+C=t01-L!Z zzhyNXuQWwlYaqXeMC*|%7Tu}O{8|o6y zQoZ)9ofaoipjm>U)tG(pv|FDQuiDEC?lS-fu)*l}TlWQ!p|QvW7ZgMU4=5aHK)|Nf z)UN1mLH>)uJf|U@8y42(s@4S+<*Mg3 zt9i9!H5TU@m^OCll+UiBC;hFLK=Y+;c*2-#UUgFgRj6ob)|Hbt)xlQiaAG0F9q#E3 z9fZOua32RGIZC0J757hMM@8D8Vz!Yqh#>GA( z%ITIxLhezgmkRJ8RXm`O)8;9yJXJC+@$oH?YaE`ddr_D1E__PW_VVhoIkTucRej}q z81mZRuP#ZV=f_L3GhMT5nUU=`6>B?PA@Fm*e}!p05I@nMqM$pi)vLA?BRSN6roUJ+ z^v~dEwW6Wa?#MUHTcmwQq#xB2MS^8$$@R$pqQt&d?YwmeH&*W7g(pB#R8>`F1-PVv z0fo=7+DTV@e7wOz`#-aSA?XW*8373S00jk(5Wxj%YHC{0;-WORw4eio1VvZRiHD61 z73J0AB=4{$^?NB+Fp%XD-26TWqo}5)riedLo7pHCKE!2z^&ZL$Xg%_h2fgQz)dG>T zi~zk{KtKQ{lnnmP`n1pf$6fJM@%0gQTG8aUj>3lDV8XG3DWqeOZ#fN&#?@!c~C0`sEZcS_aRA~ zd{N!BG!@m1sC&v|8Y1Cc>7hkUE>_&1D=XVROKg7_O0!;hHS)3l+dZqdd|O{xgrKp&>YA@4iVQ=*=JgJ~hGbc2BOw+*IQ7P0$pRMorcZnX=vTd%U zf?&akIzCFuBfHQ!#`Q0Kt=aq!pEovP2LkYG*vIOi9*X+j$+{0o1ExQ>W)H z+K$UZLCMLRdfrSVbVJ zpy-CLg5DAh>KktX*15MiO@4e_6z&W%c1#fE00{-`C3sL1Z3Qh{ruGK>_bn{x2!A%5 z{7D;}+ilnsq=(Brs%07Sl&Nu%zDbq)Au|GrwY1E~Ujn{Fuh1zn(Gn)JsHRlqyq6ru z#&`4kpy`s{T1iElfl{n7K~sCdh;rSH8l6H;&$kTxHc{Ko;zP-{sBwOF){K2iq4|3e zqy8fVF(E(a@)SrPudW#eSUytU(!(K-rpJ4H%uYry$Dv45bawi-+0eiz^v)S6Q?!~A z2es1K4)q-cht?O&%CP&ad*qI0e3-J zrF4{w%@=$&RqPe_zF7bt|&3FMZ6dJ%|ayw_iEx-9_hJ% zNC|(c5S8yV*Vy{php4bXKeIb4bPA8o2UBfh?xMApE-F^_SZCl{AW7YmEr0y*3R~sp z3{0(|QEFQYx?(X6S2P8f1u)z`B^JFbw=pQ(u$3{o!~m_`mUr!i@LcH;{`jpBCOKA% zsmIG$jxQXjqJR^LhBRNH*a)iTz-;cc&i}=T;hO$7vYj! 
zKYzY0DPdE$op4Ycuh^dEb8v7l(AW2YohA+XoG{BrIhdY-0WG){Ew#6(|LglMCA;$& zp6i(kT2}mD``iiYH7`@!oK7qU!g78oNOJ75>%AGL=Y7{hFZy0Rug@x^3cQP=yc1M$n8Q-J) zmPR<>9Z+jtqES5b%QwH|wkIRDeqLAptI1Sj0?Nl+I10P(KQPBK6cw-Tmd&IU1>2ZX zx_n$yb6@6kI^hbGO>gwu^O#WRZ4MSFk)|C}iNmD5LoWF9*|Hhqmo441q)4R+>5kU< zpJ*-j@SifAR$7lPuQYZ`;?y%?8}ux(rMO_pcR!OYBs8DHH5tdX=Pe#y%1DeCa{CyZ z)~#r(^^l!+BddOjMRzecCPPUbg%N$s?9p6Cb{1+9_OkBg=N!5QtbU3+F87XciGs}O z&X>?p5}N~pvgoantY_zDF{m7jq*d=mCnxyle88e^X`$eFcxA`?8-JAmv#eHp^VAS3 z^WQIj^dT-GnWscKI=y!TuD;UD7S#|M#t@X<~}yI#4B1d8#~RLqBJcyU=3A74`s+F(jF^O_lP5-`U8 znPh&N>?PXX@Wjl7{ne<|VoYzs>%sRX_FKG3l?iwSN2z{~$mF}dJ@78S)N&`D|2|Yz z&ekhcKpXsQ9n-$>yv}4C&F7P5Spj=B+CzI@bWKNhDF+oACV%Zh6oo*E4zqyFR;A+6 zFPfnvX41)CF%>$R7#ijprPFOzOt-4`1t_yC#t*=NY?im17t?WrLM{56(^;OacgxPb z;I+e%O39E*>yh_GvEJLionEQ)%@jLvxoHh%k|}1V-X5?*{wpeJbY9VCd%GNU;Bhs- z%hm%hzlVUD6ya7yJN8kXVo|)kEsB*}0b1UONa1%Fry$F3GAt=}-qe+Dd!i2Uof=@= zKd_5nnS1HIa->OtSD$0t8LMETaR%EmfXCm6QTnE*!yF9u=x{2lj*-DxTI4M$7S;hP zr-+MHPKEsn47Q66ChSCr0b2^S@e|3(HHz+o8)2=VsNtpNoQ@wSJ;GU5jm9h9-a+3# zCtCme7ZAYN+P}f(Z3np;{?mGeNFht_f6&!P5275xR`!iX#E4+3Ly1svXmulM%0(hbL~)Cy;vmS0;J zm1>%r*Nu#f0Na;SKU|>21MRx9W$l3&uYX=Dnuj32qR~Y;e}bowaLN9CNJtB`;58k+ zMl64SNmu>%RgzrDpx%F81{?XKx=tJvXp=!~Wz~OdfSPva;-3@qR%L0X`VB=nXI<); z@V}oPm7I*t!NIZRA^`PtLJ|_HhYzoQ{`?s^ELj@A_;?~PscTs=o4Y*u=Wk*PIRY~yIK7Rb@(`{PWu5Hn8BlDjViERXrO7vU4%>4WtIy#fm zT_5A)%a>obFh7w&pkHmG&`UrKZq3^&0{C_aV4&|R(Crax~dJe_O3(e@ZPylqvm zBeJ!1C-FwH*?@*L#16*BnT9|jLTWSt=?&pLWZOjZa1RmA=kbjj+1T_D65PMvLx@ad zYO8`VKcQ?kfMN`>gTTlnWE|`VMianRCPL;&{vdp5YuihxrlO)^E+723sskQhzCWHQ2euo@A~|LpE2n#a5TEQ_}rB(@<=PEIBv zwLmjAg1+WF;XEQ~Bdi2iTNZ3D0XA$Fqa#hv6_pGROuMOzho zA&$0WaNDyX)2Bl0V&u?j4Vp39s=&(6-lhZD8(Sfqj1W6em4(=m-pU?GA|@f>XcK?- zjI9ka*>E<5)*505eJ!I9trEklMiWpOBAiFyX>CJ_R-~g?WwQX!< zgtg5-Ab`CMAgyoNY#==gu|tv_qY2=x63!!`KOqlr#@ec2kY3wk@RYYzK@VBlAt5TN znQ#qUB!tepf8^S#AX4FI19eO&+Xldf4MGw@V{bG8-<)tBrjRrf65PGpO!(5wOnpV@ z=b}B4QB-6k;XDBGi009*Tp^l=`E+7}j7pZA5q#!whkPnI|M!N!t<-%BZ*neuyE?-< z^F7TD)qj303cKTf?y|SHSNHyXEC)o@H8quxb>maXa>{?6KMMz8xkH>^2cKVODyBvp zNWHL7JAw5CIJrJBtO6=)vKf)(VD&|6O;d4E@$o?b>Mw5?92~^oKEC$vL$hxL*1Bl) zfwXnWMd?d`A+W-*EEsH&0WGU{p`rfpm;u{27$i2_Bbdm%V0~!b!}PhFIw9a>A~V0i z!?a&>2B!nzdRr77t`6M*|DHt|_W1KuSZm=>RLFH+WUlKFY{SxW&~b7SqPchpYOgj} z#1V`-;uDzJc7a+Ln6Hqj^RF&1Ji)1)E+W(j`4r5R9MXN_b@o`DQPuB@AF$9NbIsl% zPLzYP_rFUFmN{Q^@&z*D53rOifX2`CP*zhT0L(x$#QEDW0){>?*8Bgl_1)oIwtv`P zQ52O*X2?iJ6fKbvO0?`9B85^`_KfV2tYl|pB_Sy*qO5G9LPkVF$mTuo=Xrkb@&55X z$MGD`5bp1N-`910KIi8g_%%eMhv3VMLvLaI_Bb|{;Mvx9KR2id0X)Hh{RO7})j;)j zP*PHoDsdBugnIyz((>|@zzIG8dmw@(PdomhApGTE`A#x~7W2rFBb9J(GrWD921$A& zB6v4_V13PX`0!5TFld!J2p>9hh(tnrejhGr$jT&w6~@NL2a%LT+=c9mG_g%SAyCv! 
zsTcR*cp-LZa2S+++yD_x{kc6cKmHCnPfKTK4OEdGT`4bD%S#gQ&Wk$D@ruEm5M~uh z+5hfl-Vf>TZzthvaNIwEG4^ve4e1Ni0H5ArVRA$&l9iSZvhCWHtO8oE4W5I>`UY$~ z9pLbZ*bWx-_B;3Opu~0ig@*d7@jrmMHmnCgtoN%6KPbx|NhEMXnM~_1_oA@S8zfCc zgufe_A(3$85FY>5OjwT}pumD%geS;D9NgR#L`a5}l`v@l8vH7R_F2ITlE?-jQcERW zL_wM}k1QPTaK@2HD4x*=`T%W!ntW#9otJs;NfMFSMMQ2q_vx{Z#A( z6D_R=7ywm>`2>30G5Ft$oq9l-mo5NHdo!s9uQlD?Q?_FdH+gWLIH8dY>=fIT3iP~Z z(;AU=h!>OKsU$VkeZgphd~Y<4l4B@#c_Fgc|`s(abf@L52mQMroT@$KBYD`)G$-U_z;S_d;PiN> zK;-vf)O`R0;FA0RHlm0enSt3h5pxIvyL|BPn9#sL*&8=_;C1@c1+gPLL7u4>7$Oei`Khv_vyWklHgPN`KARw-eC5fv5HsWXG-h2$Z< zU&-+*FC8a>V3ypGX%V>qC%>V(;FCZTVT~_bZlGN@5>C7AK~=HN-E&4^X{zo{8NUqJPT8i#7@kF{UKtu*m`;_&Cfg*owXi&`@ z!zKg#{LGR91%2RVSQgT-oXDx#j3HPf;xw>}$RqL=xL5#=Tx0liw6wKlS^ix=8F@vE z0e*t$k_8NQ?%WBNZ~K%x9uRD!+#pgFUcGvCT>mb}gfFxXVat(o+$|>*%_SzrOdz-# zZvVaLPe#E3_;&|Cl>iWQ9vep`mLFl6O#(8oS=)N(3HhStLPbkT!odLF*t1Sf zV%W$8T-KAy%O!z~644@vqa`B|#|4%hC}O!?3Uq-rM8m%1j6mQbqs^Ph2%kY7DcBwiq7c!2Q0fMV=p zQ&SBrW~h*|inp*5p?WZBCXjWKA+SkaK0Z&7kiBI%h9nkIlleS6^#xchz|9DMpmGkg~XF54HpuekX5Yi4F!@td$?<@ZjM!SNY4`(0xr4N6z|4ZNwB zSdScu{`cT&vyP04k~stl_~EegN?2jJpD~dsI#?8z9-Q`UK*R|q5GYBe!KCCsgesg> zG+5GMQ9Xn8YzBlSsWG{OD!NEz{tc@Ht&n~K^n4B zNY0WF@~*t~Q)8nXvg?RXbmN9b%De_I9sH?15&faFo*i!RU)9cPpQM$uO`h}CBZvqg zLubPNMoxmQ?+zFx5XPvH`JlelVD6#DLvxtO03E`~iiDu#bED728p7(X^V_`pIWZx? zzSrO?#s)-60;rPh6LZnX+pNL7g9cD5Bwd~jBW?PZ|EYLBvJg4i@M2KmeAb>PJn5I& z-jBdEv>p3#hyd%vK1!L`OgX(AHH3F386_hQsSl41T$sj5%8B!vaDIR9zb79#2=LF` zPN1^mo!M4U6vQFZ2}N8Dyn(WQ9X40V*Z}Orc-)*0H5@r<8mUUrLRZag9}%Ad{5BD| zEabLYylQKx(i37pxAGp8(`VKHR%YinV{jp-u>aCi)&sB_Mg9y?8MKzz+=L`zABjl6 zCf-53IvP7BM=eDkpS;V4IJjWc=>+IPWK3We%SZ9-{ouiObYO&AJ@I-W)9hGDoU^0j zHI%Ca)JCKx0bn=8MQXjZ69gV+1u-VvR*1j>862rZ=r4W%BXHDHIHpNSNU0bujQ^R*xEc_y*hyv zO)7N@_=5QE0VI2%iU;%Z%!Lcos13x##9kwy32!FxC5#m~czDw5ECLaIV~jBXQ5N0# z`a0Qe_fS*#K8X8?l;Kx*zHI|^Ya)s4PvYsoo@NVHXU9jo)U$6{|E)#6kI}#u+KGLz z5A)T?gP{`c>-eN|2i-n}`twA~6i@Bm{!ISFH6)tT*EYdVs}^?AeHW1ZwHy z;{(zCK{z(2g-!a2{jXAQ3qmB+4kAMmy_v&lw)*<|7BHO=Pl8c0J(?0(IXMaCLv8}b$ z^TC77B%FqNliam9I3f?8LPrMw;yevX-)gd?o;{~k2rT2Dh4Ul^Zw~S3(OEAqFV}bD zQG_BhZX0l(B?@6KE-qp#To?anF{(8kNg`DM>KPR$gC0`S-D|jx4(2K|+Fl z_@{1!y&Sn#MG+a!h*wo3VVHRJh)5Q=5&P8)@*MX)lUDE_ULcbtnsjGVs3q5vg0h%F zmZz+EU{uKC>2$I+Q0>z+FNn*!Y=mMQaZr-v*q?X7X#2<=@-m8u4HP$n^*@;f!17kM~glk{Jh^JQ6!eUaU0^m38frUZ#td~?FnfZLd93E#ooC&&tLbC4Bu(m_@*o_Kb1tDX5zwuY>3yec3k3TXN#@cdhu{IuG=waQD#q>#clX-YaM7+6RaI?QT=-sZ{Uj=? 
[... base85-encoded binary image data omitted ...]
literal 0
HcmV?d00001

diff --git a/misc/method_smaller.png b/misc/method_smaller.png
new file mode 100644
index 0000000000000000000000000000000000000000..8224d15b2715b5069a7c079ad10155b64c6fb6c9
GIT binary patch
literal 67803
[... base85-encoded binary image data omitted ...]
diff --git a/preprocess_dialogs/preprocess.py b/preprocess_dialogs/preprocess.py
new file mode 100644
index 0000000..10cb0a0
--- /dev/null
+++ b/preprocess_dialogs/preprocess.py
@@ -0,0 +1,735 @@
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+
+# This script preprocesses clevr-dialog questions
+
+from copy import deepcopy
+from tqdm import tqdm
+import numpy as np
+import h5py
+import json
+import argparse
+import os
+import sys
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+parser = argparse.ArgumentParser()
+
+parser.add_argument(
+    '--input_dialogs_json',
+    help='The path of the raw dialog json file.',
+    required=True
+    )
+
+parser.add_argument(
+    '--input_vocab_json',
+    help='The path of the generated vocab.',
+    required=True
+)
+
+parser.add_argument(
+    '--output_vocab_json',
+    help='The path to save the generated vocab.',
+    required=True
+)
+
+parser.add_argument(
+    '--output_h5_file',
+    help='The path of the output h5 file.',
+    required=True
+)
+
+parser.add_argument(
+    '--mode',
+    help='The preprocessing strategy.',
+    choices=['stack', 'concat'],
+    required=True
+)
+
+parser.add_argument(
+    '--split',
+    help='The split type of the data.',
+    choices=['train', 'val', 'test', 'finetune'],
+    required=True
+)
+
+parser.add_argument(
+    '--percentage',
+    default=1.0,
+    type=float,
+    help='The percentage of data to use in training.'
+)
+
+parser.add_argument(
+    '--num_rounds',
+    type=int,
+    default=10,
+    help='The total number of rounds in one dialog.'
+)
+
+parser.add_argument(
+    '--val_size',
+    type=int,
+    help='The size of the validation set.',
+    required=True
+)
+
+
+SPECIAL_TOKENS = {
+    '<NULL>': 0,
+    '<START>': 1,
+    '<END>': 2,
+    '<UNK>': 3,
+}
+
+
+def tokenize(s, delim=' ',
+             add_start_token=True, add_end_token=True,
+             punct_to_keep=None, punct_to_remove=None):
+    """
+    Tokenize a sequence, converting a string s into a list of (string) tokens by
+    splitting on the specified delimiter. Optionally keep or remove certain
+    punctuation marks and add start and end tokens.
+    """
+    if punct_to_keep is not None:
+        for p in punct_to_keep:
+            s = s.replace(p, '%s%s' % (delim, p))
+
+    if punct_to_remove is not None:
+        for p in punct_to_remove:
+            s = s.replace(p, '')
+
+    tokens = s.split(delim)
+    if add_start_token:
+        tokens.insert(0, '<START>')
+    if add_end_token:
+        tokens.append('<END>')
+    return tokens
+
+
+def build_vocab(sequences, min_token_count=1, delim=' ',
+                punct_to_keep=None, punct_to_remove=None):
+    token_to_count = {}
+    tokenize_kwargs = {
+        'delim': delim,
+        'punct_to_keep': punct_to_keep,
+        'punct_to_remove': punct_to_remove,
+    }
+    for seq in sequences:
+        seq_tokens = tokenize(seq, **tokenize_kwargs,
+                              add_start_token=False, add_end_token=False)
+        for token in seq_tokens:
+            if token not in token_to_count:
+                token_to_count[token] = 0
+            token_to_count[token] += 1
+
+    token_to_idx = {}
+    for token, idx in SPECIAL_TOKENS.items():
+        token_to_idx[token] = idx
+    for token, count in sorted(token_to_count.items()):
+        if count >= min_token_count:
+            token_to_idx[token] = len(token_to_idx)
+
+    return token_to_idx
+
+
+def encode(seq_tokens, token_to_idx, allow_unk=False):
+    seq_idx = []
+    for token in seq_tokens:
+        if token not in token_to_idx:
+            if allow_unk:
+                token = '<UNK>'
+            else:
+                raise KeyError('Token "%s" not in vocab' % token)
+        seq_idx.append(token_to_idx[token])
+    return seq_idx
+
+
+def decode(seq_idx, idx_to_token, delim=None, stop_at_end=True):
+    tokens = []
+    for idx in seq_idx:
+        tokens.append(idx_to_token[idx])
+        if stop_at_end and tokens[-1] == '<END>':
+            break
+    if delim is None:
+        return tokens
+    else:
+        return delim.join(tokens)
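+
+
+# Illustrative sketch (not part of the original pipeline): how tokenize,
+# encode and decode compose. The toy vocab below is an assumption for the
+# example only; real vocabs come from build_vocab() over the whole dataset.
+#
+#   toy_vocab = dict(SPECIAL_TOKENS)            # {'<NULL>': 0, ..., '<UNK>': 3}
+#   toy_vocab.update({'there': 4, 'is': 5})
+#   tokens = tokenize('there is a cube')        # wraps in <START> ... <END>
+#   idxs = encode(tokens, toy_vocab, allow_unk=True)   # OOV words -> <UNK>
+#   decode(idxs, {v: k for k, v in toy_vocab.items()})
+#   # -> ['<START>', 'there', 'is', '<UNK>', '<UNK>', '<END>']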
+
+
+def concat(allDialogs, vocab, percentage, split="train", num_rounds=10):
+    pbar = tqdm(allDialogs)
+    pbar.set_description("[INFO] Encoding data ...")
+
+    captions = []
+    captionProgs = []
+    captionImgIdx = []
+
+    questions = []
+    questionProgs = []
+    questionImgIdx = []
+    questionRounds = []
+
+    histories = []
+    historiesProg = []
+
+    answers = []
+    maxQ = vocab["maxQ"]
+    # maxC = vocab["maxC"]
+    maxP = vocab["maxP"]
+    maxH = maxQ + (num_rounds-1)*(maxQ - 1)
+    maxHistProg = num_rounds * maxP
+
+    questionBins = {}
+    captionBins = {}
+    for imgDialogs in pbar:
+        for dialog in imgDialogs["dialogs"]:
+            if split == "train":
+                if dialog["template"] not in captionBins:
+                    captionBins[dialog["template"]] = {
+                        "captions": [],
+                        "captionProgs": []
+                    }
+
+            caption = tokenize(dialog["caption"], punct_to_keep=[
+                               ';', ','], punct_to_remove=['?', '.'])
+            caption = encode(
+                caption, vocab["text_token_to_idx"], allow_unk=True)
+            history = caption[:-1]  # removes <END> token
+            while len(caption) < maxQ:
+                caption.append(vocab["text_token_to_idx"]["<NULL>"])
+
+            captions.append(caption)
+
+            progC = [dialog["template"]] + \
+                list(map(lambda a: "_".join(a.split(" ")), dialog["args"]))
+            progC = " ".join(progC)
+            progC = tokenize(progC)
+            progC = encode(progC, vocab["prog_token_to_idx"], allow_unk=True)
+            while len(progC) < maxP:
+                progC.append(vocab["prog_token_to_idx"]["<NULL>"])
+
+            captionProgs.append(progC)
+            imgIdx = imgDialogs["image_index"]
+            captionImgIdx.append(imgIdx)
+
+            if split == "train":
+                captionBins[dialog["template"]]["captions"].append(caption)
+                captionBins[dialog["template"]]["captionProgs"].append(progC)
+            while len(history) < maxQ - 1:
+                history.append(vocab["text_token_to_idx"]["<NULL>"])
+
+            historyProg = progC
+            for i, _round in enumerate(dialog["dialog"]):
+                question = tokenize(_round["question"], punct_to_keep=[
+                                    ';', ','], punct_to_remove=['?', '.'])
+                question = encode(
+                    question, vocab["text_token_to_idx"], allow_unk=True)
+                questionH = question[1:-1]  # Delete <START> and <END> tokens
+
+                while len(question) < maxQ:
+                    question.append(vocab["text_token_to_idx"]["<NULL>"])
+
+                prog = [_round["template"]] + \
+                    list(map(lambda a: "_".join(a.split(" ")), _round["args"]))
+                prog = " ".join(prog)
+                prog = tokenize(prog, punct_to_keep=[
+                                ';', ','], punct_to_remove=['?', '.'])
+                prog = encode(prog, vocab["prog_token_to_idx"], allow_unk=True)
+
+                while len(prog) < maxP:
+                    prog.append(vocab["prog_token_to_idx"]["<NULL>"])
+
+                answer = tokenize("_".join(str(_round["answer"]).split(" ")), punct_to_keep=[
+                                  ';', ','], punct_to_remove=['?', '.'])
+                answer = encode(
+                    answer, vocab["text_token_to_idx"], allow_unk=True)
+                assert len(answer) == 3  # answer = <START> ans <END>
+                answer = answer[1]
+                historyPadded = deepcopy(history)
+
+                while len(historyPadded) < maxH - 1:
+                    historyPadded.append(vocab["text_token_to_idx"]["<NULL>"])
+
+                historyProgPadded = deepcopy(historyProg)
+                while len(historyProgPadded) < maxHistProg:
+                    historyProgPadded.append(
+                        vocab["prog_token_to_idx"]["<NULL>"])
+
+                if split == "train":
+                    questionTypeIdx = _round["template"]
+                    if questionTypeIdx not in questionBins:
+                        questionBins[questionTypeIdx] = {
+                            "questions": [],
+                            "questionProgs": [],
+                            "questionImgIdx": [],
+                            "questionRounds": [],
+
+                            "histories": [],
+                            "historiesProg": [],
+                            "answers": [],
+                        }
+
+                    questionBins[questionTypeIdx]["questions"].append(question)
+                    questionBins[questionTypeIdx]["questionProgs"].append(prog)
+                    questionBins[questionTypeIdx]["questionImgIdx"].append(
+                        imgIdx)
+                    questionBins[questionTypeIdx]["questionRounds"].append(i+1)
+
+                    questionBins[questionTypeIdx]["histories"].append(
+                        historyPadded)
+                    questionBins[questionTypeIdx]["historiesProg"].append(
+                        historyProgPadded)
+                    questionBins[questionTypeIdx]["answers"].append(answer)
+                else:
+                    questions.append(question)
+                    questionProgs.append(prog)
+                    histories.append(historyPadded)
+                    historiesProg.append(historyProgPadded)
+                    answers.append(answer)
+                    questionImgIdx.append(imgIdx)
+                    questionRounds.append(i+1)
+
+                while len(questionH) < maxQ-2:
+                    questionH.append(vocab["text_token_to_idx"]["<NULL>"])
+                qaPair = questionH + [answer]
+                history.extend(qaPair)
+                historyProg.extend(prog)
+
+    if split == "train":
+        captions = []
+        captionProgs = []
+
+        questions = []
+        questionProgs = []
+        questionImgIdx = []
+        questionRounds = []
+
+        histories = []
+        historiesProg = []
+        answers = []
+
+        for ctype in captionBins:
+            numTrSamples = int(percentage * len(captionBins[ctype]["captions"]))
+
+            captions.extend(captionBins[ctype]["captions"][:numTrSamples])
+            captionProgs.extend(
+                captionBins[ctype]["captionProgs"][:numTrSamples])
+
+        for qtype in questionBins:
+            numTrSamples = int(percentage *
+                               len(questionBins[qtype]["questions"]))
+
+            questions.extend(questionBins[qtype]["questions"][:numTrSamples])
+            questionProgs.extend(
+                questionBins[qtype]["questionProgs"][:numTrSamples])
+            questionImgIdx.extend(
+                questionBins[qtype]["questionImgIdx"][:numTrSamples])
+            questionRounds.extend(
+                questionBins[qtype]["questionRounds"][:numTrSamples])
+
+            histories.extend(questionBins[qtype]["histories"][:numTrSamples])
+            historiesProg.extend(
+                questionBins[qtype]["historiesProg"][:numTrSamples])
+
+            answers.extend(questionBins[qtype]["answers"][:numTrSamples])
+
+    result = {
+        split: {
+            "captions": captions,
+            "captionProgs": captionProgs,
+            # "captionImgIdx": captionImgIdx,
+
+            "questions": questions,
+            "questionProgs": questionProgs,
+            "questionImgIdx": questionImgIdx,
+            "questionRounds": questionRounds,
+
+            "histories": histories,
+            "historiesProg": historiesProg,
+            "answers": answers,
+        }
+    }
+    return result
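+
+
+# Worked example (illustrative assumption: maxQ = 5, num_rounds = 3) of the
+# flat history layout built by concat() above: the caption contributes
+# maxQ - 1 tokens (its <END> is dropped) and every earlier round appends a
+# question/answer pair of (maxQ - 2) + 1 = maxQ - 1 tokens, so
+#   maxH - 1 = (maxQ - 1) + (num_rounds - 1) * (maxQ - 1) = 4 + 2 * 4 = 12
+# which is exactly the length historyPadded is padded to.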
+
+
+def stack(allDialogs, vocab, percentage, split="train", num_rounds=10):
+    pbar = tqdm(allDialogs)
+    pbar.set_description("[INFO] Encoding data ...")
+
+    captions = []
+    captionProgs = []
+    captionImgIdx = []
+
+    questions = []
+    questionProgs = []
+    questionImgIdx = []
+    questionRounds = []
+
+    histories = []
+    historiesProg = []
+
+    answers = []
+
+    maxQ = vocab["maxQ"]
+    # maxC = vocab["maxC"]
+    maxP = vocab["maxP"]
+    maxHistProg = num_rounds * maxP
+    questionBins = {}
+    captionBins = {}
+
+    for imgDialogs in pbar:
+        for dialog in imgDialogs["dialogs"]:
+            if split == "train":
+                if dialog["template"] not in captionBins:
+                    captionBins[dialog["template"]] = {
+                        "captions": [],
+                        "captionProgs": []
+                    }
+
+            caption = tokenize(dialog["caption"], punct_to_keep=[
+                               ';', ','], punct_to_remove=['?', '.'])
+            caption = encode(
+                caption, vocab["text_token_to_idx"], allow_unk=True)
+            while len(caption) < maxQ:
+                caption.append(vocab["text_token_to_idx"]["<NULL>"])
+            captions.append(caption)
+
+            progC = [dialog["template"]] + \
+                list(map(lambda a: "_".join(a.split(" ")), dialog["args"]))
+            progC = " ".join(progC)
+            progC = tokenize(progC)
+            progC = encode(progC, vocab["prog_token_to_idx"], allow_unk=True)
+            while len(progC) < maxP:
+                progC.append(vocab["prog_token_to_idx"]["<NULL>"])
+
+            captionProgs.append(progC)
+            imgIdx = imgDialogs["image_index"]
+            captionImgIdx.append(imgIdx)
+
+            if split == "train":
+                captionBins[dialog["template"]]["captions"].append(caption)
+                captionBins[dialog["template"]]["captionProgs"].append(progC)
+
+            while len(caption) < maxQ + 1:
+                caption.append(vocab["text_token_to_idx"]["<NULL>"])
+
+            history = np.zeros((num_rounds, maxQ + 1))
+            history[0, :] = caption
+            historyProg = progC
+            for i, _round in enumerate(dialog["dialog"]):
+                question = tokenize(_round["question"], punct_to_keep=[
+                                    ';', ','], punct_to_remove=['?', '.'])
+                question = encode(
+                    question, vocab["text_token_to_idx"], allow_unk=True)
+                questionH = question[0:-1]  # Delete <END> token
+
+                if len(question) < maxQ:
+                    while len(question) < maxQ:
+                        question.append(vocab["text_token_to_idx"]["<NULL>"])
+                else:
+                    question = question[:maxQ]
+
+                prog = [_round["template"]] + \
+                    list(map(lambda a: "_".join(a.split(" ")), _round["args"]))
+                prog = " ".join(prog)
+                prog = tokenize(prog, punct_to_keep=[
+                                ';', ','], punct_to_remove=['?', '.'])
+                prog = encode(prog, vocab["prog_token_to_idx"], allow_unk=True)
+
+                while len(prog) < maxP:
+                    prog.append(vocab["prog_token_to_idx"]["<NULL>"])
+
+                historyProgPadded = deepcopy(historyProg)
+                while len(historyProgPadded) < maxHistProg:
+                    historyProgPadded.append(
+                        vocab["prog_token_to_idx"]["<NULL>"])
+
+                answer = tokenize("_".join(str(_round["answer"]).split(" ")), punct_to_keep=[
+                                  ';', ','], punct_to_remove=['?', '.'])
+                answer = encode(
+                    answer, vocab["text_token_to_idx"], allow_unk=True)
+                assert len(answer) == 3  # answer = <START> ans <END>
+                answer = answer[1]
+
+                if split == "train":
+                    questionTypeIdx = _round["template"]
+                    if questionTypeIdx not in questionBins:
+                        questionBins[questionTypeIdx] = {
+                            "questions": [],
+                            "questionProgs": [],
+                            "questionImgIdx": [],
+                            "questionRounds": [],
+
+                            "histories": [],
+                            "historiesProg": [],
+                            "answers": [],
+
+                        }
+                    questionBins[questionTypeIdx]["questions"].append(question)
+                    questionBins[questionTypeIdx]["questionProgs"].append(prog)
+                    questionBins[questionTypeIdx]["questionImgIdx"].append(
+                        imgIdx)
+                    questionBins[questionTypeIdx]["questionRounds"].append(i+1)
+
+                    questionBins[questionTypeIdx]["histories"].append(
+                        deepcopy(history))
+                    questionBins[questionTypeIdx]["historiesProg"].append(
+                        historyProgPadded)
+                    questionBins[questionTypeIdx]["answers"].append(answer)
+                else:
+                    questions.append(question)
+                    questionProgs.append(prog)
+                    histories.append(deepcopy(history))
+                    historiesProg.append(historyProgPadded)
+                    answers.append(answer)
+                    questionImgIdx.append(imgIdx)
+                    questionRounds.append(i+1)
+
+                while len(questionH) < maxQ-1:
+                    questionH.append(vocab["text_token_to_idx"]["<NULL>"])
+                qaPair = questionH + [answer] + \
+                    [vocab["text_token_to_idx"]["<END>"]]
+                if i < num_rounds - 1:
+                    history[i+1, :] = qaPair
+                historyProg.extend(prog)
+
+    if split == "train":
+        captions = []
+        captionProgs = []
+
+        questions = []
+        questionProgs = []
+        questionImgIdx = []
+        questionRounds = []
+
+        histories = []
+        historiesProg = []
+        answers = []
+
+        for ctype in captionBins:
+            numTrSamples = int(
+                percentage * len(captionBins[ctype]["captions"]))
+
+            captions.extend(captionBins[ctype]["captions"][:numTrSamples])
+            captionProgs.extend(
+                captionBins[ctype]["captionProgs"][:numTrSamples])
+
+        for qtype in questionBins:
+            numTrSamples = int(
+                percentage * len(questionBins[qtype]["questions"]))
+
+            questions.extend(questionBins[qtype]["questions"][:numTrSamples])
+            questionProgs.extend(
+                questionBins[qtype]["questionProgs"][:numTrSamples])
+            questionImgIdx.extend(
+                questionBins[qtype]["questionImgIdx"][:numTrSamples])
+            questionRounds.extend(
+                questionBins[qtype]["questionRounds"][:numTrSamples])
+
+            histories.extend(questionBins[qtype]["histories"][:numTrSamples])
+            historiesProg.extend(
+                questionBins[qtype]["historiesProg"][:numTrSamples])
+
+            answers.extend(questionBins[qtype]["answers"][:numTrSamples])
+
+    result = {
+        split: {
+            "captions": captions,
+            "captionProgs": captionProgs,
+
+            "questions": questions,
+            "questionProgs": questionProgs,
+            "questionImgIdx": questionImgIdx,
+            "questionRounds": questionRounds,
+
+            "histories": histories,
+            "historiesProg": historiesProg,
+            "answers": answers,
+        }
+    }
+
+    return result
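+
+
+# Shape sketch (illustrative): stack() represents the history as a
+# (num_rounds, maxQ + 1) matrix rather than one flat sequence. Row 0 holds
+# the padded caption; row i + 1 holds round i roughly as
+#   <START> q_i ... <NULL> pads ... answer_i <END>
+# This is the layout the "Stack" question encoder consumes round by round.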
+
+
+def main(args):
+    assert not((args.input_vocab_json == "")
+               and (args.output_vocab_json == ""))
+
+    print("[INFO] Loading data ...")
+    with open(args.input_dialogs_json, "r") as f:
+        allDialogs = json.load(f)
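+
+    # Expected input structure (inferred from the field accesses below; an
+    # illustrative sketch, not a schema definition):
+    # allDialogs = [
+    #   {"image_index": 0,
+    #    "dialogs": [
+    #       {"caption": "...", "template": "...", "args": [...],
+    #        "dialog": [{"question": "...", "answer": "...",
+    #                    "template": "...", "args": [...]}, ...]},
+    #       ...]},
+    #   ...]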
+
+    # Either create the vocab or load it from disk
+    if args.input_vocab_json == "":
+        maxQ = 0
+        maxP = 0
+        text = []
+        programs = []
+        answers = []
+        pbar = tqdm(allDialogs)
+        pbar.set_description("[INFO] Building vocab ...")
+        for imgDialogs in pbar:
+            for dialog in imgDialogs["dialogs"]:
+                text.append(dialog["caption"])
+                tokenized_cap = tokenize(
+                    dialog["caption"], punct_to_keep=[
+                        ';', ','], 
punct_to_remove=['?', '.']) + if len(tokenized_cap) > maxQ: + maxQ = len(tokenized_cap) + + prog = [dialog["template"]] + \ + list(map(lambda a: "_".join(a.split(" ")), dialog["args"])) + prog = " ".join(prog) + programs.append(prog) + for _round in dialog["dialog"]: + text.append(_round["question"]) + tokenized_quest = tokenize( + _round["question"], punct_to_keep=[ + ';', ','], punct_to_remove=['?', '.']) + if len(tokenized_quest) > maxQ: + maxQ = len(tokenized_quest) + + prog = [_round["template"]] + \ + list(map(lambda a: "_".join( + a.split(" ")), _round["args"])) + prog = " ".join(prog) + + programs.append(prog) + answers.append("_".join(str(_round["answer"]).split(" "))) + + # print("longest question has {} tokens".format(maxQ)) + answers = list(set(answers)) + text.extend(answers) + answer_token_to_idx = build_vocab( + answers, punct_to_keep=[';', ','], punct_to_remove=['?', '.']) + text_token_to_idx = build_vocab( + text, punct_to_keep=[';', ','], punct_to_remove=['?', '.']) + prog_token_to_idx = build_vocab(programs, punct_to_keep=[ + ';', ','], punct_to_remove=['?', '.']) + + idx_answer_to_token = {v: k for k, v in answer_token_to_idx.items()} + idx_text_to_token = {v: k for k, v in text_token_to_idx.items()} + idx_prog_to_token = {v: k for k, v in prog_token_to_idx.items()} + + vocab = { + "text_token_to_idx": text_token_to_idx, + "prog_token_to_idx": prog_token_to_idx, + "answer_token_to_idx": answer_token_to_idx, + "idx_answer_to_token": idx_answer_to_token, + "idx_text_to_token": idx_text_to_token, + "idx_prog_to_token": idx_prog_to_token, + "maxQ": maxQ, + "maxP": 6, + } + + else: + print("[INFO] Loading vocab ...") + + with open(args.input_vocab_json, 'r') as f: + vocab = json.load(f) + print("[INFO] Vocab loaded from {} ...".format(args.input_vocab_json)) + + if args.output_vocab_json != "": + if not os.path.isdir(os.path.dirname(args.output_vocab_json)): + os.makedirs(os.path.dirname(args.output_vocab_json)) + with open(args.output_vocab_json, 'w') as f: + json.dump(vocab, f) + print("[INFO] Vocab saved to {} ...".format(args.output_vocab_json)) + + # Encode all questions and programs + if args.split == "train": + if args.mode == "stack": + result = stack(allDialogs[args.val_size:], vocab, args.percentage, + split=args.split, num_rounds=args.num_rounds) + elif args.mode == "concat": + result = concat(allDialogs[args.val_size:], vocab, args.percentage, + split=args.split, num_rounds=args.num_rounds) + else: + print("[ERROR] {} is not supported. Choose between 'concat' and 'stack'".format( + args.mode)) + raise ValueError + elif args.split == "val": + if args.mode == "stack": + result = stack(allDialogs[:args.val_size], vocab, 1.0, + split=args.split, num_rounds=args.num_rounds) + elif args.mode == "concat": + result = concat(allDialogs[:args.val_size], vocab, 1.0, + split=args.split, num_rounds=args.num_rounds) + else: + print("[ERROR] {} is not supported. Choose between 'concat' and 'stack'".format( + args.mode)) + raise ValueError + elif args.split == "test": + if args.mode == "stack": + result = stack(allDialogs, vocab, args.percentage, + split=args.split, num_rounds=args.num_rounds) + elif args.mode == "concat": + result = concat(allDialogs, vocab, args.percentage, + split=args.split, num_rounds=args.num_rounds) + else: + print("[ERROR] {} is not supported. 
Choose between 'concat' and 'stack'".format(
+                args.mode))
+            raise ValueError
+    elif args.split == "finetune":
+        if args.mode == "stack":
+            result = stack(allDialogs, vocab, args.percentage,
+                           split=args.split, num_rounds=args.num_rounds)
+        elif args.mode == "concat":
+            result = concat(allDialogs, vocab, args.percentage,
+                            split=args.split, num_rounds=args.num_rounds)
+        else:
+            print("[ERROR] {} is not supported. Choose between 'concat' and 'stack'".format(
+                args.mode))
+            raise ValueError
+    else:
+        print("[ERROR] {} is not supported. Choose between 'train', 'val', 'test', and 'finetune'".format(
+            args.split))
+        raise ValueError
+
+    print("[INFO] Writing output ...")
+
+    if not os.path.isdir(os.path.dirname(args.output_h5_file)):
+        os.makedirs(os.path.dirname(args.output_h5_file))
+
+    for split in result:
+        if split != "train":
+            args.percentage = 1.0
+        with h5py.File(args.output_h5_file.format(split, args.num_rounds, args.percentage), 'w') as f:
+            for dataName in result[split]:
+                try:
+                    data = np.asarray(result[split][dataName], dtype=np.int32)
+                    f.create_dataset(dataName, data=data)
+                except ValueError as e:
+                    print("[INFO] Error raised by {} ...".format(dataName))
+                    raise e
+
+    print("[INFO] Done ...")
+
+
+if __name__ == '__main__':
+    args = parser.parse_args()
+    main(args)
diff --git a/prog_generator/clevrDialog_dataset.py b/prog_generator/clevrDialog_dataset.py
new file mode 100644
index 0000000..dd07c21
--- /dev/null
+++ b/prog_generator/clevrDialog_dataset.py
@@ -0,0 +1,94 @@
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+
+import h5py
+import json
+import os
+import numpy as np
+
+import torch
+from torch.utils.data import Dataset
+
+
+def invertDict(_dict):
+    return {v: k for k, v in _dict.items()}
+
+
+class ClevrDialogDataset(Dataset):
+    def __init__(self, dataPath, vocabPath, split, indStart=0, indEnd=-1):
+        super(ClevrDialogDataset, self).__init__()
+        self.data = h5py.File(dataPath, "r")
+        with open(vocabPath, "r") as f:
+            self.vocab = json.load(f)
+        self.vocab["idx_text_to_token"] = invertDict(self.vocab["text_token_to_idx"])
+        self.vocab["idx_prog_to_token"] = invertDict(self.vocab["prog_token_to_idx"])
+        self.lenVocabText = len(self.vocab["text_token_to_idx"])
+        self.lenVocabProg = len(self.vocab["prog_token_to_idx"])
+
+        self.split = split
+        self.indStart = indStart
+        self.indEnd = indEnd
+        self.maxSamples = indEnd - indStart
+        self.maxLenProg = 6
+
+    def __len__(self):
+        raise NotImplementedError
+
+    def __getitem__(self, index):
+        raise NotImplementedError
+
+
+class ClevrDialogCaptionDataset(ClevrDialogDataset):
+    def __init__(self, dataPath, vocabPath, split, name, indStart=0, indEnd=-1):
+        super(ClevrDialogCaptionDataset, self).__init__(dataPath, vocabPath, split, indStart=indStart, indEnd=indEnd)
+        self.captions = torch.LongTensor(np.asarray(self.data["captions"], dtype=np.int64)[indStart: indEnd])
+        self.captionsPrgs = torch.LongTensor(np.asarray(self.data["captionProgs"], dtype=np.int64)[indStart: indEnd])
+        self.name = name
+
+    def __len__(self):
+        return len(self.captions)
+
+    def __getitem__(self, idx):
+        assert idx < len(self)
+        caption = self.captions[idx][:16]
+        captionPrg = self.captionsPrgs[idx]
+        return caption, captionPrg
+
+
+class ClevrDialogQuestionDataset(ClevrDialogDataset):
+    def __init__(self, dataPath, vocabPath, split, name, train=True, indStart=0, indEnd=-1):
+        super(ClevrDialogQuestionDataset, 
self).__init__(dataPath, vocabPath, split, indStart=indStart, indEnd=indEnd) + self.questions = torch.LongTensor(np.asarray(self.data["questions"], dtype=np.int64)[indStart: indEnd]) + self.quesProgs = torch.LongTensor(np.asarray(self.data["questionProgs"], dtype=np.int64)[indStart: indEnd]) + self.questionRounds = torch.LongTensor(np.asarray(self.data["questionRounds"], dtype=np.int64)[indStart: indEnd]) + self.questionImgIdx = torch.LongTensor(np.asarray(self.data["questionImgIdx"], dtype=np.int64)[indStart: indEnd]) + self.histories = torch.LongTensor(np.asarray(self.data["histories"], dtype=np.int64)[indStart: indEnd]) + self.historiesProgs = torch.LongTensor(np.asarray(self.data["historiesProg"], dtype=np.int64)[indStart: indEnd]) + + self.answers = torch.LongTensor(np.asarray(self.data["answers"], dtype=np.int64)[indStart: indEnd]) + self.name = name + self.train = train + + def __len__(self): + return len(self.questions) + + def __getitem__(self, idx): + assert idx < len(self) + question = self.questions[idx] + questionPrg = self.quesProgs[idx] + questionImgIdx = self.questionImgIdx[idx] + questionRound = self.questionRounds[idx] + + history = self.histories[idx] + historiesProg = self.historiesProgs[idx] + + answer = self.answers[idx] + if self.train: + return question, history, questionPrg, questionRound, answer + else: + return question, questionPrg, questionImgIdx, questionRound, history, historiesProg, answer diff --git a/prog_generator/models.py b/prog_generator/models.py new file mode 100644 index 0000000..da1f037 --- /dev/null +++ b/prog_generator/models.py @@ -0,0 +1,476 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +import torch +import math +import numpy as np +import torch.nn as nn +import torch.nn.functional as F + + +class FC(nn.Module): + def __init__(self, in_size, out_size, dropout_r=0., use_relu=True): + super(FC, self).__init__() + self.dropout_r = dropout_r + self.use_relu = use_relu + + self.linear = nn.Linear(in_size, out_size) + + if use_relu: + self.relu = nn.ReLU(inplace=True) + + if dropout_r > 0: + self.dropout = nn.Dropout(dropout_r) + + def forward(self, x): + x = self.linear(x) + + if self.use_relu: + x = self.relu(x) + + if self.dropout_r > 0: + x = self.dropout(x) + + return x + + +class MLP(nn.Module): + def __init__(self, in_size, mid_size, out_size, dropout_r=0., use_relu=True): + super(MLP, self).__init__() + + self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu) + self.linear = nn.Linear(mid_size, out_size) + + def forward(self, x): + return self.linear(self.fc(x)) + + +class LayerNorm(nn.Module): + def __init__(self, size, eps=1e-6): + super(LayerNorm, self).__init__() + self.eps = eps + + self.a_2 = nn.Parameter(torch.ones(size)) + self.b_2 = nn.Parameter(torch.zeros(size)) + + def forward(self, x): + mean = x.mean(-1, keepdim=True) + std = x.std(-1, keepdim=True) + + return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 + + +class MHAtt(nn.Module): + def __init__(self, opts): + super(MHAtt, self).__init__() + self.opts = opts + + self.linear_v = nn.Linear(opts.hiddenDim, opts.hiddenDim) + self.linear_k = nn.Linear(opts.hiddenDim, opts.hiddenDim) + self.linear_q = nn.Linear(opts.hiddenDim, opts.hiddenDim) + self.linear_merge = nn.Linear(opts.hiddenDim, opts.hiddenDim) + + self.dropout = nn.Dropout(opts.dropout) + + def forward(self, v, k, q, mask): + n_batches = q.size(0) + + v = self.linear_v(v).view( + n_batches, + -1, + self.opts.multiHead, + 
self.opts.hiddenSizeHead
+        ).transpose(1, 2)
+
+        k = self.linear_k(k).view(
+            n_batches,
+            -1,
+            self.opts.multiHead,
+            self.opts.hiddenSizeHead
+        ).transpose(1, 2)
+
+        q = self.linear_q(q).view(
+            n_batches,
+            -1,
+            self.opts.multiHead,
+            self.opts.hiddenSizeHead
+        ).transpose(1, 2)
+
+        atted = self.att(v, k, q, mask)
+        atted = atted.transpose(1, 2).contiguous().view(
+            n_batches,
+            -1,
+            self.opts.hiddenDim
+        )
+
+        atted = self.linear_merge(atted)
+
+        return atted
+
+    def att(self, value, key, query, mask):
+        d_k = query.size(-1)
+
+        scores = torch.matmul(
+            query, key.transpose(-2, -1)
+        ) / math.sqrt(d_k)
+
+        if mask is not None:
+            scores = scores.masked_fill(mask, -1e9)
+
+        att_map = F.softmax(scores, dim=-1)
+        att_map = self.dropout(att_map)
+
+        return torch.matmul(att_map, value)
+
+
+class FFN(nn.Module):
+    def __init__(self, opts):
+        super(FFN, self).__init__()
+
+        self.mlp = MLP(
+            in_size=opts.hiddenDim,
+            mid_size=opts.FeedForwardSize,
+            out_size=opts.hiddenDim,
+            dropout_r=opts.dropout,
+            use_relu=True
+        )
+
+    def forward(self, x):
+        return self.mlp(x)
+
+
+class SA(nn.Module):
+    def __init__(self, opts):
+        super(SA, self).__init__()
+        self.mhatt = MHAtt(opts)
+        self.ffn = FFN(opts)
+
+        self.dropout1 = nn.Dropout(opts.dropout)
+        self.norm1 = LayerNorm(opts.hiddenDim)
+
+        self.dropout2 = nn.Dropout(opts.dropout)
+        self.norm2 = LayerNorm(opts.hiddenDim)
+
+    def forward(self, x, x_mask):
+        x = self.norm1(x + self.dropout1(
+            self.mhatt(x, x, x, x_mask)
+        ))
+
+        x = self.norm2(x + self.dropout2(
+            self.ffn(x)
+        ))
+
+        return x
+
+
+class AttFlat(nn.Module):
+    def __init__(self, opts):
+        super(AttFlat, self).__init__()
+        self.opts = opts
+
+        self.mlp = MLP(
+            in_size=opts.hiddenDim,
+            mid_size=opts.FlatMLPSize,
+            out_size=opts.FlatGlimpses,
+            dropout_r=opts.dropout,
+            use_relu=True
+        )
+        # FLAT_GLIMPSES = 1
+        self.linear_merge = nn.Linear(
+            opts.hiddenDim * opts.FlatGlimpses,
+            opts.FlatOutSize
+        )
+
+    def forward(self, x, x_mask):
+        att = self.mlp(x)
+        att = att.masked_fill(
+            x_mask.squeeze(1).squeeze(1).unsqueeze(2),
+            -1e9
+        )
+        att = F.softmax(att, dim=1)
+
+        att_list = []
+        for i in range(self.opts.FlatGlimpses):
+            att_list.append(
+                torch.sum(att[:, :, i: i + 1] * x, dim=1)
+            )
+
+        x_atted = torch.cat(att_list, dim=1)
+        x_atted = self.linear_merge(x_atted)
+
+        return x_atted
+
+
+class CaptionEncoder(nn.Module):
+    def __init__(self, opts, textVocabSize):
+        super(CaptionEncoder, self).__init__()
+        self.embedding = nn.Embedding(textVocabSize, opts.embedDim)
+        bidirectional = opts.bidirectional > 0
+        self.lstmC = nn.LSTM(
+            input_size=opts.embedDim,
+            hidden_size=opts.hiddenDim,
+            num_layers=opts.numLayers,
+            batch_first=True,
+            bidirectional=bidirectional
+        )
+        if bidirectional:
+            opts.hiddenDim *= 2
+            opts.hiddenSizeHead *= 2
+            opts.FlatOutSize *= 2
+
+        self.attCap = nn.ModuleList([SA(opts) for _ in range(opts.layers)])
+        self.attFlatCap = AttFlat(opts)
+        self.fc = nn.Linear(opts.hiddenDim, opts.hiddenDim)
+
+    def forward(self, cap, hist=None):
+        capMask = self.make_mask(cap.unsqueeze(2))
+        cap = self.embedding(cap)
+        cap, (_, _) = self.lstmC(cap)
+        capO = cap.detach().clone()
+
+        for attC in self.attCap:
+            cap = attC(cap, capMask)
+        # (batchSize, 512)
+        cap = self.attFlatCap(cap, capMask)
+        encOut = self.fc(cap)
+        return encOut, capO
+
+    # Masking (needed by forward(); same helper as in the question encoders)
+    def make_mask(self, feature):
+        return (torch.sum(
+            torch.abs(feature),
+            dim=-1
+        ) == 0).unsqueeze(1).unsqueeze(2)
+
+
+class QuestEncoder_1(nn.Module):
+    """
+    Concat encoder
+    """
+    def __init__(self, opts, textVocabSize):
+        super(QuestEncoder_1, self).__init__()
+        bidirectional = opts.bidirectional > 0
+
+        self.embedding = nn.Embedding(textVocabSize, 
opts.embedDim) + self.lstmQ = nn.LSTM( + input_size=opts.embedDim, + hidden_size=opts.hiddenDim, + num_layers=opts.numLayers, + bidirectional=bidirectional, + batch_first=True + ) + + self.lstmH = nn.LSTM( + input_size=opts.embedDim, + hidden_size=opts.hiddenDim, + num_layers=opts.numLayers, + bidirectional=bidirectional, + batch_first=True) + + if bidirectional: + opts.hiddenDim *= 2 + opts.hiddenSizeHead *= 2 + opts.FlatOutSize *= 2 + self.attQues = nn.ModuleList([SA(opts) for _ in range(opts.layers)]) + self.attHist = nn.ModuleList([SA(opts) for _ in range(opts.layers)]) + + self.attFlatQuest = AttFlat(opts) + self.fc = nn.Linear(2 * opts.hiddenDim, opts.hiddenDim) + + def forward(self, quest, hist): + questMask = self.make_mask(quest.unsqueeze(2)) + histMask = self.make_mask(hist.unsqueeze(2)) + + # quest = F.tanh(self.embedding(quest)) + quest = self.embedding(quest) + + quest, (_, _) = self.lstmQ(quest) + questO = quest.detach().clone() + + hist = self.embedding(hist) + hist, (_, _) = self.lstmH(hist) + + for attQ, attH in zip(self.attQues, self.attHist): + quest = attQ(quest, questMask) + hist = attH(hist, histMask) + # (batchSize, 512) + quest = self.attFlatQuest(quest, questMask) + + # hist: (batchSize, length, 512) + attWeights = torch.sum(torch.mul(hist, quest.unsqueeze(1)), -1) + attWeights = torch.softmax(attWeights, -1) + hist = torch.sum(torch.mul(hist, attWeights.unsqueeze(2)), 1) + encOut = self.fc(torch.cat([quest, hist], -1)) + + return encOut, questO + + # Masking + def make_mask(self, feature): + return (torch.sum( + torch.abs(feature), + dim=-1 + ) == 0).unsqueeze(1).unsqueeze(2) + + +class QuestEncoder_2(nn.Module): + """ + Stack encoder + """ + def __init__(self, opts, textVocabSize): + super(QuestEncoder_2, self).__init__() + bidirectional = opts.bidirectional > 0 + self.embedding = nn.Embedding(textVocabSize, opts.embedDim) + self.lstmQ = nn.LSTM( + input_size=opts.embedDim, + hidden_size=opts.hiddenDim, + num_layers=opts.numLayers, + batch_first=True, + bidirectional=bidirectional, + ) + + self.lstmH = nn.LSTM( + input_size=opts.embedDim, + hidden_size=opts.hiddenDim, + num_layers=opts.numLayers, + batch_first=True, + bidirectional=bidirectional, + ) + if bidirectional: + opts.hiddenDim *= 2 + + self.fc = nn.Linear(2 * opts.hiddenDim, opts.hiddenDim) + + def forward(self, quest, hist): + + quest = F.tanh(self.embedding(quest)) + quest, (questH, _) = self.lstmQ(quest) + + # concatenate the last hidden states from the forward and backward pass + # of the bidirectional lstm + lastHiddenForward = questH[1:2, :, :].squeeze(0) + lastHiddenBackward = questH[3:4, :, :].squeeze(0) + + # questH: (batchSize, 512) + questH = torch.cat([lastHiddenForward, lastHiddenBackward], -1) + + questO = quest.detach().clone() + + hist = F.tanh(self.embedding(hist)) + numRounds = hist.size(1) + histFeat = [] + for i in range(numRounds): + round_i = hist[:, i, :, :] + _, (round_i_h, _) = self.lstmH(round_i) + + #Same as before + lastHiddenForward = round_i_h[1:2, :, :].squeeze(0) + lastHiddenBackward = round_i_h[3:4, :, :].squeeze(0) + histFeat.append(torch.cat([lastHiddenForward, lastHiddenBackward], -1)) + + # hist: (batchSize, rounds, 512) + histFeat = torch.stack(histFeat, 1) + attWeights = torch.sum(torch.mul(histFeat, questH.unsqueeze(1)), -1) + attWeights = torch.softmax(attWeights, -1) + histFeat = torch.sum(torch.mul(histFeat, attWeights.unsqueeze(2)), 1) + encOut = self.fc(torch.cat([questH, histFeat], -1)) + return encOut, questO + + +class Decoder(nn.Module): + def 
__init__(self, opts, progVocabSize, maxLen, startID=1, endID=2):
+        super(Decoder, self).__init__()
+        self.numLayers = opts.numLayers
+        self.bidirectional = opts.bidirectional > 0
+        self.maxLen = maxLen
+        self.startID = startID
+        self.endID = endID
+
+        self.embedding = nn.Embedding(progVocabSize, opts.embedDim)
+        self.lstmProg = nn.LSTM(
+            input_size=opts.embedDim,
+            hidden_size=2*opts.hiddenDim if self.bidirectional else opts.hiddenDim,
+            num_layers=opts.numLayers,
+            batch_first=True,
+            # bidirectional=self.bidirectional,
+        )
+        hiddenDim = opts.hiddenDim
+        if self.bidirectional:
+            hiddenDim *= 2
+
+        self.fcAtt = nn.Linear(2*hiddenDim, hiddenDim)
+        self.fcOut = nn.Linear(hiddenDim, progVocabSize)
+
+    def initPrgHidden(self, encOut):
+        hidden = [encOut for _ in range(self.numLayers)]
+        hidden = torch.stack(hidden, 0).contiguous()
+        return hidden, hidden
+
+    def forwardStep(self, prog, progH, questO):
+        batchSize = prog.size(0)
+        inputDim = questO.size(1)
+        prog = self.embedding(prog)
+        outProg, progH = self.lstmProg(prog, progH)
+
+        att = torch.bmm(outProg, questO.transpose(1, 2))
+        att = F.softmax(att.view(-1, inputDim), 1).view(batchSize, -1, inputDim)
+        context = torch.bmm(att, questO)
+        # (batchSize, progLength, hiddenDim)
+        out = F.tanh(self.fcAtt(torch.cat([outProg, context], dim=-1)))
+
+        # (batchSize, progLength, progVocabSize)
+        out = self.fcOut(out)
+        predSoftmax = F.log_softmax(out, 2)
+        return predSoftmax, progH
+
+    def forward(self, prog, encOut, questO):
+        progH = self.initPrgHidden(encOut)
+        predSoftmax, progH = self.forwardStep(prog, progH, questO)
+
+        return predSoftmax, progH
+
+    def sample(self, encOut, questO):
+        batchSize = encOut.size(0)
+        cudaFlag = encOut.is_cuda
+        progH = self.initPrgHidden(encOut)
+        prog = torch.LongTensor(batchSize, 1).fill_(self.startID)
+        if cudaFlag:
+            prog = prog.cuda()
+        outputLogProbs = []
+        outputTokens = []
+
+        def decode(i, output):
+            tokens = output.topk(1, dim=-1)[1].view(batchSize, -1)
+            return tokens
+
+        for i in range(self.maxLen):
+            predSoftmax, progH = self.forwardStep(prog, progH, questO)
+            prog = decode(i, predSoftmax)
+            # Record the greedy token and its log-probability at every step;
+            # otherwise sample() returns empty lists and SeqToSeqQ.sample()
+            # has nothing to stack.
+            outputTokens.append(prog.squeeze(1))
+            outputLogProbs.append(
+                predSoftmax.squeeze(1).gather(1, prog).squeeze(1))
+
+        return outputTokens, outputLogProbs
+
+
+class SeqToSeqC(nn.Module):
+    def __init__(self, encoder, decoder):
+        super(SeqToSeqC, self).__init__()
+        self.encoder = encoder
+        self.decoder = decoder
+
+    def forward(self, cap, imgFeat, prog):
+        encOut, capO = self.encoder(cap, imgFeat)
+        predSoftmax, progHC = self.decoder(prog, encOut, capO)
+        return predSoftmax, progHC
+
+
+class SeqToSeqQ(nn.Module):
+    def __init__(self, encoder, decoder):
+        super(SeqToSeqQ, self).__init__()
+        self.encoder = encoder
+        self.decoder = decoder
+
+    def forward(self, quest, hist, prog):
+        encOut, questO = self.encoder(quest, hist)
+        predSoftmax, progHC = self.decoder(prog, encOut, questO)
+        return predSoftmax, progHC
+
+    def sample(self, quest, hist):
+        with torch.no_grad():
+            encOut, questO = self.encoder(quest, hist)
+            outputTokens, outputLogProbs = self.decoder.sample(encOut, questO)
+            outputTokens = torch.stack(outputTokens, 0).transpose(0, 1)
+        return outputTokens
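+
+
+# Rough usage sketch (illustrative only; `opts` and the vocab sizes are
+# assumed to come from the option parsers and dataset classes in this repo):
+#   encoder = QuestEncoder_1(opts, textVocabSize)          # "Concat" encoder
+#   decoder = Decoder(opts, progVocabSize, maxLen=6, startID=1, endID=2)
+#   model = SeqToSeqQ(encoder, decoder)
+#   tokens = model.sample(quest, hist)   # quest: (B, maxQ), hist: (B, maxH - 1)
+#   # tokens: (B, maxLen) program token ids; map back with idx_prog_to_token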
diff --git a/prog_generator/optim.py b/prog_generator/optim.py
new file mode 100644
index 0000000..453fb38
--- /dev/null
+++ b/prog_generator/optim.py
@@ -0,0 +1,79 @@
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+
+# --------------------------------------------------------
+# adapted from https://github.com/MILVLG/mcan-vqa/blob/master/core/model/optim.py
+# --------------------------------------------------------
+
+import torch
+import torch.optim as Optim
+
+
+class WarmupOptimizer(object):
+    def __init__(self, lr_base, optimizer, data_size, batch_size):
+        self.optimizer = optimizer
+        self._step = 0
+        self.lr_base = lr_base
+        self._rate = 0
+        self.data_size = data_size
+        self.batch_size = batch_size
+
+    def step(self):
+        self._step += 1
+
+        rate = self.rate()
+        for p in self.optimizer.param_groups:
+            p['lr'] = rate
+        self._rate = rate
+
+        self.optimizer.step()
+
+    def zero_grad(self):
+        self.optimizer.zero_grad()
+
+    def rate(self, step=None):
+        if step is None:
+            step = self._step
+
+        if step <= int(self.data_size / self.batch_size * 1):
+            r = self.lr_base * 1/2.
+        else:
+            r = self.lr_base
+
+        return r
+
+
+def get_optim(opts, model, data_size, lr_base=None):
+    if lr_base is None:
+        lr_base = opts.lr
+
+    if opts.optim == 'adam':
+        optim = Optim.Adam(
+            filter(lambda p: p.requires_grad, model.parameters()),
+            lr=0,
+            betas=opts.betas,
+            eps=opts.eps,
+
+        )
+    elif opts.optim == 'rmsprop':
+        optim = Optim.RMSprop(
+            filter(lambda p: p.requires_grad, model.parameters()),
+            lr=0,
+            eps=opts.eps,
+            weight_decay=opts.weight_decay
+        )
+    else:
+        raise ValueError('{} optimizer is not supported'.format(opts.optim))
+    return WarmupOptimizer(
+        lr_base,
+        optim,
+        data_size,
+        opts.batch_size
+    )
+
+
+def adjust_lr(optim, decay_r):
+    optim.lr_base *= decay_r
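+
+
+# Minimal usage sketch (illustrative; `opts` is assumed to carry the fields
+# read above: optim, lr, betas, eps, weight_decay and batch_size):
+#   optim = get_optim(opts, model, data_size=len(dataset))
+#   for batch in loader:
+#       optim.zero_grad()
+#       loss = compute_loss(batch)   # hypothetical loss helper
+#       loss.backward()
+#       optim.step()                 # warmup: half lr during the first epoch
+#   adjust_lr(optim, opts.lr_decay_factor)   # call at each lr decay mark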
diff --git a/prog_generator/options_caption_parser.py b/prog_generator/options_caption_parser.py
new file mode 100644
index 0000000..db8c653
--- /dev/null
+++ b/prog_generator/options_caption_parser.py
@@ -0,0 +1,283 @@
+
+"""
+author: Adnen Abdessaied
+maintainer: "Adnen Abdessaied"
+website: adnenabdessaied.de
+version: 1.0.1
+"""
+# --------------------------------------------------------
+# adapted from https://github.com/kexinyi/ns-vqa/blob/master/scene_parse/attr_net/options.py
+# --------------------------------------------------------
+
+
+import argparse
+import os
+import utils
+import torch
+
+
+class Options():
+    def __init__(self):
+        self.parser = argparse.ArgumentParser()
+        self.initialized = False
+
+    def initialize(self):
+        self.parser.add_argument(
+            '--mode',
+            required=True,
+            type=str,
+            choices=['train', 'test'],
+            help='The mode of the experiment')
+
+        self.parser.add_argument(
+            '--run_dir',
+            required=True,
+            type=str,
+            help='The experiment directory')
+
+        self.parser.add_argument(
+            '--load_checkpoint_path',
+            default=None,
+            type=str,
+            help='The path to the pretrained CaptionNet')
+
+        self.parser.add_argument(
+            '--res_path',
+            required=True,
+            type=str,
+            help='Path where to log the predicted caption programs')
+
+        self.parser.add_argument(
+            '--gpu_ids',
+            default='0',
+            type=str,
+            help='Id of the gpu to be used')
+
+        self.parser.add_argument(
+            '--seed',
+            default=42,
+            type=int,
+            help='The seed used in training')
+
+        self.parser.add_argument(
+            '--dataPathTr',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed training data')
+
+        self.parser.add_argument(
+            '--dataPathVal',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed validation data')
+
+        self.parser.add_argument(
+            '--dataPathTest',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed test data')
+
+        self.parser.add_argument(
+            '--vocabPath',
+            required=True,
+            type=str,
+            help='Path to the generated vocabulary')
+
+        self.parser.add_argument(
+            '--batch_size',
+            default=64,
+            type=int,
+            help='Batch size')
+
+        self.parser.add_argument(
+            '--num_workers',
+            default=0,
+            type=int,
+            help='Number of workers for loading')
+
+        self.parser.add_argument(
+            '--num_iters',
+            default=5000,
+            type=int,
+            help='Total number of iterations')
+
+        self.parser.add_argument(
+            '--display_every',
+            default=5,
+            type=int,
+            help='Display training information every N iterations')
+
+        self.parser.add_argument(
+            '--debug_every',
+            default=100,
+            type=int,
+            help='Display debug message every N iterations')
+
+        self.parser.add_argument(
+            '--validate_every',
+            default=1000,
+            type=int,
+            help='Validate every N iterations')
+
+        self.parser.add_argument(
+            '--shuffle_data',
+            default=1,
+            type=int,
+            help='Activate to shuffle the training data')
+
+        self.parser.add_argument(
+            '--optim',
+            default='adam',
+            type=str,
+            help='The name of the optimizer to be used')
+
+        self.parser.add_argument(
+            '--lr',
+            default=1e-3,
+            type=float,
+            help='Base learning rate')
+
+        self.parser.add_argument(
+            '--betas',
+            default='0.9, 0.98',
+            type=str,
+            help='Adam optimizer\'s betas')
+
+        self.parser.add_argument(
+            '--eps',
+            default='1e-9',
+            type=float,
+            help='Adam optimizer\'s epsilon')
+
+        self.parser.add_argument(
+            '--lr_decay_marks',
+            default='50000, 55000',
+            type=str,
+            help='Learning rate decay marks')
+
+        self.parser.add_argument(
+            '--lr_decay_factor',
+            default=0.5,
+            type=float,
+            help='Learning rate decay factor')
+
+        self.parser.add_argument(
+            '--weight_decay',
+            default=1e-6,
+            type=float,
+            help='Weight decay')
+
+        self.parser.add_argument(
+            '--embedDim',
+            default=300,
+            type=int,
+            help='Embedding dimension')
+
+        self.parser.add_argument(
+            '--hiddenDim',
+            default=512,
+            type=int,
+            help='LSTM hidden dimension')
+
+        self.parser.add_argument(
+            '--numLayers',
+            default=2,
+            type=int,
+            help='Number of hidden LSTM layers')
+
+        self.parser.add_argument(
+            '--dropout',
+            default=0.1,
+            type=float,
+            help='Dropout value')
+
+        self.parser.add_argument(
+            '--multiHead',
+            default=8,
+            type=int,
+            help='Number of attention heads')
+
+        self.parser.add_argument(
+            '--hiddenSizeHead',
+            default=64,
+            type=int,
+            help='Dimension of each attention head')
+
+        self.parser.add_argument(
+            '--FeedForwardSize',
+            default=2048,
+            type=int,
+            help='Dimension of the feed forward layer')
+
+        self.parser.add_argument(
+            '--FlatMLPSize',
+            default=512,
+            type=int,
+            help='MLP flatten size')
+
+        self.parser.add_argument(
+            '--FlatGlimpses',
+            default=1,
+            type=int,
+            help='Number of flatten glimpses')
+
+        self.parser.add_argument(
+            '--FlatOutSize',
+            default=512,
+            type=int,
+            help='Final attention reduction dimension')
+
+        self.parser.add_argument(
+            '--layers',
+            default=6,
+            type=int,
+            help='Number of self attention layers')
+
+        self.parser.add_argument(
+            '--bidirectional',
+            default=1,
+            type=int,
+            help='Activate to use bidirectional LSTMs')
+
+        self.initialized = True
+
+    def parse(self):
+        # initialize parser
+        if not self.initialized:
+            self.initialize()
+        self.opts = self.parser.parse_args()
+
+        # parse gpu id list
+        str_gpu_ids = self.opts.gpu_ids.split(',')
+        self.opts.gpu_ids = []
+        for str_id in str_gpu_ids:
+            if str_id.isdigit() and int(str_id) >= 0:
+                self.opts.gpu_ids.append(int(str_id))
+        if len(self.opts.gpu_ids) > 0 and torch.cuda.is_available():
+            print('\n[INFO] Using {} CUDA device(s) ...'.format(len(self.opts.gpu_ids)))
+        else:
+            print('\n[INFO] Using cpu ...')
+            self.opts.gpu_ids = []
+
+        # parse the optimizer's 
betas and lr decay marks + self.opts.betas = [float(beta) for beta in self.opts.betas.split(',')] + lr_decay_marks = [int(m) for m in self.opts.lr_decay_marks.split(',')] + for i in range(1, len(lr_decay_marks)): + assert lr_decay_marks[i] > lr_decay_marks[i-1] + self.opts.lr_decay_marks = lr_decay_marks + + # print and save options + args = vars(self.opts) + print('\n ' + 30*'-' + 'Opts' + 30*'-') + for k, v in args.items(): + print('%s: %s' % (str(k), str(v))) + + if not os.path.isdir(self.opts.run_dir): + os.makedirs(self.opts.run_dir) + filename = 'opts.txt' + file_path = os.path.join(self.opts.run_dir, filename) + with open(file_path, 'wt') as fout: + fout.write('| options\n') + for k, v in sorted(args.items()): + fout.write('%s: %s\n' % (str(k), str(v))) + return self.opts diff --git a/prog_generator/options_question_parser.py b/prog_generator/options_question_parser.py new file mode 100644 index 0000000..075841d --- /dev/null +++ b/prog_generator/options_question_parser.py @@ -0,0 +1,326 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" +# -------------------------------------------------------- +# adapted from https://github.com/kexinyi/ns-vqa/blob/master/scene_parse/attr_net/options.py +# -------------------------------------------------------- + +import argparse +import os +import utils +import torch + + +class Options(): + def __init__(self): + self.parser = argparse.ArgumentParser() + self.initialized = False + + def initialize(self): + self.parser.add_argument( + '--mode', + required=True, + type=str, + choices=['train', 'test_with_gt', 'test_with_pred'], + help='The mode of the experiment') + + self.parser.add_argument( + '--run_dir', + required=True, + type=str, + help='The experiment directory') + + # self.parser.add_argument('--dataset', default='clevr', type=str, help='dataset') + self.parser.add_argument( + '--text_log_dir', + required=True, + type=str, + help='File to save the logged text') + + self.parser.add_argument( + '--questionNetPath', + default='', + type=str, + help='Path to the pretrained QuestionNet that will be used for testing.') + + self.parser.add_argument( + '--captionNetPath', + default='', + type=str, + help='Path to the pretrained CaptionNet that will be used for testing.') + + self.parser.add_argument( + '--dialogLen', + default=10, + type=int, + help='Length of the dialogs to be used for testing. We used 10, 15, and 20 in our experiments.') + + self.parser.add_argument( + '--last_n_rounds', + default=10, + type=int, + help='Number of the last rounds to consider in the history. We used 1, 2, 3, 4, and 10 in our experiments. 
')
+
+        self.parser.add_argument(
+            '--encoderType',
+            required=True,
+            type=int,
+            choices=[1, 2],
+            help='Type of the encoder: 1 --> Concat, 2 --> Stack')
+
+        self.parser.add_argument(
+            '--load_checkpoint_path',
+            default='None',
+            type=str,
+            help='Path to a QuestionNet checkpoint to resume training')
+
+        self.parser.add_argument(
+            '--gpu_ids',
+            default='0',
+            type=str,
+            help='Id of the gpu to be used')
+
+        self.parser.add_argument(
+            '--useCuda',
+            default=1,
+            type=int,
+            help='Activate to run on a CUDA device if available (read by train_question_parser)')
+
+        self.parser.add_argument(
+            '--seed',
+            default=42,
+            type=int,
+            help='The seed used in training')
+
+        self.parser.add_argument(
+            '--dataPathTr',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed training data')
+
+        self.parser.add_argument(
+            '--dataPathVal',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed validation data')
+
+        self.parser.add_argument(
+            '--dataPathTest',
+            required=True,
+            type=str,
+            help='Path to the h5 file of the Clevr-Dialog preprocessed test data')
+
+        self.parser.add_argument(
+            '--scenesPath',
+            required=True,
+            type=str,
+            help='Path to the derendered clevr-dialog scenes')
+
+        self.parser.add_argument(
+            '--vocabPath',
+            required=True,
+            type=str,
+            help='Path to the generated vocabulary')
+
+        self.parser.add_argument(
+            '--batch_size',
+            default=64,
+            type=int,
+            help='Batch size')
+
+        self.parser.add_argument(
+            '--countFirstFailueRound',
+            default=0,
+            type=int,
+            help='If activated, we count the first failure round')
+
+        self.parser.add_argument(
+            '--maxSamples',
+            default=-1,
+            type=int,
+            help='Maximum number of training samples')
+
+        self.parser.add_argument(
+            '--num_workers',
+            default=0,
+            type=int,
+            help='Number of workers for loading')
+
+        self.parser.add_argument(
+            '--num_iters',
+            default=5000,
+            type=int,
+            help='Total number of iterations')
+
+        self.parser.add_argument(
+            '--display_every',
+            default=5,
+            type=int,
+            help='Display training information every N iterations')
+
+        self.parser.add_argument(
+            '--validate_every',
+            default=1000,
+            type=int,
+            help='Validate every N iterations')
+
+        self.parser.add_argument(
+            '--shuffle_data',
+            default=1,
+            type=int,
+            help='Activate to shuffle the training data')
+
+        self.parser.add_argument(
+            '--optim',
+            default='adam',
+            type=str,
+            help='The name of the optimizer to be used')
+
+        self.parser.add_argument(
+            '--lr',
+            default=1e-3,
+            type=float,
+            help='Base learning rate')
+
+        self.parser.add_argument(
+            '--betas',
+            default='0.9, 0.98',
+            type=str,
+            help='Adam optimizer\'s betas')
+
+        self.parser.add_argument(
+            '--eps',
+            default='1e-9',
+            type=float,
+            help='Adam optimizer\'s epsilon')
+
+        self.parser.add_argument(
+            '--lr_decay_marks',
+            default='50000, 55000',
+            type=str,
+            help='Learning rate decay marks')
+
+        self.parser.add_argument(
+            '--lr_decay_factor',
+            default=0.5,
+            type=float,
+            help='Learning rate decay factor')
+
+        self.parser.add_argument(
+            '--weight_decay',
+            default=1e-6,
+            type=float,
+            help='Weight decay')
+
+        self.parser.add_argument(
+            '--embedDim',
+            default=300,
+            type=int,
+            help='Embedding dimension')
+
+        self.parser.add_argument(
+            '--hiddenDim',
+            default=512,
+            type=int,
+            help='LSTM hidden dimension')
+
+        self.parser.add_argument(
+            '--numLayers',
+            default=2,
+            type=int,
+            help='Number of hidden LSTM layers')
+
+        self.parser.add_argument(
+            '--dropout',
+            default=0.1,
+            type=float,
+            help='Dropout value')
+
+        self.parser.add_argument(
+            '--multiHead',
+            default=8,
+            type=int,
+            help='Number of attention heads')
+
+        self.parser.add_argument(
+            '--hiddenSizeHead', 
+ default=64, + type=int, + help='Dimension of each attention head') + + self.parser.add_argument( + '--FeedForwardSize', + default=2048, + type=int, + help='Dimension of the feed forward layer') + + self.parser.add_argument( + '--FlatMLPSize', + default=512, + type=int, + help='MLP flatten size') + + self.parser.add_argument( + '--FlatGlimpses', + default=1, + type=int, + help='Number of flatten glimpses') + + self.parser.add_argument( + '--FlatOutSize', + default=512, + type=int, + help='Final attention reduction dimension') + + self.parser.add_argument( + '--layers', + default=6, + type=int, + help='Number of self attention layers') + + self.parser.add_argument( + '--bidirectional', + default=1, + type=int, + help='Activate to use bidirectional LSTMs') + + self.initialized = True + + def parse(self): + # initialize parser + if not self.initialized: + self.initialize() + self.opts = self.parser.parse_args() + + # parse gpu id list + str_gpu_ids = self.opts.gpu_ids.split(',') + self.opts.gpu_ids = [] + for str_id in str_gpu_ids: + if str_id.isdigit() and int(str_id) >= 0: + self.opts.gpu_ids.append(int(str_id)) + if len(self.opts.gpu_ids) > 0 and torch.cuda.is_available(): + print('\n[INFO] Using {} CUDA device(s) ...'.format( + len(self.opts.gpu_ids))) + else: + print('\n[INFO] Using cpu ...') + self.opts.gpu_ids = [] + + # parse the optimizer's betas and lr decay marks + self.opts.betas = [float(beta) for beta in self.opts.betas.split(',')] + lr_decay_marks = [int(m) for m in self.opts.lr_decay_marks.split(',')] + for i in range(1, len(lr_decay_marks)): + assert lr_decay_marks[i] > lr_decay_marks[i-1] + self.opts.lr_decay_marks = lr_decay_marks + + # print and save options + args = vars(self.opts) + print('\n ' + 30*'-' + 'Opts' + 30*'-') + for k, v in args.items(): + print('%s: %s' % (str(k), str(v))) + + if not os.path.isdir(self.opts.run_dir): + os.makedirs(self.opts.run_dir) + filename = 'opts.txt' + file_path = os.path.join(self.opts.run_dir, filename) + with open(file_path, 'wt') as fout: + fout.write('| options\n') + for k, v in sorted(args.items()): + fout.write('%s: %s\n' % (str(k), str(v))) + return self.opts diff --git a/prog_generator/train_caption_parser.py b/prog_generator/train_caption_parser.py new file mode 100644 index 0000000..b385547 --- /dev/null +++ b/prog_generator/train_caption_parser.py @@ -0,0 +1,280 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +from clevrDialog_dataset import ClevrDialogCaptionDataset +from models import SeqToSeqC, CaptionEncoder, Decoder +from optim import get_optim, adjust_lr +from options_caption_parser import Options +import os, json, torch, pickle, copy, time +import numpy as np +import torch.nn as nn +import torch.utils.data as Data +from tensorboardX import SummaryWriter + + +class Execution: + def __init__(self, opts): + self.opts = opts + + self.loss_fn = torch.nn.NLLLoss().cuda() + + print("[INFO] Loading dataset ...") + + self.dataset_tr = ClevrDialogCaptionDataset( + opts.dataPathTr, opts.vocabPath, "train", "Captions Tr") + + self.dataset_val = ClevrDialogCaptionDataset( + opts.dataPathVal, opts.vocabPath, "val", "Captions Val") + + self.dataset_test = ClevrDialogCaptionDataset( + opts.dataPathTest, opts.vocabPath, "test", "Captions Test") + + tb_path = os.path.join(opts.run_dir, "tb_logdir") + if not os.path.isdir(tb_path): + os.makedirs(tb_path) + + self.ckpt_path = os.path.join(opts.run_dir, "ckpt_dir") + if not os.path.isdir(self.ckpt_path): + 
os.makedirs(self.ckpt_path)
+
+        self.writer = SummaryWriter(tb_path)
+        self.iter_val = 0
+        self.bestValAcc = float("-inf")
+        self.bestValIter = -1
+
+    def constructNet(self, lenVocabText, lenVocabProg, maxLenProg, ):
+        decoder = Decoder(self.opts, lenVocabProg, maxLenProg)
+        encoder = CaptionEncoder(self.opts, lenVocabText)
+        net = SeqToSeqC(encoder, decoder)
+        return net
+
+    def train(self, dataset, dataset_val=None):
+        # Obtain needed information
+        lenVocabText = dataset.lenVocabText
+        lenVocabProg = dataset.lenVocabProg
+        maxLenProg = dataset.maxLenProg
+        net = self.constructNet(lenVocabText, lenVocabProg, maxLenProg)
+
+        net.cuda()
+        net.train()
+
+        # Define the multi-gpu training if needed
+        if len(self.opts.gpu_ids) > 1:
+            net = nn.DataParallel(net, device_ids=self.opts.gpu_ids)
+
+        # Load checkpoint if resume training
+        if self.opts.load_checkpoint_path is not None:
+            print("[INFO] Resume training from ckpt {} ...".format(
+                self.opts.load_checkpoint_path
+            ))
+
+            # Load the network parameters
+            ckpt = torch.load(self.opts.load_checkpoint_path)
+            print("[INFO] Checkpoint successfully loaded ...")
+            net.load_state_dict(ckpt['state_dict'])
+
+            # Load the optimizer parameters
+            optim = get_optim(self.opts, net, len(dataset), lr_base=ckpt['lr_base'])
+            optim.optimizer.load_state_dict(ckpt['optimizer'])
+
+        else:
+            optim = get_optim(self.opts, net, len(dataset))
+        _iter = 0
+        epoch = 0
+
+        # Define dataloader
+        dataloader = Data.DataLoader(
+            dataset,
+            batch_size=self.opts.batch_size,
+            shuffle=self.opts.shuffle_data,
+            num_workers=self.opts.num_workers,
+        )
+        _iterCur = 0
+        _totalCur = len(dataloader)
+        # Training loop
+        while _iter < self.opts.num_iters:
+            # Learning Rate Decay
+            if _iter in self.opts.lr_decay_marks:
+                adjust_lr(optim, self.opts.lr_decay_factor)
+
+            time_start = time.time()
+            # Iteration
+            for caption, captionPrg in dataloader:
+                if _iter >= self.opts.num_iters:
+                    break
+                caption = caption.cuda()
+                captionPrg = captionPrg.cuda()
+                captionPrgTarget = captionPrg.clone()
+                optim.zero_grad()
+
+                predSoftmax, _ = net(caption, captionPrg)
+
+                loss = self.loss_fn(
+                    predSoftmax[:, :-1, :].contiguous().view(-1, predSoftmax.size(2)),
+                    captionPrgTarget[:, 1:].contiguous().view(-1))
+                loss.backward()
+
+                # logging
+                self.writer.add_scalar(
+                    'train/loss',
+                    loss.cpu().data.numpy(),
+                    global_step=_iter)
+
+                self.writer.add_scalar(
+                    'train/lr',
+                    optim._rate,
+                    global_step=_iter)
+                if _iter % self.opts.display_every == 0:
+                    print("\r[CLEVR-Dialog - %s (%d/%4d)][epoch %2d][iter %4d/%4d] loss: %.4f, lr: %.2e" % (
+                          dataset.name,
+                          _iterCur,
+                          _totalCur,
+                          epoch,
+                          _iter,
+                          self.opts.num_iters,
+                          loss.cpu().data.numpy(),
+                          optim._rate,
+                          ), end=' ')
+                optim.step()
+                _iter += 1
+                _iterCur += 1
+
+                if _iter % self.opts.validate_every == 0:
+                    if dataset_val is not None:
+                        valAcc = self.eval(
+                            net,
+                            dataset_val,
+                            valid=True,
+                        )
+                        if valAcc > self.bestValAcc:
+                            self.bestValAcc = valAcc
+                            self.bestValIter = _iter
+
+                            print("[INFO] Checkpointing model @ iter {}".format(_iter))
+                            state = {
+                                'state_dict': net.state_dict(),
+                                'optimizer': optim.optimizer.state_dict(),
+                                'lr_base': optim.lr_base,
+                                'optim': optim.lr_base,
+                                'last_iter': _iter,
+                                'last_epoch': epoch,
+                            }
+                            # checkpointing
+                            torch.save(
+                                state,
+                                os.path.join(self.ckpt_path, 'ckpt_iter' + str(_iter) + '.pkl')
+                            )
+                    else:
+                        print("[INFO] No validation dataset available")
+
+            time_end = time.time()
+            print('Finished epoch in {}s'.format(int(time_end-time_start)))
+            epoch += 1
+
+        print("[INFO] Training done. 
+
+    # Evaluation
+    def eval(self, net, dataset, valid=False):
+        net = net.eval()
+        data_size = len(dataset)
+        dataloader = Data.DataLoader(
+            dataset,
+            batch_size=self.opts.batch_size,
+            shuffle=False,
+            num_workers=self.opts.num_workers,
+            pin_memory=False
+        )
+        allPredictedProgs = []
+        numAllProg = 0
+        falsePred = 0
+        for step, (caption, captionPrg) in enumerate(dataloader):
+            print("\rEvaluation: [step %4d/%4d]" % (
+                step,
+                int(data_size / self.opts.batch_size),
+            ), end=' ')
+            caption = caption.cuda()
+            captionPrg = captionPrg.cuda()
+            tokens = net.sample(caption)
+            targetProgs = decodeProg(captionPrg, dataset.vocab["idx_prog_to_token"], target=True)
+            predProgs = decodeProg(tokens, dataset.vocab["idx_prog_to_token"])
+            allPredictedProgs.extend(list(map(lambda s: "( {} ( {} ) ) \n".format(s[0], ", ".join(s[1:])), predProgs)))
+            numAllProg += len(targetProgs)
+            for targetProg, predProg in zip(targetProgs, predProgs):
+                mainMod = targetProg[0] == predProg[0]
+                sameLength = len(targetProg) == len(predProg)
+                sameArgs = False
+                if sameLength:
+                    sameArgs = True
+                    for argTarget in targetProg[1:]:
+                        if argTarget not in predProg[1:]:
+                            sameArgs = False
+                            break
+
+                if not (mainMod and sameArgs):
+                    falsePred += 1
+        val_acc = (1 - (falsePred / numAllProg)) * 100.0
+        print("Acc: {}".format(val_acc))
+        net = net.train()
+        if not valid:
+            with open(self.opts.res_path, "w") as f:
+                f.writelines(allPredictedProgs)
+            print("[INFO] Predicted caption programs logged into {}".format(self.opts.res_path))
+        return val_acc
+
+    def run(self, run_mode):
+        self.set_seed(self.opts.seed)
+        if run_mode == 'train':
+            self.train(self.dataset_tr, self.dataset_val)
+
+        elif run_mode == 'test':
+            lenVocabText = self.dataset_test.lenVocabText
+            lenVocabProg = self.dataset_test.lenVocabProg
+            maxLenProg = self.dataset_test.maxLenProg
+            net = self.constructNet(lenVocabText, lenVocabProg, maxLenProg)
+
+            print('Loading ckpt {}'.format(self.opts.load_checkpoint_path))
+            state_dict = torch.load(self.opts.load_checkpoint_path)['state_dict']
+            net.load_state_dict(state_dict)
+            net.cuda()
+            self.eval(net, self.dataset_test)
+
+        else:
+            print("[ERROR] Unknown run mode '{}'".format(run_mode))
+            exit(-1)
+
+    def set_seed(self, seed):
+        """Sets the seed for reproducibility.
+ Args: + seed (int): The seed used + """ + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + print('[INFO] Seed set to {}...'.format(seed)) + + +def decodeProg(tokens, prgIdxToToken, target=False): + tokensBatch = tokens.tolist() + progsBatch = [] + for tokens in tokensBatch: + prog = [] + for tok in tokens: + if tok == 2: # has index 2 + break + prog.append(prgIdxToToken.get(tok)) + if target: + prog = prog[1:] + progsBatch.append(prog) + return progsBatch + + +if __name__ == "__main__": + opts = Options().parse() + exe = Execution(opts) + exe.run(opts.mode) + print("[INFO] Done ...") diff --git a/prog_generator/train_question_parser.py b/prog_generator/train_question_parser.py new file mode 100644 index 0000000..d6e6874 --- /dev/null +++ b/prog_generator/train_question_parser.py @@ -0,0 +1,912 @@ +""" +author: Adnen Abdessaied +maintainer: "Adnen Abdessaied" +website: adnenabdessaied.de +version: 1.0.1 +""" + +import os +import sys +import json, torch, pickle, copy, time +import numpy as np +import torch.nn as nn +import torch.utils.data as Data +from tensorboardX import SummaryWriter +from copy import deepcopy +from clevrDialog_dataset import ClevrDialogQuestionDataset +import pickle +from tqdm import tqdm + +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from executor.symbolic_executor import SymbolicExecutorClevr, SymbolicExecutorMinecraft +from models import SeqToSeqQ, QuestEncoder_1, QuestEncoder_2, Decoder, CaptionEncoder, SeqToSeqC +from optim import get_optim, adjust_lr +from options_caption_parser import Options as OptionsC +from options_question_parser import Options as OptionsQ + + +class Execution: + def __init__(self, optsQ, optsC): + self.opts = deepcopy(optsQ) + if self.opts.useCuda > 0 and torch.cuda.is_available(): + self.device = torch.device("cuda:0") + print("[INFO] Using GPU {} ...".format(torch.cuda.get_device_name(0))) + else: + print("[INFO] Using CPU ...") + self.device = torch.device("cpu") + + self.loss_fn = torch.nn.NLLLoss().to(self.device) + + print("[INFO] Loading dataset ...") + + self.datasetTr = ClevrDialogQuestionDataset( + self.opts.dataPathTr, self.opts.vocabPath, "train", "All tr data") + + self.datasetVal = ClevrDialogQuestionDataset( + self.opts.dataPathVal, self.opts.vocabPath, "val", "All val data", train=False) + + self.datasetTest = ClevrDialogQuestionDataset( + self.opts.dataPathTest, self.opts.vocabPath, "test", "All val data", train=False) + + self.QuestionNet = constructQuestionNet( + self.opts, + self.datasetTr.lenVocabText, + self.datasetTr.lenVocabProg, + self.datasetTr.maxLenProg, + ) + + if os.path.isfile(self.opts.captionNetPath): + self.CaptionNet = constructCaptionNet( + optsC, + self.datasetTr.lenVocabText, + self.datasetTr.lenVocabProg, + self.datasetTr.maxLenProg + ) + print('Loading CaptionNet from {}'.format(self.opts.captionNetPath)) + state_dict = torch.load(self.opts.captionNetPath)['state_dict'] + self.CaptionNet.load_state_dict(state_dict) + self.CaptionNet.to(self.device) + total_params_cap = sum(p.numel() for p in self.CaptionNet.parameters() if p.requires_grad) + print("The caption encoder has {} trainable parameters".format(total_params_cap)) + + self.QuestionNet.to(self.device) + # if os.path.isfile(self.opts.load_checkpoint_path): + # print('Loading QuestionNet from {}'.format(optsQ.load_checkpoint_path)) + # state_dict = 
torch.load(self.opts.load_checkpoint_path)['state_dict']
+        # self.QuestionNet.load_state_dict(state_dict)
+        total_params_quest = sum(p.numel() for p in self.QuestionNet.parameters() if p.requires_grad)
+        print("The question encoder has {} trainable parameters".format(total_params_quest))
+
+        if "minecraft" in self.opts.scenesPath:
+            self.symbolicExecutor = SymbolicExecutorMinecraft(self.opts.scenesPath)
+        else:
+            self.symbolicExecutor = SymbolicExecutorClevr(self.opts.scenesPath)
+
+        tb_path = os.path.join(self.opts.run_dir, "tb_logdir")
+        if not os.path.isdir(tb_path):
+            os.makedirs(tb_path)
+
+        self.ckpt_path = os.path.join(self.opts.run_dir, "ckpt_dir")
+        if not os.path.isdir(self.ckpt_path):
+            os.makedirs(self.ckpt_path)
+        if not os.path.isdir(self.opts.text_log_dir):
+            os.makedirs(self.opts.text_log_dir)
+
+        self.writer = SummaryWriter(tb_path)
+        self.iter_val = 0
+
+        if os.path.isfile(self.opts.dependenciesPath):
+            with open(self.opts.dependenciesPath, "rb") as f:
+                self.dependencies = pickle.load(f)
+
+    def train(self):
+        self.QuestionNet.train()
+
+        # Set up multi-gpu training if needed
+        if len(self.opts.gpu_ids) > 1:
+            self.QuestionNet = nn.DataParallel(self.QuestionNet, device_ids=self.opts.gpu_ids)
+
+        # Load checkpoint when resuming training
+        if os.path.isfile(self.opts.load_checkpoint_path):
+            print("[INFO] Resuming training from ckpt {} ...".format(
+                self.opts.load_checkpoint_path
+            ))
+
+            # Load the network parameters
+            ckpt = torch.load(self.opts.load_checkpoint_path)
+            print("[INFO] Checkpoint successfully loaded ...")
+            self.QuestionNet.load_state_dict(ckpt['state_dict'])
+
+            # Load the optimizer parameters
+            optim = get_optim(self.opts, self.QuestionNet, len(self.datasetTr))  # , ckpt['optim'], lr_base=ckpt['lr_base'])
+            optim.optimizer.load_state_dict(ckpt['optimizer'])
+            _iter = 0  # ckpt['last_iter']
+            epoch = 0  # ckpt['last_epoch']
+
+        else:
+            optim = get_optim(self.opts, self.QuestionNet, len(self.datasetTr))
+            _iter = 0
+            epoch = 0
+
+        trainTime = 0
+        bestValAcc = float("-inf")
+        bestCkp = 0
+        # Training loop
+        while _iter < self.opts.num_iters:
+
+            # Learning Rate Decay
+            if _iter in self.opts.lr_decay_marks:
+                adjust_lr(optim, self.opts.lr_decay_factor)
+
+            # Define multi-thread dataloader
+            dataloader = Data.DataLoader(
+                self.datasetTr,
+                batch_size=self.opts.batch_size,
+                shuffle=self.opts.shuffle_data,
+                num_workers=self.opts.num_workers,
+            )
+
+            # Iteration
+            time_start = 0
+            time_end = 0
+            for batch_iter, (quest, hist, prog, questionRound, _) in enumerate(dataloader):
+                time_start = time.time()
+                if _iter >= self.opts.num_iters:
+                    break
+                quest = quest.to(self.device)
+                # Keep only the last n rounds of the dialog history if requested
+                if self.opts.last_n_rounds < 10:
+                    last_n_rounds_batch = []
+                    for i, r in enumerate(questionRound.tolist()):
+                        startIdx = max(r - self.opts.last_n_rounds, 0)
+                        endIdx = max(r, self.opts.last_n_rounds)
+                        if hist.dim() == 3:
+                            assert endIdx - startIdx == self.opts.last_n_rounds
+                            histBatch = hist[i, :, :]
+                            last_n_rounds_batch.append(histBatch[startIdx:endIdx, :])
+                        elif hist.dim() == 2:
+                            # rounds are stored as fixed blocks of 20 tokens in the flat history
+                            startIdx *= 20
+                            endIdx *= 20
+                            histBatch = hist[i, :]
+                            temp = histBatch[startIdx:endIdx].cpu()
+                            if r > self.opts.last_n_rounds:
+                                # prepend <START> (1) and append <END> (2)
+                                last_n_rounds_batch.append(torch.cat([torch.tensor([1]), temp, torch.tensor([2])], 0))
+                            else:
+                                last_n_rounds_batch.append(torch.cat([temp, torch.tensor([2, 0])], 0))
+                    hist = torch.stack(last_n_rounds_batch, dim=0)
+                hist = hist.to(self.device)
+                prog = prog.to(self.device)
+                progTarget = prog.clone()
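+                # Teacher forcing (illustrative tokens, not from the data): the
+                # decoder consumes prog[:, :-1] and is trained to emit the
+                # sequence shifted by one, progTarget[:, 1:]. For
+                # prog = [<START>, count, blue, <END>] the input is
+                # [<START>, count, blue] and the target [count, blue, <END>].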
+                optim.zero_grad()
+
+                predSoftmax, _ = self.QuestionNet(quest, hist, prog[:, :-1])
+                loss = self.loss_fn(
+                    predSoftmax.contiguous().view(-1, predSoftmax.size(2)),
+                    progTarget[:, 1:].contiguous().view(-1))
+                loss.backward()
+
+                if _iter % self.opts.validate_every == 0 and _iter > 0:
+                    valAcc = self.val()
+                    if valAcc > bestValAcc:
+                        bestValAcc = valAcc
+                        bestCkp = _iter
+                    print("\n[INFO] Checkpointing model @ iter {} with val accuracy {}\n".format(_iter, valAcc))
+                    state = {
+                        'state_dict': self.QuestionNet.state_dict(),
+                        'optimizer': optim.optimizer.state_dict(),
+                        'lr_base': optim.lr_base,
+                        'optim': optim.lr_base,
+                        'last_iter': _iter,
+                        'last_epoch': epoch,
+                    }
+                    # checkpointing
+                    torch.save(
+                        state,
+                        os.path.join(self.ckpt_path, 'ckpt_iter' + str(_iter) + '.pkl')
+                    )
+
+                # logging
+                self.writer.add_scalar(
+                    'train/loss',
+                    loss.cpu().data.numpy(),
+                    global_step=_iter)
+
+                self.writer.add_scalar(
+                    'train/lr',
+                    optim._rate,
+                    global_step=_iter)
+                if _iter % self.opts.display_every == 0:
+                    time_end = time.time()
+                    trainTime += time_end-time_start
+
+                    print("\r[CLEVR-Dialog - %s (%d | %d)][epoch %2d][iter %4d/%4d][runtime %4f] loss: %.4f, lr: %.2e" % (
+                        self.datasetTr.name,
+                        batch_iter,
+                        len(dataloader),
+                        epoch,
+                        _iter,
+                        self.opts.num_iters,
+                        trainTime,
+                        loss.cpu().data.numpy(),
+                        optim._rate,
+                    ), end=' ')
+
+                optim.step()
+                _iter += 1
+
+            epoch += 1
+        print("[INFO] Avg. epoch time: {} s".format(trainTime / epoch))
+        print("[INFO] Best model achieved val acc. {} @ iter {}".format(bestValAcc, bestCkp))
+
+    def val(self):
+        self.QuestionNet.eval()
+
+        total_correct = 0
+        total = 0
+
+        if len(self.opts.gpu_ids) > 1:
+            self.QuestionNet = nn.DataParallel(self.QuestionNet, device_ids=self.opts.gpu_ids)
+        self.QuestionNet = self.QuestionNet.eval()
+        dataloader = Data.DataLoader(
+            self.datasetVal,
+            batch_size=self.opts.batch_size,
+            shuffle=True,
+            num_workers=self.opts.num_workers,
+            pin_memory=False
+        )
+        _iterCur = 0
+        _totalCur = len(dataloader)
+
+        for step, (question, questionPrg, questionImgIdx, questionRounds, history, historiesProg, answer) in enumerate(dataloader):
+            print("\rEvaluation: [step %4d/%4d]" % (
+                step,
+                len(dataloader),
+            ), end=' ')
+
+            question = question.to(self.device)
+
+            if history.dim() == 3:
+                caption = history.detach()
+                caption = caption[:, 0, :]
+                caption = caption[:, :16].to(self.device)
+            elif history.dim() == 2:
+                caption = history.detach()
+                caption = caption[:, :16].to(self.device)
+            if self.opts.last_n_rounds is not None:
+                last_n_rounds_batch = []
+                for i, r in enumerate(questionRounds.tolist()):
+                    startIdx = max(r - self.opts.last_n_rounds, 0)
+                    endIdx = max(r, self.opts.last_n_rounds)
+                    if history.dim() == 3:
+                        assert endIdx - startIdx == self.opts.last_n_rounds
+                        histBatch = history[i, :, :]
+                        last_n_rounds_batch.append(histBatch[startIdx:endIdx, :])
+                    elif history.dim() == 2:
+                        startIdx *= 20
+                        endIdx *= 20
+                        histBatch = history[i, :]
+                        temp = histBatch[startIdx:endIdx]
+                        if r > self.opts.last_n_rounds:
+                            last_n_rounds_batch.append(torch.cat([torch.tensor([1]), temp, torch.tensor([2])], 0))
+                        else:
+                            last_n_rounds_batch.append(torch.cat([temp, torch.tensor([2, 0])], 0))
+                history = torch.stack(last_n_rounds_batch, dim=0)
+            history = history.to(self.device)
+            questionPrg = questionPrg.to(self.device)
+
+            questProgsToksPred = self.QuestionNet.sample(question, history)
+            questProgsPred = decodeProg(questProgsToksPred, 
self.datasetVal.vocab["idx_prog_to_token"]) + targetProgs = decodeProg(questionPrg, self.datasetVal.vocab["idx_prog_to_token"], target=True) + + correct = [1 if pred == gt else 0 for (pred, gt) in zip(questProgsPred, targetProgs)] + + correct = sum(correct) + total_correct += correct + total += len(targetProgs) + self.QuestionNet.train() + + return 100.0 * (total_correct / total) + + # Evaluation + def eval_with_gt(self): + # Define the multi-gpu training if needed + all_pred_answers = [] + all_gt_answers = [] + all_question_types = [] + all_penalties = [] + all_pred_programs = [] + all_gt_programs = [] + + first_failure_round = 0 + total_correct = 0 + total_acc_pen = 0 + total = 0 + total_quest_prog_correct = 0 + + if len(self.opts.gpu_ids) > 1: + self.QuestionNet = nn.DataParallel(self.QuestionNet, device_ids=self.opts.gpu_ids) + self.QuestionNet = self.QuestionNet.eval() + self.CaptionNet = self.CaptionNet.eval() + if self.opts.batch_size != self.opts.dialogLen: + print("[INFO] Changed batch size from {} to {}".format(self.opts.batch_size, self.opts.dialogLen)) + self.opts.batch_size = self.opts.dialogLen + dataloader = Data.DataLoader( + self.datasetTest, + batch_size=self.opts.batch_size, + shuffle=False, + num_workers=self.opts.num_workers, + pin_memory=False + ) + _iterCur = 0 + _totalCur = len(dataloader) + + for step, (question, questionPrg, questionImgIdx, questionRounds, history, historiesProg, answer) in enumerate(dataloader): + # print("\rEvaluation: [step %4d/%4d]" % ( + # step + 1, + # int(data_size / self.opts.batch_size), + # ), end=' ') + # if step >= 5000: + # break + batchSize = question.size(0) + question = question.to(self.device) + # dependecy = self.dependencies[step*batchSize:(step+1)*batchSize] + + if history.dim() == 3: + caption = history.detach() + caption = caption[:, 0, :] + caption = caption[:, :16].to(self.device) + elif history.dim() == 2: + caption = history.detach() + caption = caption[:, :16].to(self.device) + if self.opts.last_n_rounds < 10: + last_n_rounds_batch = [] + for i, r in enumerate(questionRounds.tolist()): + startIdx = max(r - self.opts.last_n_rounds, 0) + endIdx = max(r, self.opts.last_n_rounds) + if history.dim() == 3: + assert endIdx - startIdx == self.opts.last_n_rounds + histBatch = history[i, :, :] + last_n_rounds_batch.append(histBatch[startIdx:endIdx, :]) + elif history.dim() == 2: + startIdx *= 20 + endIdx *= 20 + histBatch = history[i, :] + temp = histBatch[startIdx:endIdx] + if r > self.opts.last_n_rounds: + last_n_rounds_batch.append(torch.cat([torch.tensor([1]), temp, torch.tensor([2])], 0)) + else: + last_n_rounds_batch.append(torch.cat([temp, torch.tensor([2, 0])], 0)) + history = torch.stack(last_n_rounds_batch, dim=0) + + history = history.to(self.device) + questionPrg = questionPrg.to(self.device) + historiesProg = historiesProg.tolist() + questionRounds = questionRounds.tolist() + answer = answer.tolist() + answers = list(map(lambda a: self.datasetTest.vocab["idx_text_to_token"][a], answer)) + questionImgIdx = questionImgIdx.tolist() + # if "minecraft" in self.opts.scenesPath: + # questionImgIdx = [idx - 1 for idx in questionImgIdx] + questProgsToksPred = self.QuestionNet.sample(question, history) + capProgsToksPred = self.CaptionNet.sample(caption) + + questProgsPred = decodeProg(questProgsToksPred, self.datasetTest.vocab["idx_prog_to_token"]) + capProgsPred = decodeProg(capProgsToksPred, self.datasetTest.vocab["idx_prog_to_token"]) + + targetProgs = decodeProg(questionPrg, self.datasetTest.vocab["idx_prog_to_token"], 
target=True)
+            questionTypes = [targetProg[0] for targetProg in targetProgs]
+            progHistories = [getProgHistories(progHistToks, self.datasetTest.vocab["idx_prog_to_token"]) for progHistToks in historiesProg]
+            pred_answers = []
+            all_pred_programs.append([capProgsPred[0]] + questProgsPred)
+            all_gt_programs.append([progHistories[0]] + targetProgs)
+
+            for i in range(batchSize):
+                ans = self.getPrediction(
+                    questProgsPred[i],
+                    capProgsPred[i],
+                    progHistories[i],
+                    questionImgIdx[i]
+                )
+                pred_answers.append(ans)
+            correct = [1 if pred == ans else 0 for (pred, ans) in zip(pred_answers, answers)]
+            correct_prog = [1 if pred == ans else 0 for (pred, ans) in zip(questProgsPred, targetProgs)]
+            idx_false = np.argwhere(np.array(correct) == 0).squeeze(-1)
+            if idx_false.shape[-1] > 0:
+                first_failure_round += idx_false[0] + 1
+            else:
+                first_failure_round += self.opts.dialogLen + 1
+
+            correct = sum(correct)
+            correct_prog = sum(correct_prog)
+            total_correct += correct
+            total_quest_prog_correct += correct_prog
+            total += len(answers)
+            all_pred_answers.append(pred_answers)
+            all_gt_answers.append(answers)
+            all_question_types.append(questionTypes)
+            # assumption: no answer penalties apply in the ground-truth-history setting
+            penalty = np.zeros(len(answers))
+            all_penalties.append(penalty)
+            _iterCur += 1
+            if _iterCur % self.opts.display_every == 0:
+                print("[Evaluation] step {0} / {1} | acc. = {2:.2f}".format(
+                    _iterCur, _totalCur, 100.0 * (total_correct / total)))
+
+        ffr = 1.0 * (first_failure_round/_totalCur)/(self.opts.dialogLen + 1)
+
+        textOut = "\n --------------- Average First Failure Round --------------- \n"
+        textOut += "{} / {}".format(ffr, self.opts.dialogLen)
+
+        accuracy = total_correct / total
+        # Note: total_acc_pen is never updated in this evaluation mode, so the
+        # VD accuracy below is only meaningful when penalties are applied.
+        vd_acc = total_acc_pen / total
+        quest_prog_acc = total_quest_prog_correct / total
+        textOut += "\n --------------- Overall acc. --------------- \n"
+        textOut += "{}".format(100.0 * accuracy)
+        textOut += "\n --------------- Overall VD acc. --------------- \n"
+        textOut += "{}".format(100.0 * vd_acc)
+        textOut += "\n --------------- Question Prog. 
Acc --------------- \n" + textOut += "{}".format(100.0 * quest_prog_acc) + textOut += get_per_round_acc( + all_pred_answers, all_gt_answers, all_penalties) + + textOut += get_per_question_type_acc( + all_pred_answers, all_gt_answers, all_question_types, all_penalties) + + # textOut += get_per_dependency_type_acc( + # all_pred_answers, all_gt_answers, all_penalties) + + textOut += "\n --------------- Done --------------- \n" + print(textOut) + fname = self.opts.questionNetPath.split("/")[-3] + "results_{}_{}.txt".format(self.opts.last_n_rounds, self.opts.dialogLen) + pred_answers_fname = self.opts.questionNetPath.split("/")[-3] + "_pred_answers_{}_{}.pkl".format(self.opts.last_n_rounds, self.opts.dialogLen) + pred_answers_fname = os.path.join("/projects/abdessaied/clevr-dialog/output/pred_answers", pred_answers_fname) + model_name = "NSVD_stack" if "stack" in self.opts.questionNetPath else "NSVD_concat" + experiment_name = "minecraft" + # experiment_name += "_{}".format(self.opts.dialogLen) + prog_output_fname = os.path.join("/projects/abdessaied/clevr-dialog/output/prog_output/{}_{}.pkl".format(model_name, experiment_name)) + + fpath = os.path.join(self.opts.text_log_dir, fname) + with open(fpath, "w") as f: + f.writelines(textOut) + with open(pred_answers_fname, "wb") as f: + pickle.dump(all_pred_answers, f, protocol=pickle.HIGHEST_PROTOCOL) + with open(prog_output_fname, "wb") as f: + pickle.dump((all_gt_programs, all_pred_programs, all_pred_answers), f, protocol=pickle.HIGHEST_PROTOCOL) + +# Evaluation + def eval_with_pred(self): + # Define the multi-gpu training if needed + all_pred_answers = [] + all_gt_answers = [] + all_question_types = [] + all_penalties = [] + + first_failure_round = 0 + total_correct = 0 + total_acc_pen = 0 + total = 0 + + samples = {} + + if len(self.opts.gpu_ids) > 1: + self.QuestionNet = nn.DataParallel(self.QuestionNet, device_ids=self.opts.gpu_ids) + self.QuestionNet = self.QuestionNet.eval() + self.CaptionNet = self.CaptionNet.eval() + if self.opts.batch_size != self.opts.dialogLen: + print("[INFO] Changed batch size from {} to {}".format(self.opts.batch_size, self.opts.dialogLen)) + self.opts.batch_size = self.opts.dialogLen + dataloader = Data.DataLoader( + self.datasetTest, + batch_size=self.opts.batch_size, + shuffle=False, + num_workers=self.opts.num_workers, + pin_memory=False + ) + _iterCur = 0 + _totalCur = len(dataloader) + step = 0 + for step, (question, questionPrg, questionImgIdx, questionRounds, history, historiesProg, answer) in enumerate(dataloader): + question = question.tolist() + questions = decode(question, self.datasetTest.vocab["idx_text_to_token"], target=True) + questions = list(map(lambda q: " ".join(q), questions)) + targetProgs = decode(questionPrg, self.datasetTest.vocab["idx_prog_to_token"], target=True) + + questionTypes = [targetProg[0] for targetProg in targetProgs] + targetProgs = list(map(lambda q: " ".join(q), targetProgs)) + + historiesProg = historiesProg.tolist() + progHistories = [getProgHistories(progHistToks, self.datasetTest.vocab["idx_prog_to_token"]) for progHistToks in historiesProg] + + answer = answer.tolist() + answers = list(map(lambda a: self.datasetTest.vocab["idx_text_to_token"][a], answer)) + questionImgIdx = questionImgIdx.tolist() + + if self.opts.encoderType == 2: + histories_eval = [history[0, 0, :].tolist()] + caption = history.detach() + caption = caption[0, 0, :].unsqueeze(0) + caption = caption[:, :16].to(self.device) + elif self.opts.encoderType == 1: + caption = history.detach() + histories_eval 
= [history[0, :20].tolist()] + caption = caption[0, :16].unsqueeze(0).to(self.device) + cap = decode(caption, self.datasetTest.vocab["idx_text_to_token"], target=False) + capProgToksPred = self.CaptionNet.sample(caption) + capProgPred = decode(capProgToksPred, self.datasetTest.vocab["idx_prog_to_token"])[0] + + pred_answers = [] + pred_quest_prog = [] + for i, (q, prog_hist, img_idx) in enumerate(zip(question, progHistories, questionImgIdx)): + _round = i + 1 + if _round <= self.opts.last_n_rounds: + start = 0 + else: + start = _round - self.opts.last_n_rounds + end = len(histories_eval) + + quest = torch.tensor(q).unsqueeze(0).to(self.device) + if self.opts.encoderType == 3: + hist = torch.stack([torch.tensor(h) for h in histories_eval[start:end]], dim=0).unsqueeze(0).to(self.device) + elif self.opts.encoderType == 1: + histories_eval_copy = deepcopy(histories_eval) + histories_eval_copy[-1].append(self.datasetTest.vocab["text_token_to_idx"][""]) + hist = torch.cat([torch.tensor(h) for h in histories_eval_copy[start:end]], dim=-1).unsqueeze(0).to(self.device) + + questProgsToksPred = self.QuestionNet.sample(quest, hist) + questProgsPred = decode(questProgsToksPred, self.datasetTest.vocab["idx_prog_to_token"])[0] + pred_quest_prog.append(" ".join(questProgsPred)) + ans = self.getPrediction( + questProgsPred, + capProgPred, + prog_hist, + img_idx + ) + ans_idx = self.datasetTest.vocab["text_token_to_idx"].get( + ans, self.datasetTest.vocab["text_token_to_idx"][""]) + q[q.index(self.datasetTest.vocab["text_token_to_idx"][""])] = self.datasetTest.vocab["text_token_to_idx"][""] + q[-1] = self.datasetTest.vocab["text_token_to_idx"][""] + q.insert(-1, ans_idx) + if self.opts.encoderType == 3: + histories_eval.append(copy.deepcopy(q)) + elif self.opts.encoderType == 0: + del q[0] + del q[-1] + histories_eval.append(copy.deepcopy(q)) + + pred_answers.append(ans) + + correct = [1 if pred == ans else 0 for (pred, ans) in zip(pred_answers, answers)] + idx_false = np.argwhere(np.array(correct) == 0).squeeze(-1) + if idx_false.shape[-1] > 0: + first_failure_round += idx_false[0] + 1 + else: + first_failure_round += self.opts.dialogLen + 1 + + correct = sum(correct) + total_correct += correct + total += len(answers) + all_pred_answers.append(pred_answers) + all_gt_answers.append(answers) + all_question_types.append(questionTypes) + _iterCur += 1 + if _iterCur % self.opts.display_every == 0: + print("[Evaluation] step {0} / {1} | acc. = {2:.2f}".format( + _iterCur, _totalCur, 100.0 * (total_correct / total) + )) + samples["{}_{}".format(questionImgIdx[0], (step % 5) + 1)] = { + "caption": " ".join(cap[0]), + "cap_prog_gt": " ".join(progHistories[0][0]), + "cap_prog_pred": " ".join(capProgPred), + + "questions": questions, + "quest_progs_gt": targetProgs, + "quest_progs_pred": pred_quest_prog, + + + "answers": answers, + "preds": pred_answers, + "acc": correct, + } + + + ffr = 1.0 * self.opts.dialogLen * (first_failure_round/total) + + textOut = "\n --------------- Average First Failure Round --------------- \n" + textOut += "{} / {}".format(ffr, self.opts.dialogLen) + + # print(total_correct, total) + accuracy = total_correct / total + vd_acc = total_acc_pen / total + textOut += "\n --------------- Overall acc. --------------- \n" + textOut += "{}".format(100.0 * accuracy) + textOut += "\n --------------- Overall VD acc. 
--------------- \n" + textOut += "{}".format(100.0 * vd_acc) + + textOut += get_per_round_acc( + all_pred_answers, all_gt_answers, all_penalties) + + textOut += get_per_question_type_acc( + all_pred_answers, all_gt_answers, all_question_types, all_penalties) + + textOut += "\n --------------- Done --------------- \n" + print(textOut) + if step >= len(dataloader): + fname = self.opts.questionNetPath.split("/")[-3] + "_results_{}_{}_{}.txt".format(self.opts.last_n_rounds, self.opts.dialogLen, self.acc_type) + pred_answers_fname = self.opts.questionNetPath.split("/")[-3] + "_pred_answers_{}_{}.pkl".format(self.opts.last_n_rounds, self.opts.dialogLen) + pred_answers_fname = os.path.join("/projects/abdessaied/clevr-dialog/output/pred_answers", pred_answers_fname) + + fpath = os.path.join(self.opts.text_log_dir, fname) + with open(fpath, "w") as f: + f.writelines(textOut) + with open(pred_answers_fname, "wb") as f: + pickle.dump(all_pred_answers, f, protocol=pickle.HIGHEST_PROTOCOL) + + def getPrediction(self, questProgPred, capProgPred, historyProg, imgIndex): + self.symbolicExecutor.reset(imgIndex) + # if round one, execute the predicted caption program first then answer the question + if len(historyProg) == 1: + captionFuncLabel = capProgPred[0] + captionFuncArgs = capProgPred[1:] + + questionFuncLabel = questProgPred[0] + questionFuncArgs = questProgPred[1:] + + try: + _ = self.symbolicExecutor.execute(captionFuncLabel, captionFuncArgs) + except: + return "Error" + + try: + predAnswer = self.symbolicExecutor.execute(questionFuncLabel, questionFuncArgs) + except: + return "Error" + + # If it is not the first round, we have to execute the program history and + # then answer the question. + else: + questionFuncLabel = questProgPred[0] + questionFuncArgs = questProgPred[1:] + for prg in historyProg: + # prg = prg.split(" ") + FuncLabel = prg[0] + FuncArgs = prg[1:] + try: + _ = self.symbolicExecutor.execute(FuncLabel, FuncArgs) + except: + return "Error" + + try: + predAnswer = self.symbolicExecutor.execute(questionFuncLabel, questionFuncArgs) + except: + return "Error" + return str(predAnswer) + + def run(self, run_mode, epoch=None): + self.set_seed(self.opts.seed) + if run_mode == 'train': + self.train() + + elif run_mode == 'test_with_gt': + print('Testing with gt answers in history') + print('Loading ckpt {}'.format(self.opts.questionNetPath)) + state_dict = torch.load(self.opts.questionNetPath)['state_dict'] + self.QuestionNet.load_state_dict(state_dict) + self.eval_with_gt() + + elif run_mode == 'test_with_pred': + print('Testing with predicted answers in history') + print('Loading ckpt {}'.format(self.opts.questionNetPath)) + state_dict = torch.load(self.opts.questionNetPath)['state_dict'] + self.QuestionNet.load_state_dict(state_dict) + self.eval_with_pred() + else: + exit(-1) + + def set_seed(self, seed): + """Sets the seed for reproducibility. 
+ Args: + seed (int): The seed used + """ + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + print('[INFO] Seed set to {}...'.format(seed)) + + +def constructQuestionNet(opts, lenVocabText, lenVocabProg, maxLenProg): + decoder = Decoder(opts, lenVocabProg, maxLenProg) + if opts.encoderType == 1: + encoder = QuestEncoder_1(opts, lenVocabText) + elif opts.encoderType == 2: + encoder = QuestEncoder_2(opts, lenVocabText) + + net = SeqToSeqQ(encoder, decoder) + return net + + +def constructCaptionNet(opts, lenVocabText, lenVocabProg, maxLenProg): + decoder = Decoder(opts, lenVocabProg, maxLenProg) + encoder = CaptionEncoder(opts, lenVocabText) + net = SeqToSeqC(encoder, decoder) + return net + + +def getProgHistories(progHistToks, prgIdxToToken): + progHist = [] + temp = [] + for tok in progHistToks: + if tok not in [0, 1, 2]: + temp.append(prgIdxToToken[tok]) + # del progHistToks[i] + if tok == 2: + # del progHistToks[i] + # progHist.append(" ".join(temp)) + progHist.append(temp) + temp = [] + return progHist + + +def getHistoriesFromStack(histToks, textIdxToToken): + histories = "\n" + temp = [] + for i, roundToks in enumerate(histToks): + for tok in roundToks: + if tok not in [0, 1, 2]: + temp.append(textIdxToToken[tok]) + # del progHistToks[i] + if tok == 2: + # del progHistToks[i] + if i == 0: + histories += " ".join(temp) + ".\n" + else: + histories += " ".join(temp[:-1]) + "? | {}.\n".format(temp[-1]) + # histories.append(temp) + temp = [] + break + return histories + + +def getHistoriesFromConcat(histToks, textIdxToToken): + histories = [] + temp = [] + for tok in histToks: + if tok not in [0, 1, 2]: + temp.append(textIdxToToken[tok]) + # del progHistToks[i] + if tok == 2: + # del progHistToks[i] + histories.append(" ".join(temp[:-1]) + "? 
| {}".format(temp[-1])) + # histories.append(temp) + temp = [] + return histories + + +def decodeProg(tokens, prgIdxToToken, target=False): + tokensBatch = tokens.tolist() + progsBatch = [] + for tokens in tokensBatch: + prog = [] + for tok in tokens: + if tok == 2: # has index 2 + break + prog.append(prgIdxToToken.get(tok)) + if target: + prog = prog[1:] + # progsBatch.append(" ".join(prog)) + progsBatch.append(prog) + return progsBatch + + +def printPred(predSoftmax, gts, prgIdxToToken): + assert predSoftmax.size(0) == gts.size(0) + tokens = predSoftmax.topk(1)[1].squeeze(-1) + tokens = tokens.tolist() + gts = gts.tolist() + message = "\n ------------------------ \n" + for token, gt in zip(tokens, gts): + message += "Prediction: " + for tok in token: + message += prgIdxToToken.get(tok) + " " + message += "\n Target : " + for tok in gt: + message += prgIdxToToken.get(tok) + " " + message += "\n ------------------------ \n" + return message + + +def get_per_round_acc(preds, gts, penalties): + res = {} + for img_preds, img_gt, img_pen in zip(preds, gts, penalties): + img_preds = list(img_preds) + img_gt = list(img_gt) + img_pen = list(img_pen) + for i, (pred, gt, pen) in enumerate(zip(img_preds, img_gt, img_pen)): + _round = str(i + 1) + if _round not in res: + res[_round] = { + "correct": 0, + "all": 0 + } + res[_round]["all"] += 1 + if pred == gt: + res[_round]["correct"] += 0.5**pen + + textOut = "\n --------------- Per round Acc --------------- \n" + for k in res: + textOut += "{}: {} %\n".format(k, 100.0 * (res[k]["correct"]/res[k]["all"])) + return textOut + + +def get_per_question_type_acc(preds, gts, qtypes, penalties): + res1 = {} + res2 = {} + + for img_preds, img_gt, img_qtypes, img_pen in zip(preds, gts, qtypes, penalties): + # img_preds = list(img_preds) + # img_gt = list(img_gt) + img_pen = list(img_pen) + for pred, gt, temp, pen in zip(img_preds, img_gt, img_qtypes, img_pen): + if temp not in res1: + res1[temp] = { + "correct": 0, + "all": 0 + } + temp_cat = temp.split("-")[0] + if temp_cat not in res2: + res2[temp_cat] = { + "correct": 0, + "all": 0 + } + res1[temp]["all"] += 1 + res2[temp_cat]["all"] += 1 + + if pred == gt: + res1[temp]["correct"] += 0.5**pen + res2[temp_cat]["correct"] += 0.5**pen + + textOut = "\n --------------- Per question Type Acc --------------- \n" + for k in res1: + textOut += "{}: {} %\n".format(k, 100.0 * (res1[k]["correct"]/res1[k]["all"])) + + textOut += "\n --------------- Per question Category Acc --------------- \n" + for k in res2: + textOut += "{}: {} %\n".format(k, 100.0 * (res2[k]["correct"]/res2[k]["all"])) + return textOut + + +def decode(tokens, prgIdxToToken, target=False): + if type(tokens) != list: + tokens = tokens.tolist() + + progsBatch = [] + for token in tokens: + prog = [] + for tok in token: + if tok == 2: # has index 2 + break + prog.append(prgIdxToToken.get(tok)) + if target: + prog = prog[1:] + # progsBatch.append(" ".join(prog)) + progsBatch.append(prog) + return progsBatch + +if __name__ == "__main__": + optsC = OptionsC().parse() + optsQ = OptionsQ().parse() + + exe = Execution(optsQ, optsC) + exe.run("test") + print("[INFO] Done ...") diff --git a/utils.py b/utils.py new file mode 100644 index 0000000..4fb62cf --- /dev/null +++ b/utils.py @@ -0,0 +1,80 @@ +import json +import numpy as np + + +def merge_captions_question_programs(path_cap, path_ques, caption_first=True): + with open(path_cap, "r"): + c_progs = path_cap.readlines() + with open(path_ques, "r"): + q_progs = path_ques.readlines() + + all_merged_progs = [] 
+    i = 0
+    while i < len(q_progs):
+        cap_idx = i % 11 if caption_first else i % 10
+        start_idx_p = i + 1 if caption_first else i
+        end_idx_p = start_idx_p + 12 if caption_first else start_idx_p + 11
+        temp = [c_progs[cap_idx]] + q_progs[start_idx_p:end_idx_p]
+        all_merged_progs.append(temp)
+        i = end_idx_p
+    return all_merged_progs
+
+
+def load_clevr_scenes(scenes_json):
+    with open(scenes_json) as f:
+        scenes_raw = json.load(f)
+    if type(scenes_raw) == dict:
+        scenes_raw = scenes_raw["scenes"]
+
+    scenes = []
+    for s in scenes_raw:
+        table = []
+        for i, o in enumerate(s['objects']):
+            item = {}
+            item['id'] = '%d-%d' % (s['image_index'], i)
+            if '3d_coords' in o:
+                item['position'] = [np.dot(o['3d_coords'], s['directions']['right']),
+                                    np.dot(o['3d_coords'], s['directions']['front']),
+                                    o['3d_coords'][2]]
+            else:
+                item['position'] = o['position']
+            item['color'] = o['color']
+            item['material'] = o['material']
+            item['shape'] = o['shape']
+            item['size'] = o['size']
+            table.append(item)
+        scenes.append(table)
+    return scenes
+
+
+def load_minecraft_scenes(scenes_json):
+    with open(scenes_json) as f:
+        scenes_raw = json.load(f)
+    if type(scenes_raw) == dict:
+        scenes_raw = scenes_raw["scenes"]
+
+    scenes = []
+    for s in scenes_raw:
+        table = []
+        for i, o in enumerate(s['objects']):
+            item = {}
+            item['id'] = '%d-%d' % (s['image_index'], i)
+            if '3d_coords' in o:
+                item['position'] = [np.dot(o['3d_coords'], s['directions']['right']),
+                                    np.dot(o['3d_coords'], s['directions']['front']),
+                                    o['3d_coords'][2]]
+            else:
+                item['position'] = o['position']
+            item['nature'] = o['nature']
+            item['class'] = o['class']
+            item['direction'] = "facing_"
+            if o['direction'] == "front":
+                item['direction'] += "forward"
+            elif o['direction'] == "back":
+                item['direction'] += "backward"
+            elif o['direction'] == "right":
+                item['direction'] += "right"
+            elif o['direction'] == "left":
+                item['direction'] += "left"
+            table.append(item)
+        scenes.append(table)
+    return scenes
diff --git a/utils_preprocess.py b/utils_preprocess.py
new file mode 100644
index 0000000..c7cf219
--- /dev/null
+++ b/utils_preprocess.py
@@ -0,0 +1,62 @@
+import os
+import json
+import numpy as np
+import torch
+
+
+def mkdirs(paths):
+    if isinstance(paths, list):
+        for path in paths:
+            if not os.path.exists(path):
+                os.makedirs(path)
+    else:
+        if not os.path.exists(paths):
+            os.makedirs(paths)
+
+
+def invert_dict(d):
+    return {v: k for k, v in d.items()}
+
+
+def load_vocab(path):
+    with open(path, 'r') as f:
+        vocab = json.load(f)
+    vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])
+    vocab['program_idx_to_token'] = invert_dict(vocab['program_token_to_idx'])
+    vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
+    # Sanity check: make sure <NULL>, <START>, and <END> are consistent
+    assert vocab['question_token_to_idx']['<NULL>'] == 0
+    assert vocab['question_token_to_idx']['<START>'] == 1
+    assert vocab['question_token_to_idx']['<END>'] == 2
+    assert vocab['program_token_to_idx']['<NULL>'] == 0
+    assert vocab['program_token_to_idx']['<START>'] == 1
+    assert vocab['program_token_to_idx']['<END>'] == 2
+    return vocab
+
+
+def load_scenes(scenes_json):
+    with open(scenes_json) as f:
+        scenes_dict = json.load(f)['scenes']
+    scenes = []
+    for s in scenes_dict:
+        table = []
+        for i, o in enumerate(s['objects']):
+            item = {}
+            item['id'] = '%d-%d' % (s['image_index'], i)
+            if '3d_coords' in o:
+                item['position'] = [np.dot(o['3d_coords'], s['directions']['right']),
+                                    np.dot(o['3d_coords'], s['directions']['front']),
+                                    o['3d_coords'][2]]
+            else:
+                item['position'] = o['position']
+            item['color'] = o['color']
+            item['material'] = o['material']
+            item['shape'] = o['shape']
+            item['size'] = o['size']
+            table.append(item)
+        scenes.append(table)
+    return scenes
+
+
+def load_embedding(path):
+    return torch.Tensor(np.load(path))
\ No newline at end of file