From f34dc653e5006ed97e7f95bcbcd780c7249ec62d Mon Sep 17 00:00:00 2001 From: mohsen-mansouryar Date: Wed, 9 Mar 2016 19:52:35 +0100 Subject: [PATCH] migrated code to public repository --- README.md | 23 +- code/.gitignore | 4 + code/README.md | 37 + code/Visualization/1CalibrationDepth.py | 944 ++++++++ code/Visualization/3CalibrationDepths.py | 1980 +++++++++++++++++ ...fectDistanceDifference1CalibrationDepth.py | 200 ++ code/Visualization/EffectNumberofClusters.py | 350 +++ .../EffectNumberofClusters_plus_Simulation.py | 418 ++++ code/copy_missing_data.py | 36 + code/experiment.py | 441 ++++ code/geom.py | 288 +++ code/minimize.py | 131 ++ code/parallax_2D3D_3Cdepths.py | 249 +++ code/parallax_analysis.py | 996 +++++++++ code/pupil/__init__.py | 0 code/pupil/calibrate.py | 255 +++ code/pupil/file_methods.py | 65 + code/pupil/methods.py | 661 ++++++ code/pupil/player_methods.py | 157 ++ code/recording/__init__.py | 0 code/recording/aruco_test | Bin 0 -> 277012 bytes code/recording/aruco_test.cpp | 242 ++ code/recording/collect_images.py | 89 + code/recording/data/.gitignore | 3 + code/recording/data/display_setting.txt | 238 ++ .../data/eye_camera/2015_10_03/000/info.csv | 14 + .../data/eye_camera/2015_10_03/000/pupil_data | Bin 0 -> 404245 bytes .../eye_camera/2015_10_03/000/user_info.csv | 2 + .../data/eye_camera/2015_10_03/001/info.csv | 14 + .../data/eye_camera/2015_10_03/001/pupil_data | Bin 0 -> 488935 bytes .../eye_camera/2015_10_03/001/user_info.csv | 2 + .../data/eye_camera/eye_camera_RMS0p39.yml | 20 + code/recording/data/frames_005/frame_300.txt | 9 + code/recording/data/frames_006/frame_300.txt | 10 + code/recording/data/frames_007/frame_300.txt | 24 + code/recording/data/frames_010/frame_300.txt | 24 + code/recording/data/participants/.gitignore | 2 + code/recording/data/participants/notes.txt | 15 + .../data/scene_camera/2015_10_03/000/info.csv | 14 + .../scene_camera/2015_10_03/000/pupil_data | Bin 0 -> 234804 bytes 
.../scene_camera/2015_10_03/000/user_info.csv | 2 + code/recording/data_grabber.py | 61 + code/recording/eye_world_correlation.py | 85 + code/recording/opencv/calibrate_and_save.py | 193 ++ code/recording/process_recordings.py | 454 ++++ code/recording/retrieve.py | 150 ++ code/recording/samples/frame_1_details.txt | 27 + code/recording/tracker.py | 221 ++ code/recording/util/GazeHelper/.gitignore | 41 + code/recording/util/GazeHelper/.idea/.name | 1 + .../util/GazeHelper/.idea/compiler.xml | 22 + .../.idea/copyright/profiles_settings.xml | 3 + .../util/GazeHelper/.idea/gradle.xml | 19 + code/recording/util/GazeHelper/.idea/misc.xml | 22 + .../util/GazeHelper/.idea/modules.xml | 9 + code/recording/util/GazeHelper/.idea/vcs.xml | 6 + code/recording/util/GazeHelper/GazeHelper.iml | 19 + code/recording/util/GazeHelper/app/.gitignore | 1 + code/recording/util/GazeHelper/app/app.iml | 95 + .../util/GazeHelper/app/build.gradle | 25 + .../util/GazeHelper/app/proguard-rules.pro | 17 + .../app/src/main/AndroidManifest.xml | 21 + .../mmbrian/com/gazehelper/MainActivity.java | 108 + .../app/src/main/res/layout/activity_main.xml | 59 + .../app/src/main/res/values-w820dp/dimens.xml | 6 + .../app/src/main/res/values/dimens.xml | 5 + .../app/src/main/res/values/strings.xml | 6 + .../app/src/main/res/values/styles.xml | 8 + code/recording/util/GazeHelper/build.gradle | 19 + .../util/GazeHelper/gradle.properties | 18 + .../gradle/wrapper/gradle-wrapper.properties | 6 + code/recording/util/GazeHelper/gradlew | 164 ++ code/recording/util/GazeHelper/gradlew.bat | 90 + .../recording/util/GazeHelper/settings.gradle | 1 + .../CalibrationBoard.pde | 102 + .../util/SingleMarkerVisualizer/Marker.pde | 69 + .../SingleMarkerVisualizer.pde | 177 ++ .../SingleMarkerVisualizer/sketch.properties | 2 + code/recording/util/__init__.py | 0 code/recording/util/check_pupil_positions.py | 18 + code/recording/util/markers/marker.py | 201 ++ code/recording/util/tools.py | 44 + 
code/recording_experiment.py | 196 ++ .../results/2d2d/2_calibration_depths/p10.csv | 50 + .../results/2d2d/2_calibration_depths/p11.csv | 50 + .../results/2d2d/2_calibration_depths/p12.csv | 50 + .../results/2d2d/2_calibration_depths/p13.csv | 50 + .../results/2d2d/2_calibration_depths/p14.csv | 50 + .../results/2d2d/2_calibration_depths/p15.csv | 50 + .../results/2d2d/2_calibration_depths/p16.csv | 50 + .../results/2d2d/2_calibration_depths/p20.csv | 50 + .../results/2d2d/2_calibration_depths/p21.csv | 50 + .../results/2d2d/2_calibration_depths/p24.csv | 50 + .../results/2d2d/2_calibration_depths/p25.csv | 50 + .../results/2d2d/2_calibration_depths/p26.csv | 50 + code/results/2d2d/2_calibration_depths/p5.csv | 50 + code/results/2d2d/2_calibration_depths/p7.csv | 50 + .../results/2d2d/3_calibration_depths/p10.csv | 50 + .../results/2d2d/3_calibration_depths/p11.csv | 50 + .../results/2d2d/3_calibration_depths/p12.csv | 50 + .../results/2d2d/3_calibration_depths/p13.csv | 50 + .../results/2d2d/3_calibration_depths/p14.csv | 50 + .../results/2d2d/3_calibration_depths/p15.csv | 50 + .../results/2d2d/3_calibration_depths/p16.csv | 50 + .../results/2d2d/3_calibration_depths/p20.csv | 50 + .../results/2d2d/3_calibration_depths/p21.csv | 50 + .../results/2d2d/3_calibration_depths/p24.csv | 50 + .../results/2d2d/3_calibration_depths/p25.csv | 50 + .../results/2d2d/3_calibration_depths/p26.csv | 50 + code/results/2d2d/3_calibration_depths/p5.csv | 50 + code/results/2d2d/3_calibration_depths/p7.csv | 50 + .../results/2d2d/4_calibration_depths/p10.csv | 25 + .../results/2d2d/4_calibration_depths/p11.csv | 25 + .../results/2d2d/4_calibration_depths/p12.csv | 25 + .../results/2d2d/4_calibration_depths/p13.csv | 25 + .../results/2d2d/4_calibration_depths/p14.csv | 25 + .../results/2d2d/4_calibration_depths/p15.csv | 25 + .../results/2d2d/4_calibration_depths/p16.csv | 25 + .../results/2d2d/4_calibration_depths/p20.csv | 25 + .../results/2d2d/4_calibration_depths/p21.csv | 25 + 
.../results/2d2d/4_calibration_depths/p24.csv | 25 + .../results/2d2d/4_calibration_depths/p25.csv | 25 + .../results/2d2d/4_calibration_depths/p26.csv | 25 + code/results/2d2d/4_calibration_depths/p5.csv | 25 + code/results/2d2d/4_calibration_depths/p7.csv | 25 + .../results/2d2d/5_calibration_depths/p10.csv | 5 + .../results/2d2d/5_calibration_depths/p11.csv | 5 + .../results/2d2d/5_calibration_depths/p12.csv | 5 + .../results/2d2d/5_calibration_depths/p13.csv | 5 + .../results/2d2d/5_calibration_depths/p14.csv | 5 + .../results/2d2d/5_calibration_depths/p15.csv | 5 + .../results/2d2d/5_calibration_depths/p16.csv | 5 + .../results/2d2d/5_calibration_depths/p20.csv | 5 + .../results/2d2d/5_calibration_depths/p21.csv | 5 + .../results/2d2d/5_calibration_depths/p24.csv | 5 + .../results/2d2d/5_calibration_depths/p25.csv | 5 + .../results/2d2d/5_calibration_depths/p26.csv | 5 + code/results/2d2d/5_calibration_depths/p5.csv | 5 + code/results/2d2d/5_calibration_depths/p7.csv | 5 + code/results/2d2d/README.txt | 9 + code/results/2d2d/p10_2d2d_all.csv | 25 + code/results/2d2d/p11_2d2d_all.csv | 25 + code/results/2d2d/p12_2d2d_all.csv | 25 + code/results/2d2d/p13_2d2d_all.csv | 25 + code/results/2d2d/p14_2d2d_all.csv | 25 + code/results/2d2d/p15_2d2d_all.csv | 25 + code/results/2d2d/p16_2d2d_all.csv | 25 + code/results/2d2d/p20_2d2d_all.csv | 25 + code/results/2d2d/p21_2d2d_all.csv | 25 + code/results/2d2d/p24_2d2d_all.csv | 25 + code/results/2d2d/p25_2d2d_all.csv | 25 + code/results/2d2d/p26_2d2d_all.csv | 25 + code/results/2d2d/p5_2d2d_all.csv | 25 + code/results/2d2d/p7_2d2d_all.csv | 25 + .../results/2d3d/2_calibration_depths/p10.csv | 50 + .../results/2d3d/2_calibration_depths/p11.csv | 50 + .../results/2d3d/2_calibration_depths/p12.csv | 50 + .../results/2d3d/2_calibration_depths/p13.csv | 50 + .../results/2d3d/2_calibration_depths/p14.csv | 50 + .../results/2d3d/2_calibration_depths/p15.csv | 50 + .../results/2d3d/2_calibration_depths/p16.csv | 50 + 
.../results/2d3d/2_calibration_depths/p20.csv | 50 + .../results/2d3d/2_calibration_depths/p21.csv | 50 + .../results/2d3d/2_calibration_depths/p24.csv | 50 + .../results/2d3d/2_calibration_depths/p25.csv | 50 + .../results/2d3d/2_calibration_depths/p26.csv | 50 + code/results/2d3d/2_calibration_depths/p5.csv | 50 + code/results/2d3d/2_calibration_depths/p7.csv | 50 + .../results/2d3d/3_calibration_depths/p10.csv | 50 + .../results/2d3d/3_calibration_depths/p11.csv | 50 + .../results/2d3d/3_calibration_depths/p12.csv | 50 + .../results/2d3d/3_calibration_depths/p13.csv | 50 + .../results/2d3d/3_calibration_depths/p14.csv | 50 + .../results/2d3d/3_calibration_depths/p15.csv | 50 + .../results/2d3d/3_calibration_depths/p16.csv | 50 + .../results/2d3d/3_calibration_depths/p20.csv | 50 + .../results/2d3d/3_calibration_depths/p21.csv | 50 + .../results/2d3d/3_calibration_depths/p24.csv | 50 + .../results/2d3d/3_calibration_depths/p25.csv | 50 + .../results/2d3d/3_calibration_depths/p26.csv | 50 + code/results/2d3d/3_calibration_depths/p5.csv | 50 + code/results/2d3d/3_calibration_depths/p7.csv | 50 + .../results/2d3d/4_calibration_depths/p10.csv | 25 + .../results/2d3d/4_calibration_depths/p11.csv | 25 + .../results/2d3d/4_calibration_depths/p12.csv | 25 + .../results/2d3d/4_calibration_depths/p13.csv | 25 + .../results/2d3d/4_calibration_depths/p14.csv | 25 + .../results/2d3d/4_calibration_depths/p15.csv | 25 + .../results/2d3d/4_calibration_depths/p16.csv | 25 + .../results/2d3d/4_calibration_depths/p20.csv | 25 + .../results/2d3d/4_calibration_depths/p21.csv | 25 + .../results/2d3d/4_calibration_depths/p24.csv | 25 + .../results/2d3d/4_calibration_depths/p25.csv | 25 + .../results/2d3d/4_calibration_depths/p26.csv | 25 + code/results/2d3d/4_calibration_depths/p5.csv | 25 + code/results/2d3d/4_calibration_depths/p7.csv | 25 + .../results/2d3d/5_calibration_depths/p10.csv | 5 + .../results/2d3d/5_calibration_depths/p11.csv | 5 + 
.../results/2d3d/5_calibration_depths/p12.csv | 5 + .../results/2d3d/5_calibration_depths/p13.csv | 5 + .../results/2d3d/5_calibration_depths/p14.csv | 5 + .../results/2d3d/5_calibration_depths/p15.csv | 5 + .../results/2d3d/5_calibration_depths/p16.csv | 5 + .../results/2d3d/5_calibration_depths/p20.csv | 5 + .../results/2d3d/5_calibration_depths/p21.csv | 5 + .../results/2d3d/5_calibration_depths/p24.csv | 5 + .../results/2d3d/5_calibration_depths/p25.csv | 5 + .../results/2d3d/5_calibration_depths/p26.csv | 5 + code/results/2d3d/5_calibration_depths/p5.csv | 5 + code/results/2d3d/5_calibration_depths/p7.csv | 5 + code/results/2d3d/README.txt | 9 + code/results/2d3d/__p5_2d3d_all.csv | 25 + code/results/2d3d/p10_2d3d_all.csv | 25 + code/results/2d3d/p11_2d3d_all.csv | 25 + code/results/2d3d/p12_2d3d_all.csv | 25 + code/results/2d3d/p13_2d3d_all.csv | 25 + code/results/2d3d/p14_2d3d_all.csv | 25 + code/results/2d3d/p15_2d3d_all.csv | 25 + code/results/2d3d/p16_2d3d_all.csv | 25 + code/results/2d3d/p20_2d3d_all.csv | 25 + code/results/2d3d/p21_2d3d_all.csv | 25 + code/results/2d3d/p24_2d3d_all.csv | 25 + code/results/2d3d/p25_2d3d_all.csv | 25 + code/results/2d3d/p26_2d3d_all.csv | 25 + code/results/2d3d/p5_2d3d_all.csv | 25 + code/results/2d3d/p7_2d3d_all.csv | 25 + code/short_sim.py | 358 +++ code/sim.py | 594 +++++ code/svis.py | 83 + code/util.py | 46 + code/vector.py | 66 + paper/.gitignore | 184 -- paper/README.md | 1 - 233 files changed, 16279 insertions(+), 186 deletions(-) create mode 100644 code/Visualization/1CalibrationDepth.py create mode 100644 code/Visualization/3CalibrationDepths.py create mode 100644 code/Visualization/EffectDistanceDifference1CalibrationDepth.py create mode 100644 code/Visualization/EffectNumberofClusters.py create mode 100644 code/Visualization/EffectNumberofClusters_plus_Simulation.py create mode 100644 code/copy_missing_data.py create mode 100644 code/experiment.py create mode 100644 code/geom.py create mode 100644 
code/minimize.py create mode 100644 code/parallax_2D3D_3Cdepths.py create mode 100644 code/parallax_analysis.py create mode 100644 code/pupil/__init__.py create mode 100644 code/pupil/calibrate.py create mode 100644 code/pupil/file_methods.py create mode 100644 code/pupil/methods.py create mode 100644 code/pupil/player_methods.py create mode 100644 code/recording/__init__.py create mode 100644 code/recording/aruco_test create mode 100644 code/recording/aruco_test.cpp create mode 100644 code/recording/collect_images.py create mode 100644 code/recording/data/.gitignore create mode 100644 code/recording/data/display_setting.txt create mode 100644 code/recording/data/eye_camera/2015_10_03/000/info.csv create mode 100644 code/recording/data/eye_camera/2015_10_03/000/pupil_data create mode 100644 code/recording/data/eye_camera/2015_10_03/000/user_info.csv create mode 100644 code/recording/data/eye_camera/2015_10_03/001/info.csv create mode 100644 code/recording/data/eye_camera/2015_10_03/001/pupil_data create mode 100644 code/recording/data/eye_camera/2015_10_03/001/user_info.csv create mode 100644 code/recording/data/eye_camera/eye_camera_RMS0p39.yml create mode 100644 code/recording/data/frames_005/frame_300.txt create mode 100644 code/recording/data/frames_006/frame_300.txt create mode 100644 code/recording/data/frames_007/frame_300.txt create mode 100644 code/recording/data/frames_010/frame_300.txt create mode 100644 code/recording/data/participants/.gitignore create mode 100644 code/recording/data/participants/notes.txt create mode 100644 code/recording/data/scene_camera/2015_10_03/000/info.csv create mode 100644 code/recording/data/scene_camera/2015_10_03/000/pupil_data create mode 100644 code/recording/data/scene_camera/2015_10_03/000/user_info.csv create mode 100644 code/recording/data_grabber.py create mode 100644 code/recording/eye_world_correlation.py create mode 100644 code/recording/opencv/calibrate_and_save.py create mode 100644 
code/recording/process_recordings.py create mode 100644 code/recording/retrieve.py create mode 100644 code/recording/samples/frame_1_details.txt create mode 100644 code/recording/tracker.py create mode 100644 code/recording/util/GazeHelper/.gitignore create mode 100644 code/recording/util/GazeHelper/.idea/.name create mode 100644 code/recording/util/GazeHelper/.idea/compiler.xml create mode 100644 code/recording/util/GazeHelper/.idea/copyright/profiles_settings.xml create mode 100644 code/recording/util/GazeHelper/.idea/gradle.xml create mode 100644 code/recording/util/GazeHelper/.idea/misc.xml create mode 100644 code/recording/util/GazeHelper/.idea/modules.xml create mode 100644 code/recording/util/GazeHelper/.idea/vcs.xml create mode 100644 code/recording/util/GazeHelper/GazeHelper.iml create mode 100644 code/recording/util/GazeHelper/app/.gitignore create mode 100644 code/recording/util/GazeHelper/app/app.iml create mode 100644 code/recording/util/GazeHelper/app/build.gradle create mode 100644 code/recording/util/GazeHelper/app/proguard-rules.pro create mode 100644 code/recording/util/GazeHelper/app/src/main/AndroidManifest.xml create mode 100644 code/recording/util/GazeHelper/app/src/main/java/gazehelper/android/mmbrian/com/gazehelper/MainActivity.java create mode 100644 code/recording/util/GazeHelper/app/src/main/res/layout/activity_main.xml create mode 100644 code/recording/util/GazeHelper/app/src/main/res/values-w820dp/dimens.xml create mode 100644 code/recording/util/GazeHelper/app/src/main/res/values/dimens.xml create mode 100644 code/recording/util/GazeHelper/app/src/main/res/values/strings.xml create mode 100644 code/recording/util/GazeHelper/app/src/main/res/values/styles.xml create mode 100644 code/recording/util/GazeHelper/build.gradle create mode 100644 code/recording/util/GazeHelper/gradle.properties create mode 100644 code/recording/util/GazeHelper/gradle/wrapper/gradle-wrapper.properties create mode 100644 code/recording/util/GazeHelper/gradlew 
create mode 100644 code/recording/util/GazeHelper/gradlew.bat create mode 100644 code/recording/util/GazeHelper/settings.gradle create mode 100644 code/recording/util/SingleMarkerVisualizer/CalibrationBoard.pde create mode 100644 code/recording/util/SingleMarkerVisualizer/Marker.pde create mode 100644 code/recording/util/SingleMarkerVisualizer/SingleMarkerVisualizer.pde create mode 100644 code/recording/util/SingleMarkerVisualizer/sketch.properties create mode 100644 code/recording/util/__init__.py create mode 100644 code/recording/util/check_pupil_positions.py create mode 100644 code/recording/util/markers/marker.py create mode 100644 code/recording/util/tools.py create mode 100644 code/recording_experiment.py create mode 100644 code/results/2d2d/2_calibration_depths/p10.csv create mode 100644 code/results/2d2d/2_calibration_depths/p11.csv create mode 100644 code/results/2d2d/2_calibration_depths/p12.csv create mode 100644 code/results/2d2d/2_calibration_depths/p13.csv create mode 100644 code/results/2d2d/2_calibration_depths/p14.csv create mode 100644 code/results/2d2d/2_calibration_depths/p15.csv create mode 100644 code/results/2d2d/2_calibration_depths/p16.csv create mode 100644 code/results/2d2d/2_calibration_depths/p20.csv create mode 100644 code/results/2d2d/2_calibration_depths/p21.csv create mode 100644 code/results/2d2d/2_calibration_depths/p24.csv create mode 100644 code/results/2d2d/2_calibration_depths/p25.csv create mode 100644 code/results/2d2d/2_calibration_depths/p26.csv create mode 100644 code/results/2d2d/2_calibration_depths/p5.csv create mode 100644 code/results/2d2d/2_calibration_depths/p7.csv create mode 100644 code/results/2d2d/3_calibration_depths/p10.csv create mode 100644 code/results/2d2d/3_calibration_depths/p11.csv create mode 100644 code/results/2d2d/3_calibration_depths/p12.csv create mode 100644 code/results/2d2d/3_calibration_depths/p13.csv create mode 100644 code/results/2d2d/3_calibration_depths/p14.csv create mode 100644 
code/results/2d2d/3_calibration_depths/p15.csv create mode 100644 code/results/2d2d/3_calibration_depths/p16.csv create mode 100644 code/results/2d2d/3_calibration_depths/p20.csv create mode 100644 code/results/2d2d/3_calibration_depths/p21.csv create mode 100644 code/results/2d2d/3_calibration_depths/p24.csv create mode 100644 code/results/2d2d/3_calibration_depths/p25.csv create mode 100644 code/results/2d2d/3_calibration_depths/p26.csv create mode 100644 code/results/2d2d/3_calibration_depths/p5.csv create mode 100644 code/results/2d2d/3_calibration_depths/p7.csv create mode 100644 code/results/2d2d/4_calibration_depths/p10.csv create mode 100644 code/results/2d2d/4_calibration_depths/p11.csv create mode 100644 code/results/2d2d/4_calibration_depths/p12.csv create mode 100644 code/results/2d2d/4_calibration_depths/p13.csv create mode 100644 code/results/2d2d/4_calibration_depths/p14.csv create mode 100644 code/results/2d2d/4_calibration_depths/p15.csv create mode 100644 code/results/2d2d/4_calibration_depths/p16.csv create mode 100644 code/results/2d2d/4_calibration_depths/p20.csv create mode 100644 code/results/2d2d/4_calibration_depths/p21.csv create mode 100644 code/results/2d2d/4_calibration_depths/p24.csv create mode 100644 code/results/2d2d/4_calibration_depths/p25.csv create mode 100644 code/results/2d2d/4_calibration_depths/p26.csv create mode 100644 code/results/2d2d/4_calibration_depths/p5.csv create mode 100644 code/results/2d2d/4_calibration_depths/p7.csv create mode 100644 code/results/2d2d/5_calibration_depths/p10.csv create mode 100644 code/results/2d2d/5_calibration_depths/p11.csv create mode 100644 code/results/2d2d/5_calibration_depths/p12.csv create mode 100644 code/results/2d2d/5_calibration_depths/p13.csv create mode 100644 code/results/2d2d/5_calibration_depths/p14.csv create mode 100644 code/results/2d2d/5_calibration_depths/p15.csv create mode 100644 code/results/2d2d/5_calibration_depths/p16.csv create mode 100644 
code/results/2d2d/5_calibration_depths/p20.csv create mode 100644 code/results/2d2d/5_calibration_depths/p21.csv create mode 100644 code/results/2d2d/5_calibration_depths/p24.csv create mode 100644 code/results/2d2d/5_calibration_depths/p25.csv create mode 100644 code/results/2d2d/5_calibration_depths/p26.csv create mode 100644 code/results/2d2d/5_calibration_depths/p5.csv create mode 100644 code/results/2d2d/5_calibration_depths/p7.csv create mode 100644 code/results/2d2d/README.txt create mode 100644 code/results/2d2d/p10_2d2d_all.csv create mode 100644 code/results/2d2d/p11_2d2d_all.csv create mode 100644 code/results/2d2d/p12_2d2d_all.csv create mode 100644 code/results/2d2d/p13_2d2d_all.csv create mode 100644 code/results/2d2d/p14_2d2d_all.csv create mode 100644 code/results/2d2d/p15_2d2d_all.csv create mode 100644 code/results/2d2d/p16_2d2d_all.csv create mode 100644 code/results/2d2d/p20_2d2d_all.csv create mode 100644 code/results/2d2d/p21_2d2d_all.csv create mode 100644 code/results/2d2d/p24_2d2d_all.csv create mode 100644 code/results/2d2d/p25_2d2d_all.csv create mode 100644 code/results/2d2d/p26_2d2d_all.csv create mode 100644 code/results/2d2d/p5_2d2d_all.csv create mode 100644 code/results/2d2d/p7_2d2d_all.csv create mode 100644 code/results/2d3d/2_calibration_depths/p10.csv create mode 100644 code/results/2d3d/2_calibration_depths/p11.csv create mode 100644 code/results/2d3d/2_calibration_depths/p12.csv create mode 100644 code/results/2d3d/2_calibration_depths/p13.csv create mode 100644 code/results/2d3d/2_calibration_depths/p14.csv create mode 100644 code/results/2d3d/2_calibration_depths/p15.csv create mode 100644 code/results/2d3d/2_calibration_depths/p16.csv create mode 100644 code/results/2d3d/2_calibration_depths/p20.csv create mode 100644 code/results/2d3d/2_calibration_depths/p21.csv create mode 100644 code/results/2d3d/2_calibration_depths/p24.csv create mode 100644 code/results/2d3d/2_calibration_depths/p25.csv create mode 100644 
code/results/2d3d/2_calibration_depths/p26.csv create mode 100644 code/results/2d3d/2_calibration_depths/p5.csv create mode 100644 code/results/2d3d/2_calibration_depths/p7.csv create mode 100644 code/results/2d3d/3_calibration_depths/p10.csv create mode 100644 code/results/2d3d/3_calibration_depths/p11.csv create mode 100644 code/results/2d3d/3_calibration_depths/p12.csv create mode 100644 code/results/2d3d/3_calibration_depths/p13.csv create mode 100644 code/results/2d3d/3_calibration_depths/p14.csv create mode 100644 code/results/2d3d/3_calibration_depths/p15.csv create mode 100644 code/results/2d3d/3_calibration_depths/p16.csv create mode 100644 code/results/2d3d/3_calibration_depths/p20.csv create mode 100644 code/results/2d3d/3_calibration_depths/p21.csv create mode 100644 code/results/2d3d/3_calibration_depths/p24.csv create mode 100644 code/results/2d3d/3_calibration_depths/p25.csv create mode 100644 code/results/2d3d/3_calibration_depths/p26.csv create mode 100644 code/results/2d3d/3_calibration_depths/p5.csv create mode 100644 code/results/2d3d/3_calibration_depths/p7.csv create mode 100644 code/results/2d3d/4_calibration_depths/p10.csv create mode 100644 code/results/2d3d/4_calibration_depths/p11.csv create mode 100644 code/results/2d3d/4_calibration_depths/p12.csv create mode 100644 code/results/2d3d/4_calibration_depths/p13.csv create mode 100644 code/results/2d3d/4_calibration_depths/p14.csv create mode 100644 code/results/2d3d/4_calibration_depths/p15.csv create mode 100644 code/results/2d3d/4_calibration_depths/p16.csv create mode 100644 code/results/2d3d/4_calibration_depths/p20.csv create mode 100644 code/results/2d3d/4_calibration_depths/p21.csv create mode 100644 code/results/2d3d/4_calibration_depths/p24.csv create mode 100644 code/results/2d3d/4_calibration_depths/p25.csv create mode 100644 code/results/2d3d/4_calibration_depths/p26.csv create mode 100644 code/results/2d3d/4_calibration_depths/p5.csv create mode 100644 
code/results/2d3d/4_calibration_depths/p7.csv create mode 100644 code/results/2d3d/5_calibration_depths/p10.csv create mode 100644 code/results/2d3d/5_calibration_depths/p11.csv create mode 100644 code/results/2d3d/5_calibration_depths/p12.csv create mode 100644 code/results/2d3d/5_calibration_depths/p13.csv create mode 100644 code/results/2d3d/5_calibration_depths/p14.csv create mode 100644 code/results/2d3d/5_calibration_depths/p15.csv create mode 100644 code/results/2d3d/5_calibration_depths/p16.csv create mode 100644 code/results/2d3d/5_calibration_depths/p20.csv create mode 100644 code/results/2d3d/5_calibration_depths/p21.csv create mode 100644 code/results/2d3d/5_calibration_depths/p24.csv create mode 100644 code/results/2d3d/5_calibration_depths/p25.csv create mode 100644 code/results/2d3d/5_calibration_depths/p26.csv create mode 100644 code/results/2d3d/5_calibration_depths/p5.csv create mode 100644 code/results/2d3d/5_calibration_depths/p7.csv create mode 100644 code/results/2d3d/README.txt create mode 100644 code/results/2d3d/__p5_2d3d_all.csv create mode 100644 code/results/2d3d/p10_2d3d_all.csv create mode 100644 code/results/2d3d/p11_2d3d_all.csv create mode 100644 code/results/2d3d/p12_2d3d_all.csv create mode 100644 code/results/2d3d/p13_2d3d_all.csv create mode 100644 code/results/2d3d/p14_2d3d_all.csv create mode 100644 code/results/2d3d/p15_2d3d_all.csv create mode 100644 code/results/2d3d/p16_2d3d_all.csv create mode 100644 code/results/2d3d/p20_2d3d_all.csv create mode 100644 code/results/2d3d/p21_2d3d_all.csv create mode 100644 code/results/2d3d/p24_2d3d_all.csv create mode 100644 code/results/2d3d/p25_2d3d_all.csv create mode 100644 code/results/2d3d/p26_2d3d_all.csv create mode 100644 code/results/2d3d/p5_2d3d_all.csv create mode 100644 code/results/2d3d/p7_2d3d_all.csv create mode 100644 code/short_sim.py create mode 100644 code/sim.py create mode 100644 code/svis.py create mode 100644 code/util.py create mode 100644 code/vector.py delete 
mode 100644 paper/.gitignore delete mode 100644 paper/README.md diff --git a/README.md b/README.md index 63384bb..f4cfdac 100644 --- a/README.md +++ b/README.md @@ -1 +1,22 @@ -# etra2016_3dgaze +### 3D Gaze Estimation from 2D Pupil Positions on Monocular Head-Mounted Eye Trackers +**Mohsen Mansouryar, Julian Steil, Yusuke Sugano and Andreas Bulling** + +published at ETRA 2016 + +code -> Contains main scripts. below you can see a list of commands and the results they produce: +- cmd: python parallax_analysis.py pts |-> result: plot of calibration and test points. +- cmd: python parallax_analysis.py 2d3d |-> result: plot of 2D-to-2D againt 3D-to-3D mapping over all number of calibration depths. +- cmd: python parallax_analysis.py 2d2d_2d3d |-> result: plot comparing parallax error over five different test depths for three calibration depths of 1.0m, 1.5m, and 2.0m between 2D-to-2D and 3D-to-3D mapping. +- cmd: python parallax_2D3D_3Cdepths.py |-> result: plot comparing average angular error of the two mapping techniques when 3 calibration depths are used together. (depths 1 to 5 correspond to test depths 1.0m to 2.0m) + +code/pupil -> Modules directly used from PUPIL source code for baseline 2D-to-2D mapping and data stream correlation. + +code/recording -> Scripts related to dataset recording and marker visualization and tracking. script dependencies are python 2's openCV and ArUco library. more information regarding each module is documented where required. + +code/results -> Contains gaze estimation results for both 2D-to-2D and 2D-to-3D mapping approaches with multiple calibration depths on data from participants. data files in the root directory of each method correspond to single depth calibration results. data format is described inside README.txt inside each method directory. the results are also available via /BS/3D_Gaze_Tracking/work/results + +code/Visualization -> Creation of figures for the paper +1CalibrationDepth.py -> 2D-to-2D vs. 
2D-to-3D with one calibration depth +3CalibrationDepths.py -> 2D-to-2D vs. 2D-to-3D with three calibration depth +EffectDistanceDifference1CalibrationDepth.py -> Effect of different distances to the original calibration depth +EffectNumberofClusters.py -> Effect of the number of clusters diff --git a/code/.gitignore b/code/.gitignore index 886b180..de39f93 100644 --- a/code/.gitignore +++ b/code/.gitignore @@ -493,3 +493,7 @@ FakesAssemblies/ **/*.Server/GeneratedArtifacts **/*.Server/ModelManifest.xml _Pvt_Extensions + + +# Custom ignore rules +*.npy \ No newline at end of file diff --git a/code/README.md b/code/README.md index 8b13789..87e620e 100644 --- a/code/README.md +++ b/code/README.md @@ -1 +1,38 @@ +- Participants data is accessible under /BS/3D_Gaze_Tracking/archive00/participants/ + - only the data for the following 14 participants is used in the study: + 'p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25' + +- Eye camera's intrinsic parameters are accessible under /BS/3D_Gaze_Tracking/archive00/eye_camera_images/ + +- Scene camera's intrinsic parameters are accessible under /BS/3D_Gaze_Tracking/archive00/scene_camera/ + +- geom.py contains the Pinhole camera model along with a couple of geometry related methods. + +- minimize.py contains code on least square minimization using numpy. + +- vector.py contains a fully python version of VPython's vector object used for vector processing. + +- sim.py contains the main GazeSimulation object used for simulation and visualization. this object also +contains methods for handling real world data. + +- parallax_analysis.py mainly uses GazeSimulation object to perform different experiments on simulation/real- +world data. (parallax_2D3D_3Cdepths.py is just a single experiment separated from this module) + +- Scripts regarding real-world data processing including marker tracking, marker movement detection, frame +extraction, ... are inside recording package. 
+ +- recording/tracker.py uses a slightly modified version of aruco_test script to silently track markers in +the scene, log the output of ArUco, and compute 2D and 3D position of the center of marker given the ArUco output. (the modified aruco_test is included in recording package) + +- recording/process_recordings.py contains the main code for detecting intervals in which marker is not +moving using sklearn's AgglomerativeClustering. the method performs well almost always but in a few cases manual annotation was required to get the correct output. output of this detection is depicted in marker_motion.png inside each recording's data files. a sample output for a test recording (16 points) is located under /BS/3D_Gaze_Tracking/work/marker_motion.png + +- recording/util/check_pupil_positions.py is used during recording to ensure accurate pupil detection. +example usage: + python check_pupil_positions.py /BS/3D_Gaze_Tracking/archive00/participants/p16/2015_09_29/000/ + +- recording/util/SingleMarkerVisualizer/ contains the main Processing script for visualizing a moving marker +during the recording experiments. the code uses Processing version 3. 
import os, sys
import seaborn
from pylab import rcParams
import cv2

import numpy as np
from numpy import linalg as LA
from time import time
from itertools import combinations

import matplotlib.pyplot as plt
from matplotlib.pyplot import *
import matplotlib.patches as mpatches

# activate latex text rendering
#rc('text', usetex=True)

# Participant ids whose per-participant result files are loaded (14 in total).
PARTICIPANTS = [5, 7, 10, 11, 12, 13, 14, 15, 16, 20, 21, 24, 25, 26]
# The five marker depths (meters) used both as calibration and as test depths.
DEPTHS = [1.0, 1.25, 1.5, 1.75, 2.0]
# Only the first 25 rows of each result array are scanned, mirroring the
# original "while j < 25" loops.
MAX_ROWS = 25


def _load_results(path, suffix):
    # Load one .npy result array per participant, in PARTICIPANTS order.
    return [np.load(os.path.join(path, "p%d_%s.npy" % (p, suffix)))
            for p in PARTICIPANTS]


def _collect_errors(data, calibration_depth, error_col):
    """Gather per-participant angular errors for one calibration depth.

    data              -- list of per-participant arrays; each row starts with
                         [calibration_depth, test_depth, ...]
    calibration_depth -- value of row[0] to select
    error_col         -- column holding the angular error (7 in the 2D2D
                         files, 2 in the 2D3D files — the two result formats
                         store the error in different columns)

    Returns a len(DEPTHS) x len(data) nested list: errors[d][p] is the error
    of participant p at test depth DEPTHS[d].  Depth pairs with no matching
    row are left at 0.0, exactly as in the original accumulation loops.
    """
    errors = [[0.0 for _ in data] for _ in DEPTHS]
    for p, rows in enumerate(data):
        for d, test_depth in enumerate(DEPTHS):
            for row in rows[:MAX_ROWS]:
                if row[0] == calibration_depth and row[1] == test_depth:
                    errors[d][p] = row[error_col]
                    break
    return errors


def _mean_std(errors):
    # Per-test-depth mean and standard deviation across participants.
    return ([np.mean(row) for row in errors],
            [np.std(row) for row in errors])


def main(argv):
    """Plot angular error vs. test depth for single-calibration-depth gaze
    estimation, comparing the 2D-to-2D and 2D-to-3D mapping approaches, and
    save the 2D-to-2D per-depth means to MeansC1D2D2.npy/.csv."""
    # --- 2D-to-2D results: angular error lives in column 7 -----------------
    path = "/home/mmbrian/3D_Gaze_Tracking/work/results/2D2D/"
    Data = _load_results(path, "2d2d_all")
    meanC1, stdC1 = _mean_std(_collect_errors(Data, 1.0, 7))
    meanC2, stdC2 = _mean_std(_collect_errors(Data, 1.25, 7))
    meanC3, stdC3 = _mean_std(_collect_errors(Data, 1.5, 7))
    meanC4, stdC4 = _mean_std(_collect_errors(Data, 1.75, 7))
    meanC5, stdC5 = _mean_std(_collect_errors(Data, 2.0, 7))

    # --- 2D-to-3D results: angular error lives in column 2 -----------------
    path = "/home/mmbrian/3D_Gaze_Tracking/work/results/2D3D/"
    Datatwo = _load_results(path, "2d3d_all")
    meantwoC1, stdtwoC1 = _mean_std(_collect_errors(Datatwo, 1.0, 2))
    meantwoC2, stdtwoC2 = _mean_std(_collect_errors(Datatwo, 1.25, 2))
    meantwoC3, stdtwoC3 = _mean_std(_collect_errors(Datatwo, 1.5, 2))
    meantwoC4, stdtwoC4 = _mean_std(_collect_errors(Datatwo, 1.75, 2))
    meantwoC5, stdtwoC5 = _mean_std(_collect_errors(Datatwo, 2.0, 2))

    # --- Plot mean +/- std per test depth, calibration depths 1, 3 and 5 ---
    # (depths 2 and 4 are computed for the saved means but not plotted,
    # matching the original figure)
    ind = np.asarray([0.25, 1.25, 2.25, 3.25, 4.25])
    # Six interleaved x offsets so the solid (2D2D) and dashed (2D3D)
    # series do not overlap at each test depth.
    x1 = [0.375 + k for k in range(5)]
    x2 = [0.425 + k for k in range(5)]
    x3 = [0.475 + k for k in range(5)]
    x4 = [0.525 + k for k in range(5)]
    x5 = [0.575 + k for k in range(5)]
    x6 = [0.625 + k for k in range(5)]

    fig = plt.figure(figsize=(14.0, 10.0))
    ax = fig.add_subplot(111)

    # 2D-to-2D: solid lines.
    ax.errorbar(x1, meanC1, yerr=[stdC1, stdC1], fmt='o', color='blue',
                ecolor='blue', lw=3, capsize=5, capthick=2)
    plt.plot(x1, meanC1, marker="o", linestyle='-', lw=3, color='blue',
             label=r'2D-to-2D Calibration Depth 1')
    ax.errorbar(x3, meanC3, yerr=[stdC3, stdC3], fmt='o', color='orange',
                ecolor='orange', lw=3, capsize=5, capthick=2)
    plt.plot(x3, meanC3, marker="o", linestyle='-', lw=3, color='orange',
             label=r'2D-to-2D Calibration Depth 3')
    ax.errorbar(x5, meanC5, yerr=[stdC5, stdC5], fmt='o', color='red',
                ecolor='red', lw=3, capsize=5, capthick=2)
    plt.plot(x5, meanC5, marker="o", linestyle='-', lw=3, color='red',
             label=r'2D-to-2D Calibration Depth 5')

    # 2D-to-3D: dashed lines.
    ax.errorbar(x2, meantwoC1, yerr=[stdtwoC1, stdtwoC1], fmt='o',
                color='blue', ecolor='blue', lw=3, capsize=5, capthick=2)
    plt.plot(x2, meantwoC1, marker="o", linestyle='--', lw=3, color='blue',
             label=r'2D-to-3D Calibration Depth 1')
    ax.errorbar(x4, meantwoC3, yerr=[stdtwoC3, stdtwoC3], fmt='o',
                color='orange', ecolor='orange', lw=3, capsize=5, capthick=2)
    plt.plot(x4, meantwoC3, marker="o", linestyle='--', lw=3, color='orange',
             label=r'2D-to-3D Calibration Depth 3')
    ax.errorbar(x6, meantwoC5, yerr=[stdtwoC5, stdtwoC5], fmt='o',
                color='red', ecolor='red', lw=3, capsize=5, capthick=2)
    plt.plot(x6, meantwoC5, marker="o", linestyle='--', lw=3, color='red',
             label=r'2D-to-3D Calibration Depth 5')

    ax.set_ylabel('Angular Error', fontsize=22)
    ax.set_xlabel('Depth', fontsize=22)
    ax.set_xticks(ind + 0.25)
    ax.set_xticklabels(('D1', 'D2', 'D3', 'D4', 'D5'), fontsize=16)

    # Named x ticks with the physical depth of each test position.
    TOPICs = [0.0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.0]
    LABELs = ["", r'D1 - 1m', r'D2 - 1.25m', r'D3 - 1.5m', r'D4 - 1.75m',
              r'D5 - 2.0m', ""]
    plt.xticks(TOPICs, LABELs, fontsize=18)

    legend(fontsize=20, loc='best')

    TOPICS = [-4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12, 14, 16, 18,
              20, 22, 24]
    LABELS = [r'', r'', r'0', r'2', r'4', r'6', r'8', r'10', r'12', r'14',
              r'16', r'18', r'20', r'22', r'24']
    plt.yticks(TOPICS, LABELS, fontsize=18)

    plt.subplots_adjust(left=0.1, bottom=0.075, right=0.975, top=0.925,
                        wspace=0.2, hspace=0.4)
    plt.show()

    # --- Persist the 2D-to-2D per-calibration-depth means ------------------
    means = [meanC1, meanC2, meanC3, meanC4, meanC5]
    np.save("MeansC1D2D2.npy", np.asarray(means))
    np.savetxt("MeansC1D2D2.csv", np.asarray(means), delimiter=",", fmt="%f")


if __name__ == "__main__":
    main(sys.argv[1:])
="/home/mmbrian/3D_Gaze_Tracking/work/results/2D3D/3_calibration_depths/" + + Data1 = np.load(path + "p5.npy") + Data2 = np.load(path + "p7.npy") + Data3 = np.load(path + "p10.npy") + Data4 = np.load(path + "p11.npy") + Data5 = np.load(path + "p12.npy") + Data6 = np.load(path + "p13.npy") + Data7 = np.load(path + "p14.npy") + Data8 = np.load(path + "p15.npy") + Data9 = np.load(path + "p16.npy") + Data10 = np.load(path + "p20.npy") + Data11 = np.load(path + "p21.npy") + Data12 = np.load(path + "p24.npy") + Data13 = np.load(path + "p25.npy") + Data14 = np.load(path + "p26.npy") + + Data = [Data1,Data2,Data3,Data4,Data5,Data6,Data7,Data8,Data9,Data10,Data11,Data12,Data13,Data14] + + + AngularerrorC1 = [] + for i in xrange(5): + AngularerrorC1.append([]) + for j in xrange(14): + AngularerrorC1[i].append(float(0)) + + AngularerrorC2 = [] + for i in xrange(5): + AngularerrorC2.append([]) + for j in xrange(14): + AngularerrorC2[i].append(float(0)) + + AngularerrorC3 = [] + for i in xrange(5): + AngularerrorC3.append([]) + for j in xrange(14): + AngularerrorC3[i].append(float(0)) + + AngularerrorC4 = [] + for i in xrange(5): + AngularerrorC4.append([]) + for j in xrange(14): + AngularerrorC4[i].append(float(0)) + + AngularerrorC5 = [] + for i in xrange(5): + AngularerrorC5.append([]) + for j in xrange(14): + AngularerrorC5[i].append(float(0)) + + AngularerrorC6 = [] + for i in xrange(5): + AngularerrorC6.append([]) + for j in xrange(14): + AngularerrorC6[i].append(float(0)) + + AngularerrorC7 = [] + for i in xrange(5): + AngularerrorC7.append([]) + for j in xrange(14): + AngularerrorC7[i].append(float(0)) + + AngularerrorC8 = [] + for i in xrange(5): + AngularerrorC8.append([]) + for j in xrange(14): + AngularerrorC8[i].append(float(0)) + + AngularerrorC9 = [] + for i in xrange(5): + AngularerrorC9.append([]) + for j in xrange(14): + AngularerrorC9[i].append(float(0)) + + AngularerrorC10 = [] + for i in xrange(5): + AngularerrorC10.append([]) + for j in xrange(14): + 
AngularerrorC10[i].append(float(0)) + +# Combi1 1,1.25,1.5 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 1.5 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3: + AngularerrorC1[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3: + AngularerrorC1[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3: + AngularerrorC1[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3: + AngularerrorC1[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3: + AngularerrorC1[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC1: ", AngularerrorC1[0] + print "AngularerrorC2: ", AngularerrorC1[1] + print "AngularerrorC3: ", AngularerrorC1[2] + print "AngularerrorC4: ", AngularerrorC1[3] + print "AngularerrorC5: ", AngularerrorC1[4] + + meanC1D1 = np.mean(AngularerrorC1[0]) + meanC1D2 = np.mean(AngularerrorC1[1]) + meanC1D3 = np.mean(AngularerrorC1[2]) + meanC1D4 = np.mean(AngularerrorC1[3]) + meanC1D5 = np.mean(AngularerrorC1[4]) + + stdC1D1 = np.std(AngularerrorC1[0]) + stdC1D2 = np.std(AngularerrorC1[1]) + stdC1D3 = np.std(AngularerrorC1[2]) + stdC1D4 = np.std(AngularerrorC1[3]) + stdC1D5 = np.std(AngularerrorC1[4]) + + meanC1 = [meanC1D1,meanC1D2,meanC1D3,meanC1D4,meanC1D5] + stdC1 = 
[stdC1D1,stdC1D2,stdC1D3,stdC1D4,stdC1D5] + + +# Combi2 1,1.25,1.75 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 1.75 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC2[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC2[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC2[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC2[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC2[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC1: ", AngularerrorC2[0] + print "AngularerrorC2: ", AngularerrorC2[1] + print "AngularerrorC3: ", AngularerrorC2[2] + print "AngularerrorC4: ", AngularerrorC2[3] + print "AngularerrorC5: ", AngularerrorC2[4] + + meanC2D1 = np.mean(AngularerrorC2[0]) + meanC2D2 = np.mean(AngularerrorC2[1]) + meanC2D3 = np.mean(AngularerrorC2[2]) + meanC2D4 = np.mean(AngularerrorC2[3]) + meanC2D5 = np.mean(AngularerrorC2[4]) + + stdC2D1 = np.std(AngularerrorC2[0]) + stdC2D2 = np.std(AngularerrorC2[1]) + stdC2D3 = np.std(AngularerrorC2[2]) + stdC2D4 = np.std(AngularerrorC2[3]) + stdC2D5 = np.std(AngularerrorC2[4]) + + meanC2 = [meanC2D1,meanC2D2,meanC2D3,meanC2D4,meanC2D5] + 
stdC2 = [stdC2D1,stdC2D2,stdC2D3,stdC2D4,stdC2D5] + +# Combi3 1,1.25,2.0 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC3[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC3[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC3[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC3[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC3[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC1: ", AngularerrorC3[0] + print "AngularerrorC2: ", AngularerrorC3[1] + print "AngularerrorC3: ", AngularerrorC3[2] + print "AngularerrorC4: ", AngularerrorC3[3] + print "AngularerrorC5: ", AngularerrorC3[4] + + meanC3D1 = np.mean(AngularerrorC3[0]) + meanC3D2 = np.mean(AngularerrorC3[1]) + meanC3D3 = np.mean(AngularerrorC3[2]) + meanC3D4 = np.mean(AngularerrorC3[3]) + meanC3D5 = np.mean(AngularerrorC3[4]) + + stdC3D1 = np.std(AngularerrorC3[0]) + stdC3D2 = np.std(AngularerrorC3[1]) + stdC3D3 = np.std(AngularerrorC3[2]) + stdC3D4 = np.std(AngularerrorC3[3]) + stdC3D5 = np.std(AngularerrorC3[4]) + + meanC3 = [meanC3D1,meanC3D2,meanC3D3,meanC3D4,meanC3D5] + 
stdC3 = [stdC3D1,stdC3D2,stdC3D3,stdC3D4,stdC3D5] + + +# Combi4 1,1.5,1.75 + distance1 = 1.0 + distance2 = 1.5 + distance3 = 1.75 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC4[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC4[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC4[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC4[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC4[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC4: ", AngularerrorC4[0] + print "AngularerrorC2: ", AngularerrorC4[1] + print "AngularerrorC3: ", AngularerrorC4[2] + print "AngularerrorC4: ", AngularerrorC4[3] + print "AngularerrorC5: ", AngularerrorC4[4] + + meanC4D1 = np.mean(AngularerrorC4[0]) + meanC4D2 = np.mean(AngularerrorC4[1]) + meanC4D3 = np.mean(AngularerrorC4[2]) + meanC4D4 = np.mean(AngularerrorC4[3]) + meanC4D5 = np.mean(AngularerrorC4[4]) + + stdC4D1 = np.std(AngularerrorC4[0]) + stdC4D2 = np.std(AngularerrorC4[1]) + stdC4D3 = np.std(AngularerrorC4[2]) + stdC4D4 = np.std(AngularerrorC4[3]) + stdC4D5 = np.std(AngularerrorC4[4]) + + meanC4 = [meanC4D1,meanC4D2,meanC4D3,meanC4D4,meanC4D5] 
+ stdC4 = [stdC4D1,stdC4D2,stdC4D3,stdC4D4,stdC4D5] + +# Combi5 1.0,1.5,2.0 + distance1 = 1.0 + distance2 = 1.5 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC5[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC5[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC5[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC5[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC5[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC5: ", AngularerrorC5[0] + print "AngularerrorC2: ", AngularerrorC5[1] + print "AngularerrorC3: ", AngularerrorC5[2] + print "AngularerrorC4: ", AngularerrorC5[3] + print "AngularerrorC5: ", AngularerrorC5[4] + + meanC5D1 = np.mean(AngularerrorC5[0]) + meanC5D2 = np.mean(AngularerrorC5[1]) + meanC5D3 = np.mean(AngularerrorC5[2]) + meanC5D4 = np.mean(AngularerrorC5[3]) + meanC5D5 = np.mean(AngularerrorC5[4]) + + stdC5D1 = np.std(AngularerrorC5[0]) + stdC5D2 = np.std(AngularerrorC5[1]) + stdC5D3 = np.std(AngularerrorC5[2]) + stdC5D4 = np.std(AngularerrorC5[3]) + stdC5D5 = np.std(AngularerrorC5[4]) + + meanC5 = [meanC5D1,meanC5D2,meanC5D3,meanC5D4,meanC5D5] 
+ stdC5 = [stdC5D1,stdC5D2,stdC5D3,stdC5D4,stdC5D5] + + +# Combi6 1.0,1.75,2.0 + distance1 = 1.0 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC6[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC6[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC6[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC6[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC6[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC6: ", AngularerrorC6[0] + print "AngularerrorC2: ", AngularerrorC6[1] + print "AngularerrorC3: ", AngularerrorC6[2] + print "AngularerrorC4: ", AngularerrorC6[3] + print "AngularerrorC5: ", AngularerrorC6[4] + + meanC6D1 = np.mean(AngularerrorC6[0]) + meanC6D2 = np.mean(AngularerrorC6[1]) + meanC6D3 = np.mean(AngularerrorC6[2]) + meanC6D4 = np.mean(AngularerrorC6[3]) + meanC6D5 = np.mean(AngularerrorC6[4]) + + stdC6D1 = np.std(AngularerrorC6[0]) + stdC6D2 = np.std(AngularerrorC6[1]) + stdC6D3 = np.std(AngularerrorC6[2]) + stdC6D4 = np.std(AngularerrorC6[3]) + stdC6D5 = np.std(AngularerrorC6[4]) + + meanC6 = 
[meanC6D1,meanC6D2,meanC6D3,meanC6D4,meanC6D5] + stdC6 = [stdC6D1,stdC6D2,stdC6D3,stdC6D4,stdC6D5] + +# Combi7 1.25,1.5,1,75 + distance1 = 1.25 + distance2 = 1.5 + distance3 = 1.75 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC7[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC7[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC7[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC7[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC7[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC7: ", AngularerrorC7[0] + print "AngularerrorC2: ", AngularerrorC7[1] + print "AngularerrorC3: ", AngularerrorC7[2] + print "AngularerrorC4: ", AngularerrorC7[3] + print "AngularerrorC5: ", AngularerrorC7[4] + + meanC7D1 = np.mean(AngularerrorC7[0]) + meanC7D2 = np.mean(AngularerrorC7[1]) + meanC7D3 = np.mean(AngularerrorC7[2]) + meanC7D4 = np.mean(AngularerrorC7[3]) + meanC7D5 = np.mean(AngularerrorC7[4]) + + stdC7D1 = np.std(AngularerrorC7[0]) + stdC7D2 = np.std(AngularerrorC7[1]) + stdC7D3 = np.std(AngularerrorC7[2]) + stdC7D4 = np.std(AngularerrorC7[3]) + stdC7D5 = np.std(AngularerrorC7[4]) + + 
meanC7 = [meanC7D1,meanC7D2,meanC7D3,meanC7D4,meanC7D5] + stdC7 = [stdC7D1,stdC7D2,stdC7D3,stdC7D4,stdC7D5] + +# Combi8 1.25,1.5,2.0 + distance1 = 1.25 + distance2 = 1.5 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC8[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC8[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC8[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC8[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC8[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC8: ", AngularerrorC8[0] + print "AngularerrorC2: ", AngularerrorC8[1] + print "AngularerrorC3: ", AngularerrorC8[2] + print "AngularerrorC4: ", AngularerrorC8[3] + print "AngularerrorC5: ", AngularerrorC8[4] + + meanC8D1 = np.mean(AngularerrorC8[0]) + meanC8D2 = np.mean(AngularerrorC8[1]) + meanC8D3 = np.mean(AngularerrorC8[2]) + meanC8D4 = np.mean(AngularerrorC8[3]) + meanC8D5 = np.mean(AngularerrorC8[4]) + + stdC8D1 = np.std(AngularerrorC8[0]) + stdC8D2 = np.std(AngularerrorC8[1]) + stdC8D3 = np.std(AngularerrorC8[2]) + stdC8D4 = np.std(AngularerrorC8[3]) + stdC8D5 = np.std(AngularerrorC8[4]) + 
+ meanC8 = [meanC8D1,meanC8D2,meanC8D3,meanC8D4,meanC8D5] + stdC8 = [stdC8D1,stdC8D2,stdC8D3,stdC8D4,stdC8D5] + +# Combi9 1.25,1.75,2.0 + distance1 = 1.25 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC9[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC9[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC9[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC9[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC9[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC9: ", AngularerrorC9[0] + print "AngularerrorC2: ", AngularerrorC9[1] + print "AngularerrorC3: ", AngularerrorC9[2] + print "AngularerrorC4: ", AngularerrorC9[3] + print "AngularerrorC5: ", AngularerrorC9[4] + + meanC9D1 = np.mean(AngularerrorC9[0]) + meanC9D2 = np.mean(AngularerrorC9[1]) + meanC9D3 = np.mean(AngularerrorC9[2]) + meanC9D4 = np.mean(AngularerrorC9[3]) + meanC9D5 = np.mean(AngularerrorC9[4]) + + stdC9D1 = np.std(AngularerrorC9[0]) + stdC9D2 = np.std(AngularerrorC9[1]) + stdC9D3 = np.std(AngularerrorC9[2]) + stdC9D4 = np.std(AngularerrorC9[3]) + stdC9D5 = 
np.std(AngularerrorC9[4]) + + meanC9 = [meanC9D1,meanC9D2,meanC9D3,meanC9D4,meanC9D5] + stdC9 = [stdC9D1,stdC9D2,stdC9D3,stdC9D4,stdC9D5] + + +# Combi10 1.5,1.75,2.0 + distance1 = 1.5 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Data[i][j][1], Data[i][j][1] + if Data[i][j][3] == 1.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC10[0][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Data[i][j][3] == 1.25 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC10[1][i] = Data[i][j][4] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.5 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC10[2][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 1.75 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC10[3][i] = Data[i][j][4] + break + else: + j += 1 + j = 0 + while j < 50: + if Data[i][j][3] == 2.0 and Data[i][j][0] == distance1 and Data[i][j][1] == distance2 and Data[i][j][2] == distance3 : + AngularerrorC10[4][i] = Data[i][j][4] + break + else: + j += 1 + i += 1 + + print "AngularerrorC10: ", AngularerrorC10[0] + print "AngularerrorC2: ", AngularerrorC10[1] + print "AngularerrorC3: ", AngularerrorC10[2] + print "AngularerrorC4: ", AngularerrorC10[3] + print "AngularerrorC5: ", AngularerrorC10[4] + + meanC10D1 = np.mean(AngularerrorC10[0]) + meanC10D2 = np.mean(AngularerrorC10[1]) + meanC10D3 = np.mean(AngularerrorC10[2]) + meanC10D4 = np.mean(AngularerrorC10[3]) + meanC10D5 = np.mean(AngularerrorC10[4]) + + stdC10D1 = np.std(AngularerrorC10[0]) + stdC10D2 = np.std(AngularerrorC10[1]) + stdC10D3 = np.std(AngularerrorC10[2]) + stdC10D4 = 
np.std(AngularerrorC10[3]) + stdC10D5 = np.std(AngularerrorC10[4]) + + meanC10 = [meanC10D1,meanC10D2,meanC10D3,meanC10D4,meanC10D5] + stdC10 = [stdC10D1,stdC10D2,stdC10D3,stdC10D4,stdC10D5] + + + ##################################################################################### + + + + path ="/home/mmbrian/3D_Gaze_Tracking/work/results/2D2D/3_calibration_depths/" + + Datatwo1 = np.load(path + "p5.npy") + Datatwo2 = np.load(path + "p7.npy") + Datatwo3 = np.load(path + "p10.npy") + Datatwo4 = np.load(path + "p11.npy") + Datatwo5 = np.load(path + "p12.npy") + Datatwo6 = np.load(path + "p13.npy") + Datatwo7 = np.load(path + "p14.npy") + Datatwo8 = np.load(path + "p15.npy") + Datatwo9 = np.load(path + "p16.npy") + Datatwo10 = np.load(path + "p20.npy") + Datatwo11 = np.load(path + "p21.npy") + Datatwo12 = np.load(path + "p24.npy") + Datatwo13 = np.load(path + "p25.npy") + Datatwo14 = np.load(path + "p26.npy") + + Datatwo = [Datatwo1,Datatwo2,Datatwo3,Datatwo4,Datatwo5,Datatwo6,Datatwo7,Datatwo8,Datatwo9,Datatwo10,Datatwo11,Datatwo12,Datatwo13,Datatwo14] + + + AngularerrorCtwo1 = [] + for i in xrange(5): + AngularerrorCtwo1.append([]) + for j in xrange(14): + AngularerrorCtwo1[i].append(float(0)) + + AngularerrorCtwo2 = [] + for i in xrange(5): + AngularerrorCtwo2.append([]) + for j in xrange(14): + AngularerrorCtwo2[i].append(float(0)) + + AngularerrorCtwo3 = [] + for i in xrange(5): + AngularerrorCtwo3.append([]) + for j in xrange(14): + AngularerrorCtwo3[i].append(float(0)) + + AngularerrorCtwo4 = [] + for i in xrange(5): + AngularerrorCtwo4.append([]) + for j in xrange(14): + AngularerrorCtwo4[i].append(float(0)) + + AngularerrorCtwo5 = [] + for i in xrange(5): + AngularerrorCtwo5.append([]) + for j in xrange(14): + AngularerrorCtwo5[i].append(float(0)) + + AngularerrorCtwo6 = [] + for i in xrange(5): + AngularerrorCtwo6.append([]) + for j in xrange(14): + AngularerrorCtwo6[i].append(float(0)) + + AngularerrorCtwo7 = [] + for i in xrange(5): + 
AngularerrorCtwo7.append([]) + for j in xrange(14): + AngularerrorCtwo7[i].append(float(0)) + + AngularerrorCtwo8 = [] + for i in xrange(5): + AngularerrorCtwo8.append([]) + for j in xrange(14): + AngularerrorCtwo8[i].append(float(0)) + + AngularerrorCtwo9 = [] + for i in xrange(5): + AngularerrorCtwo9.append([]) + for j in xrange(14): + AngularerrorCtwo9[i].append(float(0)) + + AngularerrorCtwo10 = [] + for i in xrange(5): + AngularerrorCtwo10.append([]) + for j in xrange(14): + AngularerrorCtwo10[i].append(float(0)) + +# Combi1 1,1.25,1.5 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 1.5 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3: + AngularerrorCtwo1[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3: + AngularerrorCtwo1[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3: + AngularerrorCtwo1[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3: + AngularerrorCtwo1[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3: + AngularerrorCtwo1[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo1: ", AngularerrorCtwo1[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo1[1] + print 
"AngularerrorCtwo3: ", AngularerrorCtwo1[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo1[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo1[4] + + meantwoC1D1 = np.mean(AngularerrorCtwo1[0]) + meantwoC1D2 = np.mean(AngularerrorCtwo1[1]) + meantwoC1D3 = np.mean(AngularerrorCtwo1[2]) + meantwoC1D4 = np.mean(AngularerrorCtwo1[3]) + meantwoC1D5 = np.mean(AngularerrorCtwo1[4]) + + stdtwoC1D1 = np.std(AngularerrorCtwo1[0]) + stdtwoC1D2 = np.std(AngularerrorCtwo1[1]) + stdtwoC1D3 = np.std(AngularerrorCtwo1[2]) + stdtwoC1D4 = np.std(AngularerrorCtwo1[3]) + stdtwoC1D5 = np.std(AngularerrorCtwo1[4]) + + meantwoC1 = [meantwoC1D1,meantwoC1D2,meantwoC1D3,meantwoC1D4,meantwoC1D5] + stdtwoC1 = [stdtwoC1D1,stdtwoC1D2,stdtwoC1D3,stdtwoC1D4,stdtwoC1D5] + + +# Combi2 1,1.25,1.75 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 1.75 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo2[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo2[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo2[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo2[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 
and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo2[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo1: ", AngularerrorCtwo2[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo2[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo2[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo2[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo2[4] + + meantwoC2D1 = np.mean(AngularerrorCtwo2[0]) + meantwoC2D2 = np.mean(AngularerrorCtwo2[1]) + meantwoC2D3 = np.mean(AngularerrorCtwo2[2]) + meantwoC2D4 = np.mean(AngularerrorCtwo2[3]) + meantwoC2D5 = np.mean(AngularerrorCtwo2[4]) + + stdtwoC2D1 = np.std(AngularerrorCtwo2[0]) + stdtwoC2D2 = np.std(AngularerrorCtwo2[1]) + stdtwoC2D3 = np.std(AngularerrorCtwo2[2]) + stdtwoC2D4 = np.std(AngularerrorCtwo2[3]) + stdtwoC2D5 = np.std(AngularerrorCtwo2[4]) + + meantwoC2 = [meantwoC2D1,meantwoC2D2,meantwoC2D3,meantwoC2D4,meantwoC2D5] + stdtwoC2 = [stdtwoC2D1,stdtwoC2D2,stdtwoC2D3,stdtwoC2D4,stdtwoC2D5] + +# Combi3 1,1.25,2.0 + distance1 = 1.0 + distance2 = 1.25 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo3[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo3[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo3[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and 
Datatwo[i][j][2] == distance3 : + AngularerrorCtwo3[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo3[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo1: ", AngularerrorCtwo3[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo3[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo3[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo3[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo3[4] + + meantwoC3D1 = np.mean(AngularerrorCtwo3[0]) + meantwoC3D2 = np.mean(AngularerrorCtwo3[1]) + meantwoC3D3 = np.mean(AngularerrorCtwo3[2]) + meantwoC3D4 = np.mean(AngularerrorCtwo3[3]) + meantwoC3D5 = np.mean(AngularerrorCtwo3[4]) + + stdtwoC3D1 = np.std(AngularerrorCtwo3[0]) + stdtwoC3D2 = np.std(AngularerrorCtwo3[1]) + stdtwoC3D3 = np.std(AngularerrorCtwo3[2]) + stdtwoC3D4 = np.std(AngularerrorCtwo3[3]) + stdtwoC3D5 = np.std(AngularerrorCtwo3[4]) + + meantwoC3 = [meantwoC3D1,meantwoC3D2,meantwoC3D3,meantwoC3D4,meantwoC3D5] + stdtwoC3 = [stdtwoC3D1,stdtwoC3D2,stdtwoC3D3,stdtwoC3D4,stdtwoC3D5] + + +# Combi4 1,1.5,1.75 + distance1 = 1.0 + distance2 = 1.5 + distance3 = 1.75 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo4[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo4[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and 
Datatwo[i][j][2] == distance3 : + AngularerrorCtwo4[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo4[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo4[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo4: ", AngularerrorCtwo4[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo4[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo4[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo4[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo4[4] + + meantwoC4D1 = np.mean(AngularerrorCtwo4[0]) + meantwoC4D2 = np.mean(AngularerrorCtwo4[1]) + meantwoC4D3 = np.mean(AngularerrorCtwo4[2]) + meantwoC4D4 = np.mean(AngularerrorCtwo4[3]) + meantwoC4D5 = np.mean(AngularerrorCtwo4[4]) + + stdtwoC4D1 = np.std(AngularerrorCtwo4[0]) + stdtwoC4D2 = np.std(AngularerrorCtwo4[1]) + stdtwoC4D3 = np.std(AngularerrorCtwo4[2]) + stdtwoC4D4 = np.std(AngularerrorCtwo4[3]) + stdtwoC4D5 = np.std(AngularerrorCtwo4[4]) + + meantwoC4 = [meantwoC4D1,meantwoC4D2,meantwoC4D3,meantwoC4D4,meantwoC4D5] + stdtwoC4 = [stdtwoC4D1,stdtwoC4D2,stdtwoC4D3,stdtwoC4D4,stdtwoC4D5] + +# Combi5 1.0,1.5,2.0 + distance1 = 1.0 + distance2 = 1.5 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo5[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and 
Datatwo[i][j][2] == distance3 : + AngularerrorCtwo5[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo5[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo5[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo5[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo5: ", AngularerrorCtwo5[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo5[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo5[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo5[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo5[4] + + meantwoC5D1 = np.mean(AngularerrorCtwo5[0]) + meantwoC5D2 = np.mean(AngularerrorCtwo5[1]) + meantwoC5D3 = np.mean(AngularerrorCtwo5[2]) + meantwoC5D4 = np.mean(AngularerrorCtwo5[3]) + meantwoC5D5 = np.mean(AngularerrorCtwo5[4]) + + stdtwoC5D1 = np.std(AngularerrorCtwo5[0]) + stdtwoC5D2 = np.std(AngularerrorCtwo5[1]) + stdtwoC5D3 = np.std(AngularerrorCtwo5[2]) + stdtwoC5D4 = np.std(AngularerrorCtwo5[3]) + stdtwoC5D5 = np.std(AngularerrorCtwo5[4]) + + meantwoC5 = [meantwoC5D1,meantwoC5D2,meantwoC5D3,meantwoC5D4,meantwoC5D5] + stdtwoC5 = [stdtwoC5D1,stdtwoC5D2,stdtwoC5D3,stdtwoC5D4,stdtwoC5D5] + + +# Combi6 1.0,1.75,2.0 + distance1 = 1.0 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 
: + AngularerrorCtwo6[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo6[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo6[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo6[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo6[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo6: ", AngularerrorCtwo6[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo6[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo6[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo6[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo6[4] + + meantwoC6D1 = np.mean(AngularerrorCtwo6[0]) + meantwoC6D2 = np.mean(AngularerrorCtwo6[1]) + meantwoC6D3 = np.mean(AngularerrorCtwo6[2]) + meantwoC6D4 = np.mean(AngularerrorCtwo6[3]) + meantwoC6D5 = np.mean(AngularerrorCtwo6[4]) + + stdtwoC6D1 = np.std(AngularerrorCtwo6[0]) + stdtwoC6D2 = np.std(AngularerrorCtwo6[1]) + stdtwoC6D3 = np.std(AngularerrorCtwo6[2]) + stdtwoC6D4 = np.std(AngularerrorCtwo6[3]) + stdtwoC6D5 = np.std(AngularerrorCtwo6[4]) + + meantwoC6 = [meantwoC6D1,meantwoC6D2,meantwoC6D3,meantwoC6D4,meantwoC6D5] + stdtwoC6 = [stdtwoC6D1,stdtwoC6D2,stdtwoC6D3,stdtwoC6D4,stdtwoC6D5] + +# Combi7 1.25,1.5,1,75 + distance1 = 1.25 + distance2 = 1.5 + distance3 = 1.75 + i = 0 + while i < 14: 
+ j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo7[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo7[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo7[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo7[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo7[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo7: ", AngularerrorCtwo7[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo7[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo7[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo7[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo7[4] + + meantwoC7D1 = np.mean(AngularerrorCtwo7[0]) + meantwoC7D2 = np.mean(AngularerrorCtwo7[1]) + meantwoC7D3 = np.mean(AngularerrorCtwo7[2]) + meantwoC7D4 = np.mean(AngularerrorCtwo7[3]) + meantwoC7D5 = np.mean(AngularerrorCtwo7[4]) + + stdtwoC7D1 = np.std(AngularerrorCtwo7[0]) + stdtwoC7D2 = np.std(AngularerrorCtwo7[1]) + stdtwoC7D3 = np.std(AngularerrorCtwo7[2]) + stdtwoC7D4 = np.std(AngularerrorCtwo7[3]) + stdtwoC7D5 = np.std(AngularerrorCtwo7[4]) + + meantwoC7 = 
[meantwoC7D1,meantwoC7D2,meantwoC7D3,meantwoC7D4,meantwoC7D5] + stdtwoC7 = [stdtwoC7D1,stdtwoC7D2,stdtwoC7D3,stdtwoC7D4,stdtwoC7D5] + +# Combi8 1.25,1.5,2.0 + distance1 = 1.25 + distance2 = 1.5 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo8[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo8[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo8[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo8[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo8[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo8: ", AngularerrorCtwo8[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo8[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo8[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo8[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo8[4] + + meantwoC8D1 = np.mean(AngularerrorCtwo8[0]) + meantwoC8D2 = np.mean(AngularerrorCtwo8[1]) + meantwoC8D3 = np.mean(AngularerrorCtwo8[2]) + meantwoC8D4 = np.mean(AngularerrorCtwo8[3]) + meantwoC8D5 = np.mean(AngularerrorCtwo8[4]) + + 
stdtwoC8D1 = np.std(AngularerrorCtwo8[0]) + stdtwoC8D2 = np.std(AngularerrorCtwo8[1]) + stdtwoC8D3 = np.std(AngularerrorCtwo8[2]) + stdtwoC8D4 = np.std(AngularerrorCtwo8[3]) + stdtwoC8D5 = np.std(AngularerrorCtwo8[4]) + + meantwoC8 = [meantwoC8D1,meantwoC8D2,meantwoC8D3,meantwoC8D4,meantwoC8D5] + stdtwoC8 = [stdtwoC8D1,stdtwoC8D2,stdtwoC8D3,stdtwoC8D4,stdtwoC8D5] + +# Combi9 1.25,1.75,2.0 + distance1 = 1.25 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo9[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo9[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo9[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo9[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo9[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo9: ", AngularerrorCtwo9[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo9[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo9[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo9[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo9[4] + + 
meantwoC9D1 = np.mean(AngularerrorCtwo9[0]) + meantwoC9D2 = np.mean(AngularerrorCtwo9[1]) + meantwoC9D3 = np.mean(AngularerrorCtwo9[2]) + meantwoC9D4 = np.mean(AngularerrorCtwo9[3]) + meantwoC9D5 = np.mean(AngularerrorCtwo9[4]) + + stdtwoC9D1 = np.std(AngularerrorCtwo9[0]) + stdtwoC9D2 = np.std(AngularerrorCtwo9[1]) + stdtwoC9D3 = np.std(AngularerrorCtwo9[2]) + stdtwoC9D4 = np.std(AngularerrorCtwo9[3]) + stdtwoC9D5 = np.std(AngularerrorCtwo9[4]) + + meantwoC9 = [meantwoC9D1,meantwoC9D2,meantwoC9D3,meantwoC9D4,meantwoC9D5] + stdtwoC9 = [stdtwoC9D1,stdtwoC9D2,stdtwoC9D3,stdtwoC9D4,stdtwoC9D5] + + +# Combi10 1.5,1.75,2.0 + distance1 = 1.5 + distance2 = 1.75 + distance3 = 2.0 + i = 0 + while i < 14: + j = 0 + while j < 50: +# print "i: ", i," j: ", j +# print Datatwo[i][j][1], Datatwo[i][j][1] + if Datatwo[i][j][3] == 1.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo10[0][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: +# print "i: ", i," j: ", j + if Datatwo[i][j][3] == 1.25 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo10[1][i] = Datatwo[i][j][9] +# i = 14 + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.5 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo10[2][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 1.75 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo10[3][i] = Datatwo[i][j][9] + break + else: + j += 1 + j = 0 + while j < 50: + if Datatwo[i][j][3] == 2.0 and Datatwo[i][j][0] == distance1 and Datatwo[i][j][1] == distance2 and Datatwo[i][j][2] == distance3 : + AngularerrorCtwo10[4][i] = Datatwo[i][j][9] + break + else: + j += 1 + i += 1 + + print "AngularerrorCtwo10: ", 
AngularerrorCtwo10[0] + print "AngularerrorCtwo2: ", AngularerrorCtwo10[1] + print "AngularerrorCtwo3: ", AngularerrorCtwo10[2] + print "AngularerrorCtwo4: ", AngularerrorCtwo10[3] + print "AngularerrorCtwo5: ", AngularerrorCtwo10[4] + + meantwoC10D1 = np.mean(AngularerrorCtwo10[0]) + meantwoC10D2 = np.mean(AngularerrorCtwo10[1]) + meantwoC10D3 = np.mean(AngularerrorCtwo10[2]) + meantwoC10D4 = np.mean(AngularerrorCtwo10[3]) + meantwoC10D5 = np.mean(AngularerrorCtwo10[4]) + + stdtwoC10D1 = np.std(AngularerrorCtwo10[0]) + stdtwoC10D2 = np.std(AngularerrorCtwo10[1]) + stdtwoC10D3 = np.std(AngularerrorCtwo10[2]) + stdtwoC10D4 = np.std(AngularerrorCtwo10[3]) + stdtwoC10D5 = np.std(AngularerrorCtwo10[4]) + + meantwoC10 = [meantwoC10D1,meantwoC10D2,meantwoC10D3,meantwoC10D4,meantwoC10D5] + stdtwoC10 = [stdtwoC10D1,stdtwoC10D2,stdtwoC10D3,stdtwoC10D4,stdtwoC10D5] + + + + + + + + + + + + + + + + ##################################################################################### +## C2 +# distance = 1.25 +# i = 0 +# while i < 14: +# j = 0 +# while j < 25: +# if Datatwo[i][j][1] == 1.0 and Data[i][j][0] == distance: +# AngularerrorC2[0][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# print "i: ", i," j: ", j +# if Data[i][j][1] == 1.25 and Data[i][j][0] == distance: +# print Data[i][j][7] +# AngularerrorC2[1][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.5 and Data[i][j][0] == distance: +# AngularerrorC2[2][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.75 and Data[i][j][0] == distance: +# AngularerrorC2[3][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 2.0 and Data[i][j][0] == distance: +# AngularerrorC2[4][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# i += 1 + +# print "AngularerrorC2: ", AngularerrorC2[0] +# print "AngularerrorC2: ", AngularerrorC2[1] +# print "AngularerrorC2: ", 
AngularerrorC2[2] +# print "AngularerrorC2: ", AngularerrorC2[3] +# print "AngularerrorC2: ", AngularerrorC2[4] + +# meanC2D1 = np.mean(AngularerrorC2[0]) +# meanC2D2 = np.mean(AngularerrorC2[1]) +# meanC2D3 = np.mean(AngularerrorC2[2]) +# meanC2D4 = np.mean(AngularerrorC2[3]) +# meanC2D5 = np.mean(AngularerrorC2[4]) + +# stdC2D1 = np.std(AngularerrorC2[0]) +# stdC2D2 = np.std(AngularerrorC2[1]) +# stdC2D3 = np.std(AngularerrorC2[2]) +# stdC2D4 = np.std(AngularerrorC2[3]) +# stdC2D5 = np.std(AngularerrorC2[4]) +# +# meanC2 = [meanC2D1,meanC2D2,meanC2D3,meanC2D4,meanC2D5] +# stdC2 = [stdC2D1,stdC2D2,stdC2D3,stdC2D4,stdC2D5] + +## C3 +# distance = 1.5 +# i = 0 +# while i < 14: +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.0 and Data[i][j][0] == distance: +# AngularerrorC3[0][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# print "i: ", i," j: ", j +# if Data[i][j][1] == 1.25 and Data[i][j][0] == distance: +# print Data[i][j][7] +# AngularerrorC3[1][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.5 and Data[i][j][0] == distance: +# AngularerrorC3[2][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.75 and Data[i][j][0] == distance: +# AngularerrorC3[3][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 2.0 and Data[i][j][0] == distance: +# AngularerrorC3[4][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# i += 1 + +# print "AngularerrorC3: ", AngularerrorC3[0] +# print "AngularerrorC3: ", AngularerrorC3[1] +# print "AngularerrorC3: ", AngularerrorC3[2] +# print "AngularerrorC3: ", AngularerrorC3[3] +# print "AngularerrorC3: ", AngularerrorC3[4] + +# meanC3D1 = np.mean(AngularerrorC3[0]) +# meanC3D2 = np.mean(AngularerrorC3[1]) +# meanC3D3 = np.mean(AngularerrorC3[2]) +# meanC3D4 = np.mean(AngularerrorC3[3]) +# meanC3D5 = np.mean(AngularerrorC3[4]) + +# stdC3D1 = np.std(AngularerrorC3[0]) +# 
stdC3D2 = np.std(AngularerrorC3[1]) +# stdC3D3 = np.std(AngularerrorC3[2]) +# stdC3D4 = np.std(AngularerrorC3[3]) +# stdC3D5 = np.std(AngularerrorC3[4]) +# +# meanC3 = [meanC3D1,meanC3D2,meanC3D3,meanC3D4,meanC3D5] +# stdC3 = [stdC3D1,stdC3D2,stdC3D3,stdC3D4,stdC3D5] + +## C4 +# distance = 1.75 +# i = 0 +# while i < 14: +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.0 and Data[i][j][0] == distance: +# AngularerrorC4[0][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# print "i: ", i," j: ", j +# if Data[i][j][1] == 1.25 and Data[i][j][0] == distance: +# print Data[i][j][7] +# AngularerrorC4[1][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.5 and Data[i][j][0] == distance: +# AngularerrorC4[2][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.75 and Data[i][j][0] == distance: +# AngularerrorC4[3][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 2.0 and Data[i][j][0] == distance: +# AngularerrorC4[4][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# i += 1 + +# print "AngularerrorC4: ", AngularerrorC4[0] +# print "AngularerrorC4: ", AngularerrorC4[1] +# print "AngularerrorC4: ", AngularerrorC4[2] +# print "AngularerrorC4: ", AngularerrorC4[3] +# print "AngularerrorC4: ", AngularerrorC4[4] + +# meanC4D1 = np.mean(AngularerrorC4[0]) +# meanC4D2 = np.mean(AngularerrorC4[1]) +# meanC4D3 = np.mean(AngularerrorC4[2]) +# meanC4D4 = np.mean(AngularerrorC4[3]) +# meanC4D5 = np.mean(AngularerrorC4[4]) + +# stdC4D1 = np.std(AngularerrorC4[0]) +# stdC4D2 = np.std(AngularerrorC4[1]) +# stdC4D3 = np.std(AngularerrorC4[2]) +# stdC4D4 = np.std(AngularerrorC4[3]) +# stdC4D5 = np.std(AngularerrorC4[4]) +# +# meanC4 = [meanC4D1,meanC4D2,meanC4D3,meanC4D4,meanC4D5] +# stdC4 = [stdC4D1,stdC4D2,stdC4D3,stdC4D4,stdC4D5] + +## C5 +# distance = 2.0 +# i = 0 +# while i < 14: +# j = 0 +# while j < 25: +# if 
Data[i][j][1] == 1.0 and Data[i][j][0] == distance: +# AngularerrorC5[0][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# print "i: ", i," j: ", j +# if Data[i][j][1] == 1.25 and Data[i][j][0] == distance: +# print Data[i][j][7] +# AngularerrorC5[1][i] = Data[i][j][7] +# break +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.5 and Data[i][j][0] == distance: +# AngularerrorC5[2][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 1.75 and Data[i][j][0] == distance: +# AngularerrorC5[3][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# j = 0 +# while j < 25: +# if Data[i][j][1] == 2.0 and Data[i][j][0] == distance: +# AngularerrorC5[4][i] = Data[i][j][7] +# j = 25 +# else: +# j += 1 +# i += 1 + +# print "AngularerrorC5: ", AngularerrorC5[0] +# print "AngularerrorC5: ", AngularerrorC5[1] +# print "AngularerrorC5: ", AngularerrorC5[2] +# print "AngularerrorC5: ", AngularerrorC5[3] +# print "AngularerrorC5: ", AngularerrorC5[4] + +# meanC5D1 = np.mean(AngularerrorC5[0]) +# meanC5D2 = np.mean(AngularerrorC5[1]) +# meanC5D3 = np.mean(AngularerrorC5[2]) +# meanC5D4 = np.mean(AngularerrorC5[3]) +# meanC5D5 = np.mean(AngularerrorC5[4]) + +# stdC5D1 = np.std(AngularerrorC5[0]) +# stdC5D2 = np.std(AngularerrorC5[1]) +# stdC5D3 = np.std(AngularerrorC5[2]) +# stdC5D4 = np.std(AngularerrorC5[3]) +# stdC5D5 = np.std(AngularerrorC5[4]) +# +# meanC5 = [meanC5D1,meanC5D2,meanC5D3,meanC5D4,meanC5D5] +# stdC5 = [stdC5D1,stdC5D2,stdC5D3,stdC5D4,stdC5D5] + + N = 5 + ind = np.asarray([0.25,1.25,2.25,3.25,4.25]) + + width = 0.5 # the width of the bars + +# x1 = [0.275,1.275,2.275,3.275,4.275] +# x2 = [0.325,1.325,2.325,3.325,4.325] + x1 = [0.375,1.375,2.375,3.375,4.375] + x2 = [0.425,1.425,2.425,3.425,4.425] + x3 = [0.475,1.475,2.475,3.475,4.475] + x4 = [0.525,1.525,2.525,3.525,4.525] + x5 = [0.575,1.575,2.575,3.575,4.575] + x6 = [0.625,1.625,2.625,3.625,4.625] +# x9 = 
[0.675,1.675,2.675,3.675,4.675] +# x10 = [0.725,1.725,2.725,3.725,4.725] + +# x1 = [0.4,1.4,2.4,3.4,4.4] +# x2 = [0.45,1.45,2.45,3.45,4.45] +# x3 = [0.5,1.5,2.5,3.5,4.5] +# x4 = [0.55,1.55,2.55,3.55,4.55] +# x5 = [0.6,1.6,2.6,3.6,4.6] + + fig = plt.figure(figsize=(14.0, 10.0)) + + + + ax = fig.add_subplot(111) + + +# rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//') + rects1 = ax.errorbar(x1, meantwoC1,yerr=[stdtwoC1,stdtwoC1],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x1, meantwoC1, marker="o", linestyle='-',lw=3,color='blue',label = r'2D-to-2D Calibration Depth 1 + 2 + 3') + +# rects2 =ax.errorbar(x2, meantwoC2,yerr=[stdtwoC2,stdtwoC2],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) +# plt.plot(x2, meantwoC2, marker="o", linestyle='-',lw=3,color='red') +# +# rects3 = ax.errorbar(x3, meantwoC3,yerr=[stdtwoC3,stdtwoC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2) +# plt.plot(x3, meantwoC3, marker="o", linestyle='-',lw=3,color='black') + +# rects4 =ax.errorbar(x4, meantwoC4,yerr=[stdtwoC4,stdtwoC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) +# plt.plot(x4, meantwoC4, marker="o", linestyle='-',lw=3,color='green') + +# rects5 =ax.errorbar(x5, meantwoC5,yerr=[stdtwoC5,stdtwoC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) +# plt.plot(x5, meantwoC5, marker="o", linestyle='-',lw=3,color='orange') +# +# rects6 =ax.errorbar(x6, meantwoC6,yerr=[stdtwoC6,stdtwoC6],fmt='o',color='cyan',ecolor='cyan',lw=3, capsize=5, capthick=2) +# plt.plot(x6, meantwoC6, marker="o", linestyle='-',lw=3,color='cyan') + + rects3 =ax.errorbar(x3, meantwoC7,yerr=[stdtwoC7,stdtwoC7],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x3, meantwoC7, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D Calibration Depth 2 + 3 + 4') +# +# rects8 =ax.errorbar(x8, 
meantwoC8,yerr=[stdtwoC8,stdtwoC8],fmt='o',color='darkviolet',ecolor='darkviolet',lw=3, capsize=5, capthick=2) +# plt.plot(x8, meantwoC8, marker="o", linestyle='-',lw=3,color='darkviolet') +# +# rects9 =ax.errorbar(x9, meantwoC9,yerr=[stdtwoC9,stdtwoC9],fmt='o',color='lime',ecolor='lime',lw=3, capsize=5, capthick=2) +# plt.plot(x9, meantwoC9, marker="o", linestyle='-',lw=3,color='lime') + + rects5 =ax.errorbar(x5, meantwoC10,yerr=[stdtwoC10,stdtwoC10],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) + plt.plot(x5, meantwoC10, marker="o", linestyle='-',lw=3,color='orange',label = r'2D-to-2D Calibration Depth 3 + 4 + 5') + + +# rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//') + rects2 = ax.errorbar(x2, meanC1,yerr=[stdC1,stdC1],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x2, meanC1, marker="o", linestyle='--',lw=3,color='blue',label = r'2D-to-3D Calibration Depth 1 + 2 + 3',) + +# rects2 =ax.errorbar(x2, meanC2,yerr=[stdC2,stdC2],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) +# plt.plot(x2, meanC2, marker="o", linestyle='-',lw=3,color='red') +# +# rects3 = ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2) +# plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black') + +# rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) +# plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green') + +# rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) +# plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange') +# +# rects6 =ax.errorbar(x6, meanC6,yerr=[stdC6,stdC6],fmt='o',color='cyan',ecolor='cyan',lw=3, capsize=5, capthick=2) +# plt.plot(x6, meanC6, marker="o", linestyle='-',lw=3,color='cyan') + + rects4 =ax.errorbar(x4, 
meanC7,yerr=[stdC7,stdC7],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x4, meanC7, marker="o", linestyle='--',lw=3,color='red',label = r'2D-to-3D Calibration Depth 2 + 3 + 4') +# +# rects8 =ax.errorbar(x8, meanC8,yerr=[stdC8,stdC8],fmt='o',color='darkviolet',ecolor='darkviolet',lw=3, capsize=5, capthick=2) +# plt.plot(x8, meanC8, marker="o", linestyle='-',lw=3,color='darkviolet') +# +# rects9 =ax.errorbar(x9, meanC9,yerr=[stdC9,stdC9],fmt='o',color='lime',ecolor='lime',lw=3, capsize=5, capthick=2) +# plt.plot(x9, meanC9, marker="o", linestyle='-',lw=3,color='lime') + + rects6 =ax.errorbar(x6, meanC10,yerr=[stdC10,stdC10],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) + plt.plot(x6, meanC10, marker="o", linestyle='--',lw=3,color='orange',label = r'2D-to-3D Calibration Depth 3 + 4 + 5') + + + ax.set_ylabel(r'Angular Error',fontsize=22) + ax.set_xlabel(r'Depth',fontsize=22) + ax.set_xticks(ind+0.25) + ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=16) + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]#,110]#,120] + print TOPICs + LABELs = ["",r'D1 - 1m',r'D2 - 1.25m', r'D3 - 1.5m', r'D4 - 1.75m', r'D5 - 2.0m', ""]#, ""]#, ""] + +# fig.canvas.set_window_title('Distance Error Correlation') + plt.xticks(TOPICs, LABELs,fontsize=18) + +# legend1 = + legend(fontsize=20,loc='best') +# legend([rects1,rects2,rects3,rects4,rects5,rects6], [r'\LARGE\textbf{2C-to-2D Calibration Depth 1 + 2 + 3}', r'\LARGE\textbf{2C-to-3D Calibration Depth 1 + 2 + 3}', r'\LARGE\textbf{2C-to-2D Calibration Depth 2 + 3 + 4}', r'\LARGE\textbf{2C-to-3D Calibration Depth 2 + 3 + 4}',r'\LARGE\textbf{2C-to-2D Calibration Depth 3 + 4 + 5}',r'\LARGE\textbf{2C-to-3D Calibration Depth 3 + 4 + 5}'], loc='upper right') + +# ax2 = plt.gca().add_artist(legend1) +## +# plt. 
legend([rects2,rects4,rects6], [r'\LARGE\textbf{2C-to-3D Calibration Depth 1 + 2 + 3}', r'\LARGE\textbf{2C-to-3D Calibration Depth 2 + 3 + 4}',r'\LARGE\textbf{2C-to-3D Calibration Depth 3 + 4 + 5}'], loc='upper right') + + TOPICS = [-0.5, 0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5,9.0]#,110]#,120] + print TOPICS + LABELS = [ r'',r'0',r'0.5',r'1', r'1.5', r'2', r'2.5', r'3', r'3.5', r'4', r'4.5', r'5', r'5.5', r'6', r'6.5', r'7', r'7.5', r'8', r'8.5', ""]#, ""]#, ""] + +# fig.canvas.set_window_title('Accuracy - Activity Statistics') + plt.yticks(TOPICS, LABELS,fontsize=18) + + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height), + ha='center', va='bottom',fontweight='bold',fontsize=13.5) + +# autolabel(rects1) + + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + + + means = [meanC1, meanC2, meanC3, meanC4, meanC5, meanC6, meanC7, meanC8, meanC9,meanC10] + print meanC1 + print meanC2 + print meanC3 + print meanC4 + print meanC5 + + fixationinfos_list_path = "MeansC3D2D3.npy" + fixationinfos_list_csv_path = "MeansC3D2D3.csv" + np.save(fixationinfos_list_path,np.asarray(means)) + np.savetxt(fixationinfos_list_csv_path,np.asarray(means), delimiter=",", fmt="%f") + +if __name__ == "__main__": + main(sys.argv[1:]) + diff --git a/code/Visualization/EffectDistanceDifference1CalibrationDepth.py b/code/Visualization/EffectDistanceDifference1CalibrationDepth.py new 
file mode 100644 index 0000000..f488bc9 --- /dev/null +++ b/code/Visualization/EffectDistanceDifference1CalibrationDepth.py @@ -0,0 +1,200 @@ +import os, sys +import seaborn +from pylab import rcParams +import cv2 + +import numpy as np +from numpy import linalg as LA +from time import time +from itertools import combinations + +import matplotlib.pyplot as plt +from matplotlib.pyplot import * +import matplotlib.patches as mpatches + +# activate latex text rendering +#rc('text', usetex=True) + +def main(argv): +# path = str(argv[0]) +# path ="/home/Julian/3D_Pupil_Project/work/results/2D2D/" + +# Data1 = np.load(path + "p5_2d2d_all.npy") +# Data2 = np.load(path + "p7_2d2d_all.npy") +# Data3 = np.load(path + "p10_2d2d_all.npy") +# Data4 = np.load(path + "p11_2d2d_all.npy") +# Data5 = np.load(path + "p12_2d2d_all.npy") +# Data6 = np.load(path + "p13_2d2d_all.npy") +# Data7 = np.load(path + "p14_2d2d_all.npy") +# Data8 = np.load(path + "p15_2d2d_all.npy") +# Data9 = np.load(path + "p16_2d2d_all.npy") +# Data10 = np.load(path + "p20_2d2d_all.npy") +# Data11 = np.load(path + "p21_2d2d_all.npy") +# Data12 = np.load(path + "p24_2d2d_all.npy") +# Data13 = np.load(path + "p25_2d2d_all.npy") + D2D2 = np.load("MeansC1D2D2.npy") + D2D3 = np.load("MeansC1D2D3.npy") + + + mean02D2D = np.mean([D2D2[0][0],D2D2[1][1], D2D2[2][2], D2D2[3][3], D2D2[4][4]]) + mean12D2D = np.mean([D2D2[0][1],D2D2[1][2], D2D2[2][3], D2D2[3][4]]) + mean22D2D = np.mean([D2D2[0][2],D2D2[1][3], D2D2[2][4]]) + mean32D2D = np.mean([D2D2[0][3],D2D2[1][4]]) + mean42D2D = np.mean([D2D2[0][4]]) + +# mean2D2D = [mean02D2D,mean12D2D,mean22D2D,mean32D2D,mean42D2D] + + minmean02D2D = np.mean([D2D2[1][0],D2D2[2][1], D2D2[3][2], D2D2[4][3]]) + minmean12D2D = np.mean([D2D2[2][0],D2D2[3][1], D2D2[4][2]]) + minmean22D2D = np.mean([D2D2[3][0],D2D2[4][1]]) + minmean32D2D = np.mean([D2D2[4][0]]) + +# minmean2D2D = [minmean02D2D,minmean12D2D,minmean22D2D,minmean32D2D] + + std02D2D = np.std([D2D2[0][0],D2D2[1][1], D2D2[2][2], 
D2D2[3][3], D2D2[4][4]]) + std12D2D = np.std([D2D2[0][1],D2D2[1][2], D2D2[2][3], D2D2[3][4]]) + std22D2D = np.std([D2D2[0][2],D2D2[1][3], D2D2[2][4]]) + std32D2D = np.std([D2D2[0][3],D2D2[1][4]]) + std42D2D = np.std([D2D2[0][4]]) + +# std2D2D = [std02D2D,std12D2D,std22D2D,std32D2D,std42D2D] + + minstd02D2D = np.std([D2D2[1][0],D2D2[2][1], D2D2[3][2], D2D2[4][3]]) + minstd12D2D = np.std([D2D2[2][0],D2D2[3][1], D2D2[4][2]]) + minstd22D2D = np.std([D2D2[3][0],D2D2[4][1]]) + minstd32D2D = np.std([D2D2[4][0]]) + +# minstd2D2D = [minstd02D2D,minstd12D2D,minstd22D2D,minstd32D2D] + + mean2D2D = [minmean32D2D,minmean22D2D,minmean12D2D,minmean02D2D,mean02D2D,mean12D2D,mean22D2D,mean32D2D,mean42D2D] + std2D2D = [minstd32D2D,minstd22D2D,minstd12D2D,minstd02D2D,std02D2D,std12D2D,std22D2D,std32D2D,std42D2D] + + mean02D3D = np.mean([D2D3[0][0],D2D3[1][1], D2D3[2][2], D2D3[3][3], D2D3[4][4]]) + mean12D3D = np.mean([D2D3[0][1],D2D3[1][2], D2D3[2][3], D2D3[3][4]]) + mean22D3D = np.mean([D2D3[0][2],D2D3[1][3], D2D3[2][4]]) + mean32D3D = np.mean([D2D3[0][3],D2D3[1][4]]) + mean42D3D = np.mean([D2D3[0][4]]) + +# mean2D3D = [mean02D3D,mean12D3D,mean22D3D,mean32D3D,mean42D3D] + + std02D3D = np.std([D2D3[0][0],D2D3[1][1], D2D3[2][2], D2D3[3][3], D2D3[4][4]]) + std12D3D = np.std([D2D3[0][1],D2D3[1][2], D2D3[2][3], D2D3[3][4]]) + std22D3D = np.std([D2D3[0][2],D2D3[1][3], D2D3[2][4]]) + std32D3D = np.std([D2D3[0][3],D2D3[1][4]]) + std42D3D = np.std([D2D3[0][4]]) + +# std2D3D = [std02D3D,std12D3D,std22D3D,std32D3D,std42D3D] + + minmean02D3D = np.mean([D2D3[1][0],D2D3[2][1], D2D3[3][2], D2D3[4][3]]) + minmean12D3D = np.mean([D2D3[2][0],D2D3[3][1], D2D3[4][2]]) + minmean22D3D = np.mean([D2D3[3][0],D2D3[4][1]]) + minmean32D3D = np.mean([D2D3[4][0]]) + +# minmean2D3D = [minmean02D3D,minmean12D3D,minmean22D3D,minmean32D3D] + + minstd02D3D = np.std([D2D3[1][0],D2D3[2][1], D2D3[3][2], D2D3[4][3]]) + minstd12D3D = np.std([D2D3[2][0],D2D3[3][1], D2D3[4][2]]) + minstd22D3D = 
np.std([D2D3[3][0],D2D3[4][1]]) + minstd32D3D = np.std([D2D3[4][0]]) + +# minstd2D3D = [minstd02D3D,minstd12D3D,minstd22D3D,minstd32D3D] + + mean2D3D = [minmean32D3D,minmean22D3D,minmean12D3D,minmean02D3D,mean02D3D,mean12D3D,mean22D3D,mean32D3D,mean42D3D] + std2D3D = [minstd32D3D,minstd22D3D,minstd12D3D,minstd02D3D,std02D3D,std12D3D,std22D3D,std32D3D,std42D3D] + + N = 5 + ind = np.asarray([0.25,1.25,2.25,3.25,4.25]) + + width = 0.5 # the width of the bars + +# x1 = [0.4,1.4,2.4,3.4,4.4] + x2 = [-3.55,-2.55,-1.55,-0.55,0.45,1.45,2.45,3.45,4.45] +# x3 = [0.5,1.5,2.5,3.5,4.5] + x4 = [-3.45,-2.45,-1.45,-0.45,0.55,1.55,2.55,3.55,4.55] +# x5 = [0.6,1.6,2.6,3.6,4.6] + + fig = plt.figure(figsize=(14.0, 10.0)) + + + + ax = fig.add_subplot(111) + +# print mean2D2D +# print mean2D3D + +# ax.axhline(linewidth=2, y = np.mean(mean2D2D),color='r') +# ax.axhline(linewidth=2, y = np.mean(mean2D3D),color='blue') + + +# ax.axhline(linewidth=2, y = minvaluevalue,color='black') + +# ax.text(0.98, Participantmeanvalue+0.5, "Mean %.2f" % Participantmeanvalue,fontsize=12, fontweight='bold',color='r') +# ax.text(0.98, maxvaluevalue+0.5, "Maximum %.2f" % maxvaluevalue,fontsize=12, fontweight='bold',color='black') +# ax.text(0.98, minvaluevalue+0.5, "Minimum %.2f" % minvaluevalue,fontsize=12, fontweight='bold', color='black') + +# rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//') + rects1 = ax.errorbar(x2, mean2D2D,yerr=[std2D2D,std2D2D],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x2, mean2D2D, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D') + + rects2 =ax.errorbar(x4, mean2D3D,yerr=[std2D3D,std2D3D],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x4, mean2D3D, marker="o", linestyle='-',lw=3,color='blue', label = r'2D-to-3D') + + legend(fontsize=20,loc='upper right') + +# rects3 = ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, 
capthick=2) +# plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black') +# +# rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) +# plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green') +# +# rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) +# plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange') + + + ax.set_ylabel(r'Angular Error',fontsize=22) + ax.set_xlabel(r'Distance Difference',fontsize=22) + ax.set_xticks(ind+0.25) + ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=18) + + TOPICs = [-3.5,-2.5,-1.5,-0.5,0.5,1.5,2.5,3.5,4.5]#,110]#,120] + print TOPICs + LABELs = [r'-1m',r'-0.75m', r'-0.5m', r'-0.25m', r'0m',r'0.25m', r'0.5m', r'0.75m', r'1.0m', ""]#, ""]#, ""] + +# fig.canvas.set_window_title('Distance Error Correlation') + plt.xticks(TOPICs, LABELs,fontsize=18) + +# legend([rects1,rects2], [r'\LARGE\textbf{2D2D}', r'\LARGE\textbf{2D3D}'], loc='lower right') + + TOPICS = [0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5]#,110]#,120] + print TOPICS + LABELS = [r'0.5',r'1', r'1.5', r'2', r'2.5', r'3', r'3.5', r'4', r'4.5', r'5', r'5.5', r'6', r'6.5', r'7', r'7.5', r'8', r'8.5']#, ""]#, ""] + +# fig.canvas.set_window_title('Accuracy - Activity Statistics') + plt.yticks(TOPICS, LABELS,fontsize=18) + + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height), + ha='center', va='bottom',fontweight='bold',fontsize=13.5) + +# autolabel(rects1) + + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space 
between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + + + + +if __name__ == "__main__": + main(sys.argv[1:]) + diff --git a/code/Visualization/EffectNumberofClusters.py b/code/Visualization/EffectNumberofClusters.py new file mode 100644 index 0000000..9d23170 --- /dev/null +++ b/code/Visualization/EffectNumberofClusters.py @@ -0,0 +1,350 @@ +import os, sys +import seaborn +from pylab import rcParams +import cv2 + +import numpy as np +from numpy import linalg as LA +from time import time +from itertools import combinations + +import matplotlib.pyplot as plt +from matplotlib.pyplot import * +import matplotlib.patches as mpatches + +# activate latex text rendering +#rc('text', usetex=True) + +def main(argv): + + C12D2D = np.load("MeansC1D2D2.npy") + C12D3D = np.load("MeansC1D2D3.npy") + + C22D2D = np.load("MeansC2D2D2.npy") + C22D3D = np.load("MeansC2D2D3.npy") + + C32D2D = np.load("MeansC3D2D2.npy") + C32D3D = np.load("MeansC3D2D3.npy") + + C42D2D = np.load("MeansC4D2D2.npy") + C42D3D = np.load("MeansC4D2D3.npy") + + C52D2D = np.load("MeansC5D2D2.npy") + C52D3D = np.load("MeansC5D2D3.npy") + + + summeC12D2D = [] + summeC22D2D = [] + summeC32D2D = [] + summeC42D2D = [] + summeC52D2D = [] + + summeC12D3D = [] + summeC22D3D = [] + summeC32D3D = [] + summeC42D3D = [] + summeC52D3D = [] + + + i = 0 + while i < len(C12D2D): + j = 0 + while j < len(C12D2D[0]): + summeC12D2D.append(C12D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C22D2D): + j = 0 + while j < len(C22D2D[0]): + summeC22D2D.append(C22D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C32D2D): + j = 0 + while j < len(C32D2D[0]): + summeC32D2D.append(C32D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C42D2D): + j = 0 + while j < len(C42D2D[0]): + summeC42D2D.append(C42D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C52D2D): + j = 
0 + while j < len(C52D2D[0]): + summeC52D2D.append(C52D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C12D3D): + j = 0 + while j < len(C12D3D[0]): + summeC12D3D.append(C12D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C22D3D): + j = 0 + while j < len(C22D3D[0]): + summeC22D3D.append(C22D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C32D3D): + j = 0 + while j < len(C32D3D[0]): + summeC32D3D.append(C32D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C42D3D): + j = 0 + while j < len(C42D3D[0]): + summeC42D3D.append(C42D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C52D3D): + j = 0 + while j < len(C52D3D[0]): + summeC52D3D.append(C52D3D[i][j]) + j += 1 + i += 1 + + + mean1 = np.mean(summeC12D2D) + mean2 = np.mean(summeC22D2D) + mean3 = np.mean(summeC32D2D) + mean4 = np.mean(summeC42D2D) + mean5 = np.mean(summeC52D2D) + + mean6 = np.mean(summeC12D3D) + mean7 = np.mean(summeC22D3D) + mean8 = np.mean(summeC32D3D) + mean9 = np.mean(summeC42D3D) + mean10 = np.mean(summeC52D3D) + + std1 = np.std(summeC12D2D) + std2 = np.std(summeC22D2D) + std3 = np.std(summeC32D2D) + std4 = np.std(summeC42D2D) + std5 = np.std(summeC52D2D) + + std6 = np.std(summeC12D3D) + std7 = np.std(summeC22D3D) + std8 = np.std(summeC32D3D) + std9 = np.std(summeC42D3D) + std10 = np.std(summeC52D3D) + +# i = 0 +# while i < len(C12D2D): +# j = 0 +# while j < len(C12D2D[0]): +# summeC12D2D.append(C12D2D[i][j]) +# j += 1 +# i += 1 + + +# print summeC12D2D + +# i = 0 +# minimum2 = 100 +# while i < len(C22D2D): +# print np.mean(C22D2D[i]) +# if np.mean(C22D2D[i]) < minimum2: +# minimum2 = np.mean(C22D2D[i]) +# i += 1 +# +# i = 0 +# minimum3 = 100 +# while i < len(C32D2D): +# print np.mean(C32D2D[i]) +# if np.mean(C32D2D[i]) < minimum3: +# minimum3 = np.mean(C32D2D[i]) +# i += 1 +# +# i = 0 +# minimum4 = 100 +# while i < len(C42D2D): +# print np.mean(C42D2D[i]) +# if np.mean(C42D2D[i]) < minimum4: +# minimum4 = np.mean(C42D2D[i]) +# i += 1 +# +# i = 0 +# minimum5 = 100 +# 
while i < len(C52D2D): +# print np.mean(C52D2D[i]) +# if np.mean(C52D2D[i]) < minimum5: +# minimum5 = np.mean(C52D2D[i]) +# i += 1 +# +# i = 0 +# minimum6 = 100 +# while i < len(C12D3D): +# print np.mean(C12D3D[i]) +# if np.mean(C12D3D[i]) < minimum6: +# minimum6 = np.mean(C12D3D[i]) +# i += 1 +# +# i = 0 +# minimum7 = 100 +# while i < len(C22D3D): +# print np.mean(C22D3D[i]) +# if np.mean(C22D3D[i]) < minimum7: +# minimum7 = np.mean(C22D3D[i]) +# i += 1 +# +# i = 0 +# minimum8 = 100 +# while i < len(C32D3D): +# print np.mean(C32D3D[i]) +# if np.mean(C32D3D[i]) < minimum8: +# minimum8 = np.mean(C32D3D[i]) +# i += 1 +# +# i = 0 +# minimum9 = 100 +# while i < len(C42D3D): +# print np.mean(C42D3D[i]) +# if np.mean(C42D3D[i]) < minimum9: +# minimum9 = np.mean(C42D3D[i]) +# i += 1 + +# i = 0 +# minimum10 = 100 +# while i < len(C52D3D): +# print np.mean(C52D3D[i]) +# if np.mean(C52D3D[i]) < minimum10: +# minimum10 = np.mean(C52D3D[i]) +# i += 1 +# + mean2D2D = [mean1,mean2,mean3,mean4,mean5] + mean2D3D = [mean6,mean7,mean8,mean9,mean10] + + std2D2D = [std1,std2,std3,std4,std5] + std2D3D = [std6,std7,std8,std9,std10] + +# print minimum1 + +# i = 0 +# minimum2 = 100 +# while i < 5: +# if np.mean(C12D2D[i]) < minimum: +# minimum1 = np.mean(C12D2D[i]) +# i += 1 + + + + + + N = 5 + ind = np.asarray([0.25,1.25,2.25,3.25,4.25]) + + width = 0.5 # the width of the bars + +# x1 = [0.4,1.4,2.4,3.4,4.4] + x2 = [0.45,1.45,2.45,3.45,4.45] +# x3 = [0.5,1.5,2.5,3.5,4.5] + x4 = [0.55,1.55,2.55,3.55,4.55] +# x5 = [0.6,1.6,2.6,3.6,4.6] + + fig = plt.figure(figsize=(14.0, 10.0)) + + + + ax = fig.add_subplot(111) + +# print mean2D2D +# print mean2D3D + +# ax.axhline(linewidth=2, y = np.mean(mean2D2D),color='r') +# ax.axhline(linewidth=2, y = np.mean(mean2D3D),color='blue') + +# ax.axhline(linewidth=2, y = minvaluevalue,color='black') + +# ax.text(0.98, Participantmeanvalue+0.5, "Mean %.2f" % Participantmeanvalue,fontsize=12, fontweight='bold',color='r') +# ax.text(0.98, maxvaluevalue+0.5, 
"Maximum %.2f" % maxvaluevalue,fontsize=12, fontweight='bold',color='black') +# ax.text(0.98, minvaluevalue+0.5, "Minimum %.2f" % minvaluevalue,fontsize=12, fontweight='bold', color='black') + +# rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//') + rects1 = ax.errorbar(x2, mean2D2D,yerr=[std2D2D,std2D2D],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x2, mean2D2D, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D') + + rects2 =ax.errorbar(x4, mean2D3D,yerr=[std2D3D,std2D3D],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x4, mean2D3D, marker="o", linestyle='-',lw=3,color='blue', label = r'2D-to-3D') + + legend(fontsize=20,loc='upper right') + +# rects3 = ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2) +# plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black') +# +# rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) +# plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green') +# +# rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) +# plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange') + + + ax.set_ylabel(r'Angular Error',fontsize=22) + ax.set_xlabel(r'Number of Calibration Depths',fontsize=22) + ax.set_xticks(ind+0.25) + ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=18) + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]#,110]#,120] + print TOPICs + LABELs = ["",r'1',r'2', r'3', r'4', r'5', ""]#, ""]#, ""] + +# fig.canvas.set_window_title('Distance Error Correlation') + plt.xticks(TOPICs, LABELs,fontsize=18) + +# legend([rects1,rects2], [r'\LARGE\textbf{2D2D}', r'\LARGE\textbf{2D3D}'], loc='lower right') + + TOPICS = [0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]#,110]#,120] + print TOPICS + LABELS = [r'0.5', r'1',r'1.5', r'2',r'2.5', r'3',r'3.5', 
r'4',r'4.5',r'5']#, ""]#, ""] + +# fig.canvas.set_window_title('Accuracy - Activity Statistics') + plt.yticks(TOPICS, LABELS,fontsize=18) + + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height), + ha='center', va='bottom',fontweight='bold',fontsize=13.5) + +# autolabel(rects1) + + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + + + + +if __name__ == "__main__": + main(sys.argv[1:]) + diff --git a/code/Visualization/EffectNumberofClusters_plus_Simulation.py b/code/Visualization/EffectNumberofClusters_plus_Simulation.py new file mode 100644 index 0000000..909d6a1 --- /dev/null +++ b/code/Visualization/EffectNumberofClusters_plus_Simulation.py @@ -0,0 +1,418 @@ +from __future__ import division + +import os, sys +import seaborn +from pylab import rcParams +import cv2 + +import numpy as np +from numpy import linalg as LA +from time import time +from itertools import combinations + +import matplotlib.pyplot as plt +from matplotlib.pyplot import * +import matplotlib.patches as mpatches + +sys.path.append('..') # so we can import modules from `code` directory +from minimize import findInitialW, _q, g, minimizeEnergy, g3D3D, findW3D3D +from minimize import g as gaze_ray +from geom import getSphericalCoords, getAngularDiff +from vector import Vector as v + +from sim import GazeSimulation +from recording.util.tools import is_outlier +from recording.tracker import readCameraParams +from geom 
import getRotationMatrix +from parallax_analysis import Experiment + +''' +Change lines 48 to 61 accordingly +''' + + +class Parallax2Dto3DMapping(Experiment): + ''' + IMPORTANT! + In all experiments, scene camera's rvec = (0, 0, 0) i.e. the corresponding rotation matrix is the identity matrix therefore + I have not included the dot product with this rotation matrix to convert points in world coordinates + into scene camera coordinates. however, one should know that if the scene camera is rotated differently + this transformation is essential. I would add the corresponding computations later on. + ''' + + def __run__(self): + # Processing real world data + ###################################################################################################### + print '> Processing real world data...' + + C12D2D = np.load("../results/MeansC1D2D2.npy") + C12D3D = np.load("../results/MeansC1D2D3.npy") + + C22D2D = np.load("../results/MeansC2D2D2.npy") + C22D3D = np.load("../results/MeansC2D2D3.npy") + + C32D2D = np.load("../results/MeansC3D2D2.npy") + C32D3D = np.load("../results/MeansC3D2D3.npy") + + C42D2D = np.load("../results/MeansC4D2D2.npy") + C42D3D = np.load("../results/MeansC4D2D3.npy") + + C52D2D = np.load("../results/MeansC5D2D2.npy") + C52D3D = np.load("../results/MeansC5D2D3.npy") + + + summeC12D2D = [] + summeC22D2D = [] + summeC32D2D = [] + summeC42D2D = [] + summeC52D2D = [] + + summeC12D3D = [] + summeC22D3D = [] + summeC32D3D = [] + summeC42D3D = [] + summeC52D3D = [] + + + i = 0 + while i < len(C12D2D): + j = 0 + while j < len(C12D2D[0]): + summeC12D2D.append(C12D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C22D2D): + j = 0 + while j < len(C22D2D[0]): + summeC22D2D.append(C22D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C32D2D): + j = 0 + while j < len(C32D2D[0]): + summeC32D2D.append(C32D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C42D2D): + j = 0 + while j < len(C42D2D[0]): + summeC42D2D.append(C42D2D[i][j]) + j += 1 
+ i += 1 + + i = 0 + while i < len(C52D2D): + j = 0 + while j < len(C52D2D[0]): + summeC52D2D.append(C52D2D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C12D3D): + j = 0 + while j < len(C12D3D[0]): + summeC12D3D.append(C12D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C22D3D): + j = 0 + while j < len(C22D3D[0]): + summeC22D3D.append(C22D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C32D3D): + j = 0 + while j < len(C32D3D[0]): + summeC32D3D.append(C32D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C42D3D): + j = 0 + while j < len(C42D3D[0]): + summeC42D3D.append(C42D3D[i][j]) + j += 1 + i += 1 + + i = 0 + while i < len(C52D3D): + j = 0 + while j < len(C52D3D[0]): + summeC52D3D.append(C52D3D[i][j]) + j += 1 + i += 1 + + + mean1 = np.mean(summeC12D2D) + mean2 = np.mean(summeC22D2D) + mean3 = np.mean(summeC32D2D) + mean4 = np.mean(summeC42D2D) + mean5 = np.mean(summeC52D2D) + + mean6 = np.mean(summeC12D3D) + mean7 = np.mean(summeC22D3D) + mean8 = np.mean(summeC32D3D) + mean9 = np.mean(summeC42D3D) + mean10 = np.mean(summeC52D3D) + + std1 = np.std(summeC12D2D) + std2 = np.std(summeC22D2D) + std3 = np.std(summeC32D2D) + std4 = np.std(summeC42D2D) + std5 = np.std(summeC52D2D) + + std6 = np.std(summeC12D3D) + std7 = np.std(summeC22D3D) + std8 = np.std(summeC32D3D) + std9 = np.std(summeC42D3D) + std10 = np.std(summeC52D3D) + + mean2D2D_real = [mean1,mean2,mean3,mean4,mean5] + mean2D3D_real = [mean6,mean7,mean8,mean9,mean10] + + std2D2D_real = [std1,std2,std3,std4,std5] + std2D3D_real = [std6,std7,std8,std9,std10] + ###################################################################################################### + # Simulation + print '> Processing simulation data...' 
+ ###################################################################################################### + sim = GazeSimulation(log = False) + + sim.place_eyeball_on_scene_camera = False + sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) + # sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. e = (e.x, e.y, 0) + + sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera + sim.setTestDepth(1.5 * 1000) + sim.calibration_grid = True + sim.calibration_random_depth = False + sim.test_grid = True + sim.test_random_depth = False + sim.test_random_fixed_depth = False + + depths = map(lambda d:d*1000, [1, 1.25, 1.5, 1.75, 2.0]) + print '> Computing results for multiple calibration depths...' + results, results_std = [], [] + for num_of_calibration_depths in xrange(1, 6): # from 1 calibration depths to 5 + print '> Considering only %s calibration depth(s)...' %num_of_calibration_depths + sim.reset() + aae_2ds_aae = [] + aae_2ds_phe = [] + aae_3ds_aae = [] + aae_3ds_phe = [] + aae_3D3Ds = [] # angular error + + for calibs in combinations(depths, num_of_calibration_depths): + # Now calibs is a set of depths from each of which we need calibration data + print '> Current calibration depths', calibs + calibs = list(calibs) + cp, ct = [], [] + sim.reset() + sim.setCalibrationDepth(calibs) + # Perform calibration + sim.runCalibration() + cp, ct, p3d = sim.tr_pupil_locations, sim.calibration_points, sim.tr_3d_pupil_locations + # target positions are computed relative to the scene CCS + ti = map(lambda target: v(target) - v(sim.scene_camera.t), ct) + # Computing pupil pose for each gaze + ni = map(lambda p: (v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + + w, e, w0 = minimizeEnergy(cp, ti) + e = v(e) + + # transforming pupil pose to eye camera CS + eyeR = np.array(sim.eye_camera.R[:3]) + ni = map(lambda pose: eyeR.dot(np.array(pose)), ni) + + R, e3d3d = minimizeEnergy(ni, ti, pose_given=True) + e3d3d = v(e3d3d) + 
# R = LA.inv(R) + + # Now we have calibration data from multiple depths, we can test on all depths + for test_depth in depths: + sim.setTestDepth(test_depth) + aae_2d_aae, aae_2d_phe, aae_2d_std, _ = sim.runTest() # last one is PHE std + aae_2ds_aae.append((aae_2d_aae, aae_2d_std)) + aae_2ds_phe.append(aae_2d_phe) + # Fetching test points + t, p, p3d = sim.test_points, sim.te_pupil_locations, sim.te_3d_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene CCS + + # 3D3D + t_3d3d = t[:] + + ni = map(lambda p: v(v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + # transforming pupil pose to eye camera CS + ni = map(lambda r: v(eyeR.dot(np.array(r))), ni) + + # applying estimated rotation to pose vector in eye camera coordinates (Rn) + # R is estimated rotation between scene camera and eye coordinate system (not eye camera!) + # in other words, R is the rotation part of e + Rni = map(lambda n: v(R.dot(np.array(n))), ni) # now ready to compare Rn with t-e + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. then we can simply compute angular error between each intersection and + # the corresponding 3D target + gis = map(lambda vec: v(vec), Rni) # gaze rays originating from eyeball + # we multiply g such that it hits t's z-plane i.e. multiply all coordinates by factor (t.z-e.z)/g.z + # then we add e to the final g so that it originates from scene camera. 
now both g and t are in the + # same coordinate system and originate from the same point, so we can compare them + gprimes = map(lambda tg: v(((tg[0].z - e3d3d.z)/tg[1].z)*tg[1] + e3d3d), zip(t_3d3d, gis)) + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t_3d3d)) + + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + aae_3D3Ds.append((AAE, STD)) + + + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. then we can simply compute angular error between each intersection and + # the corresponding 3D target + t = map(lambda vec: v(vec), t) + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag/1000 for u,v in zip(t, gprimes)) + N = len(t) + APHE = np.mean(PHE) + PHE_STD = np.std(PHE) + PHE_m, PHE_M = min(PHE), max(PHE) + + aae_3ds_aae.append((AAE, STD)) + aae_3ds_phe.append((PHE, PHE_STD)) + + # results only contains AAE + results.append([np.mean(np.array(aae_2ds_aae)[:,0]), np.mean(np.array(aae_3ds_aae)[:,0]), np.mean(np.array(aae_3D3Ds)[:,0])]) + results_std.append([np.std(np.array(aae_2ds_aae)[:,0]), np.std(np.array(aae_3ds_aae)[:,0]), np.std(np.array(aae_3D3Ds)[:,0])]) + ###################################################################################################### + # Plotting + print '> Plotting...' 
+ ###################################################################################################### + # New plot code based on EffectNumberofClusters.py + mean2D2D = [res[0] for res in results] + mean2D3D = [res[1] for res in results] + mean3D3D = [res[2] for res in results] + std2D2D = [res[0] for res in results_std] + std2D3D = [res[1] for res in results_std] + std3D3D = [res[2] for res in results_std] + + + N = 5 + ind = np.asarray([0.25,1.25,2.25,3.25,4.25]) + + width = 0.5 # the width of the bars + + # x1 = [0.4,1.4,2.4,3.4,4.4] + x2 = [0.45,1.45,2.45,3.45,4.45] + # x3 = [0.5,1.5,2.5,3.5,4.5] + x4 = [0.55,1.55,2.55,3.55,4.55] + # x5 = [0.6,1.6,2.6,3.6,4.6] + x6 = [0.50,1.50,2.50,3.50,4.50] + + fig = plt.figure(figsize=(14.0, 10.0)) + + ax = fig.add_subplot(111) + + rrects1 = ax.errorbar(x2, mean2D2D_real,yerr=[std2D2D_real,std2D2D_real],fmt='o',color='red',ecolor='red',lw=3, capsize=8, capthick=3) + plt.plot(x2, mean2D2D_real, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D') + + rrects2 =ax.errorbar(x4, mean2D3D_real,yerr=[std2D3D_real,std2D3D_real],fmt='o',color='blue',ecolor='blue',lw=3, capsize=8, capthick=3) + plt.plot(x4, mean2D3D_real, marker="o", linestyle='-',lw=3,color='blue', label = r'2D-to-3D') + + + rects1 = ax.errorbar(x2, mean2D2D,yerr=[std2D2D,std2D2D],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x2, mean2D2D, marker="o", linestyle='--',lw=3,color='red',label = r'2D-to-2D Simulation') + + rects2 =ax.errorbar(x4, mean2D3D,yerr=[std2D3D,std2D3D],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x4, mean2D3D, marker="o", linestyle='--',lw=3,color='blue', label = r'2D-to-3D Simulation') + + rects3 =ax.errorbar(x6, mean3D3D,yerr=[std3D3D,std3D3D],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) + plt.plot(x6, mean3D3D, marker="o", linestyle='--',lw=3,color='orange', label = r'3D-to-3D Simulation') + + legend(fontsize=20,loc='upper right') + + # rects3 = 
ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2) + # plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black') + # + # rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) + # plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green') + # + # rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) + # plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange') + + + ax.set_ylabel(r'Angular Error',fontsize=22) + ax.set_xlabel(r'Number of Calibration Depths',fontsize=22) + ax.set_xticks(ind+0.25) + ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=18) + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]#,110]#,120] + print TOPICs + LABELs = ["",r'1',r'2', r'3', r'4', r'5', ""]#, ""]#, ""] + + # fig.canvas.set_window_title('Distance Error Correlation') + plt.xticks(TOPICs, LABELs,fontsize=18) + + # legend([rects1,rects2], [r'\LARGE\textbf{2D2D}', r'\LARGE\textbf{2D3D}'], loc='lower right') + + TOPICS = [0.0, 0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]#,110]#,120] + print TOPICS + LABELS = [r'0.0', r'0.5', r'1',r'1.5', r'2',r'2.5', r'3',r'3.5', r'4',r'4.5',r'5']#, ""]#, ""] + + # fig.canvas.set_window_title('Accuracy - Activity Statistics') + plt.yticks(TOPICS, LABELS,fontsize=18) + + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height), + ha='center', va='bottom',fontweight='bold',fontsize=13.5) + + # autolabel(rects1) + + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 
# the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + ###################################################################################################### + + +def main(): + ex = Parallax2Dto3DMapping() + ex.performExperiment() + +if __name__ == "__main__": + main() + diff --git a/code/copy_missing_data.py b/code/copy_missing_data.py new file mode 100644 index 0000000..8bb4b05 --- /dev/null +++ b/code/copy_missing_data.py @@ -0,0 +1,36 @@ +''' +Copies all missing files from ROOT to DEST for selected PARTICIPANTS +''' +import os, sys +import shutil + +PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25'] + +DEST = '/home/mmbrian/3D_Gaze_Tracking/archive00/participants/' +ROOT = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants/' + +def main(): + processed = 0 + for p in os.listdir(ROOT): + if p in PARTICIPANTS: + print '> Copying missing files for participant', p + d1 = os.path.join(ROOT, p) + d1 = os.path.join(d1, os.listdir(d1)[0]) # ../p_i/../ + for d2 in os.listdir(d1): # per recording + root_path = os.path.join(d1, d2) + dest_path = os.path.join(DEST, p) + dest_path = os.path.join(dest_path, os.listdir(dest_path)[0], d2) + print '> From:', root_path, 'To:', dest_path + processPath(root_path, dest_path) + processed+=1 + print '> Processed %s participants.' 
% processed + +def processPath(root_path, dest_path): + dest_files = os.listdir(dest_path) + for f in os.listdir(root_path): # every file from root path + if not f in dest_files: # copy if file does not exist in destination + print '> Copying', f + shutil.copyfile(os.path.join(root_path, f), os.path.join(dest_path, f)) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/code/experiment.py b/code/experiment.py new file mode 100644 index 0000000..a314a03 --- /dev/null +++ b/code/experiment.py @@ -0,0 +1,441 @@ +from __future__ import division +import numpy as np +from numpy import linalg as LA + +from time import time + +from sklearn import linear_model + +from sim import GazeSimulation +from svis import Ray +from minimize import findInitialW, _q, g, minimizeEnergy, minimizeEnergyGivenE, minimizeUsingScaleVector + +from geom import getSphericalCoords, getAngularDiff + +import visual as vs +# from visual import vector as v # for vector operations +from vector import Vector as v # for vector operations + +class Experiment: + + def performExperiment(self): + print 'Running Experiment...' 
+ start = time() + self.__run__() + self.runningTime = time() - start + print 'Running Time:', self.runningTime + if self.minimizationTime: + print 'Minimization Time:', self.minimizationTime + + def __run__(self): + pass + + +class Experiment1(Experiment): + + def __run__(self): + ''' + Simply evaluates 2D gaze estimation (using pupil calibration method) + ''' + sim = GazeSimulation(log = False) + ## Uncomment for shifting eyeball to scene camera coordinates + sim.scene_eye_distance_factor = 0.6 # controls how much to move eyeball towards scene camera [0-1] + sim.place_eyeball_on_scene_camera = False + + # calibration_grid, calibration_random_depth, test_grid, test_random_depth, test_random_fixed_depth + experiments = [ + [True, False, False, True, False], + [True, False, False, False, False], + [True, False, False, False, True], + [False, True, False, True, False], + [False, True, False, False, False], + [False, True, False, False, True], + [False, False, False, True, False], + [False, False, False, False, False], + [False, False, False, False, True], + # [True, False, True, False, False], + ] + + nsim = 100 + for experiment in experiments: + res = [sim.runCustomSimulation(experiment) for n in xrange(nsim)] + res = filter(lambda aae: aae>=0, res) + M = max(res) + m = min(res) + aae = sum(res) / len(res) + print 'AAE: %2.5f - Min AAE: %2.5f | Max AAE: %2.5f' % (aae, m, M) + + eye_scene_diff = v(sim.sclera_pos) - v(sim.scene_camera.t) + print "Eyeball-SceneCamera distance was", eye_scene_diff.mag + + self.sim = sim + self.minimizationTime = -1 + + +class Experiment1p5(Experiment): + + def __run__(self): + ''' + Read 3D gaze estimation experiment + ''' + sim = GazeSimulation(log = False) + ## Uncomment for shifting eyeball to scene camera coordinates + sim.place_eyeball_on_scene_camera = True + sim.scene_eye_distance_factor = 0.6 + # sim.scene_eye_distance_factor = 1.0 + + # calibration_grid, calibration_random_depth, test_grid, test_random_depth, 
test_random_fixed_depth + # -> grid setting + # con = [True, False, True, False, True] + # -> general setting with grid learning + # con = [True, False, False, True, False] + con = [True, False, False, False, False] + # -> general setting with random learning + # con = [False, True, False, True, False] + sim.num_calibration = 36 + sim.num_test = 25 + aae_2d = sim.runCustomSimulation(con) + + ## Fetching the calibration points + t, p = sim.calibration_points, sim.tr_pupil_locations + # target positions are computed relative to the scene CCS + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # verified + # p = sim.eye_camera.getNormalizedPts(p) + + start = time() + + eye_scene_diff = v(sim.sclera_pos) - v(sim.scene_camera.t) + e = np.array(eye_scene_diff) # eyeball coords in scene CCS + print 'Real e', e + e_org = e[:] + print 'Eye-Scene distance', eye_scene_diff.mag + # w, w0 = minimizeEnergyGivenE(p, t, e) + + ## Finding the optimal transformation matrix by minimizing the nonlinear energy + # w0 is the initial w by solving the leastsq with e=(0,0,0) + # w is by solving the leastsq again optimizing for both e and w + w, e, w0 = minimizeEnergy(p, t) + + # here were are initializing minimization with an e close to the ground + # truth e by adding random perturbations to it + # w, e, w0 = minimizeEnergy(p, t, e + np.random.rand(1, 3)[0]) + + print 'Estimated e', e, '( Distance:', LA.norm(e - e_org) , ')' + self.minimizationTime = time() - start + print + + sim.w = w + + ## Fetching test points + t, p = sim.test_points, sim.te_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene CCS + + # closest point distance to scene camera + cDist = min(v(pt).mag for pt in t) + # farthest point distance to scene camera + fDist = max(v(pt).mag for pt in t) + # average point distance to scene camera + avgDist = sum(v(pt).mag for pt in t)/len(t) + + + # p = sim.eye_camera.getNormalizedPts(p) + # print + # print p + # print 
+ + + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + gis0 = map(lambda q: g(q, w0), qi) + + # now we can compare unit gaze vectors with their corresponding gaze rays t + # normalizing gaze rays first + t = map(lambda vec: v(vec).norm(), t) + # TODO: compare spherical coordinates instead + + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gis, t)) + N = len(t) + AAE = sum(AE)/N + VAR = sum((ae - AAE)**2 for ae in AE)/N + print 'AAE:', AAE, '\nVariance:', VAR, 'STD:', np.sqrt(VAR), '\nMin:', min(AE), 'Max:', max(AE), '(N=' + str(N) + ')' + print 'Target Distances: m=%s M=%s Avg=%s' % (cDist, fDist, avgDist) + + AE0 = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gis0, t)) + AAE0 = sum(AE0)/N + print 'AAE (only optimizing W for e=(0,0,0)):', AAE0 + + print 'AAE (2D-to-2D mapping):', aae_2d + print ('Improvement (2D-2D vs 2D-3D): %s' % round((aae_2d - AAE)*100/aae_2d, 2)) + '%' + print ('Improvement (2D-2D vs 2D-2D): %s' % round((aae_2d - AAE0)*100/aae_2d, 2)) + '%' + + + sim.display_test_points = True + sim.display_calibration_points = False + + ## Visualizing gaze vector + ray_scale_factor = 120 + ground_truth_gaze_rays = [] + for pt in t: + ground_truth_gaze_rays.append(Ray(sim.scene_camera.t, ray_scale_factor, v(pt), vs.color.green)) + sim.rays = ground_truth_gaze_rays + + estimated_gaze_rays = [] + for pt in gis: + estimated_gaze_rays.append(Ray(sim.scene_camera.t, ray_scale_factor, v(pt), vs.color.red)) + sim.rays.extend(estimated_gaze_rays) + + AE = [] + for pair in zip(t, gis): + gt_r, e_r = pair + base = v(sim.scene_camera.t) + gt_ray_endpoint = base + (v(gt_r) * ray_scale_factor) + e_ray_endpoint = base + (v(e_r) * ray_scale_factor) + AE.append(getAngularDiff(gt_ray_endpoint, e_ray_endpoint, base)) 
+ + diff = gt_ray_endpoint - e_ray_endpoint + sim.rays.append(Ray(e_ray_endpoint, diff.mag, diff.norm(), vs.color.orange)) + + # Recomputing AAE (using law of cosines) sum(AE)/N + assert(round(sum(AE)/N - AAE) == 0) + + self.sim = sim + print + +def p3dEx(): + ex = Experiment1p5() + ex.performExperiment() + return ex + +def experiment2(s = 1): + sim = GazeSimulation(log = False) + sim.place_eyeball_on_scene_camera = True + + # 2/3 for calibration, 1/3 for test + # sim.num_calibration = 66 + # sim.num_test = 33 + # sim.reset() + + # calibration_grid, calibration_random_depth, test_grid, test_random_depth, test_random_fixed_depth + # con = [True, False, False, False, True] + # con = [False, True, False, True, False] + con = [True, False, True, False, True] + # sim.runSimulation() + # sim.num_calibration = 36 + sim.num_calibration = 25 + sim.num_test = 25 + sim.runCustomSimulation(con) + + # retrieving calibration points (t for 3D target positions and p for 2D pupil locations) + t, p = sim.calibration_points, sim.tr_pupil_locations + # t = sim.tr_target_projections # projections of target points + # target positions are computed relative to the scene CCS + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # verified + + # p = sim.eye_camera.getNormalizedPts(p) + + # computing the initial mapping matrix w by solving the energy minimization + # for e=(0, 0, 0) + # w = findInitialW(p, t, True) + # print w + # w, e = minimizeEnergy(p, t) + # print w + w = np.array([[s, 0], + [0, s]]) + # w = minimizeUsingScaleVector(p, t) + + # e = np.array(v(sim.sclera_pos) - v(sim.scene_camera.t)) # eyeball coords in scene CCS + # w = minimizeEnergyGivenE(p, t, e) + + t, p = sim.test_points, sim.te_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene CCS + + # TODO, is this necessary? 
(due to eye camera rotation) + p = map(lambda pt: np.array([-pt[0], pt[1]]), p) + + # p = sim.eye_camera.getNormalizedPts(p) + + # this w can be used for 2D-2D mapping (apparently!) testing it + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + + # now we can compare unit gaze vectors with their corresponding gaze rays t + # normalizing gaze rays + t = map(lambda vec: v(vec).norm(), t) + SAE = sum(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gis, t)) + # return SAE / len(t) + + sim.display_test_points = True + sim.display_calibration_points = False + ## Visualizing gaze vector + ray_scale_factor = 120 + ground_truth_gaze_rays = [] + for pt in t: + ground_truth_gaze_rays.append(Ray(sim.scene_camera.t, ray_scale_factor, v(pt), vs.color.green)) + sim.rays = ground_truth_gaze_rays + + estimated_gaze_rays = [] + for pt in gis: + estimated_gaze_rays.append(Ray(sim.scene_camera.t, ray_scale_factor, v(pt), vs.color.red)) + sim.rays.extend(estimated_gaze_rays) + + AE = [] + for pair in zip(t, gis): + gt_r, e_r = pair + base = v(sim.scene_camera.t) + gt_ray_endpoint = base + (v(gt_r) * ray_scale_factor) + e_ray_endpoint = base + (v(e_r) * ray_scale_factor) + AE.append(getAngularDiff(gt_ray_endpoint, e_ray_endpoint, base)) + + diff = gt_ray_endpoint - e_ray_endpoint + sim.rays.append(Ray(gt_ray_endpoint, diff.mag, -diff.norm(), vs.color.orange)) + + return sim + # print e + +def experiment3(): + sim = GazeSimulation(log = False) + # 2/3 for calibration, 1/3 for test + # sim.num_calibration = 66 + # sim.num_test = 33 + # sim.reset() + + # calibration_grid, calibration_random_depth, test_grid, test_random_depth, test_random_fixed_depth + con = [True, False, False, True, False] + # con = [False, True, False, True, False] + # sim.runSimulation() + 
sim.runCustomSimulation(con) + + # retrieving calibration points (t for 3D target positions and p for 2D pupil locations) + t, p = sim.calibration_points, sim.tr_pupil_locations + # t = sim.tr_target_projections # projections of target points + # target positions are computed relative to the scene CCS + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) + + # p = sim.eye_camera.getNormalizedPts(p) + + # Applying regression using LARS (Least Angle Regression) + qi = map(_q, p) + clf = linear_model.Lars(n_nonzero_coefs=np.inf) # n_nonzero_coefs=1 + t = map(lambda vec: v(vec).norm(), t) + clf.fit(qi, t) + + t, p = sim.test_points, sim.te_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) + # p = sim.eye_camera.getNormalizedPts(p) + + # this w can be used for 2D-2D mapping (apparently!) testing it + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: clf.predict(q)[0], qi) + + # now we can compare unit gaze vectors with their corresponding gaze rays t + # normalizing gaze rays + t = map(lambda vec: v(vec).norm(), t) + SAE = sum(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gis, t)) + print SAE / len(t) + # print e + +def experimentGT(): + ## Here we are using the ground truth gaze vectors (from the eye) and we compare + ## them with ground truth vectors initiating from the scene camera + ## In case eyeball and scene camera are located at the same point, the AAE should + ## be reported as zero, this is only to make sure we're doing the right thing + + sim = GazeSimulation(log = False) + # Uncomment to shift eyeball to scene camera + # sim.place_eyeball_on_scene_camera = True + + # 2/3 for calibration, 1/3 for test + # sim.num_calibration = 66 + # sim.num_test = 33 + # sim.reset() + + # calibration_grid, calibration_random_depth, test_grid, 
test_random_depth, test_random_fixed_depth + con = [True, False, False, True, False] + # con = [False, True, False, True, False] + # sim.runSimulation() + + sim.runCustomSimulation(con) + + # retrieving calibration points (t for 3D target positions and p for 2D pupil locations) + t, p, tp = sim.calibration_points, sim.tr_pupil_locations, sim.tr_target_projections + # t = sim.tr_target_projections # projections of target points + # target positions are computed relative to the scene CCS + + t_org = t[:] + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) + + rays_from_scene = [] + for pt in t: + rays_from_scene.append(Ray(sim.scene_camera.t, v(pt).mag, v(pt).norm(), vs.color.red)) + sim.rays = rays_from_scene + + + rays_from_scene_to_target_projections = [] + base = v(sim.scene_camera.t) + v(sim.scene_camera.direction) * sim.scene_camera.f + for pp in tp: + # print pp + # since we wanna draw rays starting from scene camera, + # ray is relative to scene CCS + ray = base + v(pp[0], pp[1], 0) - v(sim.scene_camera.t) + rays_from_scene_to_target_projections.append(Ray(v(sim.scene_camera.t), ray.mag * 4, ray.norm(), vs.color.orange)) + sim.rays.extend(rays_from_scene_to_target_projections) + + t = map(lambda vec: v(vec).norm(), t) + + alpha_GT = [] + _deg = lambda triple: (np.degrees(triple[0]), np.degrees(triple[1]), triple[2]) + print + for pt in t: + alpha_GT.append(_deg(getSphericalCoords(pt[0], pt[1], pt[2]))) + # print _deg(getSphericalCoords(pt[0], pt[1], pt[2])), pt[2] + print + + # starting from eyeball + base = v(sim.sclera_pos) + # direction vector from eyeball towards eye camera + d = -v(sim.eye_camera.direction).norm() + # calculating distance between eyeball and image plane of the eye camera + dist = (v(sim.eye_camera.t) - v(sim.sclera_pos)).mag - sim.eye_camera.f + + p = map(lambda p: np.array((-p[0], p[1])), p) # -x due to orientation + sf = 4 + p = map(lambda p: p.dot(np.array([[sf, 0], + [0 , sf]])), p) + gis = map(lambda p: (d*dist + v(p[0], 
p[1], 0)).norm(), p) + + alpha_test = [] + + rays_from_eye = [] + for pt in gis: + alpha_test.append(_deg(getSphericalCoords(pt[0], pt[1], pt[2]))) + rays_from_eye.append(Ray(sim.sclera_pos, 200, pt, vs.color.yellow)) + sim.rays.extend(rays_from_eye) + + # gis = map(lambda t: (v(t) - v(sim.sclera_pos)).norm(), t_org) + + # now we can compare unit gaze vectors with their corresponding gaze rays t + # normalizing gaze rays + + # for _t, _gis in zip(t, gis): print _t, _gis + SAE = sum(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gis, t)) + print SAE / len(t) + return sim + + + +# experiment1() + +# for s in np.arange(1, 20, 0.25): +# print experiment2(s), s + +# experiment3() + +# experimentGT() +# experiment1p5() \ No newline at end of file diff --git a/code/geom.py b/code/geom.py new file mode 100644 index 0000000..debff31 --- /dev/null +++ b/code/geom.py @@ -0,0 +1,288 @@ +from __future__ import division +import numpy as np +from random import sample + +def_fov = np.pi * 2./3 + +def generatePoints(n, min_xyz, max_xyz, grid=False, randomZ=True, randFixedZ=False, depth=None, offset=0.5, xoffset=0, yoffset=0, zoffset=0): + if randFixedZ: # means all points have the same z but z is chosen at random between max and min z + z = min_xyz[2] + np.random.random() * (max_xyz[2] - min_xyz[2]) + else: # same depth + if not isinstance(depth, list) and not depth: # depth is exactly the middle of min and max z + z = min_xyz[2] + (max_xyz[2] - min_xyz[2]) / 2 + else: + z = depth + if not grid: # compute randomly + xr, yr, zr = max_xyz[0] - min_xyz[0], max_xyz[1] - min_xyz[1], max_xyz[2] - min_xyz[2] + + xr = np.random.rand(1, n)[0] * xr + min_xyz[0] + yr = np.random.rand(1, n)[0] * yr + min_xyz[1] + if randomZ: + zr = np.random.rand(1, n)[0] * zr + min_xyz[2] + else: + zr = np.ones((1, n))[0] * z + return zip(xr, yr, zr) + else: # compute points on a mXm grid when m = sqrt(n) + m = int(np.sqrt(n)) + gwx = (max_xyz[0] - min_xyz[0]) / m + gwy = 
(max_xyz[1] - min_xyz[1]) / m + zr = max_xyz[2] - min_xyz[2] + if randomZ: + return [(min_xyz[0] + (i+offset) * gwx + xoffset, + min_xyz[1] + (j+offset) * gwy + yoffset, + np.random.random() * zr + min_xyz[2] + zoffset) for i in xrange(m) for j in xrange(m)] + else: + if not isinstance(depth, list): + ret = [(min_xyz[0] + (i+offset) * gwx + xoffset, # offset .5 + min_xyz[1] + (j+offset) * gwy + yoffset, # offset .5 + z + zoffset) for i in xrange(m) for j in xrange(m)] + # return ret + return sample(ret, len(ret)) # this shuffles the points + else: + ret = [] + for dz in depth: + ret.extend([(min_xyz[0] + (i+offset) * gwx + xoffset, + min_xyz[1] + (j+offset) * gwy + yoffset, + dz + zoffset) for i in xrange(m) for j in xrange(m)]) + # return ret + return sample(ret, len(ret)) # this shuffles the points + + +def getSphericalCoords(x, y, z): + ''' + According to our coordinate system, this returns the + spherical coordinates of a 3D vector. + A vector originating from zero and pointing to the positive Z direction (no X or Y deviation) + will correspond to (teta, phi) = (0, 90) (in degrees) + The coordinate system we are using is similar to https://en.wikipedia.org/wiki/File:3D_Spherical_2.svg + + Y + | + | + |______X + / + / + / + Z + + with a CounterClockwise rotation of the axis vectors + ''' + r = np.sqrt(x*x + y*y + z*z) + teta = np.arctan(x/z) + phi = np.arccos(y/r) + return teta, phi, r + +def getAngularDiff(T, E, C): + ''' + T is the target point + E is the estimated target + C is camera center + Returns angular error + + (using law of cosines: http://mathcentral.uregina.ca/QQ/database/QQ.09.07/h/lucy1.html) + ''' + t = (E - C).mag + e = (C - T).mag + c = (T - E).mag + return np.degrees(np.arccos((e*e + t*t - c*c)/(2*e*t))) + +def getRotationMatrixFromAngles(r): + ''' + Returns a rotation matrix by combining elemental rotations + around x, y', and z'' + + It also appends a zero row, so the end result looks like: + [R_11 R_12 R_13] + [R_11 R_12 R_13] + [R_11 R_12 
R_13] + [0 0 0 ] + ''' + cos = map(np.cos, r) + sin = map(np.sin, r) + Rx = np.array([ + [1, 0, 0], + [0, cos[0], -sin[0]], + [0, sin[0], cos[0]]]) + Ry = np.array([ + [ cos[1], 0, sin[1]], + [ 0 , 1, 0], + [-sin[1], 0, cos[1]]]) + Rz = np.array([ + [cos[2], -sin[2], 0], + [sin[2], cos[2], 0], + [0 , 0, 1]]) + R = Rz.dot(Ry.dot(Rx)) + return np.concatenate((R, [[0, 0, 0]])) + +# import cv2 +# def getRotationMatrix(a, b): +# y = a[1] - b[1] +# z = a[2] - b[2] +# x = a[0] - b[0] +# rotx = np.arctan(y/z) +# roty = np.arctan(x*np.cos(rotx)/z) +# rotz = np.arctan(np.cos(rotx)/(np.sin(rotx)*np.sin(roty))) +# return cv2.Rodrigues(np.array([rotx, roty, rotz]))[0] + +def getRotationMatrix(a, b): + ''' + Computes the rotation matrix that maps unit vector a to unit vector b + + It also augments a zero row, so the end result looks like: + [R_11 R_12 R_13] + [R_11 R_12 R_13] + [R_11 R_12 R_13] + [0 0 0 ] + + (simply slice the output like R = output[:3] to get only the rotation matrix) + + based on the solution here: + https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d + ''' + a, b = np.array(a), np.array(b) + v = np.cross(a, b, axis=0) + s = np.linalg.norm(v) # sine of angle + c = a.dot(b) # cosine of angle + + vx = np.array([ + [0 , -v[2], v[1]], + [v[2] , 0, -v[0]], + [-v[1], v[0], 0]]) + + if s == 0: # a == b + return np.concatenate((np.eye(3), [[0, 0, 0]])) + if c == 1: + return np.concatenate((np.eye(3) + vx, [[0, 0, 0]])) + return np.concatenate((np.eye(3) + vx + vx.dot(vx)*((1-c)/s/s), [[0, 0, 0]])) + + + + +class PinholeCamera: + ''' + Models a basic Pinhole Camera with 9 degrees of freedom + ''' + # Intrinsic parameters + f = 1 # focal length + p = (0, 0) # position of principal point in the image plane + # Extrinsic parameters + # this rotation corresponds to a camera setting pointing towards (0, 0, -1) + r = (0, 0, 0) # rotations in x, y', and z'' planes respectively + t = (0, 0, 0) # camera center 
translation w.r.t world coordinate system (with no rotation) + # + # Using the above parameters we can construct the camera matrix + # + # [f 0 p.x 0] + # P = K[R|t] where K = [0 f p.y 0] is the camera calibration matrix (full projection matrix) + # [0 0 1 0] + # + # and thus we have x = PX for every point X in the world coordinate system + # and its corresponding projection in the camera image plane x + # NOTE: points are assumed to be represented by homogeneous vectors + # + # Other parameters + label = '' + + direction = (0, 0, 1) # camera direction + fov = def_fov # field of view (both horizontal and vertical) + image_width = 2*f*np.tan(fov/2.) + ################################################ + def __init__(self, label, f = 1, r = (0, 0, 0), t = (0, 0, 0), direction = (0, 0, 1), fov=def_fov): + self.label = label + self.f = f + self.r = r + self.direction = direction + self.t = t + self.setFOV(fov) + self.recomputeCameraMatrix(True, True) + + def recomputeCameraMatrix(self, changedRotation, changedIntrinsic): + if changedRotation: + # # Computing rotation matrix using elemental rotations + self.R = getRotationMatrixFromAngles(self.r) + # by default if rotation is 0 then camera optical axis points to positive Z + self.direction = np.array([0, 0, 1, 0]).dot(self.R) + + # Computing the extrinsic matrix + _t = -self.R.dot(np.array(self.t)) # t = -RC + self.Rt = np.concatenate((self.R, np.array([[_t[0], _t[1], _t[2], 1]]).T), axis=1) + # instead of the above, we could also represent translation matrix as [I|-C] and + # [R -RC] + # then compute Rt as R[I|-C] = [0 1] [R] [-RC] + # but we're basically doing the same thing by concatenating [0] with [ 1] + + if changedIntrinsic: + # Computing intrinsic matrix + f, px, py = self.f, self.p[0], self.p[1] + self.K = np.array([ + [f, 0, px, 0], + [0, f, py, 0], + [0, 0, 1, 0]]) + # Full Camera Projection Matrix + self.P = self.K.dot(self.Rt) + + ################################################ + ## Intrinsic parameter setters + 
def setF(self, f, auto_adjust = True): + self.f = f + if auto_adjust: + self.setFOV(self.fov) + self.recomputeCameraMatrix(False, True) + def setP(self, p, auto_adjust = True): + self.p = p + if auto_adjust: + self.recomputeCameraMatrix(False, True) + def setFOV(self, fov): + self.fov = fov + self.image_width = 2*self.f*np.tan(fov/2.) + ################################################ + ## Extrinsic parameter setters + def setT(self, t, auto_adjust = True): + self.t = t + if auto_adjust: + self.recomputeCameraMatrix(False, False) + def setR(self, r, auto_adjust = True): + self.r = r + if auto_adjust: + self.recomputeCameraMatrix(True, False) + ################################################ + def project(self, p): + ''' + Computes projection of a point p in world coordinate system + using its homogeneous vector coordinates [x, y, z, 1] + ''' + if len(p) < 4: p = (p[0], p[1], p[2], 1) + projection = self.P.dot(np.array(p)) + # dividing by the Z value to get a 2D point from homogeneous coordinates + return np.array((projection[0], projection[1]))/projection[2] + + def getNormalizedPts(self, pts): + ''' + Returns normalized x and y coordinates in range [0, 1] + ''' + px, py = self.p[0], self.p[1] + return map(lambda p:np.array([p[0] - px + self.image_width/2, + p[1] - py + self.image_width/2]) / self.image_width, pts) + def getDenormalizedPts(self, pts): + ''' + Returns original points in the camera image coordinate plane from normalized points + ''' + px, py = self.p[0], self.p[1] + offset = np.array([self.image_width/2-px, self.image_width/2-py]) + return map(lambda p:(p*self.image_width)-offset, pts) + +class Camera(PinholeCamera): + default_radius = 2.0 + + def updateShape(self): + c = v(self.t) + # Update camera center + self.center.pos = c + # Update the arrow + self.dir.pos = c + self.dir.axis = v(self.direction) * self.f + # Update image plane + self.img_plane.pos = c + self.dir.axis + self.img_plane.length = self.image_width + self.img_plane.height = 
self.image_width + # TODO: handle rotation of image plane \ No newline at end of file diff --git a/code/minimize.py b/code/minimize.py new file mode 100644 index 0000000..a9d1608 --- /dev/null +++ b/code/minimize.py @@ -0,0 +1,131 @@ +from __future__ import division +import numpy as np +from numpy import linalg as LA +from scipy import optimize +from sklearn.preprocessing import PolynomialFeatures as P +from vector import Vector as v +import cv2 + +M = 7 # this is the size of the feature vector +_q = lambda p: np.array([p[0], p[1], p[0]*p[0], p[1]*p[1], p[0]*p[1], p[0]*p[1]*p[0]*p[1], 1]) + +def alpha(q, w): + ''' + q is the polynomial representation of a 2D pupil point p + w is the Mx2 parameter matrix (M is the length of our feature vector) + (could be initialized as a zero matrix np.zeros((M, 2))) + returns alpha_i = (theta_i, phi_i) + ''' + theta = np.dot(w[:, 0], q) + phi = np.dot(w[:, 1], q) + return theta, phi + +def g(q, w): + ''' + computes the unit gaze ray originating from the eye corresponding to q + ''' + theta, phi = alpha(q, w) + sin = map(np.sin, (theta, phi)) + cos = map(np.cos, (theta, phi)) + ## This is THE definition, maps (0,0) to (0,0,1) and it produces a unit vector + return np.array([sin[0], + cos[0]*sin[1], + cos[0]*cos[1]]) +def g3D3D(pose): + theta, phi = pose + sin = map(np.sin, (theta, phi)) + cos = map(np.cos, (theta, phi)) + return np.array([sin[0], + cos[0]*sin[1], + cos[0]*cos[1]]) + +#################################################################################### +## Initial attempt to solve E(w, 0) Using a similar idea to: +## http://scipy-lectures.github.io/advanced/mathematical_optimization/#id28 + +def getWfromRowRepr(w): + w1 = w[:M] # 1st col + w2 = w[M:] # 2nd col + return np.array([w1, w2]).transpose() + +def getRfromRowRepr(w): + return np.array([w[:3], w[3:6], w[6:]]) + +def f(w, qi, ti): + ''' + E(\bm{w}) = \sum_{i=1}^N | \bm{(\theta_i, \phi_i)} - \bm{q}_i\bm{w}|^2 + ''' + w = getWfromRowRepr(w) + alpha_i = 
map(lambda q: alpha(q, w), qi) # theta, phi + # we compare the estimated polar coordinates with polar coordinates corresponding + # to ti as in the 2D to 2D case + p_ti = map(lambda g: [np.arctan(g.x/g.z), np.arctan(g.y/g.z)], map(v,ti)) + return map(lambda pair: np.sqrt((pair[0][0]-pair[1][0])**2 + (pair[0][1]-pair[1][1])**2), zip(p_ti, alpha_i)) + +def findInitialW(p, t, retMatrix = True): + ti = map(np.array, t) + qi = map(_q, p) + + w0 = np.zeros(2*M) + wr = optimize.leastsq(f, w0[:], args=(qi, ti)) + if retMatrix: + return getWfromRowRepr(wr[0]) + else: + return wr[0] + +# tub function > 0 in -pi:pi, positive outside +tub = lambda x: max(-x-np.pi, 0, x-np.pi) +tub_weight = 10000 +def f3D3D(w, ni, ti): + ''' + E_{\textmd{3Dto3D}}(\bm{R}, \bm{e}) = \sum_{i=1}^N |\bm{R} \bm{n}_i \times (\bm{t}_i - \bm{e}) |^2 + ''' + rvec = w[:3] + tvec = w[3:] # e + R = cv2.Rodrigues(np.array(rvec))[0] + return map(lambda (t, n): LA.norm(np.cross(R.dot(n), t-tvec)), zip(ti, ni)) + \ + [tub_weight * tub(w[0]), tub_weight * tub(w[1]), tub_weight * tub(w[2])] + # this distance measure works if we initialize the minimization by np.array([0.,0.,0.,0.,0.,0.]) + # return map(lambda (t, n): -np.dot(R.dot(n), t-tvec)/LA.norm(t-tvec) + 1, zip(ti, ni)) + +def findW3D3D(po, t): + ''' + po stands for pose i.e. 
a vector in the direction of gaze for each pupil + ''' + ti = map(np.array, t) # ground truth data in scene camera coordinate system + ni = map(np.array, po) # pupil pose in eye coordinate system (normal to pupil disc) + # w0 = np.array([0.,0.,0.,0.,0.,0.]) # rvec (roll, pitch, yaw), tvec (x, y, z) + w0 = np.array([0.,np.pi,0.,0.,0.,0.]) # rvec (roll, pitch, yaw), tvec (x, y, z) + wr = optimize.leastsq(f3D3D, w0[:], args=(ni, ti)) + return wr[0] +#################################################################################### +## Solving target energy using e0 = (0, 0, 0) and w0 from above + +def minimizeEnergy(p, t, e0 = None, pose_given = False): + if e0 is None: + e0 = np.array([0, 0, 0]) + + if pose_given: + w = findW3D3D(p, t) + return cv2.Rodrigues(np.array(w[:3]))[0], w[3:] # R, e + else: + w0 = findInitialW(p, t, False) + qi = map(_q, p) + + ti = map(np.array, t) + we0 = np.concatenate((w0, e0)) + + wer = optimize.leastsq(f2D3D, we0[:], args=(qi, ti))[0] + w = getWfromRowRepr(wer[:2*M]) + e = wer[2*M:] + return w, e, getWfromRowRepr(w0) + +def f2D3D(we, qi, ti): + ''' + E_{\textmd{2Dto3D}}(\bm{w}, \bm{e}) = \sum_{i=1}^N |\bm{g}(\bm{q}_i\bm{w}) \times (\bm{t}_i - \bm{e})|^2 + ''' + # extracting parameters from the combined vector + w = getWfromRowRepr(we[:2*M]) + e = we[2*M:] + gis = map(lambda q: g(q, w), qi) + return map(lambda pair: LA.norm(np.cross(pair[1], pair[0]-e)), zip(ti, gis)) \ No newline at end of file diff --git a/code/parallax_2D3D_3Cdepths.py b/code/parallax_2D3D_3Cdepths.py new file mode 100644 index 0000000..47fe325 --- /dev/null +++ b/code/parallax_2D3D_3Cdepths.py @@ -0,0 +1,249 @@ +from __future__ import division + +import os +import seaborn +from pylab import rcParams +import cv2 + +import numpy as np +from numpy import linalg as LA +from time import time +from itertools import combinations + +import matplotlib.pyplot as plt +import matplotlib.patches as mpatches + +from minimize import findInitialW, _q, g, minimizeEnergy, g3D3D, 
findW3D3D +from minimize import g as gaze_ray +from geom import getSphericalCoords, getAngularDiff +from vector import Vector as v +# from experiment import Experiment +from sim import GazeSimulation +from recording.util.tools import is_outlier +from recording.tracker import readCameraParams +from geom import getRotationMatrix + + +class Experiment: + def performExperiment(self): + print 'Running Experiment...' + start = time() + self.__run__() + self.runningTime = time() - start + print 'Running Time:', self.runningTime + def __run__(self): + pass + +class Parallax2Dto3DMapping(Experiment): + def __run__(self): + sim = GazeSimulation(log = False) + + sim.place_eyeball_on_scene_camera = False + sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) + # sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. e = (e.x, e.y, 0) + sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera + sim.setTestDepth(1.5 * 1000) + sim.calibration_grid = True + sim.calibration_random_depth = False + sim.test_grid = True + sim.test_random_depth = False + sim.test_random_fixed_depth = False + + depths = map(lambda d:d*1000, [1, 1.25, 1.5, 1.75, 2.0]) + print '> Computing results for multiple calibration depths...' + results = [] + for num_of_calibration_depths in xrange(1, 6): # from 1 calibration depths to 5 + print '> Considering only %s calibration depth(s)...' 
%num_of_calibration_depths + sim.reset() + if num_of_calibration_depths != 3: continue + + for calibs in combinations(depths, num_of_calibration_depths): + aae_2ds_aae = [] + aae_2ds_phe = [] + aae_3ds_aae = [] + aae_3ds_phe = [] + aae_3D3Ds = [] # angular error + + # Now calibs is a set of depths, from each of those we need calibration data + # print calibs + if not calibs in [(1000., 1250., 1500.), (1250., 1500., 1750.), (1500., 1750., 2000.)]: + continue + print 'curr calibs', calibs + calibs = list(calibs) + cp, ct = [], [] + sim.reset() + sim.setCalibrationDepth(calibs) + + # Perform calibration + sim.runCalibration() + cp, ct, p3d = sim.tr_pupil_locations, sim.calibration_points, sim.tr_3d_pupil_locations + + # target positions are computed relative to the scene CCS + ti = map(lambda target: v(target) - v(sim.scene_camera.t), ct) + # Computing pupil pose for each gaze + ni = map(lambda p: (v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + + w, e, w0 = minimizeEnergy(cp, ti) + e = v(e) + + # transforming pupil pose to eye camera CS + eyeR = np.array(sim.eye_camera.R[:3]) + ni = map(lambda pose: eyeR.dot(np.array(pose)), ni) + + R, e3d3d = minimizeEnergy(ni, ti, pose_given=True) + e3d3d = v(e3d3d) + # R = LA.inv(R) + + # Now we have calibration data from multiple depths, we can test on all depths + for test_depth in depths: + sim.setTestDepth(test_depth) + aae_2d_aae, aae_2d_phe, aae_2d_std, _ = sim.runTest() # 2nd one is PHE + aae_2ds_aae.append((aae_2d_aae, aae_2d_std)) + aae_2ds_phe.append(aae_2d_phe) + + # Fetching test points + t, p, p3d = sim.test_points, sim.te_pupil_locations, sim.te_3d_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene CCS + + # 3D3D + t_3d3d = t[:] + + ni = map(lambda p: v(v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + # transforming pupil pose to eye camera CS + ni = map(lambda r: v(eyeR.dot(np.array(r))), ni) + + # applying estimated rotation 
to pose vector in eye camera coordinates (Rn) + # R is estimated rotation between scene camera and eye coordinate system (not eye camera!) + # in other words, R is the rotation part of e + Rni = map(lambda n: v(R.dot(np.array(n))), ni) # now ready to compare Rn with t-e + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. then we can simply compute angular error between each intersection and + # the corresponding 3D target + gis = map(lambda vec: v(vec), Rni) # gaze rays originating from eyeball + # we multiply g such that it hits t's z-plane i.e. multiply all coordinates by factor (t.z-e.z)/g.z + # then we add e to the final g so that it originates from scene camera. now both g and t are in the + # same coordinate system and originate from the same point, so we can compare them + gprimes = map(lambda tg: v(((tg[0].z - e3d3d.z)/tg[1].z)*tg[1] + e3d3d), zip(t_3d3d, gis)) + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t_3d3d)) + + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + aae_3D3Ds.append((AAE, STD)) + + + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. 
then we can simply compute angular error between each intersection and + # the corresponding 3D target + t = map(lambda vec: v(vec), t) + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag/1000 for u,v in zip(t, gprimes)) + N = len(t) + APHE = np.mean(PHE) + PHE_STD = np.std(PHE) + PHE_m, PHE_M = min(PHE), max(PHE) + + aae_3ds_aae.append((AAE, STD)) + aae_3ds_phe.append((PHE, PHE_STD)) + + + results.append([calibs, aae_2ds_aae, aae_3ds_aae, aae_3D3Ds]) + + plt.ylabel('Angular Error') + plt.xlabel('Depth') + + fig = plt.figure(figsize=(14.0, 10.0)) + ax = fig.add_subplot(111) + + clrs = ['blue', 'red', 'orange'] + depth_map = {1000.:1, 1250.:2, 1500.:3, 1750.:4, 2000.:5} + cdlabel = lambda c: ' + '.join(map(str, map(lambda d: depth_map[d], c))) + + x1 = [0.375,1.375,2.375,3.375,4.375] + x2 = [0.425,1.425,2.425,3.425,4.425] + x3 = [0.475,1.475,2.475,3.475,4.475] + x4 = [0.525,1.525,2.525,3.525,4.525] + x5 = [0.575,1.575,2.575,3.575,4.575] + x6 = [0.625,1.625,2.625,3.625,4.625] + xrange_2d2d = [x1, x3, x5] + xrange_2d3d = [x2, x4, x6] + + for i in [0, 1, 2]: + res = results[i] + calibs = res[0] + + _xrange = xrange_2d2d[i] + aae_2d2d = np.array(res[1])[:,0] + std_2d2d = np.array(res[1])[:,1] + rects1 = ax.errorbar(_xrange, aae_2d2d,yerr=[std_2d2d, std_2d2d],fmt='o',color=clrs[i],ecolor=clrs[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_2d2d, marker="o", linestyle='-',lw=3,color=clrs[i],label = '2D-to-2D Calibration Depth '+cdlabel(calibs)) + + for i in [0, 1, 2]: + res = results[i] + calibs = res[0] + + _xrange = xrange_2d3d[i] + aae_2d3d = np.array(res[2])[:,0] + std_2d3d = np.array(res[2])[:,1] + rects2 = ax.errorbar(_xrange, 
aae_2d3d,yerr=[std_2d3d, std_2d3d],fmt='o',color=clrs[i],ecolor=clrs[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_2d3d, marker="o", linestyle='--',lw=3,color=clrs[i],label ='2D-to-3D Calibration Depth '+cdlabel(calibs)) + + for i in [0, 1, 2]: + res = results[i] + calibs = res[0] + + _xrange = xrange_2d2d[i] + aae_3d3d = np.array(res[3])[:,0] + std_3d3d = np.array(res[3])[:,1] + rects3 = ax.errorbar(_xrange, aae_3d3d,yerr=[std_3d3d, std_3d3d],fmt='o',color=clrs[i],ecolor=clrs[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_3d3d, marker="o", linestyle='-.',lw=3,color=clrs[i],label ='3D-to-3D Calibration Depth '+cdlabel(calibs)) + + ax.set_ylim(0, 1.5) + + ax.set_ylabel(r'Angular Error',fontsize=22, fontweight='bold') + ax.set_xlabel(r'Depth',fontsize=22, fontweight='bold') + + TOPICS = [-0.2, 0, 0.2, 0.4,0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4]#,110]#,120] + LABELS = [r'', r'0', r'0.2',r'0.4',r'0.6', r'0.8', r'1.0', r'1.2', r'1.4', r'1.6', r'1.8', r'2.0', r'2.2', r'2.4']#, ""]#, ""] + + plt.yticks(TOPICS, LABELS,fontsize=18) + + # plt.legend(fontsize=20) + plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, + ncol=3, mode="expand", borderaxespad=0., fontsize=15) + plt.yticks(fontsize='18') + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0] + LABELs = ['', 'D1 - 1m', 'D2 - 1.25m', 'D3 - 1.5m', 'D4 - 1.75m', 'D5 - 2.0m', ''] + # ax.set_xticklabels(LABELs,fontsize=18) + plt.xticks(TOPICs, LABELs,fontsize=18) + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + +if __name__ == '__main__': + # This also 
does 3D gaze estimation and plots estimation results for both 3D and 2D estimation + ex = Parallax2Dto3DMapping() + ex.performExperiment() \ No newline at end of file diff --git a/code/parallax_analysis.py b/code/parallax_analysis.py new file mode 100644 index 0000000..eb956cb --- /dev/null +++ b/code/parallax_analysis.py @@ -0,0 +1,996 @@ +from __future__ import division + +import os, sys +import seaborn +from pylab import rcParams +import cv2 + +import numpy as np +from numpy import linalg as LA +from time import time +from itertools import combinations + +import matplotlib.pyplot as plt +from matplotlib.pyplot import legend +import matplotlib.patches as mpatches + +from minimize import findInitialW, _q, g, minimizeEnergy, g3D3D, findW3D3D +from minimize import g as gaze_ray +from geom import getSphericalCoords, getAngularDiff +from vector import Vector as v + +from sim import GazeSimulation +from recording.util.tools import is_outlier +from recording.tracker import readCameraParams +from geom import getRotationMatrix + +import pdb + +PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25'] + +class Experiment: + def performExperiment(self): + print 'Running Experiment...' 
+ start = time() + self.__run__() + self.runningTime = time() - start + print 'Running Time:', self.runningTime + def __run__(self): + pass + +class Parallax2Dto2DMapping(Experiment): + def __run__(self): + sim = GazeSimulation(log = False) + + sim.place_eyeball_on_scene_camera = False + + sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) + + sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera + sim.setTestDepth(1.5 * 1000) + + sim.calibration_grid = True + sim.calibration_random_depth = False + sim.test_grid = True + sim.test_random_depth = False + sim.test_random_fixed_depth = False + + sim.reset() + + print 'scene_camera', sim.scene_camera.t + print 'calibration' + print len(sim.calibration_points) + # print sim.calibration_points + print min(np.array(sim.calibration_points)[:,0]) - sim.scene_camera.t[0], max(np.array(sim.calibration_points)[:,0]) - sim.scene_camera.t[0] + print min(np.array(sim.calibration_points)[:,1]) - sim.scene_camera.t[1], max(np.array(sim.calibration_points)[:,1]) - sim.scene_camera.t[1] + print 'depths', set(np.array(sim.calibration_points)[:,2]) + print 'test' + print len(sim.test_points) + print min(np.array(sim.test_points)[:,0]) - sim.scene_camera.t[0], max(np.array(sim.test_points)[:,0]) - sim.scene_camera.t[0] + print min(np.array(sim.test_points)[:,1]) - sim.scene_camera.t[1], max(np.array(sim.test_points)[:,1]) - sim.scene_camera.t[1] + print 'depths', set(np.array(sim.test_points)[:,2]) + + + + plt.ylabel('Y (mm)') + plt.xlabel('X (mm)') + plt.plot(np.array(sim.calibration_points)[:,0], np.array(sim.calibration_points)[:,1], 'bo') + plt.plot(np.array(sim.test_points)[:,0], np.array(sim.test_points)[:,1], 'ro') + plt.show() + + # self.sim = sim + # visualize the setting in windows + +class Parallax2Dto3DMapping(Experiment): + ''' + IMPORTANT! + In all experiments, scene camera's rvec = (0, 0, 0) i.e. 
the corresponding rotation matrix is the identity matrix therefore + I have not included the dot product with this rotation matrix to convert points in world coordinates + into scene camera coordinates. however, one should know that if the scene camera is rotated differently + this transformation is essential. I would add the corresponding computations later on. ''' + + def __run__(self): + sim = GazeSimulation(log = False) + + sim.place_eyeball_on_scene_camera = False + sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) + # sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. e = (e.x, e.y, 0) + + sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera + sim.setTestDepth(1.5 * 1000) + sim.calibration_grid = True + sim.calibration_random_depth = False + sim.test_grid = True + sim.test_random_depth = False + sim.test_random_fixed_depth = False + + depths = map(lambda d:d*1000, [1, 1.25, 1.5, 1.75, 2.0]) + print '> Computing results for multiple calibration depths...' + results, results_std = [], [] + for num_of_calibration_depths in xrange(1, 6): # from 1 calibration depths to 5 + print '> Considering only %s calibration depth(s)...' 
%num_of_calibration_depths + sim.reset() + aae_2ds_aae = [] + aae_2ds_phe = [] + aae_3ds_aae = [] + aae_3ds_phe = [] + aae_3D3Ds = [] # angular error + + for calibs in combinations(depths, num_of_calibration_depths): + # Now calibs is a set of depths from each of which we need calibration data + print 'Current calibration depths', calibs + calibs = list(calibs) + cp, ct = [], [] + sim.reset() + sim.setCalibrationDepth(calibs) + # Perform calibration + sim.runCalibration() + cp, ct, p3d = sim.tr_pupil_locations, sim.calibration_points, sim.tr_3d_pupil_locations + # target positions are computed relative to the scene CCS + ti = map(lambda target: v(target) - v(sim.scene_camera.t), ct) + # Computing pupil pose for each gaze + ni = map(lambda p: (v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + + w, e, w0 = minimizeEnergy(cp, ti) + e = v(e) + + # transforming pupil pose to eye camera CS + eyeR = np.array(sim.eye_camera.R[:3]) + ni = map(lambda pose: eyeR.dot(np.array(pose)), ni) + + R, e3d3d = minimizeEnergy(ni, ti, pose_given=True) + # R = LA.inv(R) + e3d3d = v(e3d3d) + + # Now we have calibration data from multiple depths, we can test on all depths + for test_depth in depths: + sim.setTestDepth(test_depth) + aae_2d_aae, aae_2d_phe, aae_2d_std, _ = sim.runTest() # last one is PHE std + aae_2ds_aae.append((aae_2d_aae, aae_2d_std)) + aae_2ds_phe.append(aae_2d_phe) + # Fetching test points + t, p, p3d = sim.test_points, sim.te_pupil_locations, sim.te_3d_pupil_locations + t = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene CCS + + # 3D3D + t_3d3d = t[:] + + ni = map(lambda p: v(v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + # transforming pupil pose to eye camera CS + ni = map(lambda r: v(eyeR.dot(np.array(r))), ni) + + # applying estimated rotation to pose vector in eye camera coordinates (Rn) + # R is estimated rotation between scene camera and eye coordinate system (not eye camera!) 
+ # in other words, R is the rotation part of e + Rni = map(lambda n: v(R.dot(np.array(n))), ni) # now ready to compare Rn with t-e + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. then we can simply compute angular error between each intersection and + # the corresponding 3D target + gis = map(lambda vec: v(vec), Rni) # gaze rays originating from eyeball + # we multiply g such that it hits t's z-plane i.e. multiply all coordinates by factor (t.z-e.z)/g.z + # then we add e to the final g so that it originates from scene camera. now both g and t are in the + # same coordinate system and originate from the same point, so we can compare them + gprimes = map(lambda tg: v(((tg[0].z - e3d3d.z)/tg[1].z)*tg[1] + e3d3d), zip(t_3d3d, gis)) + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t_3d3d)) + + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + aae_3D3Ds.append((AAE, STD)) + + + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. 
then we can simply compute angular error between each intersection and + # the corresponding 3D target + t = map(lambda vec: v(vec), t) + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + N = len(t) + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag/1000 for u,v in zip(t, gprimes)) + N = len(t) + APHE = np.mean(PHE) + PHE_STD = np.std(PHE) + PHE_m, PHE_M = min(PHE), max(PHE) + + aae_3ds_aae.append((AAE, STD)) + aae_3ds_phe.append((PHE, PHE_STD)) + + # results only contains AAE + results.append([np.mean(np.array(aae_2ds_aae)[:,0]), np.mean(np.array(aae_3ds_aae)[:,0]), np.mean(np.array(aae_3D3Ds)[:,0])]) + results_std.append([np.std(np.array(aae_2ds_aae)[:,0]), np.std(np.array(aae_3ds_aae)[:,0]), np.std(np.array(aae_3D3Ds)[:,0])]) + + # Old plot code + ###################################################################################################### + # plt.ylabel('Angular Error') + # plt.xlabel('Depth') + + # fig = plt.figure(figsize=(14.0, 10.0)) + # ax = fig.add_subplot(111) + + # clrs = ['b', 'r', 'orange'] + + + # _xrange = [0.5,1.5,2.5,3.5,4.5] + # ax.plot(_xrange, [res[0] for res in results], 'r', label='2D-to-2D', marker="o", linestyle='-',lw=3) + # ax.plot(_xrange, [res[1] for res in results], 'b', label='2D-to-3D', marker="o", linestyle='-',lw=3) + # ax.plot(_xrange, [res[2] for res in results], 'g', label='3D-to-3D', marker="o", linestyle='-',lw=3) + + # ax.set_ylabel(r'Angular Error',fontsize=22, fontweight='bold') + # ax.set_xlabel(r'Number of Calibration Depths',fontsize=22, fontweight='bold') + + # plt.legend(fontsize=20) + # # plt.legend(loc="upper left", ncol=3, title=r"$d_c$") + # # plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, + # # ncol=5, mode="expand", borderaxespad=0., 
fontsize=20) + # # plt.xticks(fontsize='18') + # plt.yticks(fontsize='18') + + + # TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0] + # LABELs = ['', '1', '2', '3', '4', '5', ''] + # # ax.set_xticklabels(LABELs,fontsize=18) + # plt.xticks(TOPICs, LABELs,fontsize=18) + + # left = 0.1 # the left side of the subplots of the figure + # right = 0.975 # the right side of the subplots of the figure + # bottom = 0.075 # the bottom of the subplots of the figure + # top = 0.925 # the top of the subplots of the figure + # wspace = 0.2 # the amount of width reserved for blank space between subplots + # hspace = 0.4 # the amount of height reserved for white space between subplots + + # plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + # plt.show() + ###################################################################################################### + # New plot code based on EffectNumberofClusters.py + mean2D2D = [res[0] for res in results] + mean2D3D = [res[1] for res in results] + mean3D3D = [res[2] for res in results] + std2D2D = [res[0] for res in results_std] + std2D3D = [res[1] for res in results_std] + std3D3D = [res[2] for res in results_std] + + + N = 5 + ind = np.asarray([0.25,1.25,2.25,3.25,4.25]) + + width = 0.5 # the width of the bars + + # x1 = [0.4,1.4,2.4,3.4,4.4] + x2 = [0.45,1.45,2.45,3.45,4.45] + # x3 = [0.5,1.5,2.5,3.5,4.5] + x4 = [0.55,1.55,2.55,3.55,4.55] + # x5 = [0.6,1.6,2.6,3.6,4.6] + x6 = [0.50,1.50,2.50,3.50,4.50] + + fig = plt.figure(figsize=(14.0, 10.0)) + + ax = fig.add_subplot(111) + + # print mean2D2D + # print mean2D3D + + # ax.axhline(linewidth=2, y = np.mean(mean2D2D),color='r') + # ax.axhline(linewidth=2, y = np.mean(mean2D3D),color='blue') + + # ax.axhline(linewidth=2, y = minvaluevalue,color='black') + + # ax.text(0.98, Participantmeanvalue+0.5, "Mean %.2f" % Participantmeanvalue,fontsize=12, fontweight='bold',color='r') + # ax.text(0.98, maxvaluevalue+0.5, "Maximum %.2f" % 
maxvaluevalue,fontsize=12, fontweight='bold',color='black') + # ax.text(0.98, minvaluevalue+0.5, "Minimum %.2f" % minvaluevalue,fontsize=12, fontweight='bold', color='black') + + # rects1 = ax.bar(ind, Participantmean,width, color='r',edgecolor='black',)#, hatch='//') + rects1 = ax.errorbar(x2, mean2D2D,yerr=[std2D2D,std2D2D],fmt='o',color='red',ecolor='red',lw=3, capsize=5, capthick=2) + plt.plot(x2, mean2D2D, marker="o", linestyle='-',lw=3,color='red',label = r'2D-to-2D') + + rects2 =ax.errorbar(x4, mean2D3D,yerr=[std2D3D,std2D3D],fmt='o',color='blue',ecolor='blue',lw=3, capsize=5, capthick=2) + plt.plot(x4, mean2D3D, marker="o", linestyle='-',lw=3,color='blue', label = r'2D-to-3D') + + # 3D-to-3D drawn in green (matches old plot code above); was duplicated blue, indistinguishable from 2D-to-3D + rects3 =ax.errorbar(x6, mean3D3D,yerr=[std3D3D,std3D3D],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) + plt.plot(x6, mean3D3D, marker="o", linestyle='-',lw=3,color='green', label = r'3D-to-3D') + + legend(fontsize=20,loc='upper right') + + # rects3 = ax.errorbar(x3, meanC3,yerr=[stdC3,stdC3],fmt='o',color='black',ecolor='black',lw=3, capsize=5, capthick=2) + # plt.plot(x3, meanC3, marker="o", linestyle='-',lw=3,color='black') + # + # rects4 =ax.errorbar(x4, meanC4,yerr=[stdC4,stdC4],fmt='o',color='green',ecolor='green',lw=3, capsize=5, capthick=2) + # plt.plot(x4, meanC4, marker="o", linestyle='-',lw=3,color='green') + # + # rects5 =ax.errorbar(x5, meanC5,yerr=[stdC5,stdC5],fmt='o',color='orange',ecolor='orange',lw=3, capsize=5, capthick=2) + # plt.plot(x5, meanC5, marker="o", linestyle='-',lw=3,color='orange') + + + ax.set_ylabel(r'Angular Error',fontsize=22) + ax.set_xlabel(r'Number of Calibration Depths',fontsize=22) + ax.set_xticks(ind+0.25) + ax.set_xticklabels( ('D1', 'D2', 'D3','D4', 'D5') ,fontsize=18) + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0]#,110]#,120] + print TOPICs + LABELs = ["",r'1',r'2', r'3', r'4', r'5', ""]#, ""]#, ""] + + # fig.canvas.set_window_title('Distance Error Correlation') + plt.xticks(TOPICs, LABELs,fontsize=18) + + # 
legend([rects1,rects2], [r'\LARGE\textbf{2D2D}', r'\LARGE\textbf{2D3D}'], loc='lower right') + + TOPICS = [0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]#,110]#,120] + print TOPICS + LABELS = [r'0.5', r'1',r'1.5', r'2',r'2.5', r'3',r'3.5', r'4',r'4.5',r'5']#, ""]#, ""] + + # fig.canvas.set_window_title('Accuracy - Activity Statistics') + plt.yticks(TOPICS, LABELS,fontsize=18) + + def autolabel(rects): + # attach some text labels + for rect in rects: + height = rect.get_height() + ax.text(0.26+rect.get_x()+rect.get_width()/2., height +0.35, "%.2f"%float(height), + ha='center', va='bottom',fontweight='bold',fontsize=13.5) + + # autolabel(rects1) + + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + ###################################################################################################### + +class Parallax3Dto3DMapping(Experiment): # GT pupil pose instead of estimating the pose + ''' + ''' + def __run__(self): + sim = GazeSimulation(log = False) + sim.place_eyeball_on_scene_camera = False + sim.setEyeRelativeToSceneCamera(v(-65, -33, -73)) + # sim.setEyeRelativeToSceneCamera(v(-65, -33, 0)) # assuming eyeball and scene camera are coplanar i.e. 
e = (e.x, e.y, 0) + sim.setCalibrationDepth(1 * 1000) # mm, wrt scene camera + sim.setTestDepth(1.5 * 1000) + + sim.calibration_grid = True + sim.calibration_random_depth = False + sim.test_grid = True + sim.test_random_depth = False + sim.test_random_fixed_depth = False + + cdepths = [1, 1.5, 2.0] + tdepths = np.linspace(1, 2, 5) + + results = [] + for i, cdepth in enumerate(cdepths): + sim.setCalibrationDepth(cdepth * 1000) + # Performing calibration + sim.runCalibration() + + aae_2Ds = [] + aae_3Ds = [] + aae_3D3Ds = [] + std_2Ds = [] + std_3Ds = [] + std_3D3Ds = [] + + # Fetching calibration points + t, p3d, p2d = sim.calibration_points, sim.tr_3d_pupil_locations, sim.tr_pupil_locations + # target positions are computed relative to the scene CCS + ti = map(lambda target: v(target) - v(sim.scene_camera.t), t) + # Computing pupil pose for each gaze + ni = map(lambda p: (v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + + eye_scene_diff = v(sim.sclera_pos) - v(sim.scene_camera.t) + e = np.array(eye_scene_diff) # eyeball coords in scene CCS + e_org = v(e[:]) + + # transforming pose vector to eye camera coordinates + eyeR = np.array(sim.eye_camera.R[:3]) # result is identical to cv2.Rodrigues(np.array([0., np.pi, 0.]))[0] + ni = map(lambda pose: eyeR.dot(np.array(pose)), ni) + + R, e3d3d = minimizeEnergy(ni, ti, pose_given=True) + e3d3d = v(e3d3d) + # R = LA.inv(R) + + w2d3d, e2d3d, w0 = minimizeEnergy(p2d, ti) + e2d3d = v(e2d3d) + + print + print R + print 'e3d3d', e3d3d, 'distance =', (e3d3d-e_org).mag + print 'e2d3d', e2d3d, 'distance =', (e2d3d-e_org).mag + print 'real e', e_org + print + + for j, tdepth in enumerate(tdepths): + + sim.setTestDepth(tdepth * 1000) + aae_2d, _, std_2d, _ = sim.runTest() + aae_2Ds.append(aae_2d) + std_2Ds.append(std_2d) + + # Fetching test points + t, p3d, p2d = sim.test_points, sim.te_3d_pupil_locations, sim.te_pupil_locations + ti = map(lambda target: v(target) - v(sim.scene_camera.t), t) # target coords in scene 
CCS + ni = map(lambda p: v(v(p)-v(sim.sclera_pos)).norm(), p3d) # ground truth gaze vectors + + # transforming pose vector to eye camera coordinates + ni = map(lambda r: v(eyeR.dot(np.array(r))), ni) + + # applying estimated rotation to pose vector in eye camera coordinates (Rn) + ni = map(lambda n: v(R.dot(np.array(n))), ni) # now ready to compare Rn with t-e + + # 3D to 3D + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. then we can simply compute angular error between each intersection and + # the corresponding 3D target + gis = map(lambda vec: v(vec), ni) + gprimes = map(lambda tg: v(((tg[0].z - e3d3d.z)/tg[1].z)*tg[1] + e3d3d), zip(ti, gis)) + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, ti)) + # AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, ti)) + + N = len(ti) + AAE = np.mean(AE) + STD_3D3D = np.std(AE) + m, M = min(AE), max(AE) + aae_3D3Ds.append(AAE) + std_3D3Ds.append(STD_3D3D) + + # 2D to 3D + qi = map(_q, p2d) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: gaze_ray(q, w2d3d), qi) + + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. 
then we can simply compute angular error between each intersection and + # the corresponding 3D target + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e2d3d.z)/tg[1].z)*tg[1] + e2d3d), zip(ti, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, ti)) + # AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, ti)) + + N = len(t) + AAE = np.mean(AE) + STD_2D3D = np.std(AE) + m, M = min(AE), max(AE) + # Computing physical distance error (in meters) + PHE = list((u-v).mag for u,v in zip(ti, gprimes)) + N = len(ti) + APHE = np.mean(PHE) + PHE_STD = np.std(PHE) + PHE_m, PHE_M = min(PHE), max(PHE) + + # aae_3Ds.append((AAE, STD, PHE, PHE_STD)) + aae_3Ds.append(AAE) + std_3Ds.append(STD_2D3D) + # break + + print 'depth', cdepth, 'finished.' + results.append([aae_2Ds, aae_3Ds, aae_3D3Ds, std_2Ds, std_3Ds, std_3D3Ds]) + + + clrs = ['r', 'g', 'b', 'k', 'o'] + colors = ['blue', 'orange', 'red', 'black', 'orange'] + patches = [] + + fig = plt.figure(figsize=(14.0, 10.0)) + ax = fig.add_subplot(111) + + dmap = {0: '1', 1:'3', 2:'5'} + + x1 = [0.375,1.375,2.375,3.375,4.375] + x2 = [0.425,1.425,2.425,3.425,4.425] + x3 = [0.475,1.475,2.475,3.475,4.475] + x4 = [0.525,1.525,2.525,3.525,4.525] + x5 = [0.575,1.575,2.575,3.575,4.575] + x6 = [0.625,1.625,2.625,3.625,4.625] + xrange_2d2d = [x1, x3, x5] + xrange_2d3d = [x2, x4, x6] + + for i in [0, 1, 2]: + cdepth_results = results[i] + _xrange = xrange_2d2d[i] + aae_2d2d = cdepth_results[0] + std_2d2d = cdepth_results[3] + rects1 = ax.errorbar(_xrange, aae_2d2d,yerr=[std_2d2d, std_2d2d],fmt='o',color=colors[i],ecolor=colors[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_2d2d, marker="o", linestyle='-',lw=3,color=colors[i],label = '2D-to-2D Calibration Depth ' + dmap[i]) + + for i in [0, 1, 2]: + cdepth_results = results[i] + _xrange = xrange_2d3d[i] + aae_2d3d = cdepth_results[1] + std_2d3d = 
cdepth_results[4] + rects2 = ax.errorbar(_xrange, aae_2d3d,yerr=[std_2d3d, std_2d3d],fmt='o',color=colors[i],ecolor=colors[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_2d3d, marker="o", linestyle='--',lw=3,color=colors[i],label = '2D-to-3D Calibration Depth ' + dmap[i]) + + for i in [0, 1, 2]: + cdepth_results = results[i] + _xrange = xrange_2d2d[i] + aae_3d3d = cdepth_results[2] + std_3d3d = cdepth_results[5] + rects3 = ax.errorbar(_xrange, aae_3d3d,yerr=[std_3d3d, std_3d3d],fmt='o',color=colors[i],ecolor=colors[i],lw=3, capsize=5, capthick=2) + plt.plot(_xrange, aae_3d3d, marker="o", linestyle='-.',lw=3,color=colors[i],label = '3D-to-3D Calibration Depth ' + dmap[i]) + + + ax.set_ylabel(r'\textbf{Angular Error}',fontsize=22) + ax.set_xlabel(r'\textbf{Depth}',fontsize=22) + # ax.set_ylim((0, 2.4)) + + TOPICS = [-0.2, 0, 0.2, 0.4,0.6,0.8,1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4]#,110]#,120] + LABELS = [r'', r'0', r'0.2',r'0.4',r'0.6', r'0.8', r'1.0', r'1.2', r'1.4', r'1.6', r'1.8', r'2.0', r'2.2', r'2.4']#, ""]#, ""] + + plt.yticks(TOPICS, LABELS,fontsize=18) + + plt.xlabel('Depth') + plt.ylabel('Angular Error') + + plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, + ncol=3, mode="expand", borderaxespad=0., fontsize=18) + + TOPICs = [0.0,0.5,1.5,2.5,3.5,4.5,5.0] + LABELs = ['', 'D1 - 1m', 'D2 - 1.25m', 'D3 - 1.5m', 'D4 - 1.75m', 'D5 - 2.0m', ''] + plt.xticks(TOPICs, LABELs,fontsize=18) + + left = 0.1 # the left side of the subplots of the figure + right = 0.975 # the right side of the subplots of the figure + bottom = 0.075 # the bottom of the subplots of the figure + top = 0.925 # the top of the subplots of the figure + wspace = 0.2 # the amount of width reserved for blank space between subplots + hspace = 0.4 # the amount of height reserved for white space between subplots + + plt.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) + plt.show() + + # self.sim = sim + +ROOT_DATA_DIR = 
'/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants' +class Parallax2Dto2DRealData(Experiment): + ''' + First it runs single calibration depth vs test depth over all combinations of depths per participant (25 cases) + Then we perform estimation with data from multiple calibration depths for each test depth + ''' + def __run__(self): + sim = GazeSimulation(log = False) + camera_matrix, dist_coeffs = readCameraParams() + + root_result_path = 'results/2d2d/' + if not os.path.exists(root_result_path): + os.makedirs(root_result_path) + + errors = [] + for d1 in os.listdir(ROOT_DATA_DIR): + if d1.startswith('p'): # every participant + if not d1 in PARTICIPANTS: + continue + participant_label = d1 + participant_results = [] + d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/ + d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../ + print '> Processing participant', d1 + participant_experiment = {} + for d3 in os.listdir(d2): # every recording + d4 = os.path.join(d2, d3) # .../pi/../00X/ + + intervals_dir = os.path.join(d4, 'gaze_intervals.npy') + if not os.path.isfile(intervals_dir): + # Participant not completely processed + print '> One or more recordings need processing...' + break + else: + + p = np.load(os.path.join(d4, 'p.npy')) + break_participant = False + for point in p: + if np.isnan(point[0]): + # For this gaze position, not input with nonzero confidence exist + break_participant = True + break + if break_participant: + print '> One or more recordings miss gaze coords...' 
+ break + # print p + t2d = np.load(os.path.join(d4, 't2d.npy')) + t3d = np.load(os.path.join(d4, 't3d.npy')) + participant_experiment[d3] = [p, t2d, t3d] + + if len(participant_experiment) == 10: + print '> All recordings processed' + keys = sorted(participant_experiment.keys()) + depths = zip(keys[::2], keys[1::2]) + for calib_depth in depths: + cp, ct = participant_experiment[calib_depth[0]][0], participant_experiment[calib_depth[0]][1] + cdepth_value = getDepth(calib_depth) + # Perform calibration + sim.perform2D2DCalibrationOnReadData(cp, ct, (1280, 720)) + for test_depth in depths: + tdepth_value = getDepth(test_depth) + tp, tt, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][1], participant_experiment[test_depth[1]][2] + error = sim.run2D2DTestOnRealData(tp, tt, (1280, 720), tt3d, camera_matrix, dist_coeffs) + participant_results.append([cdepth_value, tdepth_value] + error) + print len(participant_results), 'combinations processed...' + np.save('results/2d2d/%s_2d2d_all.npy' % participant_label, np.array(participant_results)) + np.savetxt('results/2d2d/%s_2d2d_all.csv' % participant_label, np.array(participant_results), delimiter=",") + + print '> Computing results for multiple calibration depths...' + for num_of_calibration_depths in xrange(2, 6): # from 2 calibration depths to 5 + participant_results = [] + print '> Computing results for combining %s calibration depths...' 
% num_of_calibration_depths + for calibs in combinations(depths, num_of_calibration_depths): + # Now calibs is a set of depths, from each of those we need calibration data + cp, ct = [], [] + calib_depths_label = [] + for calib in calibs: + if len(cp): + cp = np.concatenate((cp, participant_experiment[calib[0]][0]), axis=0) + ct = np.concatenate((ct, participant_experiment[calib[0]][1]), axis=0) + else: + cp = participant_experiment[calib[0]][0] + ct = participant_experiment[calib[0]][1] + calib_depths_label.append(getDepth(calib)) + # Perform calibration + sim.perform2D2DCalibrationOnReadData(cp, ct, (1280, 720)) + # Now we have calibration data from multiple depths, we can test on all depths + for test_depth in depths: + tdepth_value = getDepth(test_depth) + tp, tt, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][1], participant_experiment[test_depth[1]][2] + error = sim.run2D2DTestOnRealData(tp, tt, (1280, 720), tt3d, camera_matrix, dist_coeffs) + participant_results.append(calib_depths_label + [tdepth_value] + error) + print len(participant_results), 'combinations processed...' + result_path = 'results/2d2d/%s_calibration_depths/' % num_of_calibration_depths + if not os.path.exists(result_path): + os.makedirs(result_path) + np.save(result_path + '%s.npy' % participant_label, np.array(participant_results)) + np.savetxt(result_path + '%s.csv' % participant_label, np.array(participant_results), delimiter=",") + + + print 'done.' 
+ # plt.plot(tdrange, aaes) + # plt.show() + +def getDepth(depth_experiments): + _map = {'000': 1, '002': 1.25, '004': 1.5, '006': 1.75, '008': 2.0} + return _map[depth_experiments[0]] + +class Parallax2Dto3DRealData(Experiment): + def __run__(self): + sim = GazeSimulation(log = False) + aae_3ds = [] + root_result_path = '/home/mmbrian/3D_Gaze_Tracking/work/results/2D3D/' + if not os.path.exists(root_result_path): + os.makedirs(root_result_path) + + for d1 in os.listdir(ROOT_DATA_DIR): + if d1.startswith('p'): # every participant + if not d1 in PARTICIPANTS: + continue + participant_label = d1 + participant_results = [] + d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/ + d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../ + print '> Processing participant', d1 + participant_experiment = {} + for d3 in os.listdir(d2): # every recording + d4 = os.path.join(d2, d3) # .../pi/../00X/ + + intervals_dir = os.path.join(d4, 'gaze_intervals.npy') + if not os.path.isfile(intervals_dir): + # Participant not completely processed + print '> One or more recordings need processing...' + break + else: + + p = np.load(os.path.join(d4, 'p.npy')) + # p = np.load(os.path.join(d4, 'p_mean.npy')) + break_participant = False + for point in p: + if np.isnan(point[0]): + # For this gaze position, not input with nonzero confidence exist + break_participant = True + break + if break_participant: + print '> One or more recordings miss gaze coords...' 
+ break + # print p + t2d = np.load(os.path.join(d4, 't2d.npy')) + t3d = np.load(os.path.join(d4, 't3d.npy')) + # t2d = np.load(os.path.join(d4, 't2d_mean.npy')) + # t3d = np.load(os.path.join(d4, 't3d_mean.npy')) + participant_experiment[d3] = [p, t2d, t3d] + + if len(participant_experiment) == 10: + print '> All recordings processed' + keys = sorted(participant_experiment.keys()) + depths = zip(keys[::2], keys[1::2]) + for calib_depth in depths: + cp, ct3d = participant_experiment[calib_depth[0]][0], participant_experiment[calib_depth[0]][2] + cdepth_value = getDepth(calib_depth) + + # Performing calibration + w, e, w0 = minimizeEnergy(cp, ct3d) + e = v(e) + for test_depth in depths: + tdepth_value = getDepth(test_depth) + tp, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][2] + + t, p = tt3d, tp + qi = map(_q, p) # computing feature vectors from raw pupil coordinates in 2D + # computing unit gaze vectors corresponding to pupil positions + # here we use the computed mapping matrix w + gis = map(lambda q: g(q, w), qi) + # Intersecting gaze rays originating from the eye with the planes defined by each + # target. 
then we can simply compute angular error between each intersection and + # the corresponding 3D target + t = map(lambda vec: v(vec), t) + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + N = len(t) + AAE = sum(AE)/N + VAR = sum((ae - AAE)**2 for ae in AE)/N + STD = np.sqrt(VAR) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag for u,v in zip(t, gprimes)) + N = len(t) + APHE = sum(PHE)/N + PHE_VAR = sum((phe - APHE)**2 for phe in PHE)/N + PHE_STD = np.sqrt(PHE_VAR) # was np.sqrt(VAR): sqrt of the angular variance, not the physical-distance variance + PHE_m, PHE_M = min(PHE), max(PHE) + + participant_results.append([cdepth_value, tdepth_value] + [AAE, VAR, STD, m, M, APHE, PHE_VAR, PHE_STD, PHE_m, PHE_M]) + + print len(participant_results), 'combinations processed...' + np.save(os.path.join(root_result_path, '%s_2d3d_all.npy' % participant_label), np.array(participant_results)) + np.savetxt(os.path.join(root_result_path, '%s_2d3d_all.csv' % participant_label), np.array(participant_results), delimiter=",") + + print '> Computing results for multiple calibration depths...' + for num_of_calibration_depths in xrange(2, 6): # from 2 calibration depths to 5 + participant_results = [] + print '> Computing results for combining %s calibration depths...' 
% num_of_calibration_depths + for calibs in combinations(depths, num_of_calibration_depths): + # Now calibs is a set of depths, from each of those we need calibration data + cp, ct3d = [], [] + calib_depths_label = [] + for calib in calibs: + if len(cp): + cp = np.concatenate((cp, participant_experiment[calib[0]][0]), axis=0) + ct3d = np.concatenate((ct3d, participant_experiment[calib[0]][2]), axis=0) + else: + cp = participant_experiment[calib[0]][0] + ct3d = participant_experiment[calib[0]][2] + calib_depths_label.append(getDepth(calib)) + # Performing calibration + w, e, w0 = minimizeEnergy(cp, ct3d) + e = v(e) + # Now we have calibration data from multiple depths, we can test on all depths + for test_depth in depths: + tdepth_value = getDepth(test_depth) + tp, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][2] + + t, p = tt3d, tp + qi = map(_q, p) + gis = map(lambda q: g(q, w), qi) + + t = map(lambda vec: v(vec), t) + gis = map(lambda vec: v(vec), gis) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + + AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + N = len(t) + AAE = sum(AE)/N + VAR = sum((ae - AAE)**2 for ae in AE)/N + STD = np.sqrt(VAR) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag for u,v in zip(t, gprimes)) + N = len(t) + APHE = sum(PHE)/N + PHE_VAR = sum((phe - APHE)**2 for phe in PHE)/N + PHE_STD = np.sqrt(PHE_VAR) # was np.sqrt(VAR): sqrt of the angular variance, not the physical-distance variance + PHE_m, PHE_M = min(PHE), max(PHE) + + participant_results.append(calib_depths_label + [tdepth_value] + [AAE, VAR, STD, m, M, APHE, PHE_VAR, PHE_STD, PHE_m, PHE_M]) + + print len(participant_results), 'combinations processed...' 
+ result_path = os.path.join(root_result_path, '%s_calibration_depths/' % num_of_calibration_depths) + if not os.path.exists(result_path): + os.makedirs(result_path) + np.save(result_path + '%s.npy' % participant_label, np.array(participant_results)) + np.savetxt(result_path + '%s.csv' % participant_label, np.array(participant_results), delimiter=",") + + print 'done.' + + + +class Parallax3Dto3DRealData(Experiment): + def __run__(self): + sim = GazeSimulation(log = False) + aae_3ds = [] + + root_result_path = '/home/mmbrian/3D_Gaze_Tracking/work/results/3D3D/' + root_pose_path = '/home/mmbrian/3D_Gaze_Tracking/work/Marker_Eye_Images/ImagesUndist/' + root_data_path = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants/' + take_only_nearest_neighbor_for_calibration = True + + participants = ['p14'] + + if not os.path.exists(root_result_path): + os.makedirs(root_result_path) + + for d1 in os.listdir(root_data_path): + if d1.startswith('p'): # every participant + if not d1 in participants: + # if not d1 in PARTICIPANTS: + continue + participant_label = d1 + participant_results = [] + d2 = os.path.join(root_data_path, d1) # .../pi/ + d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../ + print '> Processing participant', d1 + participant_experiment = {} + for d3 in os.listdir(d2): # every recording + d4 = os.path.join(d2, d3) # .../pi/../00X/ + + # pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "null_pupils.csv"),"rb"),delimiter=";") + pose_info = np.loadtxt(open(os.path.join(root_pose_path+d1+'/'+d3, "simple_pupils.csv"),"rb"),delimiter=";") + frames_numbers = pose_info[:, 0] + pose_estimates = pose_info[:,4:7] + pose_info = dict(zip(frames_numbers, pose_estimates)) + p_frames = np.load(os.path.join(d4, 'p_frames.npy')) + # print d4 + # Fetching pose information for every target + poses = [] + for target in p_frames: + pose = [] + for fn in target: # all frames corresponding to this pupil + # first fn corresponds to the nearest 
neighbor + # for test use all correspondents from these 3 or 2 estimates + # i.e. each pose-marker creates a correspondence so 3*16=48 correspondents for test + # for calibration compare two cases, one similar to above take all the 75 correspondents + # and the other taking only the pose corresponding to nearest neighbor which results in + # the same number of correspondents as target markers + try: + pose.append(pose_info[fn]) + except KeyError, err: + print err + poses.append(pose) + + t2d = np.load(os.path.join(d4, 't2d.npy')) + t3d = np.load(os.path.join(d4, 't3d.npy')) + participant_experiment[d3] = [poses, t2d, t3d] + + keys = sorted(participant_experiment.keys()) + depths = zip(keys[::2], keys[1::2]) + + for calib_depth in depths: + pose_data, ct3d = participant_experiment[calib_depth[0]][0], participant_experiment[calib_depth[0]][2] + cdepth_value = getDepth(calib_depth) + if take_only_nearest_neighbor_for_calibration: + pose = np.array(list(p[0] for p in pose_data)) + calib_3ds = ct3d[:] + else: + calib_3ds = [] + pose = [] + for i, p3d in enumerate(ct3d): + for p in pose_data[i]: + pose.append(p) + calib_3ds.append(p3d) + # Performing calibration + # First we convert gaze rays to actual pupil pose in our right hand coordinate system + # _pose = [(np.arctan(g.x/g.z), np.arctan(g.y/g.z)) for g in map(v, pose)] + _pose = map(v, pose) + print '> Running tests for calibration depth', cdepth_value + if any(g.z == 0 for g in _pose): + print 'Calibration is flawed' + # print pose + else: + print 'Calibration data is okay' + # print [g.mag for g in map(v, pose)] + # w, e, w0 = minimizeEnergy(_pose, calib_3ds, pose_given=True) + R, e = minimizeEnergy(pose, calib_3ds, pose_given=True) + # R = LA.inv(R) + print 'R', R + print 'e', e + + e = v(e) + for test_depth in depths: + tdepth_value = getDepth(test_depth) + tpose_data, tt3d = participant_experiment[test_depth[1]][0], participant_experiment[test_depth[1]][2] + + test_3ds = [] + tpose = [] + for i, p3d in 
enumerate(tt3d): + for p in tpose_data[i]: + tpose.append(p) + test_3ds.append(p3d) + + # applying estimated rotation to bring pose vectors to scene camera coordinates + tpose = map(lambda p: v(R.dot(np.array(p))), tpose) + + if any(g.z == 0 for g in map(v, tpose)): + print 'Test depth', tdepth_value, 'is flawed' + + gis = map(lambda vec: v(vec), tpose) + t = map(lambda vec: v(vec), test_3ds) + gprimes = map(lambda tg: v(((tg[0].z - e.z)/tg[1].z)*tg[1] + e), zip(t, gis)) + # AE = list(np.degrees(np.arctan((v(p[0]).cross(p[1])/(v(p[0]).dot(p[1]))).mag)) for p in zip(gprimes, t)) + AE = list(np.degrees(np.arccos(v(p[0]).dot(p[1])/v(p[0]).mag/v(p[1]).mag)) for p in zip(gprimes, t)) + + AAE = np.mean(AE) + STD = np.std(AE) + m, M = min(AE), max(AE) + + # Computing physical distance error (in meters) + PHE = list((u-v).mag for u,v in zip(t, gprimes)) + APHE = np.mean(PHE) + PHE_STD = np.std(PHE) + PHE_m, PHE_M = min(PHE), max(PHE) + + print 'Calibration', cdepth_value, 'Test', tdepth_value, AAE, 'degrees', APHE, 'meters' + participant_results.append([cdepth_value, tdepth_value] + [AAE, STD, m, M, APHE, PHE_STD, PHE_m, PHE_M]) + + print len(participant_results), 'combinations processed...' + np.save(os.path.join(root_result_path, '%s_3d3d_all.npy' % participant_label), np.array(participant_results)) + np.savetxt(os.path.join(root_result_path, '%s_3d3d_all.csv' % participant_label), np.array(participant_results), delimiter=",") + + +def main(): + if len(sys.argv) <= 1: + print 'Please select a mode.' 
+ return + mode = sys.argv[1] + + if mode == 'pts': + # Performs an experiment by fixing calibration depth and testing for different test depths + # to investigate parallax error in 2D to 2D mapping + ex = Parallax2Dto2DMapping() + ex.performExperiment() + if mode == '2d3d': + # This also does 3D gaze estimation and plots estimation results for both 3D and 2D estimation + ex = Parallax2Dto3DMapping() + ex.performExperiment() + + # ex = Parallax2Dto3DMappingEye() + # ex.performExperiment()i + if mode == '2d2d_2d3d': + ex = Parallax3Dto3DMapping() + ex.performExperiment() + + # ex = Parallax2Dto2DRealData() + # ex.performExperiment() + + # ex = Parallax2Dto3DRealData() + # ex.performExperiment() + + if mode == '3d3d_real': + ex = Parallax3Dto3DRealData() + ex.performExperiment() + + +if __name__ == '__main__': + main() + + + + diff --git a/code/pupil/__init__.py b/code/pupil/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/code/pupil/calibrate.py b/code/pupil/calibrate.py new file mode 100644 index 0000000..8b6d39d --- /dev/null +++ b/code/pupil/calibrate.py @@ -0,0 +1,255 @@ +''' +(*)~---------------------------------------------------------------------------------- + Pupil - eye tracking platform + Copyright (C) 2012-2015 Pupil Labs + + Distributed under the terms of the CC BY-NC-SA License. + License details are in the file license.txt, distributed as part of this software. 
+----------------------------------------------------------------------------------~(*) +''' + +import numpy as np +#logging +import logging +logger = logging.getLogger(__name__) + + +def get_map_from_cloud(cal_pt_cloud,screen_size=(2,2),threshold = 35,return_inlier_map=False,return_params=False): + """ + we do a simple two pass fitting to a pair of bi-variate polynomials + return the function to map vector + """ + # fit once using all avaiable data + model_n = 7 + cx,cy,err_x,err_y = fit_poly_surface(cal_pt_cloud,model_n) + err_dist,err_mean,err_rms = fit_error_screen(err_x,err_y,screen_size) + if cal_pt_cloud[err_dist<=threshold].shape[0]: #did not disregard all points.. + # fit again disregarding extreme outliers + cx,cy,new_err_x,new_err_y = fit_poly_surface(cal_pt_cloud[err_dist<=threshold],model_n) + map_fn = make_map_function(cx,cy,model_n) + new_err_dist,new_err_mean,new_err_rms = fit_error_screen(new_err_x,new_err_y,screen_size) + + logger.info('first iteration. root-mean-square residuals: %s, in pixel' %err_rms) + logger.info('second iteration: ignoring outliers. root-mean-square residuals: %s in pixel',new_err_rms) + + logger.info('used %i data points out of the full dataset %i: subset is %i percent' \ + %(cal_pt_cloud[err_dist<=threshold].shape[0], cal_pt_cloud.shape[0], \ + 100*float(cal_pt_cloud[err_dist<=threshold].shape[0])/cal_pt_cloud.shape[0])) + + if return_inlier_map and return_params: + return map_fn,err_dist<=threshold,(cx,cy,model_n) + if return_inlier_map and not return_params: + return map_fn,err_dist<=threshold + if return_params and not return_inlier_map: + return map_fn,(cx,cy,model_n) + return map_fn + else: # did disregard all points. The data cannot be represented by the model in a meaningful way: + map_fn = make_map_function(cx,cy,model_n) + logger.info('First iteration. 
root-mean-square residuals: %s in pixel, this is bad!'%err_rms) + logger.warning('The data cannot be represented by the model in a meaningfull way.') + + if return_inlier_map and return_params: + return map_fn,err_dist<=threshold,(cx,cy,model_n) + if return_inlier_map and not return_params: + return map_fn,err_dist<=threshold + if return_params and not return_inlier_map: + return map_fn,(cx,cy,model_n) + return map_fn + + + +def fit_poly_surface(cal_pt_cloud,n=7): + M = make_model(cal_pt_cloud,n) + U,w,Vt = np.linalg.svd(M[:,:n],full_matrices=0) + V = Vt.transpose() + Ut = U.transpose() + pseudINV = np.dot(V, np.dot(np.diag(1/w), Ut)) + cx = np.dot(pseudINV, M[:,n]) + cy = np.dot(pseudINV, M[:,n+1]) + # compute model error in world screen units if screen_res specified + err_x=(np.dot(M[:,:n],cx)-M[:,n]) + err_y=(np.dot(M[:,:n],cy)-M[:,n+1]) + return cx,cy,err_x,err_y + +def fit_error_screen(err_x,err_y,(screen_x,screen_y)): + err_x *= screen_x/2. + err_y *= screen_y/2. + err_dist=np.sqrt(err_x*err_x + err_y*err_y) + err_mean=np.sum(err_dist)/len(err_dist) + err_rms=np.sqrt(np.sum(err_dist*err_dist)/len(err_dist)) + return err_dist,err_mean,err_rms + +def make_model(cal_pt_cloud,n=7): + n_points = cal_pt_cloud.shape[0] + + if n==3: + X=cal_pt_cloud[:,0] + Y=cal_pt_cloud[:,1] + Ones=np.ones(n_points) + ZX=cal_pt_cloud[:,2] + ZY=cal_pt_cloud[:,3] + M=np.array([X,Y,Ones,ZX,ZY]).transpose() + + elif n==7: + X=cal_pt_cloud[:,0] + Y=cal_pt_cloud[:,1] + XX=X*X + YY=Y*Y + XY=X*Y + XXYY=XX*YY + Ones=np.ones(n_points) + ZX=cal_pt_cloud[:,2] + ZY=cal_pt_cloud[:,3] + M=np.array([X,Y,XX,YY,XY,XXYY,Ones,ZX,ZY]).transpose() + + elif n==9: + X=cal_pt_cloud[:,0] + Y=cal_pt_cloud[:,1] + XX=X*X + YY=Y*Y + XY=X*Y + XXYY=XX*YY + XXY=XX*Y + YYX=YY*X + Ones=np.ones(n_points) + ZX=cal_pt_cloud[:,2] + ZY=cal_pt_cloud[:,3] + M=np.array([X,Y,XX,YY,XY,XXYY,XXY,YYX,Ones,ZX,ZY]).transpose() + else: + raise Exception("ERROR: Model n needs to be 3, 7 or 9") + return M + + +def 
make_map_function(cx,cy,n): + if n==3: + def fn((X,Y)): + x2 = cx[0]*X + cx[1]*Y +cx[2] + y2 = cy[0]*X + cy[1]*Y +cy[2] + return x2,y2 + + elif n==7: + def fn((X,Y)): + x2 = cx[0]*X + cx[1]*Y + cx[2]*X*X + cx[3]*Y*Y + cx[4]*X*Y + cx[5]*Y*Y*X*X +cx[6] + y2 = cy[0]*X + cy[1]*Y + cy[2]*X*X + cy[3]*Y*Y + cy[4]*X*Y + cy[5]*Y*Y*X*X +cy[6] + return x2,y2 + + elif n==9: + def fn((X,Y)): + # X Y XX YY XY XXYY XXY YYX Ones + x2 = cx[0]*X + cx[1]*Y + cx[2]*X*X + cx[3]*Y*Y + cx[4]*X*Y + cx[5]*Y*Y*X*X + cx[6]*Y*X*X + cx[7]*Y*Y*X + cx[8] + y2 = cy[0]*X + cy[1]*Y + cy[2]*X*X + cy[3]*Y*Y + cy[4]*X*Y + cy[5]*Y*Y*X*X + cy[6]*Y*X*X + cy[7]*Y*Y*X + cy[8] + return x2,y2 + else: + raise Exception("ERROR: Model n needs to be 3, 7 or 9") + + return fn + + +def preprocess_data(pupil_pts,ref_pts): + '''small utility function to deal with timestamped but uncorrelated data + input must be lists that contain dicts with at least "timestamp" and "norm_pos" + ''' + cal_data = [] + + if len(ref_pts)<=2: + return cal_data + + cur_ref_pt = ref_pts.pop(0) + next_ref_pt = ref_pts.pop(0) + while True: + matched = [] + while pupil_pts: + #select all points past the half-way point between current and next ref data sample + if pupil_pts[0]['timestamp'] <=(cur_ref_pt['timestamp']+next_ref_pt['timestamp'])/2.: + matched.append(pupil_pts.pop(0)) + else: + for p_pt in matched: + #only use close points + if abs(p_pt['timestamp']-cur_ref_pt['timestamp']) <= 1/15.: #assuming 30fps + slack + data_pt = p_pt["norm_pos"][0], p_pt["norm_pos"][1],cur_ref_pt['norm_pos'][0],cur_ref_pt['norm_pos'][1] + cal_data.append(data_pt) + break + if ref_pts: + cur_ref_pt = next_ref_pt + next_ref_pt = ref_pts.pop(0) + else: + break + return cal_data + + + +# if __name__ == '__main__': +# import matplotlib.pyplot as plt +# from matplotlib import cm +# from mpl_toolkits.mplot3d import Axes3D + +# cal_pt_cloud = np.load('cal_pt_cloud.npy') +# # plot input data +# # Z = cal_pt_cloud +# # ax.scatter(Z[:,0],Z[:,1],Z[:,2], c= "r") +# # 
ax.scatter(Z[:,0],Z[:,1],Z[:,3], c= "b") + +# # fit once +# model_n = 7 +# cx,cy,err_x,err_y = fit_poly_surface(cal_pt_cloud,model_n) +# map_fn = make_map_function(cx,cy,model_n) +# err_dist,err_mean,err_rms = fit_error_screen(err_x,err_y,(1280,720)) +# print err_rms,"in pixel" +# threshold =15 # err_rms*2 + +# # fit again disregarding crass outlines +# cx,cy,new_err_x,new_err_y = fit_poly_surface(cal_pt_cloud[err_dist<=threshold],model_n) +# map_fn = make_map_function(cx,cy,model_n) +# new_err_dist,new_err_mean,new_err_rms = fit_error_screen(new_err_x,new_err_y,(1280,720)) +# print new_err_rms,"in pixel" + +# print "using %i datapoints out of the full dataset %i: subset is %i percent" \ +# %(cal_pt_cloud[err_dist<=threshold].shape[0], cal_pt_cloud.shape[0], \ +# 100*float(cal_pt_cloud[err_dist<=threshold].shape[0])/cal_pt_cloud.shape[0]) + +# # plot residuals +# fig_error = plt.figure() +# plt.scatter(err_x,err_y,c="y") +# plt.scatter(new_err_x,new_err_y) +# plt.title("fitting residuals full data set (y) and better subset (b)") + + +# # plot projection of eye and world vs observed data +# X,Y,ZX,ZY = cal_pt_cloud.transpose().copy() +# X,Y = map_fn((X,Y)) +# X *= 1280/2. +# Y *= 720/2. +# ZX *= 1280/2. +# ZY *= 720/2. 
+# fig_projection = plt.figure() +# plt.scatter(X,Y) +# plt.scatter(ZX,ZY,c='y') +# plt.title("world space projection in pixes, mapped and observed (y)") + +# # plot the fitting functions 3D plot +# fig = plt.figure() +# ax = fig.gca(projection='3d') +# outliers =cal_pt_cloud[err_dist>threshold] +# inliers = cal_pt_cloud[err_dist<=threshold] +# ax.scatter(outliers[:,0],outliers[:,1],outliers[:,2], c= "y") +# ax.scatter(outliers[:,0],outliers[:,1],outliers[:,3], c= "y") +# ax.scatter(inliers[:,0],inliers[:,1],inliers[:,2], c= "r") +# ax.scatter(inliers[:,0],inliers[:,1],inliers[:,3], c= "b") +# Z = cal_pt_cloud +# X = np.linspace(min(Z[:,0])-.2,max(Z[:,0])+.2,num=30,endpoint=True) +# Y = np.linspace(min(Z[:,1])-.2,max(Z[:,1]+.2),num=30,endpoint=True) +# X, Y = np.meshgrid(X,Y) +# ZX,ZY = map_fn((X,Y)) +# ax.plot_surface(X, Y, ZX, rstride=1, cstride=1, linewidth=.1, antialiased=True,alpha=0.4,color='r') +# ax.plot_surface(X, Y, ZY, rstride=1, cstride=1, linewidth=.1, antialiased=True,alpha=0.4,color='b') +# plt.xlabel("Pupil x in Eye-Space") +# plt.ylabel("Pupil y Eye-Space") +# plt.title("Z: Gaze x (blue) Gaze y (red) World-Space, yellow=outliers") + +# # X,Y,_,_ = cal_pt_cloud.transpose() + +# # pts= map_fn((X,Y)) +# # import cv2 +# # pts = np.array(pts,dtype=np.float32).transpose() +# # print cv2.convexHull(pts)[:,0] +# plt.show() diff --git a/code/pupil/file_methods.py b/code/pupil/file_methods.py new file mode 100644 index 0000000..bf71e29 --- /dev/null +++ b/code/pupil/file_methods.py @@ -0,0 +1,65 @@ +''' +(*)~---------------------------------------------------------------------------------- + Pupil - eye tracking platform + Copyright (C) 2012-2015 Pupil Labs + + Distributed under the terms of the CC BY-NC-SA License. + License details are in the file license.txt, distributed as part of this software. 
+----------------------------------------------------------------------------------~(*) +''' + +import cPickle as pickle +import os +import logging +logger = logging.getLogger(__name__) + +class Persistent_Dict(dict): + """a dict class that uses pickle to save inself to file""" + def __init__(self, file_path): + super(Persistent_Dict, self).__init__() + self.file_path = os.path.expanduser(file_path) + try: + with open(self.file_path,'rb') as fh: + try: + self.update(pickle.load(fh)) + except: #KeyError,EOFError + logger.warning("Session settings file '%s'could not be read. Will overwrite on exit."%self.file_path) + except IOError: + logger.debug("Session settings file '%s' not found. Will make new one on exit."%self.file_path) + + + def save(self): + d = {} + d.update(self) + try: + with open(self.file_path,'wb') as fh: + pickle.dump(d,fh,-1) + except IOError: + logger.warning("Could not save session settings to '%s'"%self.file_path) + + + def close(self): + self.save() + + +def load_object(file_path): + file_path = os.path.expanduser(file_path) + with open(file_path,'rb') as fh: + return pickle.load(fh) + +def save_object(object,file_path): + file_path = os.path.expanduser(file_path) + with open(file_path,'wb') as fh: + pickle.dump(object,fh,-1) + +if __name__ == '__main__': + logging.basicConfig(level=logging.DEBUG) + # settings = Persistent_Dict("~/Desktop/test") + # settings['f'] = "this is a test" + # settings['list'] = ["list 1","list2"] + # settings.close() + + # save_object("string",'test') + # print load_object('test') + settings = Persistent_Dict('~/Desktop/pupil_settings/user_settings_eye') + print settings['roi'] \ No newline at end of file diff --git a/code/pupil/methods.py b/code/pupil/methods.py new file mode 100644 index 0000000..948ac4c --- /dev/null +++ b/code/pupil/methods.py @@ -0,0 +1,661 @@ +''' +(*)~---------------------------------------------------------------------------------- + Pupil - eye tracking platform + Copyright (C) 2012-2015 
Pupil Labs + + Distributed under the terms of the CC BY-NC-SA License. + License details are in the file license.txt, distributed as part of this software. +----------------------------------------------------------------------------------~(*) +''' + +import numpy as np +try: + import numexpr as ne +except: + ne = None +import cv2 +import logging +logger = logging.getLogger(__name__) + + + +class Roi(object): + """this is a simple 2D Region of Interest class + it is applied on numpy arrays for convenient slicing + like this: + + roi_array_slice = full_array[r.view] + # do something with roi_array_slice + + this creates a view, no data copying done + """ + def __init__(self, array_shape): + self.array_shape = array_shape + self.lX = 0 + self.lY = 0 + self.uX = array_shape[1] + self.uY = array_shape[0] + self.nX = 0 + self.nY = 0 + + @property + def view(self): + return slice(self.lY,self.uY,),slice(self.lX,self.uX) + + @view.setter + def view(self, value): + raise Exception('The view field is read-only. 
Use the set methods instead') + + def add_vector(self,(x,y)): + """ + adds the roi offset to a len2 vector + """ + return (self.lX+x,self.lY+y) + + def sub_vector(self,(x,y)): + """ + subs the roi offset to a len2 vector + """ + return (x-self.lX,y-self.lY) + + def set(self,vals): + if vals is not None and len(vals) is 5: + if vals[-1] == self.array_shape: + self.lX,self.lY,self.uX,self.uY,_ = vals + else: + logger.info('Image size has changed: Region of Interest has been reset') + elif vals is not None and len(vals) is 4: + self.lX,self.lY,self.uX,self.uY= vals + + def get(self): + return self.lX,self.lY,self.uX,self.uY,self.array_shape + + + +def bin_thresholding(image, image_lower=0, image_upper=256): + binary_img = cv2.inRange(image, np.asarray(image_lower), + np.asarray(image_upper)) + + return binary_img + +def make_eye_kernel(inner_size,outer_size): + offset = (outer_size - inner_size)/2 + inner_count = inner_size**2 + outer_count = outer_size**2-inner_count + val_inner = -1.0 / inner_count + val_outer = -val_inner*inner_count/outer_count + inner = np.ones((inner_size,inner_size),np.float32)*val_inner + kernel = np.ones((outer_size,outer_size),np.float32)*val_outer + kernel[offset:offset+inner_size,offset:offset+inner_size]= inner + return kernel + +def dif_gaus(image, lower, upper): + lower, upper = int(lower-1), int(upper-1) + lower = cv2.GaussianBlur(image,ksize=(lower,lower),sigmaX=0) + upper = cv2.GaussianBlur(image,ksize=(upper,upper),sigmaX=0) + # upper +=50 + # lower +=50 + dif = lower-upper + # dif *= .1 + # dif = cv2.medianBlur(dif,3) + # dif = 255-dif + dif = cv2.inRange(dif, np.asarray(200),np.asarray(256)) + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5)) + dif = cv2.dilate(dif, kernel, iterations=2) + dif = cv2.erode(dif, kernel, iterations=1) + # dif = cv2.max(image,dif) + # dif = cv2.dilate(dif, kernel, iterations=1) + return dif + +def equalize(image, image_lower=0.0, image_upper=255.0): + image_lower = int(image_lower*2)/2 + 
image_lower +=1 + image_lower = max(3,image_lower) + mean = cv2.medianBlur(image,255) + image = image - (mean-100) + # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3)) + # cv2.dilate(image, kernel, image, iterations=1) + return image + + +def erase_specular(image,lower_threshold=0.0, upper_threshold=150.0): + """erase_specular: removes specular reflections + within given threshold using a binary mask (hi_mask) + """ + thresh = cv2.inRange(image, + np.asarray(float(lower_threshold)), + np.asarray(256.0)) + + kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7)) + hi_mask = cv2.dilate(thresh, kernel, iterations=2) + + specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA) + # return cv2.max(hi_mask,image) + return specular + + +def find_hough_circles(img): + circles = cv2.HoughCircles(pupil_img,cv2.cv.CV_HOUGH_GRADIENT,1,20, + param1=50,param2=30,minRadius=0,maxRadius=80) + if circles is not None: + circles = np.uint16(np.around(circles)) + for i in circles[0,:]: + # draw the outer circle + cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2) + # draw the center of the circle + cv2.circle(img,(i[0],i[1]),2,(0,0,255),3) + + + +def chessboard(image, pattern_size=(9,5)): + status, corners = cv2.findChessboardCorners(image, pattern_size, flags=4) + if status: + mean = corners.sum(0)/corners.shape[0] + # mean is [[x,y]] + return mean[0], corners + else: + return None + + +def curvature(c): + try: + from vector import Vector + except: + return + c = c[:,0] + curvature = [] + for i in xrange(len(c)-2): + #find the angle at i+1 + frm = Vector(c[i]) + at = Vector(c[i+1]) + to = Vector(c[i+2]) + a = frm -at + b = to -at + angle = a.angle(b) + curvature.append(angle) + return curvature + + + +def GetAnglesPolyline(polyline,closed=False): + """ + see: http://stackoverflow.com/questions/3486172/angle-between-3-points + ported to numpy + returns n-2 signed angles + """ + + points = polyline[:,0] + + if closed: + a = np.roll(points,1,axis=0) + b = points + c = 
np.roll(points,-1,axis=0) + else: + a = points[0:-2] # all "a" points + b = points[1:-1] # b + c = points[2:] # c points + # ab = b.x - a.x, b.y - a.y + ab = b-a + # cb = b.x - c.x, b.y - c.y + cb = b-c + # float dot = (ab.x * cb.x + ab.y * cb.y); # dot product + # print 'ab:',ab + # print 'cb:',cb + + # float dot = (ab.x * cb.x + ab.y * cb.y) dot product + # dot = np.dot(ab,cb.T) # this is a full matrix mulitplication we only need the diagonal \ + # dot = dot.diagonal() # because all we look for are the dotproducts of corresponding vectors (ab[n] and cb[n]) + dot = np.sum(ab * cb, axis=1) # or just do the dot product of the correspoing vectors in the first place! + + # float cross = (ab.x * cb.y - ab.y * cb.x) cross product + cros = np.cross(ab,cb) + + # float alpha = atan2(cross, dot); + alpha = np.arctan2(cros,dot) + return alpha*(180./np.pi) #degrees + # return alpha #radians + +# if ne: +# def GetAnglesPolyline(polyline): +# """ +# see: http://stackoverflow.com/questions/3486172/angle-between-3-points +# ported to numpy +# returns n-2 signed angles +# same as above but implemented using numexpr +# SLOWER than just numpy! 
+# """ + +# points = polyline[:,0] +# a = points[0:-2] # all "a" points +# b = points[1:-1] # b +# c = points[2:] # c points +# ax,ay = a[:,0],a[:,1] +# bx,by = b[:,0],b[:,1] +# cx,cy = c[:,0],c[:,1] +# # abx = '(bx - ax)' +# # aby = '(by - ay)' +# # cbx = '(bx - cx)' +# # cby = '(by - cy)' +# # # float dot = (ab.x * cb.x + ab.y * cb.y) dot product +# # dot = '%s * %s + %s * %s' %(abx,cbx,aby,cby) +# # # float cross = (ab.x * cb.y - ab.y * cb.x) cross product +# # cross = '(%s * %s - %s * %s)' %(abx,cby,aby,cbx) +# # # float alpha = atan2(cross, dot); +# # alpha = "arctan2(%s,%s)" %(cross,dot) +# # term = '%s*%s'%(alpha,180./np.pi) +# term = 'arctan2(((bx - ax) * (by - cy) - (by - ay) * (bx - cx)),(bx - ax) * (bx - cx) + (by - ay) * (by - cy))*57.2957795131' +# return ne.evaluate(term) + + + +def split_at_angle(contour, curvature, angle): + """ + contour is array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) ) + curvature is a n-2 list + """ + segments = [] + kink_index = [i for i in range(len(curvature)) if curvature[i] < angle] + for s,e in zip([0]+kink_index,kink_index+[None]): # list of slice indecies 0,i0,i1,i2,None + if e is not None: + segments.append(contour[s:e+1]) #need to include the last index + else: + segments.append(contour[s:e]) + return segments + + +def find_kink(curvature, angle): + """ + contour is array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) ) + curvature is a n-2 list + """ + kinks = [] + kink_index = [i for i in range(len(curvature)) if abs(curvature[i]) < angle] + return kink_index + +def find_change_in_general_direction(curvature): + """ + return indecies of where the singn of curvature has flipped + """ + curv_pos = curvature > 0 + split = [] + currently_pos = curv_pos[0] + for c, is_pos in zip(range(curvature.shape[0]),curv_pos): + if is_pos !=currently_pos: + currently_pos = is_pos + split.append(c) + return split + + +def 
find_kink_and_dir_change(curvature,angle): + split = [] + if curvature.shape[0] == 0: + return split + curv_pos = curvature > 0 + currently_pos = curv_pos[0] + for idx,c, is_pos in zip(range(curvature.shape[0]),curvature,curv_pos): + if (is_pos !=currently_pos) or abs(c) < angle: + currently_pos = is_pos + split.append(idx) + return split + + +def find_slope_disc(curvature,angle = 15): + # this only makes sense when your polyline is longish + if len(curvature)<4: + return [] + + i = 2 + split_idx = [] + for anchor1,anchor2,candidate in zip(curvature,curvature[1:],curvature[2:]): + base_slope = anchor2-anchor1 + new_slope = anchor2 - candidate + dif = abs(base_slope-new_slope) + if dif>=angle: + split_idx.add(i) + print i,dif + i +=1 + + return split_list + +def find_slope_disc_test(curvature,angle = 15): + # this only makes sense when your polyline is longish + if len(curvature)<4: + return [] + # mean = np.mean(curvature) + # print '------------------- start' + i = 2 + split_idx = set() + for anchor1,anchor2,candidate in zip(curvature,curvature[1:],curvature[2:]): + base_slope = anchor2-anchor1 + new_slope = anchor2 - candidate + dif = abs(base_slope-new_slope) + if dif>=angle: + split_idx.add(i) + # print i,dif + i +=1 + i-= 3 + for anchor1,anchor2,candidate in zip(curvature[::-1],curvature[:-1:][::-1],curvature[:-2:][::-1]): + avg = (anchor1+anchor2)/2. + dif = abs(avg-candidate) + if dif>=angle: + split_idx.add(i) + # print i,dif + i -=1 + split_list = list(split_idx) + split_list.sort() + # print split_list + # print '-------end' + return split_list + + +def points_at_corner_index(contour,index): + """ + contour is array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) ) + #index n-2 because the curvature is n-2 (1st and last are not exsistent), this shifts the index (0 splits at first knot!) 
+ """ + return [contour[i+1] for i in index] + + +def split_at_corner_index(contour,index): + """ + contour is array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) ) + #index n-2 because the curvature is n-2 (1st and last are not exsistent), this shifts the index (0 splits at first knot!) + """ + segments = [] + index = [i+1 for i in index] + for s,e in zip([0]+index,index+[10000000]): # list of slice indecies 0,i0,i1,i2, + segments.append(contour[s:e+1])# +1 is for not loosing line segments + return segments + + +def convexity_defect(contour, curvature): + """ + contour is array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) ) + curvature is a n-2 list + """ + kinks = [] + mean = np.mean(curvature) + if mean>0: + kink_index = [i for i in range(len(curvature)) if curvature[i] < 0] + else: + kink_index = [i for i in range(len(curvature)) if curvature[i] > 0] + for s in kink_index: # list of slice indecies 0,i0,i1,i2,None + kinks.append(contour[s+1]) # because the curvature is n-2 (1st and last are not exsistent) + return kinks,kink_index + + +def is_round(ellipse,ratio,tolerance=.8): + center, (axis1,axis2), angle = ellipse + + if axis1 and axis2 and abs( ratio - min(axis2,axis1)/max(axis2,axis1)) < tolerance: + return True + else: + return False + +def size_deviation(ellipse,target_size): + center, axis, angle = ellipse + return abs(target_size-max(axis)) + + + + + +def circle_grid(image, pattern_size=(4,11)): + """Circle grid: finds an assymetric circle pattern + - circle_id: sorted from bottom left to top right (column first) + - If no circle_id is given, then the mean of circle positions is returned approx. 
center + - If no pattern is detected, function returns None + """ + status, centers = cv2.findCirclesGridDefault(image, pattern_size, flags=cv2.CALIB_CB_ASYMMETRIC_GRID) + if status: + return centers + else: + return None + +def calibrate_camera(img_pts, obj_pts, img_size): + # generate pattern size + camera_matrix = np.zeros((3,3)) + dist_coef = np.zeros(4) + rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_pts, img_pts, + img_size, camera_matrix, dist_coef) + return camera_matrix, dist_coefs + +def gen_pattern_grid(size=(4,11)): + pattern_grid = [] + for i in xrange(size[1]): + for j in xrange(size[0]): + pattern_grid.append([(2*j)+i%2,i,0]) + return np.asarray(pattern_grid, dtype='f4') + + + +def normalize(pos, (width, height),flip_y=False): + """ + normalize return as float + """ + x = pos[0] + y = pos[1] + x /=float(width) + y /=float(height) + if flip_y: + return x,1-y + return x,y + +def denormalize(pos, (width, height), flip_y=False): + """ + denormalize + """ + x = pos[0] + y = pos[1] + x *= width + if flip_y: + y = 1-y + y *= height + return x,y + + + +def dist_pts_ellipse(((ex,ey),(dx,dy),angle),points): + """ + return unsigned euclidian distances of points to ellipse + """ + pts = np.float64(points) + rx,ry = dx/2., dy/2. + angle = (angle/180.)*np.pi + # ex,ey =ex+0.000000001,ey-0.000000001 #hack to make 0 divisions possible this is UGLY!!! + pts = pts - np.array((ex,ey)) # move pts to ellipse appears at origin , with this we copy data -deliberatly! 
+ + M_rot = np.mat([[np.cos(angle),-np.sin(angle)],[np.sin(angle),np.cos(angle)]]) + pts = np.array(pts*M_rot) #rotate so that ellipse axis align with coordinate system + # print "rotated",pts + + pts /= np.array((rx,ry)) #normalize such that ellipse radii=1 + # print "normalize",norm_pts + norm_mag = np.sqrt((pts*pts).sum(axis=1)) + norm_dist = abs(norm_mag-1) #distance of pt to ellipse in scaled space + # print 'norm_mag',norm_mag + # print 'norm_dist',norm_dist + ratio = (norm_dist)/norm_mag #scale factor to make the pts represent their dist to ellipse + # print 'ratio',ratio + scaled_error = np.transpose(pts.T*ratio) # per vector scalar multiplication: makeing sure that boradcasting is done right + # print "scaled error points", scaled_error + real_error = scaled_error*np.array((rx,ry)) + # print "real point",real_error + error_mag = np.sqrt((real_error*real_error).sum(axis=1)) + # print 'real_error',error_mag + # print 'result:',error_mag + return error_mag + + +if ne: + def dist_pts_ellipse(((ex,ey),(dx,dy),angle),points): + """ + return unsigned euclidian distances of points to ellipse + same as above but uses numexpr for 2x speedup + """ + pts = np.float64(points) + pts.shape=(-1,2) + rx,ry = dx/2., dy/2. + angle = (angle/180.)*np.pi + # ex,ey = ex+0.000000001 , ey-0.000000001 #hack to make 0 divisions possible this is UGLY!!! 
+ x = pts[:,0] + y = pts[:,1] + # px = '((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx' + # py = '(-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry' + # norm_mag = 'sqrt(('+px+')**2+('+py+')**2)' + # norm_dist = 'abs('+norm_mag+'-1)' + # ratio = norm_dist + "/" + norm_mag + # x_err = ''+px+'*'+ratio+'*rx' + # y_err = ''+py+'*'+ratio+'*ry' + # term = 'sqrt(('+x_err+')**2 + ('+y_err+')**2 )' + term = 'sqrt((((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx*abs(sqrt((((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx)**2+((-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry)**2)-1)/sqrt((((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx)**2+((-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry)**2)*rx)**2 + ((-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry*abs(sqrt((((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx)**2+((-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry)**2)-1)/sqrt((((x-ex) * cos(angle) + (y-ey) * sin(angle))/rx)**2+((-(x-ex) * sin(angle) + (y-ey) * cos(angle))/ry)**2)*ry)**2 )' + error_mag = ne.evaluate(term) + return error_mag + + + +def metric(l): + """ + example metric for search + """ + # print 'evaluating', idecies + global evals + evals +=1 + return sum(l) < 3 + + + + +def pruning_quick_combine(l,fn,seed_idx=None,max_evals=1e20,max_depth=5): + """ + l is a list of object to quick_combine. 
+ the evaluation fn should accept idecies to your list and the list + it should return a binary result on wether this set is good + + this search finds all combinations but assumes: + that a bad subset can not be bettered by adding more nodes + that a good set may not always be improved by a 'passing' superset (purging subsets will revoke this) + + if all items and their combinations pass the evaluation fn you get n**2 -1 solutions + which leads to (2**n - 1) calls of your evaluation fn + + it needs more evaluations than finding strongly connected components in a graph because: + (1,5) and (1,6) and (5,6) may work but (1,5,6) may not pass evaluation, (n,m) being list idx's + + """ + if seed_idx: + non_seed_idx = [i for i in range(len(l)) if i not in seed_idx] + else: + #start from every item + seed_idx = range(len(l)) + non_seed_idx = [] + mapping = seed_idx+non_seed_idx + unknown = [[node] for node in range(len(seed_idx))] + # print mapping + results = [] + prune = [] + while unknown and max_evals: + path = unknown.pop(0) + max_evals -= 1 + # print '@idx',[mapping[i] for i in path] + # print '@content',path + if not len(path) > max_depth: + # is this combination even viable, or did a subset fail already? 
+ if not any(m.issubset(set(path)) for m in prune): + #we have not tested this and a subset of this was sucessfull before + if fn([l[mapping[i]] for i in path]): + # yes this was good, keep as solution + results.append([mapping[i] for i in path]) + # lets explore more by creating paths to each remaining node + decedents = [path+[i] for i in range(path[-1]+1,len(mapping)) ] + unknown.extend(decedents) + else: + # print "pruning",path + prune.append(set(path)) + return results + + + + +# def is_subset(needle,haystack): +# """ Check if needle is ordered subset of haystack in O(n) +# taken from: +# http://stackoverflow.com/questions/1318935/python-list-filtering-remove-subsets-from-list-of-lists +# """ + +# if len(haystack) < len(needle): return False + +# index = 0 +# for element in needle: +# try: +# index = haystack.index(element, index) + 1 +# except ValueError: +# return False +# else: +# return True + +# def filter_subsets(lists): +# """ Given list of lists, return new list of lists without subsets +# taken from: +# http://stackoverflow.com/questions/1318935/python-list-filtering-remove-subsets-from-list-of-lists +# """ + +# for needle in lists: +# if not any(is_subset(needle, haystack) for haystack in lists +# if needle is not haystack): +# yield needle + +def filter_subsets(l): + return [m for i, m in enumerate(l) if not any(set(m).issubset(set(n)) for n in (l[:i] + l[i+1:]))] + + + +if __name__ == '__main__': + # tst = [] + # for x in range(10): + # tst.append(gen_pattern_grid()) + # tst = np.asarray(tst) + # print tst.shape + + + #test polyline + # *-* * + # | \ | + # * *-* + # | + # *-* + pl = np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[2, 1]],[[2, 2]],[[1, 3]],[[1, 4]],[[2,4]]], dtype=np.int32) + curvature = GetAnglesPolyline(pl,closed=0) + print curvature + curvature = GetAnglesPolyline(pl,closed=1) + # print curvature + # print find_curv_disc(curvature) + # idx = find_kink_and_dir_change(curvature,60) + # print idx + # print split_at_corner_index(pl,idx) + # 
ellipse = ((0,0),(np.sqrt(2),np.sqrt(2)),0) + # pts = np.array([(0,1),(.5,.5),(0,-1)]) + # # print pts.dtype + # print dist_pts_ellipse(ellipse,pts) + # print pts + # # print test() + + # l = [1,2,1,0,1,0] + # print len(l) + # # evals = 0 + # # r = quick_combine(l,metric) + # # # print r + # # print filter_subsets(r) + # # print evals + + # evals = 0 + # r = pruning_quick_combine(l,metric,[2]) + # print r + # print filter_subsets(r) + # print evals + + + + + + diff --git a/code/pupil/player_methods.py b/code/pupil/player_methods.py new file mode 100644 index 0000000..018a1ff --- /dev/null +++ b/code/pupil/player_methods.py @@ -0,0 +1,157 @@ +''' +(*)~---------------------------------------------------------------------------------- + Pupil - eye tracking platform + Copyright (C) 2012-2015 Pupil Labs + + Distributed under the terms of the CC BY-NC-SA License. + License details are in the file license.txt, distributed as part of this software. +----------------------------------------------------------------------------------~(*) +''' + +import os +import cv2 +import numpy as np +#logging +import logging +logger = logging.getLogger(__name__) +from file_methods import save_object + + +def correlate_data(data,timestamps): + ''' + data: dict of data : + will have at least: + timestamp: float + + timestamps: timestamps list to correlate data to + + this takes a data list and a timestamps list and makes a new list + with the length of the number of timestamps. + Each slot conains a list that will have 0, 1 or more assosiated data points. + + Finnaly we add an index field to the data_point with the assosiated index + ''' + timestamps = list(timestamps) + data_by_frame = [[] for i in timestamps] + + frame_idx = 0 + data_index = 0 + + + while True: + try: + datum = data[data_index] + # we can take the midpoint between two frames in time: More appropriate for SW timestamps + ts = ( timestamps[frame_idx]+timestamps[frame_idx+1] ) / 2. 
+ # or the time of the next frame: More appropriate for Sart Of Exposure Timestamps (HW timestamps). + # ts = timestamps[frame_idx+1] + except IndexError: + # we might loose a data point at the end but we dont care + break + + if datum['timestamp'] <= ts: + datum['index'] = frame_idx + data_by_frame[frame_idx].append(datum) + data_index +=1 + else: + frame_idx+=1 + + return data_by_frame + + + +def update_recording_0v4_to_current(rec_dir): + logger.info("Updatig recording from v0.4x format to current version") + gaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy')) + pupil_array = np.load(os.path.join(rec_dir,'pupil_positions.npy')) + gaze_list = [] + pupil_list = [] + + for datum in pupil_array: + ts, confidence, id, x, y, diameter = datum[:6] + pupil_list.append({'timestamp':ts,'confidence':confidence,'id':id,'norm_pos':[x,y],'diameter':diameter}) + + pupil_by_ts = dict([(p['timestamp'],p) for p in pupil_list]) + + for datum in gaze_array: + ts,confidence,x,y, = datum + gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[x,y],'base':[pupil_by_ts.get(ts,None)]}) + + pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list} + try: + save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) + except IOError: + pass + +def update_recording_0v3_to_current(rec_dir): + logger.info("Updatig recording from v0.3x format to current version") + pupilgaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy')) + gaze_list = [] + pupil_list = [] + + for datum in pupilgaze_array: + gaze_x,gaze_y,pupil_x,pupil_y,ts,confidence = datum + #some bogus size and confidence as we did not save it back then + pupil_list.append({'timestamp':ts,'confidence':confidence,'id':0,'norm_pos':[pupil_x,pupil_y],'diameter':50}) + gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[gaze_x,gaze_y],'base':[pupil_list[-1]]}) + + pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list} + try: + 
save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) + except IOError: + pass + +def is_pupil_rec_dir(rec_dir): + if not os.path.isdir(rec_dir): + logger.error("No valid dir supplied") + return False + meta_info_path = os.path.join(rec_dir,"info.csv") + try: + with open(meta_info_path) as info: + meta_info = dict( ((line.strip().split('\t')) for line in info.readlines() ) ) + info = meta_info["Capture Software Version"] + except: + logger.error("Could not read info.csv file: Not a valid Pupil recording.") + return False + return True + + + + +def transparent_circle(img,center,radius,color,thickness): + center = tuple(map(int,center)) + rgb = [255*c for c in color[:3]] # convert to 0-255 scale for OpenCV + alpha = color[-1] + radius = int(radius) + if thickness > 0: + pad = radius + 2 + thickness + else: + pad = radius + 3 + roi = slice(center[1]-pad,center[1]+pad),slice(center[0]-pad,center[0]+pad) + + try: + overlay = img[roi].copy() + cv2.circle(overlay,(pad,pad), radius=radius, color=rgb, thickness=thickness, lineType=cv2.cv.CV_AA) + opacity = alpha + cv2.addWeighted(overlay, opacity, img[roi], 1. - opacity, 0, img[roi]) + except: + logger.debug("transparent_circle would have been partially outsize of img. 
Did not draw it.") + + +def transparent_image_overlay(pos,overlay_img,img,alpha): + """ + Overlay one image with another with alpha blending + In player this will be used to overlay the eye (as overlay_img) over the world image (img) + Arguments: + pos: (x,y) position of the top left corner in numpy row,column format from top left corner (numpy coord system) + overlay_img: image to overlay + img: destination image + alpha: 0.0-1.0 + """ + roi = slice(pos[1],pos[1]+overlay_img.shape[0]),slice(pos[0],pos[0]+overlay_img.shape[1]) + try: + cv2.addWeighted(overlay_img,alpha,img[roi],1.-alpha,0,img[roi]) + except: + logger.debug("transparent_image_overlay was outside of the world image and was not drawn") + pass + diff --git a/code/recording/__init__.py b/code/recording/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/code/recording/aruco_test b/code/recording/aruco_test new file mode 100644 index 0000000000000000000000000000000000000000..67c104eb81566451f67764c7b1d3f19818e8e5d9 GIT binary patch literal 277012 zcmeFad3Y2>)IQoh3ycwx5FtPeGAJNBNeF?k4$Htq79$DBo`ht8KsK@g$`S~mA)<(& zh^VNj;ED?hf)Ew~cTiLm)QEtIprRt8BDwE5Rn^loB;WP>-RC~{pF7YwRp&i*>ePO! 
zx~FHMd-xEGu50RJ(fVl|>+52c_^WBDSFB1zb7?)bYw)+Nc9T{QTstl$=aL~sFGip+i6>iJh*8`{L>tV1eN<+zC7v$KpzzVj zLg46R7Y7s_F7+v@(%pu1WaoeVgi66tGQYa&Wvt9kQPmDzvofc|#dgig=$w_AQ#7aZ zoSt!=<6^rM)u*vo>^D{lwQBmpQd-?{mJsWdyR~582dDhFy{ll9b3`86FDi!Igju zrf9=((MKt|RYbK!(A#h&$vD;47+kmG8i#8Fu1UC3a81Ew{-h(4fy*nInPyOiXGkiC zg_eu>EXm6UEs)_t&>|UDq!@wOxaQ)zLo(+{N@Sr7mw+yk;U$t1S&C~Jt}_NV*zyjSN2s`Vg*lGOoxY2s|nyk4gHt0zscNg`YxrBd(`$ZNjw~ z*K@d@$MpiP7jbRD^$ITfyozfphcxX?Nq0!PQ_{w38w@&={lIHSZmnr_C3ongchYwr znLd8yXQ$5YJTi3Tg`JOn_51F)p?bpPxw%K4zyIXK?JIr_zIM^dlv&Has%(?pU{JGJ z-Ck-s@ukgQefa%jb4TsF*l5SQCm+p^S>EB@F^?22eRod3%`fF_?tE-~+Q$pq^%(SW zzc^g=hfckyJ=x>z?%nI3jYuoGDf8}|OTHQU=(B&$%xQd2z4t3m9(j3F`jXuInK5?` zogXoF*k8YVzddHk@t*9a@#X|Mufw7so6= zHfwBT`_nnaGZxIb{J|Y<6YpR7$Bu7bJuoxm)x9BOqe^%8%*$!qYWSnire9g4Jv{V- z?pfZ~qU$wYGyTKAHoo2S!uQ)h26fk;aenN)Q`gTUhLL*%!|Xmuh{YZs(DLOb{;wQg`V)^r@sUrpZV3{-XnkCedEuaCJu6ccCKOL zlaK8=w0cWKOrzJ6ceL-+u-8M^e!BXP9Us5(?#ZT4_PS=q!gcdD%(!OfZEx%ic_g-U zcdLIdT=P_$`{&M0nE8ggL-ndH+n;hT_&1^cBX738Z{d#h&rZ7V*YMuzzPz?Rr|*k?-|& z$p27>JtOMKcLns)V^R7X3$UPr!9)S>5>I{N35 z`nB8BsE&MN>uASGb(9xo)>_`^I`ogLqg)@?Vdq!^q4pr?@*p{3@hqUGQ>q8{nfp@{~c zuA!!nc5%Fb|7zO7aKV4$Ho1)PkN5Ze49)8dQ$(C0Qtq&2>B=}Pks)4=E7dmvq{SL z5^Sv+^$@0As~zs?k{m&%qTX7e-dw8QB$|l&q59lXn93ZR$#njpN08{>{&g=#hh7^|1I0oP+K`s@RbkyuWXm~V+G$S(bV~F~6rA)&4so{xPSnrg*+Np)+iJ-_8o;+T7Wz|UdHYB`tz^5OljTD9YP1(*JD%_7Vora_-y-e*C`s^@J^!LUss6RTf-l9j zjn|6$IVtt4{;)@m-}SP8_K*p7$b9bzDDPdezkMjn+d;|?!F*2rA#hyZ4xGwzIG|rW zB**DM*>BrOJt3$-3?;3imrHU4*(Cd6;CzxR?FnqhpJlniWqFnUzhyfH&KnO){-Jn%#kpOO9Nr-3f!JSXQ>EBBuW zty1>eLsGuCthc~%eGA$>6#cxD>~B(B%a(eM%}!;O>WBShzVl`OUm_C}L!Qd@lk7j8 zCI1aMe@5NrVvd@Z9+3USBKyfCDc@Aq=g&ig{BX%1Ap7~L0fOIC@(;;z9XP(`OMc*Z zd|TE-J6WG<9{5F;cT-_1YsU1;pC{xzQz7|IS->UiZ zTRAV)yIt6$`uXRwT!H?;4B3uHM+^BbMgfF9f#dN3IsXTayLi|`{c!zAp(jsvlpb>4 z3mlJ2WPfl5^oKuXKMd>-L9+fE%W|oC=^@!q0>^Kq)W0EM-m^;iK>y*m(`OFKEy^jR4A{)*ILU^ovx>Yq z>2nE6NFI@r;mwzdl1B{6%FXd6r%lOHCisj+_L-xGB^4yaq~L!h?4FjLo0C#dn3i9d 
zk|I>qrDz!HiR_L`&&`{goGWU^zjUg0NGv)k!<&|sm79*L5Cupog!Eu?eBtqGXL>n~UdD9Anckn+MBP}szQoq&Re-Oi1URnQ#)t!b^9oE*>EzuQF z<59l3+|L4kWn=;SBgUHTTN6l)TobT%&idasuBU6`3wfMpP|ZW)7$ zrg%+#pBzoKda)Z@%euwG;lDR)z=GRNEV!mVUVCfQG8k$XRMYXU_uNkOXttA5m z4KuZ2?fqH$s^)PTXl*f;MjE8DDg)-z=$-}G(_oiWFg-UbgMFt0Oi;cAYSHq|62{sI zGwWKkYPUA}s%DnX;eV~OT4!@};R81-H0{ug+0AdpRGTI-^QLDh7tE(#>{0yN-RP`- zetd0hReQQv_CAC&W1i#vrKP#P28fQWeO?(bC$r#xIj_(@J2wwI)Z&!snbW3EE6U_& z$GXH)ZfLUTm@%0d-rPZHd4)y!UNr))0@NCO6%b3x-D*4HuT5aI&8;=F*lGD`Q$#yl zrM73jw=h4`TTIia+@*6K)QtX_^vo={6()G4Q+*YVJ%ORZR2k{H`Ce5MsG@?xjP#Bj zi4g}|lM1^}%_=IGUfa4ryOQW`=q#D(DY@7#d(*Oo1K`H4gvyEHc2CDnK3^0K(b%b( zIT>&c(s_O(hS4oXCKaeUN1x8jE#R$kYyx&n2EPE6kHn})@;PJ{J<@Zt^V0HhPBJW= zSV;wGNk*9DWQ!#yBo)Sc)v1Y)?B<0z=#V zpHetCr8o*nk_uzfbBhW&ac&mo3L`Yt2nkP?P%b3ufE9BcBo$^Az>Gi_ssf+FmX0H(`MMrS?gGgH#1&rF$`mYF46 z17|dIG7GhAZ+3cio>owho|ZFJODoJpMW;|=c|w$(mYJiac&UH@rW6ziK3F*!TG#2h z+1{?%*;Dc}({j2Ndb9Jo@@YN=$;Gb^KOL4JC#u0;j;UFl#bi=Hy=rDgJ> zzzaf*HbNbusGQRaic-*#GtFYrR5im)D{_yNl$_lB?6jZGi&3%SWe+W&; z!Hk}RX_K~-f`P->gzU6w-a$CXL^-3o(WY}i{xl4goI+80Bs4EjG!~<~rxzz<>FpbK9qrr)=~IcsqXLEcEfeF?g!W-&e0Z$V6<^$cm; zAi~FPAh_|tVZkAJaZy^uQyg8Z{htMVy3GIRnezYLKS~j(<$tHaU%D;}{P`kBDy{WD z*-N!2G<=DBTXetGi7T<~T-=GOr?qQ}2tHJjixQ`MPel=#xI<&tqJUGH#+jM8Q)JWn znB?yPKp1^KeR(Dt#;OgG@`deJ;ZBQ1yG_bp%1vcC+z!?7D30+_&mU2%dJL{+JkLe^ z-X^|!{)qUunE1`|QaMJq1Ldu6jyp~K2bd%Nv@`KlpCcs7#1}nJ$oDky)xBUsTqZu1 zO?^BjzPv@rP@;*C3Nk)pO?*_m@kurD@#ux|nQr2vA&pO-iH}=##%GR+kA^fpB__VQ zw@BdKCce47R+{*#?MZ&EiT?~SPCges`06m5Co`;-5G1pEL08L); zaSO$1X{fH?_}_7e(~?lVh2y_boR)&>4IKZG;O_w3r8rIb)h>>|MRA(ytD`vnD#dAvuXb|$If~QNULDHuCn-)- zdbP&!brh$my!!kVFxsr5I8EWzl^kDA@kSIs%<;t(52JVm$M2x{brj#i@qCKYR9wA* z<1;8uQ*iZ4j!&U@IK@jiKAz%DDW1piQ52^swK|pKw@{p>(&|Kx52QFvq17&q_oDdq z6p!L~48>_`tafs|1I1}dtPbV)jTEOTuUg}H1jT8pt3LlPl|PK)PKsA@Jc!~nwN)SH zxP{_0rBzpO{O|6F(^OWyh2y_boTjkq4IKZG;xu(tujKeiiqn);UBdCNC{9yVbson* zp*T%Z)u|kRkK!~nRVQ+MFU4u7KS^`%zrucm&1!Q~dltH8s&cC6pbR>?u3xDLr*QF*)I2 z1w9exIk<-&O!pi-NY9s5y@`%(DZLczDLXyeGHKs7=;I0J 
z!lc~<)l`PGEFs=DiYfU$m#mQYlA&xz`O8z<_fl5~6y9K%(0KIjifmX{cDm;9r0P47 zB&4NQT>wftFhM#!W%Dn2%8Jf=%0^dt%IO+$-c#-d?;}sy!K&#f?z{U~m;?bFgn=~! zIpkt*l7i)l4TzK_V#1{uld?n`#XJXHygG5XJTZ!CSzl*Wg$@s!01k)x-y!sRLZoD8>O+U8WH6&{Xbn&vpL&M5!8wP*oEgOA0^ zT^>}&B~R(zL{I$TLSYRtA@l$`Pi^VTUCn5rX(1Ifdkgv0oDbO(TKczMRDT;SBxmj1 zHcKiBxxz@>n!jsmIK}^ydMbChs9}tn;#yNSswSqIDj$ue4wZG7@dpb<@ zlx?MT$5Xyk5cH@jTe((x$r5s}jp}p7(uFK`u&M&%#j|1!M_0Hv>}p0M1%;V|s(~1F zPKrJ(#5lUby>i!<$wKTlAvRx%o)x1@JS*H=25f%^ zD{w;D`CaX0>WsfAPxlteaGNPSkt!DIh<})KSgJDAS+RR1L0bi+Og_U=_XdL235ZLH z==&5kDyJNM!dFohwsLg(rBsb#qb&_EiHeKY- zhDDliWt~UhY~5v)Po-BTwj;dk7#aWJCAO9ucF}%n<_VMhMzS$#%4F}B1kGOI@Tl&j zdCD*^nP-4bMRdQ6`V9x6KMC_u zGYsh}NLHciaV6bOEyeK)CzWcbiX(g{T$!_7sYXo#Z6P3Pg_w{U4JlbAZ~X~nrTd%7 ziWrbgSw zHJgHkb8@143Oqz3Eu_ypu399JexozqKe!_QOf3HID<6Fr+?zvbJBukryR50 z=woGWmDv|nLnyQHQd>M{R)x%}EDK)Yj3!Fx2F~c5Iim`vk1T%BF`@E-YAO;*E-#j55D$X{c1^390rIotpn!Uu@`&-D^3aBFGu4c8SM@}xv1-V&B5>X5kRHAuY zloef&iA&Fgmd(FROKR!-%lg8ByQZMRcGDSE)!7RuF+D|2O|c90x2wd!J~m@d0^@a} zdKonwm6V1g8Vf$bhC-;)sJ`Z3wk)9TN+?WXbQ7T`p$w>X!A%l{6lXdP$)<)hu9zA! 
zzT)?gM)v4VPK^YMB6n37rVKWl(v(c0l}mU5G+?j>&OcOEv>z_Rwu9~y+ynA3sU4?c zJ&PR5-6uS2Ju4c&Ll7+3M*a<^#}%WGL#$G|79*e;^>|3Ic+vh7y@uyU$Q-PSBmt>5 z5#c4r3++&SwqL3HgpNRO_x^czZ^HnnIonZ8tDNjKEKu_-o79-}h&gfl?+BLe_s9t# zV8*FPrEE{rBq8UHFA+W)p=KC>hX@q9vV|`5WU}boN>o+Z+w-oF)K=QmWO&MLZP=?Q zb5}*9tF0k!`BEYBIaD5iD_(kDhf5G7+Po8Xi0lXMI!A57?Y-eQGW=3a$eM~J6(LKG zp<)j%-2zQi#X|U3L%6^!TE;8&FBQNnipMMlsUZ>z3=DYRrDWOVz3sI_vo z99@~d5m@!ddCXY$q^?eWxy#v*T~uHJJqKYC(hw4r)OJ@0oCqWfy&!flaD8ysJH_TCpIk+T`kcrW==sTZKbqYsi8BEhKaWQBT=Y8D%QcJ_avB z)CC=E2;LjSJ2X30+f{)-f_D-;D6&7;*{S^&(y5}%zMey&_InJ@y$BsxT1pjvaOn~v zM(;H0t?G}T(UgqpRn=&Fo|!TIF5Q3x zOYcTmsYR+E#h-GHQukuRKgBT<4Ciqw3~E8T#R&eG`lIMomANj=5!4b>LvF|P%J!mY z+|j^#7gvR+?2D=`P#BO$F6AK-58yRrJ1s1R;qy@9QnpGjf0S$$mg$r#eqYEcn)#Nx zG2IJ(wFCU66+9yrhL{Y`fF3c_hF~ecu$*^r={>UOXUCLt*eExBDQG)Y%3GCOe+PC` zJyxZn5(6Y(fuz6C^K3AwhQUOHVTojzG`MJ`7!N@uZ}WUq*jS7;Y85fFojF@mQ}Q;A znd=L060Gl$`I3sF;E*?X5)<=Yz_b*ybQ}6Kw+XiFRT-Xc4S1OeSwgFB^$z?=D8mx{ zsWJUv|AJmHysXkw_SM-a1Ei*;u5=E5p9HT%N2t1Tj`+NjD5W6wu!Rm1EMnkb)lWZ? zz|u{WzFZR8IR3^6O}~>frUY0YtFA(jjKKy-9ocFqbR~gAUs3AxOuu<#r9Bhxh#0Qa=n|ul?cD zOL}2Qx&6m>5UtS5?B5|kGOe+5z0k&H4GvuQi@Otg#>CDU>h09ys;)ts9bDQBxuTY|v)O)wnv9WyS{lsd zFc0eEb{f=_-WWjZz`~%W#I+6TXJKe5r@$DYgkp?zqEyD9E+RfvL)B`mau~722bV^I z$3{k%bJ)m`zW!UwhD~jsqPih9y$seQYTC|@! 
zm}#F3zrGBfKW%kKUl99b>;!PKWt^ax_pDUwRIswWNP%672R=JdaQHOsB?6^uc_YGp zu4N-m$pbEoY!UlZan|K`f&&#WY0pV3+-kLR_B+QZ+nRy$m%dGZwU8zKP$Y~{J_w+_ z3-;z6WW3yEgEtXbzJ^47GOlthes6)jI^fXfeeO|f7Lu&_(5IAU0^gy}Mj^I9h^^eU zLoQ+yg_t^nnJ>hQL!VSB)}6%y4}D%mA0dXl2{hXqz_UKenRip=7n)0PwaceyxFYiHX_?`EB4Xe~X zgv}WaP3pWxRl*#R@k+|LlL=RbjTJZ>*1&`->v|F{I`G$L5Ht>Yu0A704$ok6^zGw( zXDxtkrg2hMN55@GpzOEvpu?}<&ZLu7VZwMap|*a@hs)TfVkFRZoj1}vE;u&OA-b_R zj|t@?pt7P$m1D*!DkB~8R8lpKrfB@WqCpBD3SXdltEHd&;{5&@B$c=XSBQBkV%LC(~Yz@j9@&-DAyDH=jt>@m5)o@A*n<%)L zsscp{c_ZxZP{pLzKVZ*PDdN1({S~e@7WmEbWF1EaC^M?12sI>T8+oP_hqPG!{ZHWe zJc64FXAzpG{a#PmVb7jRc51$DZ)P8VgZ`sO?lqmA2;FpeLB?bDsI+@lG|D{H*?hdt9Q8h0PqKdKut7xLHiWZ(Us>nEF`>!=bZoVk2&v4WDu3!^z zL?y=eD;(uTdrd`ctwNoCMWQY`BXwuhPBez7uF7$bh>cVgc=f<4L5pMWu)0F^OB_2N zbYG_Na2D+~x~klL*^_=q_}68&_e$NDaW;t)weJH|%eDzJifFXj)tUYR?JB>kZoP>K zW>ckczfBo0bErUKE}c4m(7u~ow>G$h9>lu=j@*n$XzB!H1c6FI@O?k@t zmoi8*rs`Wl z`TXp(kuODeYrTQ)AygxW`VZ_3gjE#XP4D`sdWulq54?s^El`T?rZ;9(EhV&s(ylR3 ziVAcZ=S%2(!>UR`>1`@i2|(qTabHq_^H}|GiFDF0 zJ^xxU<{VWGPhf^x`cvTzO2P25ABUG+91I;bjZb(=_vxPavqe8*MsZA-G+^R@NdqPi zNZB{lm*MaG&|B!4A^C_9J^xA{({~UubJDYlGQ3^I*Og+r(pPl4W~UW)nVy%Y~yFPIacKC&`h|=mI=v z-pko8Gp9>hapt_N%wq3^UYQwl@HLR^zEP9fJ15{h7UKEzf-ZBjvnDww(3f;3v61*Q z^44T`FXy=2B4>`*o8in(!)qo;z*(5vvF6!g`MccD0Y;eCY8zI~lh&Uy2k?dTg8&bDox8JXDy&VGHJ(G+P{ zgh%hE;cHUu=Q{hCVpN7Mc@t0rqNM7*YA}nxzR|9|hUeN3p(B|l@J3)tlo&fzE^fG8922>;*uZf_2K(+5`Y9@k4fUXA( z#oDnS)B}17bQ7oz9&&T|p`AeYpRTD%20ag22pRxVW5FA`V9@tT?rBp^S;r;oop$_*aLmwZuE6N>iNfw2|XnDI`o`|9KVxL)yL3#tfe;VW5OEIE+-&29rFQs8ivhM57ql+ z;Dx|lX8cZ{y^)w_Rs)YR<1>ADAK)8-H#FlOA3hQI{lLF9jfXXos`6pue{s>{) z(17yp7kWr;BjheZZaB%M1jr5a$$bF12+VWzZZ%t;N$wV|o22>>C4CnlmyB~3dN-JD zlSz)+Gu^@pseWjFU60e=n-DhYZ?Z2xD*p!HUck1!COy<%qFH%-WI)e1kaL>#NSy3h z0zHJ&JAG}RnDmf6K|Xt4fF8O76Jgdf+Go!PNPh@8+jDJ}fB!Mra{+qlVgBxF)-%JW zr?E}bngCy7#<_o4<_Ia#zxqIK7UT-da+Vwxq57K$d_VAZW;|VJAbcM14}kYD( zJ@8Y&yO{AzAHEYf-F0K2h^u& z9MO811G!+coe4fWGk{M7-rkI7`0ypbrvi^PZ1XzzmTh|9CXj0?(Ef7j^V&}1Mg!l2gU+5De2Dwege4DjGOx9JrZwZ 
z#)tdr;RWCyB0aBPs=d8F`~%>}fY&u&odaG4ysmm|fO+F2@HXcBQ+@h70jKmgn(;|K zd^qqQf%i4zruxkS?!di+y7IpVxCMA!{on=Q;lS&%?*rhG!0T$ibHIau!_Dx`XKtVU z4X|KefcS1EjCZ z{&T?J0$!K>4X(l33Vb5g2V=gs-p)%Z_1{jw&!hgDnC(jx_6-JaxWv7L4-5{y&*Bbl zcCXbP>|9}U2e-S&?g@@sR?idMv$Xz*U{_Xf&jGR_$Hg{-;IzOt+u0Xg@LMZVRIp60EdhYCy8k zX7=7%JA0xQi?~sI^Aeg-ffkcGo-h`Of-%L;VGt z{lSLT9k$IDMDDL|f32Z)L;ct6h-O_jZL@AaY}F3y_AOTJqHcfOsx7wI@3m^{EX3Jr zVbnsBch{lF(HCWu<#UX*r)kB=;Q#;pe^3JzeO;Vu-b6t+$hb0vh8TSUX=aE&zc|!C zPT$?7k4i`14K~N=yTJ63YP1RHne=HOi1B*@?6ZQW-eb&r1H{$ut6eYUWp?zYdj4$7 zPGw5-0evu~@aK;!HMta6YjF|5H;~0Aw7H7|SbF&5s_$aTa~MLCnsOAsbRa&>B;`3q zhKays6Q8_PrpDYL6{~$5rg;8Hwc6nr7snMpPsWwqsf=p)eLV3|`C~iDDk9?8ZAEa8fyX3f?$e=`qvNHdkcr{Vj2Luc=mfrqKkx@YD9XZg&#ZBeE*h~#{afmi6^!aI^i-@LvxFGO{5&~aP9{2+9z#mWu{Na$m7rh6* zs66mR=YcQe4*bE0z!$6s{;)~l58nj7=s)lU;ekIq5%_}&`e;!<1zbzmK*@MJ84vv7 zjldU^2fm;?@COnCU(_A=!xMooMh|?Uc;FAg1pa_W;0x3Pe~2OQ2N(i>NFneCTmpZ< zCh&(-0)JQ}@P+$?XMj!@vj4v0Uk@K6`_>G(MG zWltfzEM3>8g1M5J{(mgj9o4k!qI!aBeeh3RI~Mi_J*#TVzE;=5E|$?eWm~laZ^Hj) z{CFs|r9w&AWeSJd>YqpWUkW#~-8dieS19bX9oYwd4Tam;N326w(Hal~WqvJ??JM-uHzdO>kLj*PYcJOvSl-hKgYFW9@`34yR6y(=Qq z?bl`?(m*GJ^XyZ0LqRA<3hleP0twTX&eOCx_J8n*WmvfW48%(8?WaMksea-POD{5N)c$oPB9Xcqk-P1CNkJQZB^0c*(}`AC7d;pX*4nS9NLP+*u-{0LD2{Bh zx2H%nN4D6bC=#QGBF#2?y%C7SvTYT1>mWqp^kK+nzh=1($C%f-s440>>hE2sYt~*U zVzb5?J#%3j{f~}49R)R8j8lbewmnr8CW9QdUyX36R?k*{Zm+;Wzs-(Yc$(!Gs1Kjq1-Ru`3eH%GU_DFy zU$()!cD#m2DCrDw?4ls)X@plZp-_WwNA)yyJpxWx({lP@ldz^M1oPhUjNMnvWaU## z?%%~^)hQ+q)G%3nJxUPPbWIN?4~}5+&^RV*7czNxBa?OSFnQ!Wll9k*qjZn8VzOZ% zlP4xKd2%k3jgK>Vb~lsfPBD4GhQ1!w^re!UCLz7(@gfh!(`t%Ci@%UL^G`EI~|!E z7{KIU8k0kdn7sQ0llS&8dH)!b4}M|tVZ+Ik{G(1xKE8#?;Y^V0nmTmv9Yn9u?_wIH zzXaNNOB6uKdU|inW+b1YkZ_U{xi|j@4?*Zb z`X^p+AX7>*6fozIg6^*e>mQ*<5cF(qGF$^;`t5igfMnh`WH>LX1+96%4bT&=FZ)v0 zh;f=^A|b4Wu0yo>6I7p(`Uc#(r?|~${occs0q#C@55K1UgM|g9mYIlwmdu$}4Drl9ypM zas-9y!|LM0yHO&aF9CcLI-Z3qtv(|ZAKbq=b-X{{2OXl#1W+?7bBw~fXc71Tn{ zfS?{q+Ux~AbSms7ZO4u!4Pw!-`$wE7=pVEcGI_zzOPn(f`T2tPz&hwbkSgx5X~%R@Em 
zB&!7p9(ft<*!n#Zy?Nm}+{%g}qi^1d$F?GR&?I~F4BUZ?7#KbugZ0NJk$&s)6>t;~ zZJ#v@(cQDr38>#Bqk+fcYSjvu*8Y|P*u1YF7EuCc0hn!o9ZpXIc<^g%6PAOyl7K7J zR2^cLL*@G`LUipZFgN>vC9eYbxaVM9+X?1D1%z`U+fR*wzHgRJ&^2n2Z-CNmb)mC; zuLl60d@K^?0sIHRwJ0-#+;*Y?n}2=?rEg^!^pUQSEnOkQ+QlF4NgaNqhWqIx6b=MD z!cUFX@$H*YQ#@h(66C0C{DMm!@vhR7|1OK+VRa|puP0rm_;Uon?VR= ztaUm+703Yn5Nd~tF%QT+DxIj@ZRlrO$7DRo9+aRL?!XOB;9LD=C}}tgcKY`&2;Qc@ zv`Z-b5Q1M{Ev$7q6b89*dQP_JgMUDVj=Z?ALRFWE6|ilJ9*(=)PC#NnQ!b)ob-9EI zJZPp~RH19G+VEpont}@T_0hVPYT#@tEn-@iSsQ@grSI6PYb6FwwHHo*Bwnr;ozk@p z2L24;^he_N>Azf$hbRo3XC3+@@dxxP@TDpZobBhTYW@*cxS)sh*%k{gaWy2v=FuOm zV+khph@<+FN8wH4u(BO4(OV1|Q4bxj+k@VDK);NQL>~wZRDxPpo)ezaH)C%=lS0(6 z>yYDCT~CJy{eGtC1kuEfctxKJ&Jx8H1JB9OW_>l-j~h@BO(Ft%Q-2xk{eFvnD2bw9U<`_| z==2`QLM0{Eofs$TGU!H2_%#+RL72K%0AH(Q!nxo=rh{c)6F3(n^9t~th77l@km+vu z+9vgU2E5Xc;rlT(iYG|@;MjrhX03c3*0SAGdtmTBiO4&@o@?WtgOb-nX=XS3ymq z{|e|&A530d&=E^hub4sf&=8CZT%z1Diz)Tzmf;`E)Lj8}H{jU&vCE5CX!+_m+?QLx z9#0TYvPL)1SSaXM%k^+*^jrws;U}cUd_e26!+Q8>X~F}*A0`=|%VSShVZ8jIcf+nv ze+lgE{>fwWBT>0GSsP$t(LVxqRN-3823qerTI1ryx~%^M=y!rya|~5QP%mp7R`GkO zY8r&XhsMRQ816!5nDq*L$i*ZR1-v)O7^T#@r4?#HsaEe7c&v;>k^oQkSHmI&)={xi z3*A;(JW%%ZVi(9hOazLaVgLFgVoK_L^-On<*x$~+Bxt080RTG=bnf|0g*ccbm! 
zgTQBo5SOz%?G_?_Yu-ib`3dkPLqrslM1qFfv^S*Tjc^C6EiPYGn2X5@Eqc=@q;!8s z-fBpjipgrH+m`()v!++#75E8iF)TC3_F9FMxd-@qL&j9hmtIFPpRhHwp_s2g;D8~- z3#d;rh9!^KZbrxFM*9|$zZlXiD2mO3KiH17m4XgT(>LPs^=qDg&1I(~5!3YbkIN*z zA)e@;#8d&CL8zUkIkqGdl5?+?)`ZD7+Z*63h%C4YLeKgO1{N^h{w8JvD!X0*$-{;; zSBL20rM*#SqwJGEmibphh<;_%R6tUz#jxNM`xkwr#<0e;Ayb0voUly9GW|@ZTs-1{ z5Al-`UQEnpw0F=N`wPuvVNxJeXb5sSOpUpmZVO3QEz+ij;S4PGp2SXu zlZ=CSj*)~LhX07}$I?ye4MdM->AN8LL@m;RCFoG^Eu718lD8oKaUDrCQG)*UMm;a3 zs~~9!SLOAY6Ig5yT&k$8_mLxuJxzi8mfDw;aXu_k)Is<8~sj|!04Sl8W zbn}l$vB`4$QIVnY}2zt~QWfwbtPGysM7*DM&*e8k#4+l0v6 zT0~+}F#Sb*VzJDXF2*VdJxe*$A}zKdhQV5_s}N?^vwU#?Q{hfX9VAI!i>aJiOg}8| zL5}(#!zmzB^-ADplr}A94VILku==a;l{SWJ0NI<#%(R%J*s}#Sum8vMc;c95mm2`J z_Q7<}6;WyV>`!TGUqGW2%tOaKN=Z5>u6}k4DLoUCcN)?(4p>@?VL^BOby$t5r21+I zZ8QXZHO_(&e_CpOmV$3X=tD!0tDO{RF;7v}>GhMhNWn7@x@ZWRvL?ZZYpg#lm%dj> za}*etsC*-9X(+2*RDYUFs_hHO5m!r#{3C9#j;oYvr$VUM5HuNDH5Bc!q5jLTnu~rP zgdR5pO@@+SL|5yIN-4MvLLVA}frhf$UG*Qt{TNnz29g)Ameyie@JRiNlTvL+1P-Ne ziSnBay$@UGpvwCBIJsbrJt5>V1WksLU_?J_4csplOoLFdAsA>VtG!Zx?s!#xNIrhG zv=+mHjT}!8Q00fvAw$q)r~@15pq7rq4P^e`LFiXQ&}1kHMkHCs{2&E`uz6{TtM;a6 zwf!7_l*#;iLeg`!v=+l^Q>^FUqKfiEXqF*pGPFJ1lAy_s8{d)nFNe@NL(pU>2}aDY zPVFi+z6PN~hG1ZgvfBBM``=dOhvctUOKUN#cA@pd(W?BB$OhL{P2Zt2YV!_Jh`XxwySckNE>fv;+Tkjbo)15(@UyXEBWzv#;)+y_xOc3U&mbgUueU_M{ zKecYhyvla>gyg6?q(yav(gAGT%`L@EuDFMAQdf$-L|nUJy2dBUJvPqQv>xBZV^yPi zwcG~p?D~Eb>^b=l9IQrmi@ZV8;@w!n_4vhb(=G8~YjL&`|7a49aN{5Bh=lPU494L| z{I&2pDzx~AaGp@2$FJ8dT8SmTPaDK+@nKl|N*wWzb_WTKf9g1hGya-b5TsZS|F;B5 zf;wv27CpWJmV@`S_^+`e#diIk0}v{)X+1C9glr?HjV5!BK}3%~gx$6!{+p&SGkz6z zACCBXYsj2C-ym}=IP2wvFaAgg?^{d>U;YV4Q}L^&Q1a;cRGJxAs5I4R7ZzUoDG6`g zOu~OVNcgA8B>YG|<@F34SrY3t4t4O46T46g)_6U#iA8syj{6+M!Ea;>3ms|qna~%v z8E&x9adQ+&MK-tS)lmBju2wOqb*-;XY#hs6RxEidpx>D#NSmos)$9G5ErC>-PWJ09 zt9t^eS%F5^`+LwRkrurVa@O_G8$nzd-gGU}E?AcSO<-lDL&R*Z>E1b7ITU zE;3ZaLi>M=7L05xVoq0XdZ8BCRK#5U*P%fpuNSeB0gt0yBX1BfO&`!7?HhTcj9LaX zK|4pbl~LP(4QTI3Y)HvM%`xB^w0mSH84VrK0PP>yRYsiy9!EEb>?WhG{teL^BI87K 
zOaBnGcx1eY-7=sOeIv4uh}rahHgxOATXgCgdjB|5fye$Z;a3xzh0U(8w(PK9QK()J4Dg9+@jx78iaiD{{7oI9&8YW|4P@ zSZMzR=ys9wMa=2KPnKzs_vl}eCM~oHX-Y53*COu|EZs$?S&^$m%%X+07&QoeBCJJH zPbSGXVtxo~F>xHnyai0AuV9k-BuKBa-kAJb)i%1dup3f4Ye;@W+Jpr}(x)=XSir>l z0F$YknM~WwWcpDinLjd_aqU7%H?t#?+#yWzW-`eyXHxJylfolRihp1-`(Gw=t|_79 zb6YZ55Y1%a5GEz5Ocv)eDJ^5N>?J05ZD(@#M?_i{2QMNryC0J~$6`wm)^br1lf}!K zls?X6$?Hs(eZ!>eFDB(7SSG_--rIr6eQqWzy-ZduWwQ1eChJ~h^2mNB>%U?0=)X)J zi(F1xHqf1*u$E7ZV)APan{@HdbYlLQ&CI{pEn>W0-uEky0gs+TZWP}@ku=jO-fcuh;Po8U!6VRLv8YZ8XCou&%zUlfX zI#kPpuOa;H-vIQMd%r}ua<7-dbd%EY-G>~e+jowW$2d&4?;PLnoJulu`_6HC4~OaY zo#V{rPQ<6%ca9%k;xOI#bX18OpLBcB@#D*|M0ZAvM);Ec3X1S1u2vVmfoX%7i_vxn ztWOm?{BOcS*B~wgZpLkOM6MVDWeA09-{OxDZGm_c>x$F$#+ck&;^HjAtG|Y>sOFUW zW4ay%es6_|IkP(*V>>szaE-2g4_Q44@MI+-lE>0fkKptu78cIs8^G26 zT4MhA7P$8R6IaLW*1m3;+68EcFEzs{ z)`Gn=3J;^2kgM*%%$q>iQ2UdK{vyY+y! zfxSuSYp1G?hKp1|a*i5n+rOdW zdI&`M3vrL4X+!Hy$A->j`>`M7(dL0*lAo{?S=Ar6Bw1u8;5+?9!ig`=G#tscT^7vI zDQpB@)6{{^AOm~Y$T5<*-%zJkZPZ-8V!eKCArOFu}B^q2hT z1dX@Ui>Sw*_WkF?c(Y7{&}@eM(Yl5 z8)zZWLTM3e0cS?T6Em4uhHhw*0k;1WGqgwrr|pF;Vv!1-(wwGd`6Nw44vQyg$X??~ znqa7FjH}f)T0qp3G_Aqus<`317?MxY^auME1&QTg$j%$cwCTEK0Pdv+XOhuz7e+z_ ziUIQ}ZXL84&!{*&8_}fJ$9GWpR$5V8a|vxFvrv1Zh@sg|qn|mFDD1FZJ05?NDI7YK zQaQ%#M))0Et==Q61atgRFuqmH@WX_7Y+I4k`^_i1_OpR}PKajV1%1SuUaDF+zLrsk zAUf>PsmOd528?6l9Y~>BcrDH$1KUp_7hu*)zw6ows2GGq3AkFFBz*!bN?3z)dBjrz zOeL_2K{{Y_%)QN`YpuYXkE_%GhaNA5+Ig$MSW8S+CzuPL1>;r44F8#Qg^~sVFFt_a z5d)@fEs#TYn5Vf8dw7}Pbx z)vDDws8nO19XQ<-H~eOX$YnP!ZE*HB6x1B=U{QwycI^(&lCU<_2? 
zTt|(8@Gckw1Zr{&5JQat3ac?dVKoN!A@6r7|88WJ90SL|_+Bx?;|b{?#z5Yix^@wW z4i%gvyg#7{k6}OsDV`{X8XME6Be%A|hWd+?wqR=EJxiH$XWyUIOEFVzN3pHuiz>kz$75PP#%# zgB%+t5&X%3&12&-xD8PItW7+u$*JYoxE@ej1^bPS?RGqUiNt+@dX$9U*jS54Iwk;_ zX`m$UH#RmV;aTm4z?T~`YHU!h7zU&oBpN9}s2V}2!gy>@SK?KH`%7p(8lpwB3hGjy zcrk)+AcnDZAz`(3AxqTOr2s9fwk``eEVeFX92Q%bY_zu8y4*jL!eZ+}xu~tn3=Utt zbs^h~txHxEo*2i~Y7(`Pv2`g0>t4kUPv`baMBLcAY(!+cAut%B@JvWa(POA3W9#xU zz^@F^T+(H1UCx4kQDLGz<<{lmKe|=~Sv?s0w&u9l!f;Nmwl34(!m8E;}Njsg@%Fk2oYw=OA|%BVK;8d-n)1uGgAq8spjBtuoemKs}^SBl}rB>|i4kISt~`w@8S4P{ddcoB*C zOp#lcZ!Tk!r(7NeywP8a+Pc_0Qq9}I_bD}+fN!;agFP0jI05LSQlZ7jt;?Ga%hdk> z(s2&RMo2it`p`yM%m_eEf<;Tqt;?X%x^{|+5D%!oFSXpdOh|&eLGV~Wla17B>+;H2 zv_k>d3sg!T$;Q^@nMaWH0bma+Ty0%`_*0IoSAgtMXgCcQsX}gDdUk}0&mnNuU&uFD z<<@0QU#ZdB9lkOy(HNvrZC$c2N|CmJ>v0#%`M;Hts7pt#|$G9(>18}bjq#R;SXxAHuhW;k$#NXZUdNAysbPjTx*rR>0E z#`_$&-B8n-KutPu#83_#g_Q$GVdcOrL3lZ?R=db5>A*b<#zw^q-%p6##9WKV^c}`Y|x+sP)Y=9JT)7Do2eBl#W`1_pnca3I}d(1mkM;Iq8#*T1>94QO<1u zbR_TugThhkRD~xCpmZPrj{!_`Zwt^9JSTpKdvx8 zNA2if$o&LM-vqo@iG*`<<)}6P8FTFyzaEYRdBj}qf zyBg`*y9Boc)P~@C$klMvI^cU1oVp*N!A5H3sCkFU)RO?E8>y9}_5dD@_>Pp%1GLyk ztsJ$#dgd$A}|ZRnQ( zUGc%vQENS27jRR|%q?(he)t*CWs49BiV4&-bP2!`v4oLaOJ4=7_N!2 zF&@Yag@)5`;mJUYm5!PN4>IZtA+XwCh`)CyA3Wx9ghUqPbtv#HWi>+}ry z|G?7?8Fth}-AP9+yatNqKwyO-#I@rqxN_8v;27TWB&1&Tmn3(`aMYgsR*W~x`w%)# zf<|}uchvg8qqESffBzvNZd2+YwJlOTwml3-?XR_1pmQM7VtpQ}C3xZ&E-Y6Y3z4Qa zM~(I}>ZFZ)MRnTDJ8Sv9P1;f$CvEefZZWP_QPc#=QCkJhqlz0I$B=Z?UIcraf~2E1 z?*VA^IchBcv!g~1Cd{X}mA-3)leVxyK1Xf(061y{YSK|7hH}&>tQ<88D@Uy}!rgGS z8c0@2M{OV&BNQ{-Lx^lJIlX zX5m4S7lFKOpd|0-s2zL{j@l96Ckz?os8O!)$w)OwDN=$^HG)uuv7<&^h8;ET7ojIm z`7QpzEn=Lgy_tZXPYlCRBVpyJktNDeyA3U?9JPrY7LM994hu(Z7+PC7YKa^cjvD2n z9JQSszS>bE+YCo-7kc!5T&=RGjSNTaGq5TZJ3OD;?@7cBN9`ga*TR?Lbm0#o7Cr}3 zQnUl&hNIRBVx0`p1*FSx)cS(&QJ9~j=KV|8mOxfd1w2!UgmZG`s105Uyad=v12-%& z9JR_-xPJnB^vw`@SqVz`Cd>VQ<4BRY$<#jp`iWq!DCMZ7V43IC z_1riK#wAiKNA2`@?8``b8$cZi=G4kj8-iJQ0l|X--Qr6v9ksq#-8pqSpiEzC>8L%6 zyZxMcF`&DBsimWKaFMQ2ZRk$`de#R^M=iP(dnJM^039%3<*2=P4WQ#-pCyQ_7L80T 
zYdC7Rt&v&ShTtX;E?Ii|))*cJI2bA&wVMF7H(=?g-G;Ae>3zZV5MtzDIBNIeNOdF` zm&*0qT=Vg#ThUux;7 zSuf&S*i_6cK>5DZ(otLT1s+5qxE#>^zSPoD3wat&7QxQ}+G3_V3R7Oqn6(kxwnHrUwaKll1F9==c z7}!+`^38DSL~ZwQ$Xo{A0Mo1}I4u>j;L=fRhoewSD+t6HLR>q(f-6TYYBD5;L281( zB;P?$4+FHrV+fSgQV5|XBxrPZe@AWHH4u6j0vkz)J2Z8W+7_uE#qh>@T$hOF9GgO< z#p@sOay2y(%T?@cNTZ%zYj6_vDW6?C20$%W^tBVUTs7pdSgx9JSS(kUIV_%C)9)a8 zv0UAS!(O#q-NWIlm#ZRJVk}qLx8O^FxLS?)8KxP_)e^AoQ|$00!a{FD+*q!jM&vC+ zz>QG&I7ms+FHuXza&;KsaYOVdbs1y1ItTur3iDg8GH=thEs)hihQam6#TJHhanfG70?bNwOX!vf39os1Rn+Tm62L4SDW#SZ)bvk2lSUOwOp&?kV78L(Wgk~BP9bPn7<2{Cdoma9QH zlld39>7gTV^o8pxnMT&9aCmCuyw$xazN?*p1O9qzWkIUuivkfwv*?{T$ z?xt+ya&;11&Sf0}Pm_o*7qwg!V1;IDcLCo|GCZKv4WN|=P!(k8*MPoPDvWc5v}H2& zWk8mZMryfSjll@#)YlV47xz*A>KL9~TiF@!6QP>w1<2(~EtjjQ@w)aa!Q%j>7^&5A zH7W?xb|KgcRZ1So#&Y#5I*Ps;*gA!){Vzu4K}GlE?29zBa1H}aL!-I zH(2FzH5PY7d92!Q#rZ!j(HO*1%hg{yWu`X+?%^lGIUCE>9sQ8$a0uLP2+@3EELZQ& zLAkPk+>&x@NcNj9fT(L~7T&|MuhN8m|IBf`V z?f43=ma7GWq3&-;1z~pfl{);cb2Q%6vun?ZF>ARALfuHv=!pKy)$*H=uLlB2hBoRT zV!5Jqhxb&p5NJI%;b^qSkbEpx2O!d7$y2D7;E7$aAl)z;8=?P;wJU+QscPSQ?K2wB zL5`v5-pu1YuHm{0mnhXmhMTF(LdML*HKi1Vo0K6+Dp6?;Nkjt`iHKB4ky6s2qWqud zU2E@sj^X>h`u*?kxA$H9eV+BM>D_DEYpsLkgiVNZ?vq}{HW{&U<`wP}xBdY12Qt_> z4~j8&=M{D#sC|q}5oVz?uW$^KUmaWWf>nQvGIXq5}o@C|a;dPuh=A=AAfywTIy+6Wd1=%pd`Y7h!px~~VI6Go`^;DGHw8~gOly4G@ z>WzGr?nH5;F&z#a?mj?$s)X!?tT|%=W?{w@I}JZ8$nmEt>%s2EIHuSLg-`mn30WWG z%_>y^(_Hz_fz4$t94_2+Lh)B(yS#^Pf}A|Z2EcC)PFHzB-Vmh0UwVvJlN=52$C z)j%Sx;)l!9!*?68#@+G2guB-3WkakCZLd_-E0FX(`R+j5!QFS0=*D{m~1{YY@+)pm}j+dtnvN z*APl>p7*)3f*lf3@~!KOc2icl=7n%C4m5fjUW4nWK99p;8s_3oYq)*@)=w5`bSv(B zyrIl%U@c1>(`C8lsk#R`$ef9yTc$&qGbZPB3xDBu+deRY#{)HfDcf zCq$Zk#eHy(k%Mf}JV@ zrj$xiiCU9Q(=jCxn|&xS`)oN2>Vrv9ya1J!?5m1Lorn6npM^=b!ngX7J#I`=O7SKXw+BlLV_n1nnbq~}WvBz1c zb6w3EIXw!<1|mH4){Cyyy2)ugKR_is2RBh$=^ycYhULPeaTEMx_bZ4bbnp=hb#Xko z1sJ0Yxm^hVqD67Lal|02e)(xj;q-Uef?nO-jELp`sWIHs>Gu1_^oIK$y2Jj;DRAFQ zcR~MPcq%`G?x_Eohm}=+Cf%3#FTt#7`TOWD=|4UUocrm%%wM22;JmwAZ+mT@{sT>~hIxp7Se7vs!Qd4bH{Pi9Hm$`6{oN)%rL_{jn#hnWq7J 
z*_uP@cyui9*dYi515b5%YXzBQCjjH{1t!XB2s4v(5!7v`W3_gQuY4Kf*l9S;Ks? zO8T$@Eq07|KJ$dg;M4jnrV5vrP0o^bI2oT6M?9RSqB{4TC*`?eK$gLYznvDyjzE0;rWLbU2~GA?p^U-Vk&QJn@y zbvH4xnibg#bh7MuEDmV}A`*&dkgjL54V93$({BF~p=1JYmc-FrEmrncl!rL`J{*Do zDr7teR;9Z!f~nAe$F^6h07I>Cr8s{@?Np)h5u~6}S%o27}JckIg-+HQw298iroJSp^%T~;1AYfxy1U5pR${)O?d(^f|j zi63yKPC{#AT`l6>wmJ(@{vJjgyNg(QhYbklko{o;tSJGk$^oxJ&K!ZiZU^`q`?_nD zwUuyFKrK9YB;KS>VL5aTv%EBZXbms1xD z1$}2OGtrGAr!U1N(bdHC9?X#ZzH$K*=Q_X}ljA8%Z-QQ4p~Aj-olKlN0`GN>o&nTT z)R))LM0`B(yU)=RJ%NbFd^wLOi>09+1^%SN(E~IZK)v(_F@*&?u`;+@-FNJ7a4Te3 z==d8`t1-bfHE~|t)YuGIzL)sHsX|6mPlKF!t<j7uO&*@9Tos3mWe9G`!};L7W%Y z@YOiRVJ6x*Xn4odu+GyU&WmecdxsMdwhW%ke|z=82#58-2w&?H5x&tUA{^DnBz((* z5lZy#Kgzm%_m3&cqDSr|jXj09$PN36W=XyJB=Y70^{ThMG?L|QFO4+uwwFel>g}bb zUL6GW!*QiP`2`~D)T>h=x!;k-o)O5@tBWCi+JTbl)h9p2)C>@{5w{OWc@Z9C&EazD z)oamZ%_QY8Za=z|HbT+$>Q=N8q9ueBmcS)3i=~2+RIje84>i?*U+Hq_O^8mt`WD7K z^r+f_(}_IB$f;M~#B8e?1nGoi4YFO>)T^ie1ZNKLMM)f8ugdP(C=19}u`^1g;#eLj zA#*ep-`!RPPA-wJ_})G`Sszy1`VF1@oLBMvlXS9WtoXrOaF%59D}M9{))aByYe8s= zZD#&`g(s^x88301hoei!#U6#h6$=*+yLP*ozR$PU4E+wX13d?2;EUt1SD4 zGs5do54?qTt<2T0A`2r8I`q4+!F~o*4(*6!adyb*2zYxzBTe;QP-C!VF$q{6S89Z_ z1CGJgfuyk`jTINj7;I~ZJ3CO4!EQ$-_65?Sh5?>H%BAoa`wb$;VA~+ij*;>p;747G z4VP6{8*KJ%M$2jl-y?-Fi+v1VNe27JJgC_Z{Ck(fz=h}-?4y{iphx9LZso@%F>(yH zbqGdQ7SftTBoxsgn?#JkzJVr4H3i->iDNnfoNbJ9U#_n->$QqwxslKq?8nvN)CT(n zo!VfJ(WwpgH#)V!z5}N**gIw`igB-6{hAY%@BDN3FHqku_($B6kAv*nDSb)ZQ>jUekD0f zwhl1e^lQrQG!btPyxTc?Ouyy?z7w)E)EMBo4#%5#mVOPF7FuoZxl;FixiQMI5ms7W z-}l)>8`+d-Bde2aF{5_4>1^;~7G0jw?uUV)rH1vWPF zKqjVO2{!`N%!7@MjJ_7Cn8;fI^&%_*J2tYTIo3JR_jo{eJ8-g%tU^65Djo&?q{DG- zr0*+6#hZY(I&iX$6#mtu;Va-jki#^@f{2(EeFot>HnQP}39BG-x&$tXt|mG*(mKxs z7zchOIi51dMjplxN8;Qbc(-%(7#nGT*GJMm2Kdx-^cWl2Sq5qlRkaBCQitPx)9`Gh z%r#em7HM$JkP9|<-p;v5mbY^*(!|?27ip?@&Y9+}&|r+TaiuQ!9!YVUyK<1!bfmFI z1v1TDBZ#kaprqz*=^32Vh`6gBfCrNDBs|8xhRA8|W=_Hk04a9@p5;=05{ho_nt&o& zo&x+DDU4ZcBPdDD-R%*mc^~*^E{EQP=rngn0MVoR8IC{5V~m{UZp3LziM;L@Rp z23cxvn!BTap}DIC{Hi35Ztl2o%Zl=4^=-l!2F)F}W0~ge0d%)abGL*}-Q2CDQ#W_7 
z(W#reyWup=T_?06+|u_9uGng(d@5W{b9d?m{Qi@RkY`}s-2JvrSzjl?PIFfSoiQr< zo9K(o1k>DIvL0Iv@LU2h*MMp6w!fsTS`uRDw~{Cm90W(ui#33=Lf{Wt;*hkXon+?eL&2q%4%PQ zmB@h9G2Bj&vqxZ4m0j^8UTnho&>M>2GO(%24whEd=isZ#fNBz!fSsz$4_J2HT??qK z11DEy_suXW1^~a^;kZ>IpAx^VH#pzF(J{>h`m#leRhcn zYY#|=TxCLZsjWkZQk(@*uxjxB8 zb|Rd;4C`^`k82~xAo|r2$DStU*a-Wy%K@pv=%{j{IrU|Mjg73D0iz>a15j-bHa61l zNoB2NBC`RtB`g6uHnMve4nw8y0f0t1aI%do!108lVixcR9gb@w{qx&e#dCnxIB>F! ztomE&G<*#Fb8?u5*ajvf$|F=A8(IG|zMe6+-*Ey(%u*Nuygbn8!3QQjVui{1Na<=qiuu+;Mqu-hwX!oyb1DQE%2=_hk*;xY3{zmygxmvL*N`Gk1=wZyKP@X!x=~mA}@6)qJjBindWZ! zAK;__ua?Bo&7Jg7qkLI?&1NB#ietH{#WZ(UWx=UOcP;4D&0Tvsb#vF9PTkza;WW+N z72}k}jZq76#jaw?hr#7EcaxUG-duz{1MB9l&@@mAAY!fo)7(YBQ&tLv5((ZcpX@WMc&rIishhivh`FpTki76V4rRR27>nHM&4n3F zdK_63Y4FXM3mPn!95~Yv-omy>6K`Q#q^VxmX8LE(BJh`RrJiM!9E05i$u>tC3-Ej7})lgAi22D|$pjPY4W-$>TL z{BaC+?;-f!0epWFM;DT;+lz9Mp>M`D;8h&UW$ngb=iUgXHrPk$)CT(uo!Vem)2R)1 zI-JH}PrZc&#Rw}Knq&b?tQ=g9!EUUFtxqmOo`JQ&u6jdR(~@AvU_|6X_Qa>S9j>l2^x~5Rj1`ZagxDBtsW!%7B`*?IW$x{V`Lp;vSk1Qi zMPyB-w6d52wwpG?$k44FzTh4|hA+4| zt?&`?nA!#!FHey}3*E5>D|}RV+=qC-Rq)0Q_-6-hf!{XW>nW>p27f!YedYS&{fJji zn7@;n6XuV~29?c2S|aigF#vx;)i_(H-@xd~jbwcS(QaEO4%+JH_9Nep}F8*?)w7ucJHeZ$rP=(_P=6rXat8?ks=9 znviGH-NJv_3AkJGd&7u-M3h+8%OXko{{EnzDlX+Rl_&!=niD!2uLGg{yMCke~7ZGR^|sk`|KKY zH>UDahMheQ&O7NWNN6Sa@{AsJ`_)n?jfB@h-XSN^d&~*9>P}eS zCj=?^3->6EG!o{IuSUWXFwl;K`4uS==3)Ig=1zHz$P4n@5P!wABPcxXKN4n=q7+BM zT=ZqQG?k25hVP+E$=P!tHU7jzi>HKaU}Q^;L{5PZHNFe|XTi)%yM}__Y4{ z6+n2nxJGyqxz&0$k$rS8CBl!^R_hn3XeGV!W0Dp|`Qa$aT*?=%5vz_D{~KWbqIY7q zMhX6Ux#i!|0`8Ktp(1SAyYQ0ND76usPLbE~p=g0`EZ+E%MIWt_3+g@nF+24botFQS zGk}-!chbZDVo+C97Li1ewBO-7!t-MEPk8h9&ZT51{Rx=-uy+?nTFkQdAkboi8L`gS zN8xPX8v``P8@$xOy>&YFBosT&OyUXY8p@u7$-81#$)dKWJ?TM=!izN$DbG*W+}34g zLwb@5cDcgr*D66+Ox8Z|aCM{h$Q6*56$OEbMjE3Fkzus62UiARuzYCdDeQXH3soBh z#n6!`7FCQtk7w5_+N6xK1L=U?wz(pMN4CfK3+sBZ8%4(d=#?PF#6J&o_xv0DsZ`*{ zCE7gcU8FIVv98>*!@Gf(lLWlu;&dSaAK+h9QZaoabHt{=bA$Ez;~EOnt7T{_;s+(BQ4k_*QNeYvrp&i8jX>7;6@Lk=b&MC;XX3_A-Ut9igUo 
zzIOwHIAsr`SW3S0UJQ(jWvhD}i4G|{{tLu6D(ZMH3DT8_EOxmmjCZ-%ChBQj1r>d< z9gO8R(j~gtzg_}-y(H*cO1;v{7p~d!HdOG1@wwAqup>tGy$D)ehV>a_)$C2T-?DjD zSIs{3nlD=Owivpeja>c!W~t#1q#N&52{397Dg_SsqgKt=pHsE|(QL>Je+=E=KMzLD zvDbkgzWp0Gzp&#X(PbH%*|7k2-)A0!0gTS9g@q!c zG8dzxIx@4^XIPn%dFokZ4bLp~Ci=>m*Br!5MdqJ-lr=Q7!KIcpB=fD;k)q7O+tA(1 zT!eLW12ccE0Ta)h_AUtk-_VZX9o&CM)?rumiJ%uDX}iSj57g}o;1hWW2cCtyfdXUj@VPvs1SW38!-~FVlVBGJXG&GY)F4A?JIRRq8SgwQ{d;Yx(^Ta19d(E?Q6yQCo9kjUmeoE zQ4a%Y5%{=0Xh%fBje$=vNuG90e$>2U;K|*1_)d`Sf#b-EwC~ko_~{*(g87NGf z@Bv<$v>(LJbgSH3g^{eZ-y{xQdoe!0iv}yel`m<3$b&!dF$^^APk9LI1f7{z<5+Mq_`f_j6kzyNcY|5p(Cs_ZMr=8 z1Ap}fGD9K|4vg3a|1$-N2Dp_j?Lm7d;#0C06Ohu2(I}5ZvRa6Q+2}{YvIT?*PJ7yZ z2i)FqbVX8BTHJT%U6T4tUlBNT>Kpi&C6dxBeQrvx@-d~%iH1JO3FJ;AA9Kg*6HaUD zs|)`Do=A{(wKO+YK$6f*9{iS0UAlq(z^k8DTSGc^bw#O3T|Gfe>KglYAdr$>d-oq? zrClQtPO%Cyx^r_XY5-W303_I@GRXt+cf!Ov(7y zG&PGM7ZWG=)cc4K?8|Hr&5Be<`Au+Oaaz;AQBQI^Ve;WOCMmrx)o9aL4`kAH!)PC;LL0Vz* z(x&^@(9fr3=;uX$@#zTXhdcl;n{Xb;foA=HWRaKls{c6sJf08MmG-XxD){L-ss~<~ zE#k)8NlVOz*m*liO?%&;3Q_hTRIoKeV%dZ79ODhfI|Bc1Z9Opy&E{-+=a0&mj>SV1 zJ|K;tCtOG2Jhry{!5PiWE}YM{YW{4qlKX;EE``)L>21kvEZ!KaFPAyhEbe2Y&!4E# z{Q(AHWfzDef_0?7!=J4&s_Wvts#(IvR=+Bx3|Veg_R@y{brYj$?s(A4sk2d=S~YKz zCFXqy$;x4Nmkd7kcHlk5>$D5toy)!k{4blojdO0-QPR7!*In+adHN@BShP2;k@IJ*vWw~TKva+fRA z5|*5`#xGh~H6LL&iHp{BP_4g4=b+{e3b};syy!Pv40tyu0=Ts8JotHL%QJjwx#Z1* zkzv%kic=Ah8A(Vm?3><1VH8MTIpHgSM(GSjeY5|joSB>xZa_Jm!4zMx81@DA8fC?= z;YyC8@m5pN?iY=><^tt@T``7N@%5_T@=|oXsrt1-yQ$(ERlkK>=-8y%Hr|AnKmLV% z*L~f_(rU!7Ezth~At_$-$raY}E63ON3g{pdRE21f0pmIHr|99aU9{tN2c_({7A zIyS79)w_T`^x)mQ5cQw!YBh1<)%}3K0rb5K*X})JgcbkIZitR4vq%My<566q+#G{j z6C>65?{?`I^ri(>1$dovI4_+=Fn`+q9_UO{PfOq(&*6NANrCtoI|v)&+=IFu_;{Cd zh8ZH0$Wqo(E)=8p@fJR95AE0mF;B3Fj9(FndOo!kzCNO2 zdp`?K#2G$y5Tc(PaiT>Cp}tSq2%r!y@fXvjCs?LWm4&>Di-kSK4ScErinb4Snk|kSq(b8V7uu%aL%?|3pVW-q_djJ5w-=fiEMcJk_UPie@j~ z+{X|7Y$4SffHrw>8-{R=uXHzMu`yKp0UdJTgj}XgwEYS;5{`GaKhMMF^9p$Puo&tl z{DxoXcE5_@lg4GZQmcIdxfYM`s~Qm1cf|U|(%2Zkx(@PANmwFbykFf0aN_xdMAjs~ 
znhnN^i?9-@cly=qpzZrl)I|Ptzxoc`2*ys5D8VjzGfAH5S1Ist-T&q(F}OK?)fv8~ z{{x?i-skz%e0crfpL$Q!vBCGsv?xsaut34xoZ?p`BhWM zTe_Ika07b`9B75s`txBLSL&aBz;)be4;l$*qyG&Ieg&EgXePbaf~m!H1-Tf@E#jl? zj6SCNdII=LhhwEnwUcYF8Dvy!2DIIQjfw^6g2kuU||e{eYZ*pv7)`{{nL zb5uDhgqNTwE*%T$7|G9i#qYA0^v6^?In{vIBZn`A?l`26!Y8}<-S#6-7(HAk)%hGf z!kKRGe#+<>4m>wmkA91u@vB*oFLSZW8Iat%A_B%JU+}9}z}R{LR$sE*_k2*V!Lp6S zwxWkX&e+)jHR=m!@#9l`tY^dDpi>l|>^lpv?HtU)MC0vzN9O9rceoxDNaL&Bex<8n z{S;QZ8rDw*Rs8TNWrYTWRGSAdA&o0_G^-9R9vD*nAe!Qcz3!*yGSopK^#Ev#UFx$m zHmzoGNUef=t&2_Yf*2A~??e8Xi*eyn>{_XnC2F^a)b{|-BokzLlQhU7{NVQhWle`l zRUDa+f=kj6lNi(<0ka(&T5}aX2QLJrKHx^A+|M}F?m5f~jSKa7&!9H}x!FbKkf6}Q zP}ys+ZErq2j|6mw2eZ}>Eee%Ghl`e{<^o#a!GAo04V#aJhMiCr2Y>1%Kx;gB?Q!^i zA~X?SE~J6|7|`bq9Fqi!Nr+Lmov@0v{!ZWa2hprfy&YNmg;Ub}RZ#PUW|hSFZl6j8 zu@bJ-s*;x?-s4mCA!^}>6H8GDpZZiMfW2KpoSr2g;(L8p<+F7{Cjg&H&Xof5;Jx@h z-_XUTtFQ>rQWqY~N+tf8?@KgQv{E%>J4R>m{k|pshB*|L^}whgKv_TzrtN@}V<)2`-%r&vHDe z3x0^LDnK50v4u1kY2xCy+bzGu7Z;e3x&gRWDNS5_ggx*R1N8eSXH2}On!vtAn4^M|fPVL2 zGidk=V+R>D6hYRNz@>eAEk~3sSCnmE+lDtXm%p&N4M`8wbsp=1I^78a^&arim-=ME zt2~{yNl^klkhN5 z&$kHgKQ4)H290ko^v- z-&#$1GgPA);tD^%06t5Kgdw(Dz3xlUvLSSE58e{wbP2?+3@#JKC{!--jiLR|>poUp zAXyF?8_j6cn0z@XZ-s7Mt~)R7lVK~$>=~9_29y5rcSEZ85}o>wLKEn_YtNClTJasB zsdxwvCo8@sv=AyDqnZau(bBD<7a&>ZNcFU;PT%{Xa3TDr0l2#We@V(RJS1kp2ccp} z<_btp0k#nb(QPP5fEE8Rw6Z!z0?2~WfKy1J7OT3v3)@1|m*E>E1oeSj?JAH7&mltD z8Cr0urq~@_{8g&3qJ0o*yTJvJQBQ_+mC=qLP2$Vt2?Gp%Xo|0pCk!z7QOP_NRPFdx zDE>@{pK+q99lsvMpBLLHsoFh_kz)KM!BSMa*MG;;%korNwOfKwU;Gt$imP@5SzD-9j-PhoyF)CSEQF5FQ}I0^#rEsq4XQ~17q|pwN`kKUybvD`s+FK^aH+1YSOV+GpxOb(H!e#Q z(x4NAeJZFhD(+Gx;lB!1XUx*Z*j46V(+f~Fq!sjPz?rm zy36;Ha4z+^pqdZbYL}Xv4*hLlmzO&)1l1OBzi|1CW^A8xGZA<(sD1?FY%=SBr{<-g ziY|rK}r`C4@eQ_W0=2xV;}#&qMyYgQYTC%zW~P!4>1N6cJ>#3;34~N77(8+k%Y> zVzQH*-+}ucchXsmVbmTrWHc(F;8YB&*tDz5fL9`io7=Sk)$TO}gRq^!w+7-f897si zqBY|3RdDrBCWwZBu6Ddzb#9pj_?zHs_=2N4Lfp%NVs+5I8~kW60|R+A1{mv79WwFQ z^+H;<9!ib{@7aOOeFTKpl1Y+aPPWH*v+7ix3;OrLMr%O-0Mz|1oyBbOSt{lqgU7a+ 
z!v7T{>j|gurT$3&wKB5ir{Kf`#)wJ*ui|nTHyy$BRii-sC0Mwz$%LjLwI$KD5)-Nr ze+_2TGsFQPO*)?#ld)E9RJOkdJE1MV850lcA@De=I~@T-6} zIoPEHLxg1G=WSO2mJR3~A!4$CXNp#(C=>$kA=#G~Pt-v+`dQz2u zrzdkpe1&<3xIfh7hzYO*aEXg|vot9X+WdtfjV8&h_zWHl-7?uEe37fft&gn{r(Z!x zq0s$D3~8fF^6X!e!lAU2hIH_pkVHwoP{FDO52C}QW9o%8u+P{&Ry==bFanf}uI7@w zkTj`4=#M)M=?0R#G}YOQX|Q-C6uuJ~*B#hMT)IAWmFP0l%xLKAqIe_j1#5xJJXc+j z(9`%LrdENl*(HfOFZOkwgQ>+Y34QYpn0vwc(qUS4W`+Tk2yM9(&@T}G=|Md0AznJP zsyw0_LHDgVF5TGId217(%R&Qo0jdgdT?f(?LY=RV0>3=8k|uIJupAec9+8~y9ZwB? zkO%xWU?VStmkV{A%Jc(Ukc8`oxK4deWmgE5e+^1jfw0yky*i#%0DrP7UL_RAbhz3M zY@druV_oO%7Qm~8*8c+kKLY#H#dRcSbp>8CwCD-=k35Z?8o2bkP>oecE!?d1>#-^^ z9uIY1XR46;Ahjity&})xJ(rm{s2v*qo_Ws(fPROon-g>~-VCu$=qU`dq~@Lr(h`>_ z?X#YRs2f^@X=>rD2ENJV==sA}@l`!uKQ#SA)SA z2bW-zm>pWx2Ax;5*=jVP2_8JpLeH~RXk!p_^@QgETI9i8L>0d=H0@`7E|hu&(CZ$| z)pqf=q1P7~co(339()%JBi=srRc{0T4CoIJF8w~>n?gsh#7)93x)MVlTwZ-{$r>Ny7d&m=v% z64J{0gsKlP%8EUM)jqhq3`ivQ3mw>PG+qT#8xq}^d5JOM_Yb|c2%Di8$J@Z1c)r?1 z-2+3pcu6I;4}+vKZg(2X@HOVGVf> z-+%1q)pQsWzu;k@oA9y7ODsm9cjK*$Px6%nA^h+oDBD~gXMaZifF2zN?}73Fj^i42 z6oOlR1Y5<4#oq|1HjsC~mC7NOHG0Yks9GOT^r8mgHpanp=XDG>QpMj4v`42?Vcod! 
z9t3B-!_#@{*7C6t2ro9yy~+4_tkmHf1HG;hEk4n*$JM}Tgc!3`ZZOi5fWl9(UVYE^ zH{5N@BfnDO&-yY}@k_yKNm1Rm5wjP3s@!wv3E@f|!RfZn0^ zd#F`UJ-_L@PBwGXMORZ)=(%hLP~XxU`=16)z)i-uC%nYBCvA z2~Z6emU$qFYLu06^k=khMfwgMX~l2r08+FKZW*!J3QqPS#-*WS++Epk_}S0xKV+z7 zANJ4vf|)F{0ruCj_%5va_pt0Eeo?9V_jKGtRsUX&`y+Kn_DY!XQ9px^D|N?H_rcQj zV|}H@54Y@Z{Vd)rrQ!_N-r!eXBNE@?N?pJ}wRodnoq?$8^F|zFgtV>6sL#7GBwe)9 zQ@&cXZuYAy;q@BVyR^0G&ATJSxBdIkiI--o8}L5lJj#$|gk|samqv{&-76lde2)X0 znt&rLdzZh{>!zQ!6!^2roOF&+>`(mdA2)j50lwoL4rk5m-TuAE3kmBO@IPISH#U~0 z)!ySjgyvC*#n3>O#U*i$>E@Rci1uFp!OkXK8Ni#m97((MneBc4cHf#w#xB5bJ%=*_ zwUR%pp$TaM(5VhX_wwz}{WHHbF=5o)wKmGRLa79SaDpUQ(L)f?v0S>msZ~g`X(Z2DG97iJHjYt<+?2pZKr* zL{EE^dKDhN{NFq!`utR>6Yy2`q6$>@&BdEx>`*3kW!-%#AG*^7Bpww=p9zO3R~82 zSgcwETqk&fjI_U2ukA3<1R2YIqng}=Jv-zMmT~J5bry|*2=pw@Q8gZQw+N0=TPp?> zy&pbCwEhe)*thEFRD(xebt-wO6-GpeA5%weH+Whyt|thNtEHH_q6gIicspFF?GZ+- zGXv6JW=^QVFBr~1;G>f{dd}^P`gDfXIBFDwi~dTfPwr9;Is$I>`K^9uY|)=315Pd=H<9_S|kqX zL))XWDQMp|Kv{2*Qy+K>m(!hz){&KIn222zbN3S29v}^Li8^ECgHBO<*%pjmXF%CB z;19YS8P)4hg;>mf5;jqj#OFX-Pa3OV|KHJM&llk4kc+c!^{%4!W$#XP6H1j zEuJ#-9+a|G3COFv7-KDE8k3LIx^^v%^$%dQy#Px-Qft;_w(14OmR)#p11h=m>XpnpZ^bgXjx-hD^_hVQyGdbfVn#_#hz476Oeb=@<7PB|o*t22W;S|Feh#<@o2dsMuV*;@lE)nb3&RzFX9`ZU#SRw)IUYY~!d_EzOg>^3j zz?gOsRw5PaUgm-J%73CJ^0Dsa9dM8RSAL==ta~{P59Ki?^^z=vi{89-FLmLo*FW%? 
z7%g!sx|cGLS9USJb33;AYzmt709HBuD>@uC#Lr86O+O1aza zjapGEsj9&1k|U`};7qsgywp@Ft$=q**5j^wxefA(E_P0pqFGq?G8>F1F2M5Ey|B4) z*1h;~(l`ns%CTOQ%aEe7nDcieH;j&DK}=sd9m|E|Sq(THOCny!GVlg|hzD2dODtly zV|f`wl^n6I-rbI6eaKrRVTlBEEIR=ld_EzOg^uM!Fy>!`l}JU$as_Bx{u4EkkB;R& zaR2_V{6tUaSVq?4yD%=v3BmsJ&14ojme;}8=zridF<^8or^D;(|I~Y;4sRC>xim)Eh}jv)5A!N`=D)3Gd+fjR|m zmiiUA__dsld1U{UO!-n349|z&}dnI33F+ zd8pp`vVRNw7ndW|m)o&yXyQ_6BZj`XbXdBIlh2{H-MJuE-7-gN0nczarenEwDWb+e zS^;S9fu>`5CpyUj4+1pCfpva39m^qQm31jq%mMzG%i&^4P1LPR#Qp$Zoup{ig7mga ze48bpzY-L+UpQ^nDt-a{2bbfOf+iNTOX6pvWZ`4zEv)&$b--c@=PWgMg{ zNtEKyStPyG684fu^ztIr9(cDTW#&CV$8sp-(_D8fGcN-QWDuv^`&^C1o{Pe<=TIdUKv~;H$4oR*f)w@D8v{&5^@!}*X5kH`w1GedW zJdt!n?F8ZYMM#OvAJku9UAEc8PoN9kOvHax)j_@SztR(f_*-=c|DONNkM<}w5DF=^ z0KVS)2R;+MN0r(OuLUuKEqQ;TWL;FIjWu+sQf1&j=U?eaj7(*vZiSNN|6Y+7RTH<` zO09vq$UFZgeV(?9x^#mQYOGWWf~e*O!|x`#3mj^umOq4FY=H$Sp2VROEDcI1TNMUR zAZbuSZPln>4ObeJP)AkhQzMWDB{WQp4H_?VsjV1am}nNXOM?=+L)n{d#PaXczG$^rmf%Eb>(x8M^ zs+ITf!(oFG%IE8L zT3LNrlPm%7ga@)g3EkkE6T-lP@OnU795|st2|e!PoWqjs;2Z*e(&cc2Swe#n+F&2( zU?N-KJwUh;5-sgS=soq}-)4VT8t}$0=bQ#5^pQG*-LA4(tRqPMNMzL~0h;Ksu?T&t ziWO9P=hKIH-&?4G)o1j||7R~?A zf-FHePcW4|>+R!hNu|9|tNJ-RK(e)!7&{)N*j)C=8=WuXPR6kc2X zQ}2mUKnv0h{vY~RIuaG31z8Ry$Ns${FQr}!avJK|prazT$wTpa8GNP)f)eizV;(?|G zsgIAH(n49=F!g~;r$yo*LlU$g!9HgDae3f%TuwFW`TLJLv_j7Aq=CAT20!RUPfR&on65{zjsOWK(Wwjjz3 zNo*@)cN?@IZ1daRgds^mj6n`!Nba;C8MMI*k3kM1oFfeDT5_B*$SFu{*s4ginL8~= zQHZZdf)eoqDjnFh=i`Z_BdP-kw_k*m$oxS~2J6xP#7w0Bs-6dR*MFra`uSTO2EP!# zoq35B{GvBw8W@9=g|F;?;8TaI8z8hGx4`S{f9gF^2U?KF;D7JG(vheLEyyt_ss8c* zo(umidoTuR2z3wqf2h1rSY8XV3_+}NgGp*Z>V1h5{$R^$D{!4)rv>@0B7QrLkkf*6 zO*I0i1sRcMJUcDO=-Z5-Bs9!uXh@uGfezAXK_2X3@LQ5`rv-WX zfN4QS0iTR3wH?BUbz(sBwG?BJe*MhX(gnbuNai>#$g5RN>#!d97MGLMf;7h3A4&cp z;K!3WP76|Ns@ug>d{$b%-(?v9^5qZWF@sH!UFVn1m$~aQsbw zfVt-r5?PaQ{s$OOUxbxN#ql?5LF4ZiOC$xm(9Ogi$KM)N(WBNZ=IaJ)|NT z27E%Y9-Wyu{^owjA9t~HDiY1Y@i#AnvE>3RsYqnKgv>&i<8ORcl+}0!7vM$@!dv*$ zAF#IPF5fA*+y0IX1qI{ze9vNDhW*{VVHNKR&!P3$|G5u?AY7>j 
zkE6)7_|2fY5u!eh_#CS;tPMdm3XI7v>t|}XS`=>#syxUaaWQ>DonUVT)pL-qaj~=? z-HFQy zeopvHI=T5iCp<$i9w~<-^TCc7HqStdgl7v{ct7YoDIq6ZicX%2kRuq5rv@Vu=B|~T za6N)NcP0|%VLFj8m&oUYdGth1xT4%CiHIZ;<{slnn7iI{!u*x*967c(MB3`j{N%=PY(FjpMsgt;(3C(MQHI4&1nb0m5> z;j2XQJ(Y2HOeB1YP98Ul-vIa=PVRci31@?rV^nc5cSKa>gtK0x{3f|`oor6H3*3=# zLwa2@nNF@WipZIeIbp81i-cFNCQ;wt)$Vnab8SZ?Oi3inwF^1nJE?}tIq>@hpybFs z66QMDobX#Bp*v!7_Fb(;*~Ja9JcwGZLTd5s6P5Qg$VB!t6jt z!W^RJnEQ9)W^W@BX0;s&QxXYt!A4GaDpW*yx>Jq_kT;URZ-joc6oW1d{s3}L_(@Dn zM9h5^l9Lnek1laenC?h;oVWwkDADfxa7V&}A&-QYRj2$#k?0KBDcSNj?h(nH_P*3j zFt;|tv8~5<#j)?OAL#K)_!#A@7&?SbOy#$+b+`(R%~H8vUJFh&K6F>P_a6gNgVwP@ zrO=D7mg?~(CP#3kQjevj8EP~nT<0y)7`^Ko{aVd#N5k!Fxa9!^@F+;HT#Tr-WvRD8 z_{b%lU_f<$Uyq12TRPNiU^oi+2U30$log}zxxTr;qNc{Ip`#tp7~z5{CWk> zq&K=MYYbugd)$cV8&@vrm`KDVIwf~tVZCp1u?2c>*h1Hv_~p$iIplSubP9JHQp0*u zJXLhzXWVh3sc-5;^i}wL;(W@vJ|kzsYxKjCh zSC*D$sIidT=}2SpGfo<6pyoon%!T-qWV*YR(vAuns+WPi>EJrzV($K|s%y>m!##jM zCxySOWi|7|i2Tj=z~0nPkQVsLAhAN9Lpit0=n~Qv(l8C|8yXElDPIm|9kLmB-JRjv zVeDIoZ67-bwJkvEnN0NFYOOXEZM(li_7t#3fjQY_(;BR1r!Z{w_e#w{s1E{qA_;4G2|F%aksV-fS&ww2C&|b{EL2q|9=pR7Nx@e0B zu@b~TEqFV|NGNrkTxJfcW&T>!6!pT}EZ{)3ElB6UI z_-l>_#9qc&frd1g-&G3ffiyJ;?BP^j@Ho6{XmsuY%8pqcs} zDLsiRb%kXA^OUww_BUv~afwvFP0hY72k$vKHz7Mx0X27EUEw_s?_N!;-wJKw;;+Xl zns_Jp8FK-DtY8woJb-FEFzCRC2x=zi54m0>%j^E$1n|&BsQdV=Rsnv6lvkOAW=-w` zI=T=HRBeHDuj|>ZAFXC@FF;zRY^VUvao~Ts9Cj)70Yc60#K-u+{j+L;Qv@Y>87|2V z^PbOYk6`AF#x`RW2b4va5r}c|tlX@+B|Ct#WMvIyu{}^7!RhL#v6@X;f&iYFj<4ft z7^D*%Bu4YJ>OKrx3@m^7E>zu2<%571lA=@DumjLlQ@=;>s~~;T^-QCZytJCF%a7n+ zJ%e{&?F9an%i)+a#+%jbH+&!qY$(zZA)EqkA5Bi>73eVpwzN74I7Y~YX_Qvo#!Uf# z(DfbNP^nD7jY-jQyyOFf|MBanXH`2$yPo6unO=Z*mqed|%G4;p<2}!roq!%4n*n4t zqzhfov|p>)Y4k(_CpTXUcom?vE{vnP0Pm^+-)BZWjHGV^blim*l(tx@<~^IJbE+pa zDs^%XRN8oPisI4*E{*w}3^sj?1yVq*Ii;*z2vjwYYB@ywHtTG_h1IJsnkMIZK)oGU zH!KpYbStD4t!&KB*Q15r?tjWCIgmsr$6zp8qDw2sn#`1{QbLL%eEh#^|lBqP$)(>OdWKRU0aap`>ciVfpBa;kp@ z6@lwi$4_-vG98?zE>9Hl7gnw6Ux&cUR>}rGRTm~6xI>M8 zAG;P^f){^zO|9tVs6nQy{iyZ)BTHdoYVeN;MQ+l`!Wf5Da85XMh~pjJb3_sebMIw) 
ze>skQBH?#X0y*Ih{pi;Bm#R}Ts0QzJN5Z+{POV9ac2|Tu5*`M5B)kFHn-kt95@vNy z`2INWYvewp9^I5j!gNQ%>_+5-+3AePy?vm(jtYNNJtxc`xsRCp2=QDK6-G{&?ntncc_d6Xev=N7$O*G^lOt}78b{F0 zUmlK_dl~VyNA7e-!binD94%IkagTvJ65cOpQMB1P;gqJ7u>H;nACP;2W^`}7j(5r< z;+7u)WA&$N1uoJBG8=+Om?fMOeg{4xVRpxI!nfYYJBLX*Vb0{`ga?RZDDLfRnG)rE z5q`8#V7f5~7B}-f66PIwoF(qBOH|qiuefFgA{H!H43-!P&w5`FfIiYAVfAcyL=FgMngg+*Q2X02fm4%|k{ONQAH-MC1N#T!4 zptOae#Xk(BWKe%nO8y86e_taKP8W(6Z@rCRU4oo2#MIUn3b^V7cCwxB*WgJ?kcUIG zgt!;}-=o5tI^mF3ICH{RqMwhfxs!LE%y6&z+mE)ZAxs5-cz@(iE!qn93^g6GQh!J9 zK%+gU-wbtM!ejTDs#~|gK(7xH?P%SncqWG3pmd`w(J@P_!iLiCq3cgv_p95A~Lm>--_nJ{eYUW7(sq%+iUQ&8JA8MT~?Hnr}NZPaPR7( ztiw&TKGb^hrxqOtatc?TqHkvGe8qXA%bI2hqr2Q;YKuys7gzz8jz3*wM>%=4Kyhqt zXF|rlq7GlX+|rG5xI9Ny1%3#_+VM@ZMDy*g{?R}t9*&zn5kOgp^`jgEmT)rt+mfH+CPWJMXJB$AI%ogp3~l1h}E{5)6sAg zJPT)>=5(|kQt8c-If75n_|3>&wxLnBq4O1oAP*aQbw}l*Y$zRrOJ^Eg$g~-1K~uEt zquE!p=X9T;9!q%aXhk^|o3A)>*=!W*Sg;fxfr@v}RX9@(tAzm#M=#Kar*!wJBQ$aB z;%}~X#NDVJftF&wI8md^i`u-B^5{fuUKx2r)Y!9+a)=|h`HKDWI!1LNS9Md+yWu(~ z)w8D})hF3Qx99Y>9#NB0Fx`CBQj7pNRBU3@!1&}(Em{T@Gt^?t6$Y=;c|LH4>T{Nl zl_p41q2$MPP}1^)5A`r2<(e$- zmQ?GTvwY%Wu{D-Va6}mnISc}HcNOtLk|Cu-`rdfhHuXI<4t@!>-ZiR|EOZQjsx>C zwH3?4f+LutzBwnyFH!^gjJ1LfOp%oSJpM8DP!2L|u1@(c;}@z`CdA!3K9+w}*M8c4QBdxQ9`HA-g5K55uKxg|204HD<`;8FKFW zGWBM<`(QG=y?9^qR0nbN&Qra`(H)iXJ{W`j z4jwSl>UM;00_x)itGEs}BY^d>I%Yo^vkk#EkYxlgFE77pps zg|!PU=60h2+b-l!x0stj&*yu+YnF*xl!tFd`4g)P)Gg?5+Mj~(6)v44bXn1s=<>}` zTj1`07nTgP)1FHk6>3qzKhWLAm8a+%DTA`lsH_5n4RCqNl6o@qvqIU{QKQkz*{(qP z;mT7~!JJ*7wzh+7G#(e=(i-V{3c1+B$~Sb0arcCK8Ql04aYQ@HE!c9Kr>O9QvdU`J zUMC>UO~8jhzQvWNs9!8*vN*cusr$sicJ>CmHWQ3r^grcKElT?n^<1UPF}aIOT&`MU zMlAmu`6xxSm#b$n_RPONUmbbA(Te8BYbm#T)o|!jIa#7-xK@W(Z#nh(QdAW&o^M>PiZsXb*kbf90Z;8J&(F#;x*(fSXUOedwE_W;FacL~ z1I~XsAtCi1LhU{kJ?navF{jGkfj3DzlaD-PkxANL`S>%9V!ce-e3g-ns=z4EaFwUQ zQ$t+ZXz8MvM7yDg=Brcivdeg>>3Zo2(j?alEfHSKJ7fEr)4uS|JfL{QTIf*7&I9*D zT-r3~dQgpuTk#itta;(CmEcZKaMyO+R+J~T%~1@sE6UNnDNAB=iyNokfRsN&Lt$uQ zD2v1aLs>44j(O^3aom`vHi)AI;&@;+;&{8?t_QzOaOpVGWkq?O#vDbh58aQip(f1I 
zZkV?L8G$QL(KoYgmQu*3aBF)S1PgGPw?SN9^;H|S$s9$m>oSxz?mWetfP9E6Pf-sQ zFHi;XQrP?P_!BOzR=Yg4H;7t#yy|9U%{6MhE^6t&kVm-k6!qv@u?6BA?2sjCD(t4L z8CY#`X`^5`jKsa_FzoXZ!>;18mxEdFm*tklk)_mvT!}G5@x>@OmqPfSA@7Cr*!^)u zUyMVv2BQ6>YJv1MGOFvisy_tjk8>l!@!?P_yfTKk!_xF{F}%_lF3wV$+DIYf{~M>? z;L^FJU0!aL`+Lh~rP~5N+v3Vo)T0ZbM_agh;Bg=>t(Gn;dZj2My%t`& zndd2c5Xcf-d5XTZvTaoM93D5}@{}c2#PpN1k`jWydgN(rJ z+c=K@GjQ)_0=UYJ_;v>H?w`uyoJhU&2k|zev#2YvLK!~7(4;G|LiK&SRmuH3@mu)G#Z!%#Fa^iouxB zxa9i4Ja&IZwKd+08SkhQ-s_9bh7n)kYCQ<8A=@oyxDW?jCqxE)aVyOS7WuI_P`yNu zoCRtWss#H|a4X{SDg-37AOxq7YxkOf8oB{p52QV=JVlR8=mPcYjnISBm8>ziv>xs9 z@~;T2XO5!BQGa5MjZy36-(x^l;>uIhL$yz;{OCd*GEA@bdms2ITuE=PQ~Q0&6rErD z-K!o)S;RVLNiw{$Xz4R$!FSdvi{#gNoOzv}Q5B7Mw=CwXyL+NpG2+5*77c>dJ8 zpI~8+D`Q)ZqdN+0GYjkz&@RX26&Om0emS+=(`fZd=xQJ>apfs`)CIq7H1ynzN313{ zdbGVL{D}lSM2~#r6il$LWPtiY+bN8yF@cF0l=@rJ;i9Tnjw_if1 z;%1Wy>p4}XGrA;QvLw;y^~j%EloC`h@}hAJq))OYwddSCLlx}fJ$AHKsr|h$oqMzP zT3q`?H~)F{9pu3ZoNdYHdvrv1L7rC=@f@tErORFEv-d+@{xVhyJQe_5= zNl=%I#l%*y3cq#a^KEdYT#Oj3x(bIu5VO&a-dY_YO=oW$F>|Gj5~T0B(m18H zb=p31r419LyIg70@!;0c+)s`41=V;vegYeL^P;NXfg$cU$V;jlB6P_?j4tpu=4Evo z9=jaCs-ClFC%&S->uFhstKy7-S5yg<&@HzrYvN0)6`W;43SM$wRh{rK7USx(0A|CT z-j;y*oA{bq2wADsq7XXIPQ>B}RiSXP0=~kR7S1VAL`6%?Z(0J@60>}T8x***z;&nq zeZ`coaDf7RRCx3kaI)J%2JiU9G=AyfInPW+07Ejmj=UQC<; zIxopIvBuf3HC5wMI(dT&UsZ~M!~&wHgdn16aN+BUX{=---4T{@g#nLQx~2ui^lpUs z?s6^R!H)uZ1=zS0b>oCIuOu3mqM)**Oe{ht7u&~NJq5H#N2zL} z*okK~SBQ*pgw+#;%vY_T#`$8LM3*Sk#ggVm$<6Gz$MW*1mMfHu>t%vTcg5n}(QN}ZsCcvS>VH?7S(~((KfHIgDm3POcT?lc!MLN$zbZz z)q(m&=V8l||3}`t#>Tdu_kBZB+M%OFu`N4Dil!YMMbhvb?tM7J;q7Sa*u&wFd$dW; zoSEUiNL5yc!{O%W@TQp|H@77#wVg}vb(*GWo5psUUeF>=P_#$^D_@eJsFSAsR=Y)k z0zsny1yVG%gQP`Jpuhj~zVEsWhkI`+`cR;<&*rSP-nG`d-ph0SKhKjxrA?NHYuJbz zLOEQ+COw=_2-`K}=;1e$-Q=C-FP&6KERR9*4mT3tNNz}0!N=K?Va=Q- zr!}K$uU?!j*_zRkJhw5-Y(L7)*=?Ge)MiC=VUFDVS@#jvY%=}M8OH0+pBN=i2`g}% zoJ#lSDw+I0V1)KsW2xyWLm^3unB2DesXt(!;fU8wa^d{byPw+oed&v@?z)KQ$6grE z%D0lKTc`7g(y2kW6F(a-wRl9Zlla?gA?db19KSK9O&achVaA=;$GCm=b2I)0@vT2U 
z6L-nz&@cFV!e$J2{*jr4s5y%DNA)b5D+Y`O^T(1q>1&wsKb|@3dhstN=d8b4-~ZB# zc)cT}|1#HiKJ#NUKmI#@`uF|Yo3?WPm6=bvm(lTA_Wi%AW33*4{IA(9I(D}medbTQ z8~=5?QQT9v{Tp$!{HNSZ{$hMxy6l6Q;%Cl39UkG|{cqBwXm9S!@7hzt6Mf@<&rXIH{~3R)=K0^(&8flo zv$RYkpL%vAe~B9p5dD5R)o7y0uUHd3`OjIml41T2Qa6Qh@}ujH`%BE zN2!6(6#U2b8R6Uf&HqVg#B|C;<|ms6=$8MK{>V*}e5GtXWj`r@VdhggH;BKQdbA(3 zzsRY%PnUncd!K*XU&3~sIZ2AD)QA3*EtUV=&WIa{NlAt;=XR_+|AqZJHZt#I z@4fflFF#m+|8OuKXE{$GIeDAFmbUgc-xNqIw z$1l6XqqqO)`^STuzHc9TWTWFBF?DKB1}{!tTn%9AC*5H*9(Gv@4^FgH&bs3VCzl|h zxYj-E54)G$A%DmHp&HDo!SLh$F#Y?8tG)4sKWD2W8uv%z2R)vHla2K>?2ZQ)=RCnZ zc{DzauERaNRL7Uyfu6w8VRtm>MPPdd=P%qDUGJX0xJZtAJZ;Q6q_et34F>eF>$<_2 z?x_}ae0bh}$rF-O?47fV>%O~taKW?s-Lo7!?R0QB(0kZWqw!Jr`S9>+a5}y}IRB6h zr3**>V;Z4HzSIc1_C5QphcKIT@OYr^{|JY+zxuk8`L=Pd=Q^)1J$`vaYkE#^BAWTW z@zC+BOS`vc9kS>1i;n82AIA7m$vD_UBHiq#mIw0^-Nb0<&vHalusXU=bWaQKgjg{HXFT`i#ZX_zSh&w|?V_D19ws;5d{pWYQI7*H zLI)@kR4otoZ*zc!a?w7sxMY3Fuug-g8_W)i(llWoTJK37HG?MA0 zEt8=)N4=AN?_+m+dSkf-r8oEm0_v_bvCb)ZbDk{O5OV0GW zT0L5AeC5PXUgo!i|8+xCe){{uG;(ItAAjS__sD%`)I05tMq#Tt;-&dxP||dVUicLg z4bDgXp{HNHo15<8AUeJn_N-HWZf!PM2y}xphlBGYM~1PC4ZG(r`cb#n>yI+C5%(je z2%>B%q)ulXnd9Jrzv{>l3)j8Df68&U*AM)_CN9t;dLKVxg5J7zgL7Dqrk9r4huwOi zKjB9sbZC%JV*fK-IZm$hE{^)W>zMRIrK8iqnVTzkZ`27}(+hEDN1!}+>ILQdR%IgN(>(~C>LKjL;V%@ch*=^JtKdsZT{c9?D1p1#`105ika+s})$%Tqxa zH71wgH3%SDp#C9TNF6}H2Jsok@WX9a=Y#XXc+fqCj*6Q3`w!F7l^ZkN(Z9L87~0Ne zCHJn6hTWG)9wwl3FXy~mXv`K5zdtlS%%S*j;K|tGXG2`V-2XE(Gymn;-;c&Yyzr0A zh8Tmn4`zPhmuJ8Dnc0V*`R;dT337CoL%hJ;U(3z>+U$Ejk-8L6|8sw0cIK~qcmDBj zCwB#m|J>i3o%wEV_7gMrew}~k=AZHJECuvF@rh^m9)Fit{buTU0k}W+S3k+Cq!L*N z12TW^uYYo8{!iG$=6_z#nce?!{>;sOQGY%$`vv`(`8EFe?W{g*cD5GIo=iPEcs%TLu*RRfZr$u)Hta$D(&CO6|T^mT-X!f`G z$Hw53^T+(06UmrydwkaYd8-|NUpwCQTHOms;kkb_Lnl5R`WE26*2^)uc&kP)Wx~khZg<&_xB-^v-k9Z-^GIhMsn^?vig1sT4xMIuLuywx&MqSA70JBNG=5k;>6nnm~ieN z=hAZ(5P_~!Yw1n}2;fAg0@g3vsozbf0!VM}FW;M)|B2c6^cgxIFm`kQ+b3uK$oyv? 
zyT0m5fYQzVZB2wxVNF|r%gz0UrtO~!;~ijYbN}7UjMnz-Jx7g>@A=RFH1`Ie+1&pX z559&npvva{@7&BkoTnh79{PK@5P)HGe@}EanXQ21n)@F#UhjX#-Jv%GC|2qXnv#IK znwS#3Jiw?jFW2`6B-GrWx)1i&F)dFQEwxJ=AvkOE845H9~X}Fs32s^Sp-ejtnaUw&b7K{)<*k?qCTR?M|gd@Upvdy_dC&crQPA%>ihY^{bFgI z=WVvPqGqSHRo~u@I#J#mATNe_8F-?xDGuYobw5J=I;X>}+E%?)t9AoBgQJX zz1xXem96?SdJzAPqTJcxXngUqH`3I!N5$o+jWzA^71o>Xa8zsdnvZ+!V#I42&DuV5 zRBg0sQ8jwR+wGRO9P(%%Io!j_VfKJ{n=|)=u%~WD{4QjG^2HHrJw8VYBA5> zy~^X7hO%Ji7K^q}9#3EAgJhkThZv9OvVVe`jnw1Zx5t}h`Z`-V?7tYC^As(x-5M*b zxT+7z7nVzFt81F?t+@P6jckog?x|0uz0kF z`$vV2Hd|+bEwGS(m|s#4FS|#Y3;vYcpJn%_?Eb9qr`_4AM)g`XT3Ok!3E8bZ)9TDs z8@s#F(^`G!QKxO4Dy^)@_A|bq()eI>GvO{Y|%SYr9^ z7nr2$c7M!;N^m=j3i-k1X%&0ixL-f(zUV)~7}*~#>a1_%&fA%-exP%9=x&3^x+BPo zYn!)i(Z&)2IUJu|%FqP`thDPasZOiDzY}$;RkyR)w)=(d;-QvGcOn0W_`}~nR5)B% z$bZ2duy*Y5e!jRM(p+yu$Jkj`%h5QGxjy{PU5|R*^JC3unk$qXS14o1eZe^Hx0hf+ zVid+KYK-7+HQKAS+Z706ekIqg)gDJ%l};tUoZGGM*V=kuYL=EWZW~)K%gk@BwjRc& zcGi5%V%y`(^s!Jkw$$#yR&C3O+~aib)Qu9$s&^r-6+IER%W3B7d-NxIRI6;&T8tEZ zc$^^%8!!6fYX2PnlA|lF+HR#&7a=U>MV-QN{@bEkyPi^|U+o8GnJnpgwe6eYGJc68 zSn_Z*jxLVfCYdA<&)d-fbMT06;YGt=Lnm@{rx_o_6lK3rdseMAJA8Q5dDLn=&13Gq zc$qIO;6&)c=K|^3t2BAs`GsAE9n$9=HWowIsqIBi>TUjRLBcCL`GWE6Ws#HGvyRcy zsMD&zw>3_5ZNJ0Pa5O3ZxzFVfc5~Rv`gpJGBk!Kdgbqw00^0nwweHao+;CKDwO1qc zv0eKpVmC6$)y96OanQ1{*I9Kns_w#kw%ioNO&4-b5Yo!4cRCQ*+WuC(vY#)Omf6sv zN5x{w2U_pd))<(mZ}VX@mGtz@6sx!b6~qfU!ZOc8pnOdzS&pvli|MtWw8b)SmX?;v z{I`_b`zUI*m|k%fF&3EQ_eUaE86jkv+L>Ns%POX?cSulWYtNR64&_vl0}h)tvf^^7 za3jJ{(4Q~R%j0A9acOBG|KW%EQh9-)O)f4ixyyEOee#;S3L|=$pK?uC?HUhV&|=;j zobPpSob>qpegB<8tw#Mxw9P6(=I|Uto70^se6jw;#rzka>z@Pt^W=*Qwhd}~dr>9o zu((Hq^AlTj<5A^md;ueY1(^gR&Onn~baobvPOgrRPq7e3{mVgdwNP2ycS7$OlIUQ8 zL6+Zma2^f1@a{`BKV?TELSC<*3>Nt78Wt)Gi}?cL;zD6xbeEp)H?~+awT~c^-v5N0 z*zcZST}GD|mxcaKzjrl8T8}t=EEoNQD8iR;bTJs`jvHz=W{v z-A1Fet9@)lT!gjTHmV)1iOAku4x!kPs+f=sL_dv`I||P#O3CaGF>!zJfm1>r_O3tp zz-_&Urc8Xen77NGZeI*JjB7){s0*1R7MPi`QLAw?`lL%gV zyq<&o?H}ybS}<1`5cpf6Cp!2F2nR|Z4}3n z>L#TowP=r4oi9_&7}0dkNy5=)Z3kj4MXBAaRBbKAOX+BLPt9HMZC+ 
zOSwl4lwvIp8;Mop$0_~p&FnUw!O(3!Pl^kwJR2A*2Ci8?GF&xf(x3eB%js{m2~Fsn z7g;4MCWG6%ZuLhe-OK*|R-ro_F5>l*@Gc3pcW!@<24^GtAr{Mv<2zVv+?3aCka=Cb zI4-pGQXJq4b&-pQH72~c6oK9desqZFpbXZi;;l*gFa5)}|*93lcarh0aH5gES z2G+v0MYl%qGUqxceP^{X%S4J&P1Cs^PtdAlCqtp62rf$&Jgd_gX=P5rWpbVTX|l5y zud0vgyBHE`olbpAWWMO67mbOrQbbiF=rKL!ah!8{J?qgF``mXNTmVfM$0#kH)}A5k zb}F5NcES1f-L5e=eCTB&S zorIo1c)BmTgL5a=CoF6mHLq3?|Jq>(Mimx6s!6lec=kLGKjIlLBN$TZBe^mqwy_AC-}LY`7OigpH|zQ z#x_Hxi4vuP&2F^{Y!6MV{ej%=A0R1w!8Bs)bG|C8!n+IB0BR{HNt)zFS9Nh7fjqU&zqzQIfu_(E-167XDb|H9-lr*|_gas{keF-%hL#%C2#1w0FWn~#7-dDZMQ(Ud0 z1q{2*7_eI^zBt{%RxrE}u?AqnDuxXpiuSHo+K}$3F2m_^)I2&mI^^-fI!cK+y}70&sFD+f2o*HRgS)EidgLdj#hIVB{wyc^O6xD3I+v=>YC(jJeo2tDg4XxQ-N^bJR z&}{l!UC;FH&~DmSxK{3&p&g@QD#p(KK^1~uYuOI^#v)u;`v-%&E<{qpiwxj6*HeS z!AcYTt&KvnjSmG~T_!fk5v>~DoL0q27EX(nU~bBd^Nq1PJ+oigYmfVvreRA~PjNTV zo&xrD(fO?!BAoMC@QRNPBvvEQX_sR|Pn3XZ4j+awlJr$Szq}fofw_u5D0kXlE{!iv z0+VMA$d@Chg)M^Oh6Z7|v|TMbikf&`IJsC}$e|Z8amA>VUwW{%v{qbNU0W{}*YRwu zt(3~;CEdSTo^n4Yb-!^a^AfBf=DZko4<%Z5v9-zfC{z1RROvMK;G~|Q7}E-iL|!A4 zY#uS}?Fg`{{zCxk%fhCozd^<{yZ)}1AiZ55;|D9wton`(v}G!i^U9_PsN3nqa1WfR zaVQ*w2UVx0z$0ub$`YmZ9iuZh=8_nRTiBNU-K-7GHXpW+5a5K--2SdswkBI7l)thD zbZWb`<|E95c{2jUhxV2?eAnB_&Sina(Q@11EfE18y6x~G<9^=nA0ZYFj|Rubn1Pua z$EBoC#7Z5XQhjXR)#B>%Iyzdsxy%QIj2~^cx9huAzA0X~Y-uRNZge1@s9(TgI*k+W z)H>MuS?$#ctF8rwTcRz(bcPqNG~*AAAbu!&tclk)JMj2D$Qxqf4x2?z>&!G(@Lr0{ z3sqkkz95L?oRBhB#1`iGR^ez7+AL0VeK8OiKo_8*@*ZG+7%zOyTGn)ZxMC z6xRX{_wH%oX7N>8v&~-0zcTujUR56YP{spiiwgTaR?3Sj%&rCb%h(JxbcHYIcPT3R zi!uftkNW+O86IO#ppQT9pT44>=ZsCFS%c9ywjEY2WJhI1uOC#GCppFX!Q!%{ zhWhzbBuSz8;Qch3gfRs}fw37BL8H!M!NHDwm>{uU)WbUQPz%Oh)^5Bx?;2q`C`i8_lXj5Zf{ym&vHAu2aU3HjY0~h-l zoC;$+hA-yX`Fn4fYHeoori{Mp=RIxg_<_~~E5h>;#khTplbGWj=AeZcCOt1s|3Zu~ zsW29bGKOggH2RLz2FGDNkb?&z0eAtyZv?&{8MBrz`S`d0Px~IEK5j8_kC3vUe958Y zgKsaFjKVmFa}nO9@X9S8QdufNCp$zfq0FBinP%5g;1A}$oP&uNy2n;ui6sIDAFrov z`AApJUu9e^&tVKvGGLL!5ooT5>qr_)92&w+;KqbQ9G^*1bI^fw3+M9i?F<HAWk zs@T#Vut~S^K}n<`i)aE{la{qup&thNbK6;{tFLnv>be|y5FYJ}3%cJPuX#gw{cSXX 
zTCaB9iLkD;9(R_a_F9xO?|h`qk)Jg8h%%dl!!6g^UIB>1*`kD=K1T6x#`G=N1;elqW{CcloN2|AIS%wLjcywshaxH>W*4*q3uB?m^~( zi+OiHu-d3?Z|BR!)$#_K$i~uv`@Ttl%d)^Nw(qmwp7BVm2x}m?(|*f1Ouo2OLit0a z#cXcokS(BPh$nKhh-{x-Nh8PDP+UzJ8yxawzxEV|jp1IfG1H#aXcnDztpxImqb=5U zeIJG(gOYjd{29E}H4In2iCspHMn`#x(=WcT|fBLxAyT-C*iZt%O63#aI^b)2| z(Bt3*Pg7K@9bOV($YHpY8ZleL(jP8I>XnF;L9!k-G?^vYAJeDXkLueU6M8Z(*A%3D zFb(p8yOUlG`_cIw=ofz-z}uJT_gkZ4u#5DrPFcLQ8*js5l$=pW%2s{9dCm1zaP)>bn> z@|J;1L%%z%gDQq3q$fGhQ0<&_iue=ubFqY}6wjrT4$UE7h$M~#nVU62rv5JjhPbs*E;vhGC)2lcFN?bHhWqZj=}Vy{UU!IQld zV`p)?U5{_Rw)Ay*H4CB1w{flL9)Go%xhj}~zjzKz>TBUZ&&js@y43$*BDAoHSUC|3 zOcWH{@AYkGM6GRQ-0y~YH#e}%OYtSa7T3TQzK7LBs73v2zy!CQg`{Pr)EoFsO;vw` zfi*1*OU{X4t41uf?B0L~?Jj@;D?IR$e|{2?NItaJ}htJLS+ff6&G57zd`*#Y6UI0MZpFRyY5 z6rf-T;;#l_V+(cw(85AfCKp?1dz*6M2p7qfx6vqK9O_+rrC4PBKkW`LrCm9B(Cf&z z{G7!#1OlgEv9m1Ikoh>BL}NaNr-6KC$%Hw?fCg{2Dp;7J{Ho>#Eq$6Vq%B{@%F*B& zTlwL|1#pxgf7l4PpqPrJkRy^$K=Ta$2rmh)_)*x+W{&fVc2PM#7MW!>AuFMGZTml7 z(u)OMUs#ki&{`})4>2%C5kCZ(2W6-r6(k;5L!nr**p!!7L%2ccUe#HzMN0RP-}c>Y zexke-gbHQgCa`$u3mA0qtcY?bxGIFcOfx#Nm4Om$?;f-t8IFK%0Bc;jSr;AT0Wc%$ zSVEdXqYK#00!<>h-eR$4g{TQoU0AkStby-(x1UKR-q{B|k%_HY1zKA`x)JTLKXT#X zH#Xv7WH?AL9#oz&SeR&&$S`*pZpy6QJz`;EQ;vXe^1;`d+XF%){J7UzgIY3E2OSF@ z(bR0!HV=0AA^cI)KB!iSnn1vmr3MKQdg6aUPKW}#BF<)L6K2dU6As6U*0v>3r5f<7 z?^NGmq=aRNP77=QzVT#<@l8e)zL-eW2{_Tb zGHTX%g7kvXfbB6!c_-A&cw{DVlO-Tg$7e3#3M{!zBDMnmX%g*^=J2ODaAAfz6#(cT zOq_)?tiv#!JQtfb=q6Oz$ASO~8l2ezu|FA5kR!<$xz{IgVCSt^%SbL~f1%@Ikwn1UMntl7eX0Nzr& z@fOepp$l!U?QP19NIsxgE_Cvuxi&Tt=k4(RhmwgTaL9pZKmtjXsnZY;VFH;E&_^f; zpx#aH{2>2sA(jQ@Rmi`~3{Kq4Gv9k}0X0M!M~<$}E_sOQAz-lqC?KmlRU};YS$TlH zUt*9c_`iDfEiVH&`1}W4aMDh6+#Q_CG37?U-MGkQPh<&{)Wd3MREckm9nl(LUj7(vD|#w=iRLjFSy+ODG`3}z z+I=n1)uGa?x!P(e%9(C>U1V&hxPEJCap`B36)wcqX7TyX=%)RW5`nlA8kXphaiFoB zjxvO-91r1HXfEOq4!D_tn-K(KTac%$v;-yvj8qz*I+b|$z&lJwD>kyVv@kQxtScu! 
zl+-&kA6oSA2Ol_VI!+4bFP`)aTlg~9=)&Zs*u4k$euWZn6ul%t5XvvvrWnJ`-N#cs z-ibJy9j-gZ>McJGiOhyH`yEL*h7V~Y_;DbV7E;9a>EIIj>`lzhyc{2j9N4LxM4u}E z5YNl**E-Hja=O;EQG~3FedJR2_^RSVRd-J#r*<$vzr$AITxm zuFfMZivVg>aJF2h4&$o$Go1;8BvV6$W|Lzk@R&iI8sD|{*way>_xl# z#ie}d!SYgRWqoC7qqIR>ad~BRy}VSCr&myGP1cAQC=Hm;hbdQnRH~ifnly+_E@}uC zjUFi72ec~0WV^CiH9K7UV6$0&mg`*&rRct5rJo(%5^QUoXPwQq+!)$L?a|86pf56d zv8b0Zu|!J$n6a}gMomu1+7ejhUIT>mCcu`bmDU#V_MOYc=z#y8 zU>LE8BMYQI^to8=dR`6kY7qoP=eP=i6u06iQob$8#6jFBmJ;(3c38$xjK$Ik6nbGn z`x7N`CPt(c?N&BxyMp*_*HIwBx>-wMHDc$WGb8Y{;}J2yJtF#2Aw4iS7td@qX8R{% z2&+C7nIyEmCSEHhjY&@Fjnhc&Bvp8wT5Q>4iCq;Y)t)VrUtk)8+KTes5#s5>SDZP^ zF<(QU3f^m0B!>v0o6e7l5c973?A-1SihgwtTF@fh9g1~4ewCVWw*f5P7zNgSW_u*^ z&>vpoPG#J*^gNotDZ7pn=UCS!4fw;}^$v#4lPBcCIJzl-X1hFD#Ks{KqCc5f8pJ#N zn*`O?x1O>4vKt(d5IL9wc z9ungqGs`8ni;59qzmbibm%<4)% z(tO;BAOd&uUoIlXVF(KgxJ?p)j93qB%GhM|i!WnH#jz^npSRbevVK`T= zLL$7~%u2f2SmE91gxiS-D>5A4T#kDfoWJ zq4D=xH5XUS5Nhk2yW1^WuacxMn77+vC>}oa=d*tBH*UfV?(0tD)a7O3$Bu=p*z|%G z*5F@h_pI6SKV%FP!>@5gC(`$%!^TYW3s@B(vD&83D|=XL(d{zPhz^&L1bt@3GGt)) z@dqR*sUdUMcDLFXIXMYt37-yndN^Apo5D9S>?4cj#~0)7srwl_S-+Ggp=)>eZvN1K zC^&KAGpt0{7&Pa&8|ju2F4gP3{#hG44T%`uh3eZ6o#s88dd9nXOSo&#v-YVAGU9*f zElA~C&k|DdU52o=%U8p#i`Xhs)+u@{btn5eZ_?V!N--FMA)KUXH}FhfD6snS1n}*WFXHd$R9Q zXQthA@|yRk$FI95d5;@}+s#Rge(K*awl5hM`4Y88LcaBITA%puCU%EckC%LF#`(m#88m`TL*j1*w+x7Xp#FdEb6)}gTUnpZ)LfgkvCu2`bvC|t0sVOiQmMVgl!VX z1D96JJeD{{lR#+~fNk5#6MmfkdHC z4@*kddt31&D0_bQOR_0@0H4f6+rx{qXM4Mbj;CwQYQ^NO%99;0i^Z(|&0)VqK*=tX zSqdzzXemQMraX#i_agrE!K7n8C5oo#p7(mZ1%)gtg9Zyn?>5zFbr|wS#j1w+1}xQr zM?T)^#3&-ow>pSr3eiegh$qA*h5^POn(0Jqs!^XtV8WNzfeY?2na zXuV(Y5ec4l4(ib(VDCx`=1p9Wgfd5$nwHPG< z`eKWo)H`0wha`V14|PfZzv0AX5A-M5>EM~9^*hUCIz~2!(q?i{}%Ig<~QMk$!h}8=GJ-9Ku-aiC7|J zLz2oZ?Vx!44XJfmKuRIWSi;c4VC+s*AX zQ5&GJA4!CIZsaCD;?2Ps;t(3sL!f+`4jK7!a=S+d9p#I1s1vA5Dv8_DdhS`&A<+(I z{0HS?d7TI#EY3?$S3y)#LjlF+Swcs)z zHRuDACxQjPl|Ts`vOp3c++FP91{RW7m*dOmW;gJS!LgWgh+sF1i4xyj>g)~90v_U6 zP&P46an&{WQoc{b7_EF7z<02;z0Ci?^~L<>UoO3aQTd%O58qMKYgw4z;gmgBzk9uR 
zct2NQUf%KG)ueSvqvqi>36QgshREgqD~0d4*oHO=bReAW@x7H5Vay-fk{XH1v2kP_*D>w z?`UnplWSc{wN`JHKP(y`mg}|t3l_jq>Dq-&On5V$NpAR39(1w3X25+*8p8o8%G{H_ zlz;C%eo6CA_gfRG?+U$Dn8^UB0;lys?&v};1rAGgsORbyH$5@l7Bi_M>@jmnGszUo z2v{x>6hP(Tj3@G!K||X>e{OOnt$NKL;vJhR>QzFX(k=qd$lnOZvVgtX+4zbY}qrP>~Dje0C zM~cmi2VJun`%Vp7o6aFPL8yDRbwLNUcG<7^zX0PO*~Gsg@1H#28Nj zOmlWBP@3}&9~}p9rJz&G>n_?r)JI-gzj$0y9hMDhH_`DvfFz9BWKhDYtGSbwQCcb4 zq#dQRwkn!kvSBGYK8%F{l2gc^4XBFP|E+xB&e_w3%MNi9+j3gEPzDLSX2IkKv8lzV zNpRhR<>LDC(#HDwDu35D2vIGrlr#3x{*k$Blcg~ES*0aV31mQu!`F9JHVD5tSqzIS z6QS+tk5_&=lm9Yg!f6p+GDD&}%UM|+MjcZ_4yF}dTK-wGd5u!Z`D`yKB)Z|&9$gRzN))OTork`p36P7QF}F?N#LhefSI~L4^S6LEHHLt2 z85t}92m460+r+n<f~`+2}EocCcQPH4nlUKKHRf+?P1dIws!;uHN2 zA!K)eH0ZJNq@*n9hDzh<8RvQP06zzOWFiOHB^9Pz_Af`L!*mS*+pblwP2e(?zC-L= z;`~{5z)h{C+i3}bfnL6HS$$ba*F@S4#_d-ls*OM+k^^~(MS3odh0~R7i%;>9bqc4o zN+E`jipbJnOivkEl!(FX(T0i^WpGioA-}VM1_O&Q)25wq%bx5b8J~uEh7!FX+DCpD zPkpWSpjOj02w9;7N3vlF2}$0pCElfr`M5#;Y&LcgOj#gGlG-`|9ggtZK7h%!48|5% zuMa)56`=hn=NrYj)SPrX0NO z+cLaUNiGFrf-DI&@a#m@?^gDA#1x##u_Vxh#p?6XLH`kBR`Hv20+d1ZqdS`4*yrCi z?T8o*@91-Y7`KugGftN!fvfnrr9ZVSSIj z`HuR>u2CTN!G25W)(j5Hhrje57wN>~z6F?*9KhjugNyq%?l)Pw#`|^=OPdmMVMofE zN+cbY2n!xD9n;_eS3~teNhb=VI^kzJS-8?paN3q8{YVVz005gQII#m6yOUBh zo|4^GA}9RFAl3%+3F|2KG-E;=LrqywN0LUTrG|vH>l>@f<)yW9 zmKvwOBDIe0%+4zRz+KYiEm6xk`CNdsBY$Z}kzB0Fu~Kfon->gygnNc*Y$W_Oth`iG ziZ}@itXCAIP+TvUSJ%jCvbMaow6U^MTrI8~VAi?uaQv9c%CzC32`0S#ec?7Ho6hfug8@Ob6)vK9_8DFrC zHzB8IbO4{c2jw`R+HHk8=(apyB~rUa$}>wGJ(8Jp5wfweG|c8muGYh-9)a+ zT*SRJk1-fA{D||k#Xt^5D%hXR2B-@{pF!80UT38|f{G_mM6g9~@<6>^$I+|Yah82U zSul?+*GFhFX+T>sYlRfHB+iY}C5dA^75o;0t$xrI=yDo{&208hAkzY0P*?$)ps)?t z%21*~w&t!jPX9%pyvTU54X`*x2T;l4paamwL@9i;I2?Qx=&E|t@bW?-Z>25O>kP~R zx4ZddnR#uRB(1wV2Crp-hCiQ%yfQu*1F%>pnbgJv7tc5q@I8)W_>r<^f-e~X88a^OCW+Em>A@M&W zmAQlZGOCIrQ0iZG6Ti7nbxc0vWilO?_%EID7-uY)M>4_@@kJ9nxf}vOy2@hc5x(dO_G_AMM0p$q} za*C^v6vR8~Lqz-hBW>^*SY!-pbT=6n$vzpOB39KhZ=tS`B`#IgUlKdzl};)DuE@N+ zrML3@$y-Wn8<%Yt0#=mfxp+=eAsN`%7wDITE5lbU6x?aR(6qrgF*v$$)YOEA5!59b 
zm9WbSzI-|zzg%3~*eI4)%3zrZ)-5fq!(URf;_I^{u?UvXl|fZ*eTw&d@IfY0A>8Zu zeqw^~c_gv}%~P@`6yg*%oVQySH*o1rhJ@FSO@O$aWEB%?c8bY~W<(Yx`9R1|lY3n5 zX=7Gwznhzz+Y#|2IsH~tl~Hy_I5><*do*CCkyN?1PPDoxV12Gh%>d&XKO>v9!;PPy~+&9y4dr zwlor?s()b4?f4%>y00&9ZB0!4qvy?r_g*B zo(80-*K5_fOUl>XRIdbcZpHK?rPoc->EyK0=~Nnmn#eR;!u=>P&Q$ACUF`j z8K0c^eiv6;8;lSW2XWOf41<=}{*8!3gG8g;U|8`OoXoHmi&XMM$bDA18OVxFAf zHRVQ>_oJmMY@{8~`sW)a>72JH9GQhYzCK}bxRS*oxS;XhnF~7oJhS&(Z5RJMk_LJ4 zITqez7K<~jOG5}*^W&{7Y1rl5doD1qwp!b=v_3P)%`P5BBRWc5Eoae$0Y+ms)dG{l z5OReDfyC=lI$tS?rN|qy_TVU`Elaj_SeLy0p^@5pUg+Y(DLEa+Zu zSC2AoHK3^bmX%7pOR)=CvMENRGFz411D8>@kSNz~pD>4^0FQU;OLIj;rSOT?gL4sf z6oV#Hem+3Z8C!qKuN2*5ePZjnu<{-(eerArPmyD}uVAr){jA;e$bpPdg*SjEdD<-I zxlK{aGIdYJLoNV)ANENML8+^NsDGH^&{Omt^Jm)x{F7-q6GHzF;fc3Yg&)E*olXYA zJx`IGGO zUB@V+%wX&i?&gZPW!9nbapGeHDr`( z9JFl?HK10ZvI0iy)%nHwt1|_ig)nevk%pL}l4jac{8cuz$OMyAu1iwR5XP+xGa=a} zFDR}aO!Ikg+W9;#yw^Oo*Q9fxBT~Ks(=dt-`@pLDximW2yI3^N78vDN@R>?Fq0CW$ z!q?MPvc@PWZ|eXUr_&J_%WD)%T`6HvUs)=xuTcTjmVFE}(o^$%)Iqx?DcL4DkwvqR zo!Al+rzT^U5+h*$+Ufvt?49Jwj;T3yu?7Sk$ZZ>f;>if+exhNy&@pAqGAC{+H??2_ z{`L-3lF*{_5oiq|pU6lW%>T)}Y9o?+3?{Z}*$G45&JfuYR+YFUa)PDvvP9%I^TEJo zVm($(R}HwFt_Hjqp^$mUm}=)L8@OF zL)fp@XhSA31Q((7{4Y1Au}Xf&Y>-~O_c5|LvuHu(=)K53h7&8GeWtP`>QET}RH{Q^ zGE-?2QC7jCeM>9Cp5>5d+B}cHmUN5PDu}2#AUqO&`@F05iwXwiXj zMD#=JsOXyKl}abwUV8FKEi(Wf{{ZRFx1JCKhzPPGUug+1zz;oFGtx*f zeqgdJ*)|!!qL@bx4=eWSSCT`4y3BZ3SOL;TSIrJ5!|Q!bI$P) z{onz#vHWac$o*Xy^K_Rx)KpC30tvF{pNoozCN6ufp2oe>PI##PxMo7wTWv5Da~0yi zX>DAxXn99rx@kB0)Ed|yW@})_h$Ww8+GW&*xI;r_V0HB~1l>vSX0;FgZO&3waZ|YYHp8H@*2CDyfvW1a@Gwq^JZ-Lu^}K)?BkRsrpJ5n zg_-9Ov0^{f7=~9g(UJ?oW|RDAMXO-xFSdd z*i|3xQb=Hz?QE8mlvVxBM$a^|JKj>x*b(h}*m9%QrWE|H{F$d{#w4}KnEc-S5@-Dr z_P2@>=CUz;CtYDzPmJd^F?5^3=BgGOfTZ@Xx)Oh+a~s4N#krjlMBk+DPCDEO^nil| zM}y<~vk{3UF|lId(q_YDSi^B+#+TY*ZI$3o_%8Bf`%sk`Cq2}hGRm%+jqNIi^-w*x zd-}5bin2WuMRc>LC)hB%d!m-EG+I~aAfy^$MyYq`(v5hNQig`PDnFuaPl0!~C_-xh zVx(P3tyEH0ZJeArHnQyM&65?9>V`^f;l0@a5FrG$zs%0%Q?r>rV6vDYGn-O;nB}4(?Q1 
zSzF$K?T4>1PVl7tl*G&CwaRugV{MJ~Gw^2Dx$jK4Xo4S@DNv>%iM#9$ZxP0#nJId( z0!GFqM{OAjMrCDumQ6aH^V7`_O>JE`=q?~ z9ccG2`VLoO`Cwd?fpeOX2iP*0_b!GZ;Q( zym!43+mGY`i*D9efxSA3Tpu|qt{L{;C51S@P*GpYM{WBHI+AkXBr#$Lfp{zcwPA;& z92*tOf=H>MB%{Jktz95q8hssnxcx|1JvHIewK7>X6$wIdppqokG#;1YK$!kGDff}9 za09m1f)n)o_-7``B0%_5evXB>6&`10kLsd5-&un-q$Y{W=s*Y0&Ol%cPY3AUkUSLF zeZ8BV#L-|v&s@NhV>F>BAT_v~+Dy!WIp z<;R@18Q&5bVd?cU6hIZO%>5kBdeRn>jxAZ}(K)jZ_ao!iB?Y8&SR>?5Nz`Cp(gppP zvM)%b>1(J%j9a>e38D+R3pNL74|z~J!k6DZ-$xu}9@{2O9uC0!Xd5|WG1{_!clC!E zGD|#fANHdW(d7@7s>``{KK$SVFPX4=>|SuLQ=2<_pKY(l(;f)F-jlJ`r6Lv&K$Ah5 zoHff^hw@bJW^oz-OR$JwG(rTIju_$u9R4(QgvtL9zndW=R7r7lW0`d2#r36?6*9Z6 z5WH3hlRDVRT}YjXaS&NUXrwidM1M3NWt-KA6A$1bP_!2SCqy*XuwF zEVYx-*4KFoZDIYB`K`>i=%}E<*b@i3xz&AyE=j*>6T8$|X>B0w&_BX=O_xkvb&*6y zT2*ebAZMNIr}}F(W2{n`${`TD8t4HPu(uFfAVY49nir?9{5XZ57J1pg?TjTCKm+n1 z60_F1xpHv^otXSAk`B7;;QS0mb<#i5e2#c(<>;tQ@sQq0vpYU1T=EY_>xn#d77G{Q zkaYqNSEmpcBh1)TDyN<*n1nqGnSvpiDRD}{ctic3 z4$cII53^umqRJ`>IpF%j8)#(H{zd(1&`R=vI>Z4dgY) znv&=YurGq?c(4?pr-=pDAucW?PhpftC`3->J;#m3N8Z9F|NqfPpF6%Zm)qbN8ORie zN$iK17{Yk0;TAU#pV3hfMiP)Bq$y=iPutc-CoR2ZCaTmZfI-G25rPG1*AzI^zHJCg z?N=s#2({JKrd5*^lJct!c}F8O*WxZ5>?F~ z_rj8e5_;9bV)T@fQ~?$!8*?y656+K|3nmMlo>~=%m=kFJEUL^)NJ)koCnzN~7ZbmE z7?u$8YU!|CAVH4u-Bju6oh&AmdN@O0IF;8qM-V3qd#z&sCKa;OKf$i>h*|cb;8zYF zWd#zAdc(oxxDcVp*qUdBCkEW*ql>X@d?@HDwrw^t=}&U%9iR4322uZVKyscp=}WY<@ID0oIDXw;h#!0pa`aHW7WouT-(GlE)|2JXp zWy?Now5KyShFaY*RfHD-YGv=e^6GDjTkp$8!Zu&?wIcNkOh+|WTufyzyOg2 z+vi`^?E7<>h(5!91&yp)n4*m>2o%OQW_yrfYa6j-?{|O9KN@Wh3$q$~lMk7)Y7@g? 
zDn*}PTdtB8J~^c;4%9~-SvdlNc%ro{T$o6ihQJwdOdKb3IsX&hCB?Jf&fS*q__z1D zCHKURx8#)fzP;=XmGmKDwORXrWp_tWXH$7;$gPoyh-;${5whvB?LMO(k)~{96{G#m zLW(<9{a?dn1%EGTYX$GI(M)FeEhOboqd{rps=JRXmO&2Fg770!t9ENlS0@Jfs(H|t>!r#|ey z_YhCkkB#5Ut%M+lVzSbjQpIM|t%PApUzTZLd|}SMI0Lwx7M6FZ9o0TkluvPye*k?d zS%?ndXYvE)ah}@+cc=mgwqLQrShZ{IS%<+QDXOwGO_y6I7VTYxF>n~@gMy1nf*ek< zp%vFk%cUZa>Xr54+A4_&5rfmXTMM(Mn_3VKXVKVyLTqkf>8>)BF2qzt7A>R4fy-xs zG@FQd2}~+hUd_}M*IF^U$yR7Tt~Z}T`dF-qKqYZ%x%4*Tlt)yN%j0#@)ZqL=NWNG+ zwM|G#I4{Ws6GRAX^S0}EumpchzM%Uqk$zs-Zm?DU@SRX-UAS={d@330mUUs`x&zcl zt^|glTkTX0s{FSCC2=tgiJswXeW9ner=_4n2?!@Ry9AU2HKe4g1=(dyf!gx|$=<4U zEQl<{jzv&2u%rwREc@stqED@)77ugQQ-P!@0UEkw+8Yp}eXOv>iOM-y1B>{y_g3bo zF=iLrKHU>qT$U$PB@%4NA*tFODFa!py%PQ4La>?NUn2&)>#v()*CNKcGq&{4c=;Ys zgUfCk;|(O@BqG!Rl;IK4<}dOEGfI#jdA|X?DIB;8L5V!qdTPk$H>8YR%4U#Va==S5 zqIizq%mqUbC6nE%0oJNhqkcX2=zyqC)gVpjfG1bD2=N%09%!g>!7IgS#6VG=ldR9- zAT05;;rY}hA4vLm2hmFmS$9kG!Ahn_qzplPi;vcFi5_J?hJbV0u|iYF?PLpJ5QIfVPy2TpI*mc`d5k8{#@UQ+wU4jRVOEcsU{&PO zx}|9Y_LPZauf^%oG}4lQHWZ3M`S@7ojplBDfF(rShPW zVuK5}(dU%xI@W6lIFA@{)l-#RUGhkff^hS;*K zrl76lL{A4-INOeslFj7bP;wue$2+L|I@Eo_FXA-!)L>57v`8}Q!`ILOnvYFMG^d>M z9Hy3d{vDyEde`@H@Ultnvp+~H{*Ig`s8$Idt89`|rb6xbXV1uBk#h60usAv#MB^?# z;C$g702Eir;%naw7} zTUre(&J5nz#woQ-+>nJ>B_$ZLEF9!%au5w^h#G+&{N>p;*rcJUVKzSL`ZLQrQ@Va%p~Ssglj!R)-d2CzuS*bxwO+y?_PUAc1Z>n{$oZb24^mMGy z%i9y|Oykl1POEXybU7JV@hO~ya~Vsa0w=*vBh~RVutd69-Zz8BWQ0lKJ{1^h0FioQ zo5B;ZbtHs?Vlqg_e_+BEDYs&u>1bT1I-mAVKubz_9$aYbr!35j=o{}Y;}TEojRCQELr~8X8?)uUH;hrL9$V2BzUR-jD(IBbiGfsJgCal* zV_fEmPqA~=J65TeP?<$5(<`tA(txV*Avjv;Ub6D1;q z5oISlwZs`4?!Ea80wcG*-I_~odrIhkMSx8GVOQ7k;wenHo-9SQ4m0XP-VN}Ef?+Ub7 zpRqw;3?soav{~RU;L^Hda;IO%mS;3mby=j>@#A|5__BT3<*ofXUa?)H}U)GacV z*_^3lsVt#^IkZ32cBLg#DB~?5OEr_^2J?ezf`3NnrzD^W_-r@dCZi*m%ynku zAs`B!;ntE1$KXOgfMF7p207q5OyxIRM+%1ia6G&s23WI zamQOMDcVIM&9i~ZCGyF5yRx4%WK~pA%QGd$4=KmX$_&J|Q>1$zYNdh5m}9pNEO8sa zsoM@;w?kg11&GlnDqI6Yi?XW|k3#BSN9V^UU>rhtD4D=b(U{n0kOr3!6{3hN90cLX z3L<>1v!YD;E|T!t{F*+{tA#%3$Tm;{MM{)IL61=xZ)t~kcgbQ~!3{G3Rm#&IvuAQc 
zc8|jEVnKkSi{lW+YOs8dWOE*Ym@uTJYZ59`^y;`v2@Y)~24cCC%20P;>yL6gky7k& z;G@61-=KJmNtti=Os0K6K-lE`O()rz$dAl^dn0V~PFT#@ne=~wB=?$bF)Q<<$b-e# zu{P;ya3<|lzJick_6?jze1cfq(esomDGm_5Gs9^HuRkp@#>A>riYX)hZE|aXH&e>C z(HOH_HYbF>BzFIsI?f`@(yf1@alDSrG*j&~W~lk*2w_Ao^#q}~6g#@4kl=4}Bu2?% zEnQw48Uuy)R?Eq99*!Hhy#~`P!8A!HIW5a1ywn+b+yvdu9pw^+m@qp9a}wxfT28$K zdBP|sJ2xiUw!{+-QG!EEPdZsYt*ros?%t#TL}-LAUpVQ{T>w4$3Be>j8@eb_IyHgw z&*m5N3d8Fj3y^8w6%w|mSyAePQqn(Z9)*$2>I?;un^gzV#-0DmMTh1+HYgYz;i?j+A}YQf>r@hJ$-v{TK6 zQ@E_?AwJwvq`%8-nvI>;^L6(*43fSp;cWM~l6QCyH_dz4saGGz+a8`LaXuqBf_ikM zBay2FW$E`m#)ya|hoChas(kW}%IUyu@?~5)oC)vt0X)VMWYHkocupCNt08M7Bcs$n z3<(>fVey9h7nQxOO#(2sU2R!ObV04cpW^b1$)YQl?Pdj99FY2{hQ7|a3_xyqJI}23 z-Mg%vXVwHGL(F1jqUW*bhYKN_aEj`bR|H@bN)JlluF9)R<&Dw`Si@ypij@NDuRAE6G5tw3W9JeLr~pm~r>~Bt7uAoA;MrKZH zp#1l2Lxf3k3xqD=j>#HDkcF|(tXXazYKH`x>b=I6Wj2-1!W_YA_<#OB&fB;k zEy&@AN#Y6b7$#qdyv{g=@rF>~C~*vj2Q6a9Rmut_kYMp5`WXNuol+(_trixHL&CYr zb3_`Kd#lxQXzGOYgU84u!m4A3N>gN~9NZbTt6^5L?2r@2)Imny1<;|g!y%N z1c&#qaI^R-tG6Z>@~;+*IJ?*Zc~>OOaa5fuoM3ovTE#ic)?w)=vEsIc66UdUGYE;w zOMEwF%!BpQ%1D`>`(}HQVOFxrIfHg~L2ShsJCQx5=F@UjNW;Q5AkSP>Q5}PDta-O{MrOpEAdSkH3kW`w`$VN99X*JB1tgcZ5R1;P>wR8}3^q!?`BWVL<7^^(cc zA=0WsNd+sbKI~Ka0?W9~X$sEyq=I3Slm%(zrfM^g5B>#qT+Fu-#(cuM#>0Z;Crew| zBH5yK8Sf$M%o?yP*hSq-#eu1~sOo`eU)lQ8fxl)0nA!-_w*JIUNGJ*fK?Ylq1p)C! 
zt`OfX^e4#+Usy7SAhVMvw)#j;6*#}ok%cyu;rp*krZvI-NlEMjLCfGUxFO&YA6CL~-UQ69u*p_Nz|lVgLlgEU{M+vmVz( zc;r+S3{$gHkuM@TA?p%5Cy=YBwTkkPSRS!Dc|QF1z%p)D+tSJk+SBE$Hd08gDWTgo zTkNIMcQPmrZMBRo8QT-PRX4k&Z*KfMr@Gi(-c|qCP7j zNx0oIz-F;)sX!*QiQ*e5+=QPB=?!DY%aSVsrAo0SGsi9vvqwFuUf$&v0JdZX*1TG9 zE(u9=VTUit7e~T>tnHt(XW)-gq8@uT`RBVY&bkG)$vZP`gJy+RFx2MPdO9CxLqDas6S_sM|#%^w%X^pi+S2e z{h3UaC~l4zEnY4uT8OLfWKHVHN-~ZgAH) zzIVT=2?!V4O-yZz{!R&0jC{Sh8~MmKGBXrYH3rwI`Jt=M#idH!-zLP@HBe2-Mr(_5 zU(Ncnnj+$u&cOQ&V9295wb?zYvOhPiioH0hXU(jrJ2gY4CG@yrD@rAv5vMAIB23mG z@vHz^V>;<0`vY;I2OtOf;~flTCr`#V4tl;wdTwwZ>8fJZstsD5%Cm}O2SzY^Tj58;tJkYjwQq-t;`8iMs}%A7&T1dhQ+Fi42cc) zf+y3WzTjNO&e@{DfF%wvJwq&=iQ@@Hz116sw}`z#U`?0Su@9F336|GYxwuR*qasSy zMk-jwh30iC`z4Jj*=DJYMEGLgDz12`V#ru;<3cH*-Z8xHAR;nY$B+g0TYZTO+M1>jNa!c$tfr{0aa z4NrrH`WNMyXj9HyR5lu+`ej-E=qB$`gK8#nJKe^A#sfz6I+C)c+*9$%@zCOCEn$i? zs44s4M8BB;e}nWiRvG-m*_gD?A8gqFY8$It+s-*DlmUH`s{$nBWWLwT={VynqSBVy zS{5*qn4D9ixI!LE$jfGz4M) ze7vqjBN@RhaVta9f%FdC9Yc|+kDvwLD zX_`%}DaCTL)0Gm77V?^T3dIqfZsjA})caJ>wBxZ-z?20GE!a7D^2{l@cPS6bhifqJ zV8H84^CX1G3f8VpoE_D2_`Q69o4g4C7Hg@Ov4GvH?EA#GbjX69q8=EdbLb{l3?8eN z6CtaQ>br)6q;^G|cS_1E-xEjFa)^1@ugMHp_SVYGT($KXTk*Si(NxkY1^8j0%z=VE zN!#sN-uLQ&-NYehU3v!G#;)uJy|N4q@rhuz9!*%;)pXhkRIsk)8bzkSS!piEAkkztv0;eDcE&^+6oEbkzCx z@rzYc7L1v(ePLww*pex;NDpDJk6xBtO zR-z^ly~D)Ii=ZR@iZ4Nfm0J*WOUuDaU06c`AC*F<@v$9X9z@c~a&Em{C10vI8H`=B zB6!Lj*3#tpQX=LU1d7WM!(rB=R@uyur~O(~PLyIA~QVFhT0jTIBK?i>`MtIVi)4D;T1&l@=VP zm;eh|p&g83ay1B0!GsB-Lwa#0C1odglLhE*x9xzLB-8j49A7(kq;N@xp8yrBqY1U$ zerGz(eUHixRb4u;+?0SIoo>x%s=xz;3Z~bh*pOt;!AKqr)&L=|x+Ww$))=mMI_Cxs z`r5(zR@>qgVww1;b}%;V8sA8L_%s7aP#)9eQcxEKEDK~V$d`x&dUH&G$w};N7K{qc zGG1;J9v6GPl`8eT;8b~vkc?HYP|^p$fh252az1pDVhGteyN zONKC)-km285r0Irb6%BTH@2<__PgK2BSH0F(o!hh5PZb8ERa%aKFXr zkS~&*HNF2q3m6Enq@^f!VKL}vBmpRRef`iJ4PB%W(P;BZq<2O zw=z#!WY=A23m|$dC+AnaaLDHe@67~|FOyu;nF``?e`3Tu>v&_Vf9)eo|CS)wWeFBc zEPMEFz9gVOEQUV|=G;5Mk`^yIU-X8JA9oeEwRc@6=ThED88q=@>C-Kh*POniG(B3k-P!nLuUQEp1U7e+O8Rt}CSHHDcXF>82Gp2W1s 
z+p0yrOEwv|VB)XyzRr{GhYtXT!;|*Iu+Y;5T~Q?Nq5{iC1)NM&Z8h2iE26F% z6HZyYC&*Z=Nk|Hsmc7R(^tpS|5FXq+u_BQhOogOfWX)|OxNB!YhM{q63Kty=m8k_{ zH`qf00LvKo9O;;Z!k0rGu0W@^YLuuW0%EH|zN1#B5Wzf-tVXqxJczU4q-j+vWQ;bJ z$`-PEWG=GN_@(Sujk@94%~HCnLf!-c%CaQ0(l*3ONe(t&H3^=;>cO#Hh)qpKNF$h)8riP8d@ zXr@8`o=`uu(OSxfW3mdgAJv{CNLdkAwF{(AtU9KBSpnLb=z2Wk)QJ3eaQV`)SoJIgw}@h#F^2|ozuvfMD0RVz&i&oEj!3i$>yj{IhxbnooQncXDoGzEBFvO zL_D`}d>@@70JMIlPavhUTFRfJUKC`wk3A4l;kBr}LH}!C$26n{2@R1_=eT-(n^oXK z7UL=-g-EeTm;>G498MP*a-E7sV$3PARiwtW&?+q|q&Tx{#9``h1{|o8MH+CRQ1oES zx)q9kH{u`-IPfXs*nq+}n5`I>N89q76=WZwpoaE_^Nr{iLbeRvogtYd7xVAbDa|~L zOD%xd#20(?;2py> zLWVXx3IyLmmIUR>E{+M}DS1WFVTl29GA8kj9Uq)qfkPPJ{?(blIjS!%ipAm=)M^(@ zqlXN8ImLxQ5|QKd7iS76J4=9h57`%w`$L}71|Zu5cR>*8xpLm;aLXz4o&2h-@!G?fv9?vQugxB}V?4n`n5It3Pb8Po zyLrOi$PL8L5%qh(&Zms@;JvqYZjcB zz4i6FZzeOCkO6uEZ+8MxD%p1YZKjHMIN@E5+x8_mF6g6&v;ql`!B3pSD|C;Q1CCzgeTr`A7$C%s3sRkq$)<>WEAXG=l zCR4Y?NpsKzZ0R{C?bRX1LdZ34U8-BV3_oU{kE?KcBmFc)v^Th$<$^r_Bnl*h+Euny+Ap$)b zO7tWEqwE6vDvpDvc7rVFM3gwlz3^<(i=DG1|GCfQ50qjv8&vT6DF=|H}m8jrHWT*0WPBZLzgsZq>XxTd$Uo0KGfplcV{yQy{ zm%B(gW}$pWU8g)c(JQQD=2fy#gTR-5uCM4M3u$vZ)G z;Diyv47m8svEbV~!Ro~@WsG1p-}+EVMzI*@BoyrUhP4sYzF9$?ri1OrzIZ3g+MmT( zM4Yw@MDhk4475p#;zXkW$*Kezz80TT!V=2>xJYMcVarH~GbdYdgB)3_YMR#Ky*)L|&8Bk>u=RXy&$VH%s#>0zO53{>g zP~6$`Sz-TJ$}UQktxQ{ymvGZF-r3JE=!-40~rU~n%M%AR;&rN zJ|kH;slpr>%9WyzOhwel@GLKECalK2^puw4;j29VS@))weXu&6B(Wt8K+tjlY-#BHPrwIn+s8)-GuNGRCqDqtHnE+}`oOeNt0zrzwBYDb2g%Pxb z|0AquU2%3A%bXQXYY$jWA8)XTb^=J1%(+OG6qpa+qg=XnrjBHe8qP+-7K(^))inYc zBM6r9k$pcHg5|9Ro`rIQokMHF^8by{un_R&?<qXmoaI1*PY%WW>o>63E49Y!goN{}T5u(2-@=c^)7%29a$= zl0}LXMR97n$sy2#?m-pa!=53lfkFdpyaoky_mHAWXgs=$gBMXicMmya;>4CLOOa#i zVL6f&Csq>c5k*;Yys{!WPU1vSRxHVhWv?W1v@F?Pio7I6NgPLs?3H}~-sjwVZdKti z%sA;agQ~i9>(;G%&)H}1fB*a6`hsaJ-!4f4FODfxyT(p8bpo$Nsa>fzur18|&aM57 z!(?Q1uiPza=xd&bE=MxlgXPMk;pW9fp}hGF z6nTiw#1V{%(BOwdL7gU~YM!)inVh<9(E-zXR2fcp%%W9@2};VVfevbGmJ?@rY&<+{ z86mHf3n&~u{49wPi(Et~Bd+n4m9-9c_6h*5R>`lyH^plcKXRXn{{XUwTwLZwV~?UOiQ7dlY5w%9+ps@1YQ2} 
za!;;)xQ!W1Vr(5i*$zQvAX#oc8YoBLz$fNF-gfTqT5WETl0jT6 zd<46U=a&vRHY%nVBeviJV(7iqidOAxIAt~LwBA1AhuQTLgmVJM(=94CGp7i1xUp6*a_TexsN0*tRf5nK z5@yEBQKXT;(z;#w+L$hyA%kqH7niKLByl4Uvp&_(N`0V0UftLSD{F+(trN`6FisN> z*YEbqY6HT)fYf$^eEeW3elk3vMD|;3*%e2xW7>AC2bZ-kk&`KWFF;@+N>>;p@cG8%Se1O-k`*u6^)Ujqm<63 z6?_BtYB-vhhnKkW6PL}3giDXxCAMy3SygpSdh>?IoNLBHgn;sFZM{lvXf_)_p0~-i zbBKe(1NmI%=wO9CxD-dOl{I8Sc?hbdK9w-FqCdqrMv}ALnF^HNP#dqpce@J(w*Y?T z0%ag`E4n`+J%Y=_8FXIF!sN@VXfAs}i>(cVc0%!#@G@j!#3&iI=4DD2N>3D9;8?SK zSDjhnt6nDh%1wo5xi~_bWr^vgWRi{r1_9yz%-uT7fi)r_^daT5jEwB{cq>Yl_d{~J zZDBC2P_FFu{t{cCFf@ z@qoi6;i=~apn2gdiJDK&4fW#8HSvJK#H_GOm-h6X>>j zD@}cLB*)5NT2K~!D9^j)+%!qTvs;#B2>hgVhKXCcL7rYH%1~l222|^uFDgV0h0%7- z4-g|Y_bUHvB>qH1xaH%ALX#q?gO4Xg>M@brh>En6lq&A*Rgan~A|p|Vs42NN3RHP7 zk#}*((e*uw??@%YIc#kg?cy2e*AH(Ermi_|G!mL1dglw4Hd}8lwAPi!7l&OL{f90m zc!8BF0v2ZizoufC!JEaok@Gj=+~}@gbE~6F0d(!GUC`nVnn)17PngnqwCwVB~|a@DPDY&-XW}6ubPyFhK}cU)~c)X0j%yaaCEOj zf#ys@6d5_Z%z&L|q@oW>S7)x0Zj|r3y|Y_-rgR?Jff-c2n?T(={1|wS;6@%PXbBO< zJagB4Y7h~MPqG?nMO6jaurs>;qhBnZ6U&H%r>9ge(O^(`DCn86ijrr z7hZ6LY>0~qeMdy6S6;C}BqrM{uhgDx(P0QcL|7{a^Un{Lt_fGONL}1NL4xfy{6j+O zW8-*lW|7Ilv3uJVjWeBJAOO45Z}eoQF)pVs-BN3{vE5~wMQ14S);p@Bz2dG-B|x$b z?kuQ+7mq$;^@yqS*lJ}B{iMznM$hN2BR2NA@Z5UTTrU}M@M9&MeVTNYz(}*_nTS#Q zmQ7Xz9Un~iWD~3fRgG+ytkBk|K_pk7Mx6$iYhYL`%^GE$xvq2f4&lL;dK}yxZ%CH$ zEPmiD8b`(E)?DL!AvuQ#)-nd0h+u_Jbg+)FRmttCsm_ZOrWEUx_QEYlb6@1Aa6pkq zssbXy$%deZzsXfNq%)qqVi3ex@qp&;YqK;~7e`k($jsuJX~xBxe}-DvEbv2Wa-lZa zUYOdlQcH1nT~6|RtBtk$fPNI&1R(Q1et=ZjJP1h;{#rA@b*~gnV&kBn;{`PyC1*d^BS%%1wCM%4jb4cb!HV5_BAbel zgyd(|wRo0AM?PjAJNm^DHiwwmVxF^EK$0|7D1l!IaYhMQ2D`OsZq&~cNesKhq2H#H zWNQRrllYqObs$^1w`1uPd>;dE4|r1%pSos13>!Q(GdDEo+h4MdR#bw513;?;cvS3) zQ$dKZ;~HQz;FJ_tK5ISw#5`MfkSRKSHowT!cOk;IeR!dM|28#mD&2p|ddIi=ciPg` zI6V*nc4r%;h}NHqKkW&s^mtaG z7plg?YqW4tJ(L!QBK}r+GNBM39G*aQ6^UCyTzI{mlkGOW=a@*$ua3v$bgK7Qf<;!T)BPPJT zl>L({X*Z6y&{=K0S-P$}$t!j^V!VM&koFuJ`m~(}d^-VyIvzUoua>@KTRi~r&8kcr z!DZC&%mi<;mPxKLK|cW~B?O~_KJP4Vh}dWUZ}JIRp)X?HCJ_k>e6uC%{1(qeE$>OwL6XQ zRMmXST<f 
zF#86@Bk&&S0AJ&m+YyUyPGMcRY~&w6XbpYb&k6t5*b1|;=hT`nu%BnAA`9gt|Q zh@5(F3-R7#>`s)Z(v+tc+MYuFD%=7aDGaQQ+p`PzDzW%Y!-tKv^^^)5VTW8}2v^g~ z=h;8Ln;l9688{LBn+H&5!V~3TZ7Ji;j13Uz4Q@M@Ubh9>Fsmua^eh}uV{ao{H>r3@ zqu-Wzj$paI(sYLFgz$vZMfQ9#XyRG3$xvBeV(pC7ANy7oQ6UJV z>-{9+K#$CZHd4fO!>P^y>yz`$JX|!#OUbs!X4NOhsOsu&2HQTd#YjBa zQEZv=7J^Dbk?0bCkWFY39qQ{~X(As!uOqqwYfLj7IA~)I`l6m!8s+e&F*Q0II%NP@8lqe4 zex4A2@&Yt~Uu5B-nd-!lgsYxzZ<5ncPSHv+?Pt5ossfDMW9`6Bi!%c`(!Q9~p|SwO zT}Z%`pk${BnLHd@INEdb%ptUNd2c|uzJreuMlfziz8y4f(6`@s?IDQ*($pf`!P`VDo5}n`bYw ze!K3;nFh74qQ?DwnOz(GLRGzg&B(LS%!w>qvbowa(Ty%zI{(k8CCu~xn{6tZn|n_; z-)eC?QRms=B(o>&DbT`>M!X|m{H#GG4|0I4m9Y`hgC!zmt+1d93(GT$8%Viiu}mUw zRpi)x3LMGF$jUU-iX=%~hcrmHY<7Ih z!mZE&11kxs=!I~pMW&VC_ zWK*4aUCKKo8WgFMtzc2#f&ysDQjUnOW*e0sy5BtTEKB6(VQIxhhLbOYD zg^MN#gjpZUmvP~QGjj6sO)lZmRllXMWQBp21tKK5DSJSVZq59}AbZFw4SJnXo#aEK zTAvXw#R9qABbANwi(}ImO)T#eW6*$?v(IYnE#*pgUfynLf6!GV&S_O>ZPG)797&95 zz`Ohx<;lY6DdODY|KpxHtF$q04z5S`BLDd|ClNqTkM3l{yBN#Zc8ZdyDjIF@B*?DV$b{R1Ter>nOlQt@jArsCKQ$l--h3oL zFn~_0z6vc;sBG8TF!ZdLT$Gr~nAL*hgB|=qIU7f1o-AyeKT8WTods}0ZRReE7eZ^;KjI2Q!ttvrbZh?c^4V%|s`3wC-vN8#{t z{tix$8p>#sa3KqSurtgB8KS=!F+hWE_kJF6M~l4JzO}c@q=s%2KZHN^i0cz$4j}_z zrdp6m6{}L$%rHinyh3$@hN_{6VX0AneT^3Hl|6yWBa<|1r2?)zH8DOmIbIoqtIaSP zov4&Y#-_%mM@C8W;RA>|H@LfvBVx@D!WIZvz9~Nsv(xK~DhJ-%+kDu{TrPMl*1x4P zJ6hRga42PE<$9fRbJ0uVBP=<~**(o_D#N-NHJhj#YCS&P-6i#V)S{Z`_OMcLO5Bm^ zBwLz}vF0Xts`f?@M(;lvYwZrj{UAS4_uIblwl*@ChX(Di#Fko@&HlLuS0UE%4RQ2E zIRb`WXap`lUxLRyh(GO>DmN#~Q~MEHRA~>iz<`@*A5z|4pjCT+O&slt$7VY9E z+8bfJ_Q~=_Jbx{0&r)Hwb0~*xcYAC}iCm4#@&RKPynsALD-gV-*~Nw1?r#ETB1z59 z%GtY49A~|l7)NBLZRVnwTplsN(ua*s&QtVTPxcuxTzOmN=c{UE9$pz~xqI!8z@fNY zG4V8*+bTW_r|y|tY=@_lGUM^WtLNVbNzPl(q=_Tt)UR)cknG~NhpskRNCwz(v$3(OvK59|C=;pwg>-SNY1;vnrWUuss0CmeQd zoy)DBM`qU11IXGta7hk!9>KVLvSVv0gBCUj|1VV~s%kfz&pvBZx3@tpw`4e%PWN~v zH3O^4tEe<@G)f*wd&Q*=$}(@fRpGIG)1QyV=dOVu0ff|bvo`kKI_2EJag}pch#eG{ zaXMq$&CN$jG)aTn@iRqhm?Zk|TDz=W1-VPLY8_epC3K6Jro|2UU=7&b?TrPRUM-vW zggN9X(`CtH5BsWY=@|7kz?r(Vtb|6jBh*mJ1SJT}8ML@E!>C0p<$lL#TqFM;q466_ 
za-Enf1s7>d@jxQFyUcV=0OA^bQ@Pta2eQC&H87=|Mgw1oBPEV(;h-6cFmAj!8*<6Y z%(`qYgq%(cD?u@T%W7!_VV9wpQ+9bVcy()U`(q{Wl8=?H@>Vzw<87smlX>;kt*e?B z+arvFtFO-b3zJ#Cy2%UMJi-&Vv2p`MY8)b|8Mm;s;7q}wQJbBsW%hkhUqgyhnJ@Vb zM?;?;`Hc868Fv_-OzG)kBNgN0LQ+Dr123|t04wG%dgNK+PaO@u1)`?q-XA|w8Z+&G zH9pU`Q7)%wW~B7?+ogO*Hd>p2Fl-eXggnCP7IbSHe)T%7D!ms$svt!0Y;Sm%Ok0EU zYHgWYUY*%0Gk5PZx;TXlCnf_-rPkiz)1701T(HUJn#Zk^;l?85mEp!c)x8ZjK)NUa z)wnecB>*9UP;a^PbFUh4L!oI<2`#@6ogFZep~*Y#wl_#^8T%hj*Nhj$pM_7U2A+Q_6FHk zB}fqt;f5fdZ&3trh<3OD4bee!ILPcr*JD7DYn2di9B~`dbsw?|z7Mu#2_Y8GKBw?s zSc^i%T)`1y<}J?|qpb~gER9>8tLO7nwza;-AA!ZDbK*L84^JuBQaCVQt2*!)lKPsu z+q6joGb2<`A&9{%Vi=ITuW!G~ zaN*p}V7vmUt1c`KY*`q|--!ta4(Lt7#HC_33sqF+%WpNm7={9k*fM;_JS0jVl-R6%^fFkp0!Bupr#$-?5pa;?VQ z3^-j;$(K7C^H-ueIrtcD&yi=u^K|=u<2WEZrGjfR`6b0DLMc z!ZbA~-Q941cnVrz?m9>Ixo8X*kIyuBqRW|6=0rM)QYagB zA%)8&6(H+S5a(=fz^{r4L#urPZ{# zcl3BO4$>?n3&Q`SEUE%(qq%S_gD5vozl7l((KW-c_6{FWKI2JJ;2!NfOu-_#mK64j z?}MLfhf>(%GM86_IEXzP1Q$Jh9fD$I`N>&k=#wRfa=)qOSQ zGZZFAxM$x>JC&`E6Jxg~RznJ!VqapzJX2v*`Q^gFF5Q|6mkcQti-&D$Tc5qkUH#5{ zTojk+EE$EP;1A*JsS*VR*5X3_&9k;lgCxLH|N%ziRxx~wZnC`sDxTb^mOxqfJ zx-XH}1D~Q~_$D1;(G44$Dg#kh#sN}MN@hlZ0YBSBn)cJ5A)%_W-JT$*ssSzChMC3L z!&4~!%ZErc9~}XIDlJjdp6sCt7*DrrT{5LTeylxm@OEerxBZn@f@1NPX&Kt=%e!#I z4vX|I95b_FkF{{b6on9B3N*x7jk&5QV%R)*>paej9BfmCf@OaX2Cbc111tklCERHCRiP}u{DIKX$ntp z?E;7ftETz@3=(5y-t~M{OMz?^=8d~sJdy$D#$_mMuianSP~gC&=QD~gFoS06WCZO) zL@HoEZWs!m1?7A&$G3UBBVSiHFH~L0io2i@tD;&OSpAmJCQf(naf{;66ZF))Vv}o` z;B1gk%%RsMX?Rz<*LtW&W%@5#sou;XeIJfnOFJizQM+$jU1D>Mk%p;WnG0U8jWx=; zcB#W|2&VPp$Q^cBowe#Lbj*eZA;v@{dc?ehy51GiH&b=5;`>=4#YsGrAJUVTS}Uwn zse^22`q7`s!=}uV1<1Vb8$*?PgY1ta^|B?aCO;6ev;jT9h z46deQRtxT*8ePUp3q!B6i&}x>NiX*}Bq7>D2FqE15m(M!8!a1!#7QLoW5hi+mH?F) zUGLB(&)h%|>o(&o6VKKT0~@xRpNFT%8N}Lnmrq!yq^s1vHJ?@y5X$CRpvsr7N=+Z^ z=f>#)^-kMY?Z`ATRsUlSH$#79o(|(#eJM*cg;7+bLl(uYn@}Nzy>JAtpf;%!HAG@U zMMIupb3hwlkTd)pZY;IPQ?pYMa1wWHm`?`|*{*TOHtO@lWtO+ho*^v>{3|b`Z)h{#B1c`jsUsi%+jlc?tx{aelt7g8I=fkNE&dWsK?`H) 
z6;FwyC7BymIcrK7gfJzrf0G7h91u$wZ;lv70KDZYGUXWybv|c89PoQcGpJ-T>9&$< z)OafxeTxzy`71)C^ze3&s7Nqm=cBUZY)fJ18?V`vwPI!eVmrkfuL;v&ia}OD*+XnZ z4=M9+Rl3F7l8^+|e=^Ezfxan{?nF56sD<1U*&9Dqj{P_On@sSq# zE|WZlR0-A)iG?bwHU6!gZMTCedQ zS7`fkNcN}To7ozKb7pVYT}X_4TQ;U>@#6gn5URPqxl(4kFN+D9cTdqou_Cinrw(X- z$7}+1Vl>49Yz)JUH;di{KTTbO(~Xv)x6T(X?zKU!4@dFd)>8p~Za3EP)RUoQByZ;r zpXEur4pG!_i|HH_Zj%c)3lQVALecgnZk0*XnF1RO=J9b8dr;-~US#i^>y$D1&2th# z>Ugh-D+U`1f)vI*a-*6fu9Mcg8;ihp4WN;v2 zeug&L9q@*U0t;A5^kBV|+nlPRHyNo`n`i~N2$TdoT$CxJr{~|>t{GNcyt7hbbXmt# z@X?(fGD5NCd09ZOoT7lcO+BbDWWLyq8*S&s+7|6cFn!YG&buS3nkJoC=dkJaa#1W+ zXt-Ew@Mlm4pbb6z7+25j9Hz6i5+hgY`kb~)No9JvYSD_79Jy(hFKSB_ZwDS23>e}S zGPIPGtfOJhtl*HynS{WTuH)og%ftMs7)Wd{V;c5bOvk4ZUP``|4=V|4^T=78#?NqP zH&j}bg14I$Uc~v=Ht`DlwAe7MU7fo4878|-{5i$W$(`z6D;ML98?RA3+S-DKX2|)PJQ=K_&^*&d<>0QVpiUeT|>thZ>4EW0`=x}54Fbdg3ciwSZ zql(U1D{yjOvfLi7p*1T(`xMO*dm8IT64Y;M1N0?W@I3jiT9D~!4SvJ$z4L&bT4*_- z7!>iNiCkJn^)F~2N##b2QZ1p;e_~79UG}=iaGUM>rv%&k_)i@-Ks2gSM*U0@j3~(y zGoI8j+0Li{k#sRHV`-3^mvovA{w1!hc{iM-M9-Es7FpqzKPm7kNsFglU_6V~u|jm9 z<(?ypwq6!Awr%!&8R;@0zoq8EQcPn4sI6cqtm@n~of{0FuIkQMYY|smBj?SYNbSuS zyBX#a5_jCY*U)<*trbDTya=^DqBop6?xq`nQSZoX?VpMVZW;k3>W%sy9Miz$;TwG- z8_%5I;cT6z@l1^~fgEc70z^@|zW_@!wN{XW8=PMlUR)U}Ev^ig<`?u~)3cHO>$PXRB!Sr4YBmQcJ15)7-xo_?k`$Fm z!V0R;jLgRW#kfrj0@F1Xo8F%1Sy;6cAnK>|wt+@@@^FtF2&oT(dPi4W`c2w%gQopf zL^{vDib>Sqy53udt;c+Yn<}Kw&S#uFmfh+7}#mD31_8Aja7>0t8xd`^c#W(CK7d)y-@)FXV%Ck2yL7PVH zhSu`o`pLo0!d53O%vQtNL#D~Y1=jzO1~n?EtVpGkvmvROgEyk|J8&ZyCx9L7l%YG0 z$Aqr9hVW#Vh>vMsj8Q+%sFFH)Gtt0TQfa`lnA6Tvv-4txHnW0AA}P%$+tW;P&Xf1b z*J#N+e+phjRw#mf=QtP1fauVXy}V!IjH5Gbk%&%ii-g5l7dXQGu7Eor-|0h2mAthh z7Z{YpE(bO)OFG2_oq)WD&6CTCHHV#{STeU?$(Rqk$RIZQ?^&YO)t$o!GP&mPde8Nm zMYUs;OZzS6qIIT`MJmk^A%TVpFjoq%qX@7AF7b?uj%k_+cqI@dpKR3 zrC=)g%%S|wi#H*mDWGMkqPT4nF-Z2wPzo#tFFT77@Vf4kvRML*$O;tP=`EPwi|yGv zPTq_)$Sc+~98e1Y{dyKO+W@ewfEn zHX9b|ikUT#>L-)9OrpcIjVj9wmpzwh^$H&IYla)mb-9&d1W3AQJ!r+gbcv|?MMN^x1;6YaW@Tf zmt(}`cjr@JtwxQxrnSua%kg>Q{pTzeD$-mEyEP<)Xt88td3}C)@!Wu?l#{Arl>>9B 
zT3^y+FAZMv2ubra{-&Y=K)&7F)VAV(D}^8CJCtY1UKlynUb#$#pa{ANuL1O|o8Dlw z{2mx%yGEXG~l8c?C(94q}F8ehq4R6TWjs7T`Np2b~(OzddX8 zYBfy>6Z**(C~N!H)R#u#OD%wbEGH|VfV5}pN%g_%8=!9meCHDpRE(Zdc&Efp12 zQLhGX+1yiJYx8(8*_SlOZA99+><-MwYWNJDv7ep%o&*L-*1t%NN<~n*4Q+CnSg)ET z#XPM^o%)QJf-G1|)w?x82|@zcJj~{yi}bNg8QT@J@t|5KH@A+C{F-HISQnj>{KXn4 zWq$ixxfT_1E18ep_1GD`i!?N%@x(z9F_d~U#6?{mI2DMsh8G$#XOU=di zN)UPnZtqawbXE0X?s3JKyIC_*x%*meN%OdG7Ab~OfDUOMh#(NqQ(B~J&uAPWeW4`m zJ`yiMLk!o5VEg`giB`QI^Q6U> z)_Tvggv6(u02kFEv5MU$W;}tL=~XVTQUAl)6@vyB!NIgZ^EJBB*4{wf=G`hc$Y=c1Q$D_p10`+cBZy(dH}$D1}3F zxGz#b^KfUha&q{%%b4MoP$~c)dX6p%{IL4#8>A+5mvY8C`UuFyZ*w@Yo97Soq%ylh-jH)1yS6$<#VfNN4E?E!+ zm#mWn0oPSuG zrS~)rR)eO2hud!rZVm4aZ-)BK&OD2(VS}pL7S<3{#8F$w6d<%94Jv(NYo4jMB#Rm& z9%&zY-wv>4?G3EC0mtG6!ZGlOZYUK&-Lqc3rjFmGkK1$Pgf`X6U`y3h-ipaif)DOm z-Dt;V?e7>h-^-`MIa_~&IM5xn?Hu|l<;c3fDnY{PYpna#8Q8)AWQ1-wS>kr6y+*8< zB=or?Vs?hG6Uyg)N@g>nWJ_Udk7IwP-rz|%a3D@8zn&SgG1^cNlNAm+imxuB7jK9@y10Q3jc6 ziH#7ZoaI9#**ilonNi9i>*MuCdkz^?t{h-FA#pv?;CWNUte)E z7HdYq%yM%pcwtyf6Yj9Kcg|LiKw=vgfYIN!MdY0%q;qUg6Zi6Z00oesYaPWCt3W;L zHZiZd$j{_eX?kDTwvL?K8Mpni$FmF`x6Q4ed46UBy-+aqKcadQ_e%?JqU?Kc=w-&- zfOIolEiPgHrpQO2@u-SH`!|eLvusP^lG&XMO>H40E))RAS}S0jun*+V@y}adwWHw8jmDF76a(of#o(Vr;%3 z4ZqMCYZ$$Y!J|(%o=iP%+E3JzD1gNiQ^3-ZAbRmDnrLt0fccn~a$jsa+nwS$6onOt zj%ViY+AnX@R51D})rX2*Ph04}S#e-jfrZ-y66^fVo$d+?k!TT>#Z(`&}B5y}4ZV!T%)I}@m-^-_t zQuwvim6>_2u$Ps|2OHcyGQBqxxh&Ix@ydF2@hhs!^#xfNqX$NBv%0R@Y1S5Zf0jhT zMuSmRhN`@YZW3cvZuZ;?Vdgx2ROV^lU0-ku=)|DV&yU{cBH%n&S`2Pk?(C;=aPoW6 zN+gXYcsR=8ajJD9=`1sy=T_Ep5W#}Kg!03Adp{(8Y2h+o9An@g1Svjg3c4Jhh5&Vo z5e_hq&=o6ky@|EzeQ}xt!`_$Se1TA%xz;+wmPen$JZ@~g#VWtObDXIDkh+xc zPnId;EkdpGcpHBh90q5IOGlU8B_y0OE|~##+_Okf43SZh)udLzZ)aw=KxpJTW;W@UQt$htF_zOVu?VZ?=FHG@engX|-bAUv}_DS+tu9 zr|WD3nvb76mZhb3SUj|$m-L9J%dJ~u{v6Qu)~zXjJ~75~%&_e=Rfo$kG6dP(KA~6H zNAv=Qs>lsGbodsv0-qbPtuWE}VDgUCYV4Gl6L(;^ew@->agH#7ITIoQO1ssp6peuj z_;RCdjf=>&1;diTexdZ)Ah%ONZjAi%zvkT8Xw2zau718&pQLOs+DR)pPsQ!CY$MeL zK^P5-gUL`9)MfE)E*8JD08Hd$MTPNJcnOCMO)diYpyonG)UBy9Lyl*9J~4>;E(yUl 
z*H{6#j_$}rGoOhbI2&FE!dtE_9aIT{3!F#_?d`^@};?Jn7WuW&cze}5Vg zh53d1)%8+<7a|9Ok)5WYE|rAhy1CF6ZbP>eR2wqDS6E^gqB!Q8o>nLOM^O1xGOpUe z&~TcACa>_+Me|s<&VHCCgcl7qs5^g6d5_Uf&}MFUl)D%&O{n?IN@UH!WeU05#?k~H z>31XC3j_9#Cn%mazf>5x! z!EBS`QkIi8R;%@KW|Pj4($*Rooms6|40`pj26BkER22y-jPAkJk@IwC=j7hWvl-Pz z44OA3i|0>nyoMJiO}|hz@o0~o)faQ`UUTOuzsJM?>u75RG0%7vZG`_}Z zw0?5?l1l3WToFTH0<77MRje~e+BnlR9^A(WJq~Pe*x~UAIr4CcYsz3*0h_zCx15&H z?@H^Hw?c;=xe!GzrIcmc6HYLJj9Vqz4{6PA+$*C3kxZKu6_CR{i{Kg9fK(A0G?rulzt;46z}O%E`V;SfEnX`MWY&>PpeT_E+x$zUGN+ zxP6*X=bTylvC{JZW!M+DW#>sAEV=H~6H>nRhGO#14wn=OX;9$*2_Gv8(lZG(UNYRw z%Hb5xv(@ou5UX?cn476sn5Ar?3m~9BS5fHLH?D0T;Wck#ftAp=cM*m7LDAkdP$7l(!1WAmyO6;TF1p z098i18IRg36r}n+l3r1wI0T=FRMdb!2L+ z)lDh|&m?MkrQ~IXKqpL+go}FLV2>26NY#-`=^Kt>q;K2sRilK~&d!_L&EssUs6VF> z(Cne8X_wb)nwp&p?1Z`rmHK5x6E<}cD}0eFS4h-!1GY+dDO<<^?OW$f*$*zd-x{Nd zS^6ldCa@g4G1cHpZjcGDwl#$0OsDGIC>v9CXKaM;ifeVVR?K9|wZh9#UfBta?;yKe z92KZu)JXuJJ@U(8=;ve>T>G4w6K4ZV8lpd#b+{$S({_vcw96#B%1obaHyUqh?msdkX+H$p)*u0`800dk(pqFfyJ19ZpoE$KaAP;?bR# zmar+ld{DBA&oeDuxhyH)8H7FQ>ygCv=}qSz=OfcDvzxr4=C5Fez2<&!Wm`!?3pqw} z@F;A9<-D>CNV67`Snfn{x-%q8aMVIq#_37@F70ll|BzkJc+rLIdPtdPZ(c7c`Q4C| z3ZxNfpPSnf8x9=rJW}Iq?%(kV=-C#cvubA!PSrfvHIOPMr;2P{qtH27*LJ-;x7T*h zn7#HY@OnF___u6UfQi!7GZtdE6*Ly!>j1e6@4E5kLV`FbmyFzGDXJ=9x}&~=ELkYt zk%ft6@Vi)U?@#T|dYm!qK~hjc|x7~jH+W7 z<`z&2&9kcAEgpn@suweoJPQOoh~q3qQ(2UZig?-&9+b^?a2<4MUA**EPS+y z(55zI0=E=w6=*dj+Y6DAW&YRoPx1%8#O{k^!c#j2Yb@8@XiRc79r7gHv0T)V6ETd7B5CUH(M=_-Ok^}& z=D&%Fk@1l!XwrQ&cDYZha>UYiPQnYkXU;c)L%GJ85VF3&_`@&U3qo{)R|qOdEsy(F@LeI{A;Gho4GUMvhUc`d449xLY%Azxy<&_^xbrm74{mans5 ze4JzRSXN>z-@^&mz6U3&Jt*8N=AQ$v{WSp_HRQK7yr|Lifsh$TLlWKPbm2(vOvV`+ zuC|Ous7|-NN|19YD99fbNgotJ#7=n|IpZa}p1GGHHwYpjt_5p%U9aSc$aT!F5XNi= zm}gIQhIE^Oxwd2LEz1Vu_`uARL|E?}o$^b2=wCxqAh`+NHk~E-@EB|i%)u&a!aOh; zFiUu32~hG(@CsL``NrqLx>7{D8uC8#qNcA?uwGmjE`wjEF!GpBaut2nt<4atWx>{k zZWM@CuSBnl^og>cB{0k5BPPh{F(}vG>HdN7duJBc&Qde>LS@^*g7qv!Cs!8&dq2bq z5*+Xkq!A-pu=wtrA-Tej0(89c45 ziRyqxB6f%xe)$|5s8GO&WeTOel9Jppjgt5^T*HTHviL}ik}x+w+E32;(Jt$hlh|2& 
zp?K3C9TySuCyEYMV^aXKg9M`H>?lLeYyV1El?kOjR#wFGw=+mt-MG&Kqu~{ zWRo-Za%{B1kSMOl?TcI9@sW^@m3%35PSnRPa7r#GlD%#6YH%4&Pb?4&v1A@rip9DI zDbWHAQCAguZPFx-Re<%ohAy~Ap2}_q*C6vRE=J-~S0gMGQBqge6Pz+9?^&;3H4WO? zegx&8n0ChT{2H&3V6x4u)d>RQAEwv?b}kD3Qh-rLHd$URsJ7%wmW9?#<@);2?C3UB zhQ<)3t(;Ggbe`zhCsKPvN4O(um$1lGA}vEFha$UVN}IwBu%jKg0o#hW0fu=^34G3^ zJW4IkcJ=ttsR|2P&DB+az?jdss&n-Ofk?krlNf8e?Hn_U%3)T3C(X{OzL+ ziu-AJVum_Kfod{AH4#NaFsKI%n#f`*sS|hE@rB^?gpK@0d6exO#_pyl-%G6ntd>~u zlM9uiebC(B>_Iw7jc#Q1(5`9&HY`hDDR==sn;ux?IoF^@f@z$R z_I0*5pEd5PW=$xH#FR>>mT5W3Xlp)!qx$7)h^BfH0g4FZy*3oN1E*+vUnsK^?92#S zW}BH^tFEnw=BnNyBaG)_qxLpejdhq6WsYv`s{tNE6}^2(01y z$`VjJ!pSr%tI=27nGd&DR#01_^mNjEuf^INFli+y4%dzkCFa_*wbL{eiM$W$Sv^qI zBOo&RT{9RnOv{#e9L$xCdg+|^Y#byjc6+9 zwAr*D80ND-sAjccV9JT}RR zB%Mg)kR>#kCsSm5tiYC~A9LwCf2xF675vD}iBTz#Pfk`wCnm?Is74wcnN(B9k?~Pl zw~mZYB9OnB@qb=L3pPt660Q0P=?X%!L}`R~gWq6%H@MdFt3~=Wxj}A?u>`ylXNh#K znjHvrqCa(_$eA%~Xq+8a#WsS0feZ>67glkHIfD-U0|1;Li6FCmAA>CESgx+~l!EHv zwoCeGP1dPe1ftxJr~NCH(NX#VjZRF_+Lr4xGPx+@-2d9wOip6`O3c{{@4!%3B zh`k1B=w!%VT{s?&k1Z=;EcvCEZ7`5ef4Ej05jEe&(T;kZ4DuHZVjPwivZo&}WuEQ5 z73|XtV_n@1!O+C!N6YTCJ)=7={B{3kmwFq`Rwc7X=)V@&2mxWIV;!K`c|cPa46lW9V%Vx5(>2@p#Zaf+--B=kZ-Pkq6?uaL`zupa+ zB8t@CVjEUz>&+Vy;WHwE26PvPA}_4Yu7VeHTMySp{r&=2V)WSaDRpsFYXof6r|4R2 zH}K*NLJ)qDkT}XOwiIPibc*~ZEbo{$4q>ykWH?ecH+}qq^|t^HDB+QPS(L1g~a zE`pX{*7Rv7{H?7Z`Bk7Q=v9*H^$XW3f(lNKri(n1T*n9G0H7kFp0d+A-8;dIb*8Jj z?Iy#apz(r)QGVlt@zf1%R}JmPs$0GE9TBt~-GK9#R#u|WP7};#+d7$g;o@NFg~4P~ zs-D1S5;y}Lg{m1{rWI{rOV_eb@j?ceCI}@(Y(0*2Mh|8ZB*;B*!j2i*vPwnLD98jy z?bh(3u^KC~tt=YDX;L2J$^mD65#thBo*13-b_R*xMs29mo8^wo5h?)5b>4K;p>`V$ zZnLGkNeg%gGjEi$mezSjPV*U0L~p~#_-CMn@iv33;A#}*6f%oU=QvJBi>sI!X=hg= zIZ%r;trZ@4vxl zy;t*8aPB!=cCI)?FE7rsQzA~|X#>ZI4l2h6)ik~@xPS8tcjoVGEU3R9&Y#B<)ioC^ z8XK9m_OFN%j|r|z1qh}#hUJvj8xIc;_p)AciKlbf^QGI`t}?Xpo?)INaJVEw9E7QH^N`UE-2YWNGA#I^FR7muPG14#Fe+-IltyOl+)X3z&G zV>|^pwsE7=VQn*I60BP@-qlB8SEO+#+w5m?rOp4ySE@{CAx}=a6WHcu#eyAosWh4} zWyYi&kD3i84-r6S;CQ3-4pREG(}=RFSv7iLE6m{5n*#I-TZeAqP*b2jLb~`ySrs?H 
zc3t*(sq=&6pHEe7wC61^+CQcCUE$pqORB)mPtZ<uan{DMAW$ju7VD8jM)k&UGX_k110GY$tgV_goyn_e|h3ZoHupVRi zm$!*qQle7jimZ8*ovpX&dX_MdqLaUg5G~)6`?BW5cJp5S4fj{Il4)FwF9}ei!wlK1F$9=n z;6{>hf}QOB8*9hMXN~2QJKQ9>@#p}Zz=KDZr8Z_CcqatM>_Z97G1~^N@Jtub9ZVN` zm-^wQ%FZ;sw;u0oy{X&>@s6%`xWFJ3wZ+*w@UpH??ez^>-m~6k8zBzTHsP2*F2Tei zeR%9o30HQGpX_XZdh|0UEY>}LG$v+8LW;$SD;B&aQz{|JhM;oF$j|4xFbSE^ok6RW zYczLmo|F zR#P&Wq$_Q1PacyAFQVY%Bkf$q%bZC&yT@D@*g8h*o+2!fxTCH{S{RrQ9uS?V-WB-8 zX>z<|{itl@6*pDfoT3g+^z~iLusbxx#J(B3x z8G$i}SY1vtxrgZg1}t66NYJjZh)ZcYlg*hGYe1`vH`9d2i1$q98K~2O4a`S)N-AF7 z?b9uzSQu$Y1PeV;q8j2sZC%N5K9Ro)S1ZeTha^~DncN1j3zm0idS`s`|;;7LKJ|?~cTK z=kQV2m15%E_;n_sX33c~DnFSLPd(KjgAyTZR14J4{l$cyJqeK2R^u38=l^ zDc*n?6G_*od}kr_ncm!y$NpXnZE=;2-=J3N+;T3gVT9t>Hs9RYLt6{LDWwSPJUi0T zMvnl!A^vQS<$di>mI}7qL1F#r?)P0FBqFx zac{A+&`8m;v{ZXYxfaf5l8!^tVJ4U+vlu7+VE)t;HceuasaN<_-3NR$Ljf|h*!1YF z9;55qDtM1hG%K@$2BLC(4h)bq6)PxOsuejFQmf0t<7|ue!v78#bDP;le~Rsy&GF7B z-$MxS3+LMUVuiMll~AC4c+9m?7-Z=fQ^SqX^}y>SI6SGIIHjOC%abMQI*xZ1Dd*IH{(oq-EVUm$bd4H}Zv5(gj_Y zRSVEdPj{M+9_Q+og+*3nJfE&0!d6^?sWrb|dUEniZ;?@&RsB-)nDQRdN&X~1WwW^V z?!)PgEvvO&=c_AumC%7+$d)z1mOs_43w9ZP;RHnmqtlb)6jM`HAWdkHbdAO&KX4i6 zWHQsFCOu&a!l`rI<{PIikg%eGhnf3}EC^`uC7na8aftN|F|*aRhmU6=FFf(W6ve+w zPJEOT7Ze{&rCn<33q^y)V7A@@G+^~_Ae5uZF0`Qoxdw%|yu~-;4YV1Eu8t~)gfj9C zd{!QU!Dk$hH!`fu6i4W4Zlj1I{QR=--Pr?TnwmO55h z-5PmBGc216Y4C^#R!jR$#5~;A6LKYnM>2AP?c$8dz1`u5TH{HJX$fHc71PRu4_1z6 z9@CN3W-<;2;RWnzlqB;jDPr5&!utHu-2I@6Aai$?s^03BOqGL-fz&GSOv|(TFM``R zrAQ3QC^zP#xK(x)u?yrlBV6b^C!bzkWzsj|+x1lnLWfaw9ARCO2q)K*_0%S1#Iz#dgPDj}i!hoTn^7=)cVU^@UOF5Hb%wZ2 zi4Hpcozef;8l;4Ee;%qtWXqLD%OjH&ZfRRy<2K#MNw|{Q5TEEQ4<-St0v@;ZWZ~cm zYa4&DTlsJ|&mHnILCyjMGpk{&q8u#pa>q8zP5r6b*YH19(;se?rc}lx_JZ(B1kAw! 
z>@BmK2u`WkD(+jJIB(~v!oCm`&pI%oRlaZ6ujO67*200fuy`T$xswz_-W|ayPvJln z*_=p?YzwuswIUkMtiZ5LxOUd93BkCcDYz#FAgR6KC3a769$Ry|G}W_cS&N*l8jMwp zcTAFKk+aQbWKP)CJT45u299d=p2$JPi8@nT0Q_JTHRm*wu&TnBThdw)FSb@(H5t-^ zt%K}0yhg?~7~89BO6rrbjFoIlWg46y_TSdwJ{$^BboBe!4^fqrYV9>mKzG%}Bm*4E zn99T9i5hF>^m?(K_oWN$wNgly#N7z%g4T&9GC`58@Hr-2^k$-o$f{$QHmgto$@&o((fz_eti$eIod+x_ z;e0T{a#&F4pLtf9Aq0)7lN}0s)fUaC&_=(;nEiI048y?M>K-j!L;ID7EwM<41Id(^ zv(4SzI=$N2Dms+*WB6}#OF)5 zTPHFW*V+RSL4wy3y}1`&y`^Mcz+yV`dYu@JhEM6W1b48~bFH)q&WE$jP{>3kD|WS5 z1kPf`T8v?Jg#;qtZ?iR1bLpsODPEvFf;2shLd-78bFspeRYDn5*s|o&%>v^+XZgFB z*V|B?WaRO|<;d@`4S2x*@`y}$$PVG_s~fyo>9Y!8e4R;)YueRZ+J15&9Fa+lrL6|7C7{FbJ>5}I@%&|=LE28*^s<@=2#>eqrd z#%ZBy$loV0E>2)zl?Ja~edU!`uCmG;9=~|1{CFXk#Hx+g4oPCm>(pGKC6r@R!7QQ_RAnTr{yuWO4v6A9}9T@?{ zH-cNI^+IbOKMcQABxNL&t$*s^V|Z(_0{-O2Yw~@($+>k@jr&@WXbGJ!`DE@Z2F(R@ zclOZ$Sm4L&#!qY>p&Wqbgvt|ic_Ajwr}ANJef_o2%Glwp`Jdprz%6?1GE94 zTq(KQQ?c~3reHiT`3QAe5f~|g<~g@ICtH}@wB*8fsBRH=mn)LpqX>T0SFK=6hR}vc zT|NZIm^H6bKZrW694=Sq>uICyg;T@)$pmU|S{P4I-$E+adqC3taRdIEOsvPzWtFqo zOx|v8K8lNbG{a_vxid1;)bMV%8bp3AyGG``g`vK_Je#$mIoO6AE@p=HLeg+`U&=sI zTiR-G#V~8Bpa4Es8h3K;iUUy6V=2q&CQG%$E@&M(M?PRn`HP<>FUj^ zOrua3p1yBd6HTz$FQ2k$)NMt2ZL>`VytX0T)N+60jH)X*Vtufaa9s64_>L_hUHuck<~(W;00l& z&Vz>{#K1HT%lD@;o7tYF@Aezg_3zA|08gHPDz4b>{E*0v>xaCMZ3SM556kYlto&zH zD{+iDT<3llQWZgpf`xB1q`!f!%OwTCnU?QRD>A<#P8imN3l!jBfHsLz$f;6k zo9f+Se*^U^Q3Lecr{8=wxNd=VEKm@EXJ@;lt$;cDx9)N`czbalSG2HwM0<1cn_wiM zca<&!Abp9C$?g|!Xe}#RDfjH66r;P559qeuFELWjVU+tpkq-b6-y`&l$ppg+ulb1r zHG*`=gZe_x)8_WcV`nSIbHcl&*sZ$6oS^#r(f-LHTIHsXCLU_uAkEn7cmXT4+@U(2 z{41iz%FJ3z$80;`guM}j?`}&(M^&`<8n3T;Ho$RrD3UZ$(tyTE?IsMyW@{w(4%EO3 z7xH3%HE;44Z)sXN*Cu!$?Y=CYli;?Vtln)hUHNGwIF?WML)Ngop?xuq8Q~)GFn8WE zjf^V#S$#kb$W`gHdga!Pvhz%Cg>vlASF#G_+D2D9-r5M^4tPLzBx{LN4u!*yuilRjuxCTWg>cJ0|b**sA6tcuF& z?S;4q0|!ym9jUKsI+ACg-I1@>>2*H&?#o<1V z24-%-FXXhqzM{sp7o?zJO%X;EpRWv3FePYh%PO8q;O!O>m%?`Hm-U8S^J41{4?-tz z-I8uRO9q*ytv4wz+c6a}t~}dZux1`erMkTQh7MBbEE0Pw;cAlci*-7dq~r#R46WOJ 
ztVW|f)hEY~)^VZjOb8d`ze5kzg29cn+`KH>JHe~38?HH3{Y!Bkt zw?6 zt5P0j+!U>_j(LL@xlsKfnao&%p;#&oP$&$^wCtV%wwzOH$S_K*?o(ruN?dy_T$5dWFG$^wTWg%7yUfCQt2B&|oxxI53 zgqQ7mlB?7$gein!!tl-u!?N?t{Yy*r=RY7)%;cj=)_^l&rc2pOlMaT<0p-1mi#^tL zLFAuWJ#{+FJuNsL7?y`S<%zbgrzCG7;p_Jm0-W+R>Kj|9K%YA`TCQO-n_I2gZXVTE z8YXg@T$6ThtZ=+qZQ8gK=ogiboEQ8^kEIc6ZK+)?(x14%)I@d$vXADyo!yAKSJ|*Q?DFOHW#oXO6_B zct7nj5kt5c0*)KTk*BNSNrUdQMT6{ds@>=w)1cCr_ncWB&8M09w6it>b0LdanI1Al zI2gyEhI@@8+9?J?#5-oLgD@I(9aldlq#)T4*XUFjn98HMGvz5XFaOOadvPA+;`7IX zFWcb@RyN5va%6#fvx4nvY6^vrla_PcB5z-pz{!?2EAbKak03Y&(fZ1>=Y4V|I+tdvS14G530VRf zyyW9Rn&f3LBNBgP6XgO)2|KHXZk%jBD!vkxDu_T(d=FO`4_j0%F$@~J2gsNO&nv?` zrh2TZ^A@ATmyi%~kSInVAWIb+E)tMe5e+WvWtuI7Sam$5a*ugKqYdwD_gYV^6wT@s z&+vCSDN==}$Bda12KF|}GwsfYiaOW`d7?boF{=zF-_HnrK|`yyEa{LeVN}=}z931T zZPLduR%g$j~P1l*mk^Rs3z&R z99N18)D6rc|L<(}xG7kp2vOF+jDrPucFT!-9yKHf>HCHoRk0hjw+XrMa5vl|dn@x4 zQW!Kj5BpvCsq~0mB@U6~9&DNDB0bA#FD*uW(C*>l?Ix)%^DGIytBZK;!B?n9g}q!@PYa=nTpM%Cz}RL}{1c-U zXatzbu{{3$aFTka2WmCgM%d99#TTHj;mK8>WD&a3q2 z=ZX@>aR)c9LsG2+O|gXtO}H+SdJ@_8^_}HevG{#j(i%RU_4F)aE93fvk0F z@{%`}0AKLGoI}bFjQo+=8f(OcjYwZ2AO^#|D3ETt?qfeFjF)n`*{YvD#P267tO4v8 zEj^pI-n3QLcJ6*>6Sl0aYr2)Y$D3Q32)YRX8DynP7`|~r#Y9=;UTLhHo@4|G6Y3~( zCgRDCNBIs=q8Y3E?(o6MlFh8tgBxDIX96+ZV<(4wo(M4#;ltr#Ez8+}F1=mib=1?f z9c+tfGUYD|>$vR@DVTmx-xTti$)?rnDrO)*PDruB=Cn^#XJz-EoBP{=)2mTd@F&X^avQ6t;_dIi z=&<39H3)$K1GAeo(63>rM)S2>%lzm3;(_EHzqxZf%TZPKV!7jF;aS@a+*Awfv<7B% zO+07guqq4U(iB{xoa<-N;>uiBZzBONydZD*bJ+_WE1Ej@5A!fvDavQvk}L)6$%;0m z9fV(K?{Nfe2Sm2b!#XSC3cC9HUJZ&#egO~a(=U#kog)Nvo-b=`B_~oQ+FmI0{dx#VGDCCf=Q^G-l=&lvpF$ zVMqUw6O>7}Xd>KDF$D`y9kZTq6;Mgu#fkZX^{l0!oIa@))5!N^m`Ohjb#3^l=>i+f z?kx6+%)p^R_M$E-?>;r8&ko&WbIWSP@;y0Lw>{b1Dv7dapHuzTO`+11t12?0&5FMv zuK0ETHr~q4={jKS9?3NZ;X9dNzCZ!J$&6YlV_kmw1Zk$GM#>XYQyV)$E}idnZyu8h_S@Zd;WVzWEW9RV@mi;sBZ*tAWn4%$1CU%S zP8-_Wh15u4PTrf#YSr;gj3e9g=m;2Tm!WQxu8XBFOO~;LMw&<*%%YjtO#Ph=6!X$$ zLeCx=x&n5y3X1xL0hwN;Bn0qLC|*tcy{>U(*@H;7km=1d*`25+HakijBr=U-#Bwtef#vw0bSlc*h$pDWrC^L9QTYY??cSp**wV 
zjWi(77kghD_aB=2z4p$y!n$u~`qksYIMf05!O|iEUpqM0257mm1`ZH>u`@4lG75`z z2gl0O=WU(m3Z&{3zl+V~Qkc7S^+OP^t6yz82>~>~cIq3?6 zr-%hHNn+yH)(K{Jn5sk`%HgpR>7G=upV5Xdey|ij8J-Bv5;?c%Dq=niD{wI7=hJKcAv(kz4b(zCjRvJs)l??(jqOqmYXly;&YBk55-7U>HwLPfm|kMrn3Zq20;o6n*Qa$E{;x zW9uzs34wQ&&49xM5DYx(-e`*T8t9V9R~QFIRI1Q~WdZ6OcB4gfDDARXZ>)2PQzC{C z@wHD8g%M1y`3A*!%VL7)Fa?mZ;FrYuUu;;A`Bgd4pu9_A<$9EI_tPzII!(gtV0kfs z>HehmFfrZvUR_HDNTtXl>}jm&Pr>qb$tbh~j_vt_z~wOa{nN z7~EJ|<_d`Uda#qlWsAIMsV6b^oSxk})$ZY3WgJV^y%ZLn+eo8QIpfV9QG>Qg)g^b* zNTMdL>vs~Q#!5b^_+(+=&&#+KArXl+3lyl>)XM#UBojTFXz+TaqBro#>T-4G<50-0*)cC`u-3esraJTvnov*_3P+l11eM#y?oX7b_#6&wd9>2b5YZ8x14v)x<= zH>ZpLYS>mG6C{nQ6l2HEVRF`(w*}HN1d!E5nxdIQDJLxdP6b`uUdKtsBbZ(bm0WNr z=egFtu1Ngb$ED@Lmv;7VYaZy^EU>n^2xqhkwvqcXvpnbm46|C%lf2uY~I!AAcxZ@A&wKUia^dk1vPUBjNG;!sGXc$G_b6_zQLa{4a*b|EcY9 zY2Dvn4v$}Kd+got_v7P9cpV>qD7=o3e>1%PYk2&8+vE3!*YUCc zzJLC?@c5p#$5+Ga*TUm*+v9hI*YWY+4zJ_m-~&H?e4Gfc<6|Sdj*mYWUjI;d{Mok0 zUkI<`A_$a)793KBf+v9&3UjKY}{PniSum6-EKR$jeyuKA4 zKiT%U9$v@Chv9X6d^^03kKYzv$H%`IUdP8@53l3nZ-v+K@&5|1+1g-|p?{d-44Pr)=r_ zvA(f^?XTg}S=YLDPEx%VAKKil#t6%$T`i5KmSO44pT2HR_U){L!6@0}v_Fuby z%%KNg?H~U3-d>K=H~3orhkyJUdkwy?Z}{s6{+$o-_J_Z2;IHs_rT@j>`!9IsqaPpm z@Aa~8=o16KsV4*fxAq!+@4yS+s8_FkkXL`Hx7z>S|K{K0m;-O}`pUo;^_MUB=fpd%@;7I`YG6W#9$2<77_#4P=glXFKG}cm&pu}V!Mpt* z>Al92p>OEBJ~Z&H?A!Oj_YNG{XTE81;5+OPzh-~GucX8Mxc>HCU+TNQ-2c9x_#Ef= zf$RP6|H5-z{BMkR^Jmzn@2!=AZ+L-ypZiaGzw;03BtN=3@cV4&`|#`i1Hbw~UB>ra z<>jrl{`ddlKjk( zqaR)G|IiPAi6_@K20pBx^S!gN*^Bh zqF(iX_)q;wK0fruz>nLTzwlu;_PzhZ1E2l~dwt}+1E1rO>-MREKcx-#K0WZ8+VI@g zFa45!$lrTW?|$H$`@ilR{swQqH(rR3d{o=N@iYDJ`P~;d{0G8zCQ01mnf4g?_#l6; z^nLiH{ts<@S{uGg8~TR6rT^OJ2RQk`M*r3S@jxf|&-V}i?vL~2=C}4gfA>G+yI%O} z{tx|@w!Cny|Jo0K2WNAof9#j{`1o@_)cg0oMi+i~v;X-Y`Dyn3@WcMn7k*#6^o?vW zEWe6vztX?mKm7wewsCZ)|M{=|!}`eBALzff!_2zUH~xYC7r*8?JH79DZnyv1Cw@@7 zUg>-3QUBGUn|$xjKKHo)+UtMa_84yVKR@;lIqF9~+kfrf_%@z=^qcxB!vjCW&*^*N z>%+YH3H`W%|IB{k;1WOcP0nYp|M|zi$)WqFe&au8=im5)`&asY>OBYj&o`O6eBZD1 z9rize=im1)|NB>txbuuT-WdE`|JC<>KOgyDpF8fqHg``aa;5K^TK!l5k!;N1N&iQF 
z`2&2cZ|FmP*S~IHYDlLNZs-6qb3J{Z{KNg<^mG3MKjrxw14r8M)#2^_ulfCd!shqH z6aO~eyfW~U+NJ#Q=dRxPQ4Txs+;wgI`Ip%EBfX~sFAnSFI)D5A)^ks;Tp9R+HuPP8 z+JEhu4mS9#|Am#GnE5F=7{8QUJnf&6w58u!iOx+y#IiB>D-aGJjw4wha zJHvd-(3b}KZrTa_3j-T^GVoXV+xH)R#lP4)@X?of_NRN}g!l`3`76DDq__X;x4)f_ ze&}E7?fd#TPX8KjUg>+`U+x|F2YNQ}zia0kpNbRUYe)3ODe#k0Ti4SM#k@T_M*hkWt7d;7oP&%eU{1ASxc{;Oa4oxR+$zrph>ec$pY zd;8!2cb0V`|7vglH*Wk}+IaO(@$`TBex2vFKh1~#$zQhPd{1xx2VVa(dOGyI9Q`vl zw8M?>>m8uO8wa1@Z(r^Ed#?-}>DdE4`!PNHqrH0iJGRr0OzQKMKhrz#zE9}cMKTvwV;h=(&6z>TV^I z>ZI{?skaJaG?+;B^;e_l@fIUvH(dkSGROn8o@>EvUxnu5l}D;o6#j}GbRC!<@Pwk) zXpgB;)E9NjF}n6cd7V^k>}Zr6r!!G%ocA9T!`Cz2BlFxomKUUI!3Zqc8mPxmcBSAS zTB+pca}X@Gf$8=&qTaM+MXBVb4ZJv&IZGv9+MtcSlT>m~C}&P%8<|w{x(Th|>qMb9 zQELyZExn0J)f~wLYiwpx)$c3OwE7mNJ40RT$_7veqDItcBu$Sx1ebU-z-b?-JS>39 z-4RVCP4Phlhs+{425E_;xm1qeU>J|2If9rjX{Hj3atJc^{*dxXl{v9(B= zrC3EtQyMlSX&jdmtc+Af(v16^YSTD8JdMf?w-TJqsCFRsb4k+)QBKn28A%Xtg#rCr zQh6E{Skh=^DLl4dtjFiF#3AHg|% zwh?2gJcjqy^)FuLk_)*u<^&OEHh=1`NHZl(|C7{bmm*Z2H-zB74+wVG6ThMpl{J1; z-WW;cMuGsiuO4yE^B(4(p>h@EA(Ey7vIj}y*N340DT0yniGRBx!O^fUN%JT_!Od5R zQ)&T~zpUYq&+`xr6VF(hAO#Qfc9Gy4gcPjrc`EnZLGbobf*G%g-?Sr@>zt+X(T7xi zg;Z10^lD0QdlYfpupLU8&}f3GTubW@Qu$F~f*-4qGkirlA~lgTcMuNXFR!SaA6tW@dBU~yX*hAFwt{aqGRH*XteHu*BX$vVIYn^yGJ-aC1h3s9 znVsC0Cch(2LB4{`CK12zF@hi86F;LX!DY*cvyRKuMkawda~pFXPUYY{1phceG8_1l z&I%z;m_2a{VK0z0r`r=}L^mpjA-_Zf;Qcq^v#ocS_ElZpcq?3S;>+nrPny~SJcNVWZ6k{nZjILCNw>BznN z2KST`SBS3{2s%9@xRTE^Khj#*;Q?y9c?-e5Jqb30t4f-++=j1ne`%FLoC(}Vd^b_6 ze?}1J#bAOj4-luxNh&+NB-oY1UA?K?@;Sl0H%P{<2*JEviIaaXm0u+iymXd0I~o(b z!pl{c6TjL^g4KDsbziF8%~$pf*Jt{F#5%7MRM2Mluz1gEvrN|a+R zEd=aXXec<{zUoj;wxbxF;!n2FW1uU_e@xH;j$!kUfOUu!z!oSyWm7zXp0Qb5P`5`Y z)*sYpkup$?-V+*a1G+B^90KNJ zVf0;~X>#!T%WBI@R9e~>TE(+0tWxI*k&&PH|lsW45mg4QFb$T0_|0d~B-3$W8X zf}J&^FktNd?#P8s*sc7>Z9Ck$|b<$nFerD z?&CS+)I1kZPW#*&<@9K%RPG*?53tAc!Qk}Vi6xbLrQk#<_ijD}Zil(uPIJ3m=61Wy?e>`4 z?d9!Y6Fobj-Ep>1Nzz32R3Q=%Jd69X%#pdyrn8#ac9&f$y+wW+wS-4H% zcDG=U!;rL3_>0`=kZ|?~&`}{U9q6RsLmE6IBo_y|Cb)mZB>oYOtOE5|z$XcC83BI~ 
zsF9uv)k*-p6sDvgBOqwMCYwZVY)!`msYlqf=?TW;qwtXQAT#@t0y`v6NsO+@@_A2$BLgAp#*YV*0 z(R>?V-{GNv{o0e84Q7R4;L;G*EpryjyB@kIB*wTiwS?G#m5SHS~AfP{m)^Jy>!88iu zmjpeMOzE<~Dgr$hrnd$2v(RVA z(@Qh7B{zbasja;SXqGmSbUjDgnsS`E+DUDcMpor_f?=(_MqMo?Z5npe0JN=0DNEk- zlw{0v5J`3GvBn)K;gQaEEs4LGTZ0 zuhs`Tq4kD7gi~7F+^Z2zYw0T=>5{gKJ?gG%S4;r9uDwYbzolKh3+S%)0PXh=v<)e7 z_(wbCDbT;#%idr<*T&ukdZqR31oT0hMmgDM?dZcm-?Z~S0QDC))3!87oKFuqRCFgh z86ozI2Q^x}aSPNqaRtS&iDJ`!KvP5$*}`Y(zNL9?5=$TuOIyVh#8GL7n7SLx-D0XWs0`7!Kd1xZJ<{M| zF~|$lQSq+^piYRBM}ayeT2aV7D|YJ&>VjB<686jDmIP3Li$%zIZioe{1Kk#bDSX@$ z{Sc0&hoTkVio}Uc!F(!Kv;le{7KhbHuf=cF^%rqqF`)0_=}kZbB%4n_gQb?Ff??9C zDWFD5`TqemM(RQFZM?MC9@JFn<~pDmQib_I^Q7WeQMXWPe-zXbDRvR45g&>xwkF8qhT<@C49J={q8!bVr(35Y!VXl>+HAsXFQU zrR3cg%s0}^u|V&ozsbtKO8X82{g66f_mc+7MJ9n7E>Bnvkx{ZEg^989N-dZZf0Cxj4+?{tDaUL@-5gn(6*$*X1eCt$9Vf2Yni%GSsh1^42w=p33WJ^cV8U@nF7|UnBs%lglC2Ngw4ClqP+b-*yEx zK$k*(G+5VQ63{T6A9XfT_l4YIjLvE@sPVeLD}kD%OAiAzL+A1Z)NEbwP*C%9!*_#P zsM|ueyF^#K5SYt#ZK6S~)paH2zj=_!z*F;dIR@%=o+?v8easU|rtu}ump)*A&r@w7 zr~&#fE}#bMmvjairnm70nxSWtf#&JGYXB|P`{f5(p%)ebt=2z$2(($RZHMN2^h>&f z+NaM)4G!vyA`D4K^!Fk_oz(wJO1`8ot_5{XKQ;!`ZG8Y4*nRzC${zpGxAg$^RDbFu z&`bS(a>}>*9TZAF>fh7W{#F0Z1Jn=wrzt=~ti}d|Il^jbPoU9OwO(fa$7-(*4fa`;hk^0HTI>L6*8WHX)RyUjh{U-b(-DN_+O;>KuEA6)H?)Czsm}ar zSE^XrR*@~Z>lw9uG8V8(<$n}zwW)c~wt6`VZVu7ou=hKSq|>AGCIm)4#&&j;;@}Q8 zm(WfYXN~Gz?8!AyyO}G+tUWA->}4-Y`3)$86*~u!ee8Ea`&s#GKnGYu^8SOYG*O3G zJ6hYr>?MV+Bdjy^`6run56n~SB2*_oVV^pJIZXJ5he3*!oQ}E?LMmx~q_C3aGD=uX zN**m-B)c9XXpKN)g&z%p#tErGK;wl+WPB3@mw`YNh4mB*CJ7EiO%`zLA#K{-C_7y! 
zJZ=qiS*S}Hz;$6Y)>ghJkTT>k+T))fIZkW)0@QTvK~P*b{%y$Hf(KIv;Vth$CT=Lo zQrb|m_4E%50ee-&*_f}$ARURwgLTv?Z|`_?J6bmE)eUUp3mAv&JEA&ZqgbrF+}M>; zT)&<$OS#D;hO&RZ0Pq7|l4R3>FDM6=83PzJ8=8}Yf71gttL_9C(uH~mZSDgYwi%m; z+n5&lnJ4QSQ z?DPc-DR=h%0vKD26w;+LJ%86KbR>y8^d2z&8Fpnkp@TDE;!!xDoTSxa#BM!dgL3i) zob2S3cjEz5{Sgu6^bmqQ`n3e?xsJ-c+U5i7T@_9!_u2am@Q<5Q!Rh;B6=1(vI|2K% z%85V&*l}y1fruIyVi4<3PxXw&{EJ4<+24d-u;zh^IomxNfd#5`7i_8W5ey<#xl}+| zj#4vbfUY`=+_PFuJh)Wj02z$)8+!bpk=W4X;I5?ro83fclS5W7M{U^J(U5G_tu)H* zx>M{i6(*}0!ZuU|8p`hCrHnj`<*`DO;q3NYWi-17WoZP%TR_M1bW(L%SByr(`4mUt zgD_5RBoo;F2rwqHd8*l!%Ci`9^)l=L>wmq9O3ykSQTCee3+TNl9I(MPs9o}DjG0Rf z%Pv9LxS2K>-+q+O;qM&*8{Z~B@Uy})NlhZj-TWuk01S8slarbjg*i!qB1Pz+^U$pn zTztARr+Ni2JH+&3Z&CRvkLiZOf2F;+3!M2uxMBe?ic@IePLQiF73oGBSh32m zXQ}vLdV~`7X^@h=KcieK{WzfAav2=^ffNf%S1%4&=CKzzW!)$ZD7XAiaLNzVV5$}3 zA1V_l+rf_Nm;+$>x|G&bm;w8fDh`B)NR^770<65BQlTo(DW$AB_y%CLVU!hB-&PXP zp)>`n8fgas=d#WX!J5Zb7=Y$89a+f&*6=av7P5gffkkWsJP;?_WZFTNu&dKREoE&O z&@wjrCeU(r_bSi|w&Og|N_Ofc&@t9ANSSl>5X4j2(K9cg(^=?(8$#84i0x8!Ex4R+ z9YUE#o0l^I+cv)j*zOWNLHmIakUESeV~X1m2N=H!>X#DcOani$%L2e8AKI9@4IoZ( z@Gz89E+hh`O~o{M??vwHKwX&)l)seSUj%g(juuC`V#qU;D{XC!a^>G60IO{C1gz?O z3b6X;iKum$(E(+bzeu;PdPGvG<^YmKwJ2g0ivf6hPkz zv?h&8mH^*x#Y?~@FA9O8t8%N>8HdVJs=SJDoy03atPau{ZHRLxf`M zP(NH)I!T#K^+IllG>#vkBH8JCT|nnJJfiH9NC|=K##EGRUW7tqx8AP--CrRJ$R0PS zwpLHLhFm)x(Lt{B25OM&hAdJhQv3olufwXLq;V4&H=8tma+oa(t^+?-QkHey+YG>&karG^m zQR@&y$6Lqe6kVKqQDAeKL2KkXk1V0)MY3JDBb2;&G$7xqtvd%;XBp-p)%%_we9x;Xq^ZpSleds3q}XC%q@K@Oq1>zT zSir$7bO2c6nY#zjG}h1nG?)Dm2DFr|q>ObXb23&5Fto`8aGKBEfpQBSm0Rw&0&J~?-AQe> z-3M&fJRPvZ^!~hbkiXMmYNtM2R1az!V&r`3(AwW+V`@iJfb{U{g&;f9a{kx;w z=vFU4zkD?T{jbgfY`X9;V30S(tdO*}%4`bUrWo(Be<)zBisb=o3)=wec=SMDbt5TO z)oY&)SpTUDpy!82fL;UXVB_6^9Iipvi-10z^FR~62C}V2Ul6gS#)Fms`i&vWYm(j@ ze1CBmU_k$!;52P87cj6N?N&jR$YO$%wgQHgp&g|8F7n|PTh0TvoV6MJ)oJZ;~p7tVBekZ{2=@-C>SdWZLin2d}a4BblOo?NfR}NPGy59eiTMSigRMf4lhQ-cs5j`nSVRh7@j_&m zC0thmmYkUgSV~(G(C$lfKzke7B}*3}z4&H$q1^R0oe<+@P?C|szQMNnq{{B=2zL1} 
zTIQC%rBP|x{x`~zZ%I+nOELgE?m7ZaXAZ})lZ!!2Vm9R3E7;eWpztzy1JEY6mNc}5 zl_v+=!+ib%mBE&`0Ckwn3Iw{y(rGy_vwzNly3Tt20J_PXXM=g0Jy{0oK1-_(^e+qf z4a|k2-$|h3;^&G$XT)39sJkL=jt9Ceo^w#Fs#gC9FzU>!4_LQ1iPk%{6VTg+JjM8d zCgfYKE?~=LGWe|*kuA4-PV!MLmw@wv>9ARFog~hnA(NMjijGQlBIC1Hk79D=r8Iht zT@*CDWyO=!vgi8qfL?!7I_t@(B!*2%Kqbt6R;7z@llJpeZ}1DxO3a9 z&MlRrqp4J$j;ZEjsrf)EHU0vf@^;cC3N2Q=o$b$d4la}{IU2}$oO&BkcIibX?&eVz z(8GWwFK@Xz@u2Oa%@a&X>; zC^wq`?MWe9Zh{jk(AZ)6<$&$VLqAgc5?&~Gm^l~Fbd-7szfR#kV$VdtNdG>NiE7^o zFgki4I5CI*K)K_^Iw*HapaiOO^G9!ZZvy64`>Xtn+!CTRq{dAc($D^3z-gOqSM$Y=$7|hxEQTO zkxT!ew&{>3jmAWcVUtx0T)$Bd(DQnp2Lg24uN2%F7(Ub=2n0R`c zB36C$H#Bx_LatfU8T*p#HX#q7`yJwYlpGIOYa3#UT>BAaLUj@<0oE-52a@aEf+NWF zN38<%j9d-qB{_rd-F+2cgYxG9eU?G}a>G0@YS}oPT-mn?!A9Gu&&C1v;QJ*~pG_`N z2=Tv7eFluCKAWb~yNAFi>N#jsLBQZQG`D6Wp=LRx0`(tybrd*Zj&A{*e+>t0@hTLs zWgaL|ZdDmOlHB?of}7mtCiUF*R$-LeeId#5tS(}%Jb{&?o+h$nYBGt{A?9TE65Vi3 zmU&DYa)pg$pg`}&hyqe8Mqbh(aRK;Uwog^$c+x4A(}aN;0fjSi$y$Gb6BxfHlL(Fg?y9cq1!8pp|MbdIea= z@e5#75V?5tW%8Jq{A*F}80>&@Cu``OE6D9CWipH>!Kz6^=xMad*vxLrlL zW%ueRw{krI*t#-OeyQ!a;wUG??MJy+JnRu6!oDO}L)hCmprPy8(t{pset3=pd#q8dZ|-0E{OEF0}`2-k)UJ)S`84+YLr2wVRK%m)dWuq>NC| z-vM;JXXN?y6CVM39_z2vJz(yLTKFKLKxN3hWD6hsGbmplMn zb{V!1KKsI5=^0B=y=12vyHrWn2~vAmyZ%5&*%1UkE?u%x9oVLQ)Fw!mz5%9Ot_H=3 z@{?zRUttoURB_Z3z)E%Cc2ec3RZ#03JP5TeGf$xGIs;Iu`GicuZ89Q}9>AhW5{?;Jq5n+1q4pH(eM?3jfVkBe)*q3rb(64fc_^dLME^Z z{7DXa0u9Q+j{&7-)?`B=qs9P+PO6I9Hk-Nvwq1w(R&KWz5SuLgPwFrNkxMeI4Ff-> zmm7NM_z(P8?(~Sfxw9LYO6)1}=`J%|z)u)?9x{m|FQS|@0#NE!fkJz7jczEXe1#v% z>8YNO>25a| zpylk*YcN-^4SRvsFr5d`TGnb3>ejKb?SR%Z(-5GIZ1{83ZDMV-K$}?r#fB|x_g0|o ztN^8yI~W^^x}B^br7pYJ!){<^Fqj?r*Eb3s2iR`1;6K?rEHd6reWZ3LSPPoqNyaGZ zoMmOBL0w>*iUVC@J5#~D%se7MU1yh_K;2=Ro&()wa~6SlkA2PnddglO1bWV#s-W%_ z+x8IXBTKde^Aj7s9;m-i$_VBF;TV~~5TW!|pov1$gXL)~=Y z0X^+Zp;REye8G$A76_B5ZlUmMGtgq8M;@S6LQqGb)xz+)MZdeB0h_pzclx)Z zRIBN>=751^>1{#KxqT=Hr|kl4R_`4oL;H|2!$ST7Z2o{A^o-PkLc$Z>hzUT?bfpYH zKXkL{u?OWbP^0^K22z6fD9_o$K&$lokAZnw|Dmz+FwPD0g7HC)>;p7GUuG20B)#q) 
zgs14clR-_>7nlaL-O5PowbN?Eb)ffFk<%gh(JIXv=(E+P1xkN~-~9vft+zNQ79YKsv1MHI$nl_62MaBLTKdXs@(v&0DtNE!*;z?Rd*Sx>L5;uOBU6 ze>T7#XaM7F2Qq4m{Usb`&PN}@cqc;{?`0U{-3({ck30rh8_-yG1_`k|jy-IJw)@zb zAfSWnUmpx|giXbpZg@j5?an9JFe}tuWUps~y2GxM6Fy@OXfC~D1=E4PvEc(yH$vDw z9MlA11AQ4cQ5aPL)O2CaV$>}XCTsw8N$463>Z;&DA@pw{?f{tA1Yg>muL~7U0o@RK z5xObdq7ZOLNO}YGLKv_Gk}rkk;h;;S5fU(kH=z0>Wou^IV`WLWdGS1GOSK=pb$UCZNIEn9)E(v@fW$q1uUY zK*O|;rvMGt?%j>L5!yUAfktY3_W*O0)^{$b(b~EupfTEa6fwtY2Uh}`uN_X$ut59c zJE(=)6=XJxv`(eLT&yir8q^Z)DTGbFg{8#N7M=P839;0(Bn7@+Cuy4)#2RUUMzIYD z$lE$U1m{yeDJLLh~R zdBU0zpcV;IB+xRUoD8%=s6@uFP8d-DXoFCN9Dk$WNi+K>3@Zcbli-DCmL_O_VAGQ} zX^Y)Q-4^XsTDYy+(G*R#Y0EqW+OEAs>$g)|oBVT^wijuBw{~uMPvsr~l?sK2x>`G8Jo z+mgAR)|MkpOcCqoKur~6j)R&eHiCOgv&Da%f#!&ZmjlfcpWy%?%@@DZN-q?RwSbn0 zCCQyuip3s-S}S_ng4!aM!sbpN&e(z4CcdZ8xkGdx2eeZx-U4WsIED7!-C{q|?jF%$ zJR`Xgdq&u-J|k^@v!Ic88;4O?rl7;;@lG$HgwB z@)KgERS-ET&L`JCB_0=mPKy(Wc}6UQWC-8nI3U3$1HGctJ5)F$` zcUheG4d{ya51Gc_;wRdMu8C1e5VXF#xAkaTz+ddF^EP9e}J`tm$Ks^_Ob07V6L><=?iL?)za%gyREF(10A*s7>>HrR`u_Kx@Pro zEvV~OdHaC6Wp%VJsCQOY6otQ9ZP*Mn-TGlT>K0lT=?ZF%b@WD{4C`4GL-t$0yN|j< z)^?=2!`AkLfsR^ls||F_`Zihe3G2TefjVj3pH}QIYqlNCQ`V~}vY)ozToBAN*7Zr( zXRWW$>F=EN6*9i_)=ejax?p`Z2roCsPpya12+yrMv;p

u5rstq)WK`eJ=I59+>KCz4wX zuxT9%=3pByGU%Z;3En`%Z2CP1bEHiHvX?P7_{XNOkJ;311vJjaM47;No7e+D6Kq=3 z!cDe$*B=e0*%-n=&9o^~2Gkszum_;#+svemeUZ&8$_ba+yr)36!lnv^t~E9*#{sRk z>3kJvlTFursM~3Cinh;VHU=5!rcGZzpocc@ZGj%!oWBRjH#SA*0Sz$hzX3GL5KPuO z*>~-E*d8P4RpzHivr+f!wiZOR}9_g*`FIWO#^yiC_#JNOM@** zzA~JA1?sgy3IKXzn79+@t)b^ApmzrA42ZlpoV5n^!4MD)>Z4)SR#2Y}kDh`0W^gAh zeK*XYBhwGVzRsxYZ)@WKYM`z6VW2^_FY16f*mmGlpdq&1C|(b>)vZR|Fk8)UpoZJ7 z>j-Lu?EtdKk+yNyz#L_3Bz=ytO&1NFR{>TYNSR-aw-U-O59b29^?L|dt1`Wys5|I2 z4c@Ezz=&znL)dI5rE-4{Q;wC^d7uf*r2xm0*T&hv!AkbWdR zp;aq-LhDv!r#2h%qHM4u1=@xXC-2iFziAszFiL(wJt?n{&QrR#Yg=OSZW37p|tM?l*>J#wW!pk z5@5BWwE=6?BT1KGebJZuXLmr)2CD%Z#O4Py?$@HWae5-k{vnir23DU0*z9O!aKc9J z18nulq>Nf>F1e`vm??^V{pa`CQf2S@N*izeY4q+Y~&Sk z;71DSAvb12COm;;BBBB1$mb*zbstdfl#i6uxqMHQWB;VXOP7IIcsc$EmAgIhMLA8T zNSHqL31qrIqRIByj|55Xz48b6eI_6s!`EY@`+^w2jv@EK*JH0*fEvU?;dJY z^FCmQZ!5t!Eks0?!{0%K1}K9)cmlMK)uZ*>&(>3S2iOWQ z`IFX2`=HSX&h(6e3!lI1iuI7JxfdFj-R$!~+&@!9FD)p&6oGtneG=g;` z3meH|Xj2`(|05p!xLj!p(b4BEkl@B5QhyQm1f4I>G4^PS# z8gn)K@jszxLVrOa`+HFCeu=%h0Ca=drvlw%i|=6)x7b?|=q|hVN|}9y4UbVBe19C+ ztqTXE+^#d_4yFMVS~{KX0ocXL9h~^fUMQ#87Du_qUz-7Y7fS}GKYLA)XCPx0Kn-CV zF9MBVr@nwWhFzt=Kc4wi$Aew6>9Y`zdD$ind76C1=HEA<78uT62DQ-eU?o(x=-VTWL@P0=2meYYz;?NFItN2V*Fdw z?GS$q0NN=o32euRB_Bw3G~I{NAC%?( zD>Xj|YNWh_EPRZ7;x?EQKSzY}b%=1=b`B~h`{W+Kyt#-}7iE{x|0#DHuRx1aE;*`8Oi}et2;mk4SB4=JY-mm9R){;4Z@8BV>a$^_3($AN z-%eomw|zu5G0L|34N#M9!wv(@vRxkokwvzRX>Hfp{vf?oXoOl{hN;)qVBIkP~zTXG*j~H_c=&@LR80wyg{jY=iS8P@c=&3lG zH2h3_NeR(&vFI&`ybvE^cgM?_h9g0}5>qb$y%s~TmEs-Ek0zkriUo#&dMAFQyd&B7swr&A78#!krH>$w?Xq z+&UcNfa?&d;}%(k>XeY?5SbPm7hOFzTH`=e=ZKWf8i(kf3Fyy^Q?GiBrfCC@bJVsNWjS z%@B==Orv{C>0fmR-%Q3PbWBvs5s{I}G3hFWe}5Z;o28&ZwV1Ved_+`oBDDVhw zLo0~Ai~l?XF9ma?DPcxGA<1U`l?8!7__KYO-~~#f2~#?>=hQENj3Ubq4wI#Qs^G=z zQpo zMt~p{sqJ4X?O%GBrSe%c$fq<`@nnnkntEPliF#%v*qgBm)pja96NSvdH z1qxw=a@xZeP%henPdK%CJFW;;HSul=jt>_Vd-ip){~p8!8w%1s~thg0Hr3YQd+u z(oe<6Tz{6$HD=gT89&^@;M8`iY~@_qUqzBgzw&vqw#Z6pG&mXLIR5v(X362Sxn%lT 
zwnz4vWBZ{<(&+cOP^QKxTDA{T+FRbNS0wpZ15R3706U(MV!iv|`XA3N(4ntQpeGfzB8*?-G} zO(9*iUv>YY@Rrl~!FJ~Mhb%TA%kj;lf6r zFK6W&uqa-cS-h;pj-c+NnJcx3^y>bYm7h2ZbMQ!S`19^DX!-dH=;U<5R;?`W4GM@^bcd9|Jy(yU(H@ymG-jv)j*h@N;^u zKe>D{&Gqi#DE%7u5-;b~tXJ|lb^G|B@OLSEH7pm0;%GkVzd+uO6p5d|!dL$#a&;Rq z$LqYBK>oS%JNNFb+vHgWl)8b>8RP&>+?fuT1` z|H6xL&R6q6qUd*z_mkaUK5>3_zr&jt%)h9{-eTOm;O94YnFUq>zYO%L-q4^N<1}81 ze7;&!u#zCokjfDoyi8>5td)SC;`ZDh34PnJ$VtW@OHn-a<@0vX`{27kYx*onIC zCv)lNVJ>_Tn_PZ8mu0-Q2H)OtGN3DR`Ro<-!6*G3ZSBP?)#qxRi~K-EUi}yKt&}b| zbNTFXekb_!y!#Y?wC8r9(H!FPn16foxOhzdyP(LQvCz*Q@M)gvO&T7G{CnPyLS#bu zp;(evZwgT7*-+uvZR5qIZIw1D3SYe$LOHH!MuVS|T`kLne^lv5y+OpN^nV%rd^yDb z`-;4JV}`vVZ;L#e=C9riF;L;x2R|S5SJ%@u|I@A85o>TfN^6HHZ zu1fzI3SYhP#8cs)0zW4`JjsP`g#%Jf@}+a(*94!Qx3gl0Wt4H#z^CWEt=NOY*7Q>3 z)tfNtDivE4zIwxjnn#}j-(GE;3ExuW)tg2JD|JEed$QYbMbG&ZeRc$&=BeIX5~IkE zQ~2r)9KR|2T}nTi^l;8Wq~pIMioAM*NM(ip0sNfwW{1E|<3=iWtNLkeh5xL97w=)E zGS8Oa^L}cZ`MeTfz9O&QOrrL4QWwIs6?r>2IK@NtW}8zA-!9$}n^4Lt{K2{OvoM!_cIG1gLy=c+fKl~d1Bn66N4;T%&NlRmSNQ6UHz{VeWn;a9TQXWN0F({p7g^6Je*s-0X^_P&>X*69FzGdFh5B!|wIaB4=QF|~CunGL%V272I_=#c4O8yQd>(;M{i=SBOk#cP{+%O8=Jjuf;><^jvni@PokT z`$$?iAGL@wb~yO-yq5dd++6zEo(un~(vN!Mm+D`I3z+*?Z-i3$wZW(P9980{y1u@O zyk%b67JS-|)SJ_ilms_f>1VxSC#qiz|B7e?B{xX-x|LP=m> z75Th4*wXr{H*_Ja@n0?Qa~d~Zk+)pORp8s_kZ*5Q`pH+CFY19$ z`ms+B=N*_K>OO{MDNU$C9eVcPlzeNFEkAQs37i`jp-DNyI+%W&9EsdE|KnE&VgkzMJM9o;l#*@I8!t(o=&0b zVKsD1d0Y&x)^>2Ka}YAow{4GgzT6}y@G|5Dn=o6GIqWtb^ zQz~wxb%;u$OT$A`13XNo*hK!Zim8qH?q#~(yk^r-+%${(o70UD=!i?q@pPF5T#{0g z{ct~Quz#2-Hj9j_6K-b3)!MO9d;+l%ak0HXB_^065~5AfbRBM56sH|=%{OjP4r<^X zfF@~?CO=bH=NP{PT-@CPw+<)LfB{Yzzf)@G1`)MEFjJ-`bp_+|BFdrC!JD;=pfIR&NC68uf892IHY~yNA1>6E63SkH81M z5Xp2AwE5ON-7Jzj%Vctj}a@$RCDJ*U6bLzfVL`YFcuPM{-PRa%@aGZlMkgHIZ@B zwaQs8`gKW&>28XM!+p~caHecso9D|v!fM(O{zX^4LvL}hkx{0olr-*AnU%tC>;AQD zsWD0B9&(tRYhqGN!vA$X>ZAJqFIP`Tg{Bx?=~8A=Hy_+RG%6x4BH82@O-mV#pHkuE z)+r`6EHzTC6$^V&_o}PsSBIyRpgm& zg^ulEjHh0*-y)rLbybVy_H8oZ>$FUx{L-`%a?oR#UubGA^T%+A9NFzB+pX0(kC*+{ 
zY9|w)urj7HADD7fS5s8yuBMI=v2n_1CTFw%ntOI~^T8*3$r0RZU_;5=u=%qm#-$iz zvraK3B`PAJiEiB^DFpu7Jqph)5CYPSHCMgScX`d-z2M#JU;-Vx4}*kWTT4&)lPV zoDWTn>E;(@%sgR4^_T>5Yg24Tzg8*9SRaJV6lg6X*)Pmw#Ng=>acMEeNTZv{MEAN= zNQv|dbuqy@oJh=+nj8_EnnEld!+?yTJ~Xq?RM)7)wA8Fhpu>loHUy`SALn6#5=2|}Gs#`G{#sH-UigOL_VgZ_%)xJ4x<^$bhI zBa-5?co>(v5Ce+ z#6#~iu~0Wt4w|#P*F6^xaW=K!8aBlu`7rwfHx!BrS*%cvLA8wh8&$X^o+y^K4X6tL z<3TxlAh6IXwk{~vnB6g*Vd7XEOD`j(=b%hwqGmhIt}C>F!r&G#K(3=O?3cm*Fg)t- zmj^jnx?xr&DLbr%on)1votW&zC9|E>!e$fALq%vxY;Z8L#5U1!!8z=Msjf*8vB`eX zi095Qrf4XKn?de&sTd_a2UPD$1L4V{;&ZToUwzZ|{TCiI>| z=l{*TRHM&s7ZzGnQkq}Ak~x|a9H`$tIVLGCA}Yo&^V;PQ?#$sZN_AX+cn1%CrcOP2 zn352*6BCe*ruHJvO0J^VHb56gU+qmuR1~NJ?pxfWmiWH4>PkO12%x7)h&+1~p@nlM|E8!RMDd zngdVj&xo0`=c&nuGP!JnV8!4zj{iqT$r34oVa0rx&20?x9GWLUC()*mn3S~m7+9E( zkIBWsksdL-dAiftC?YwAOkM3gG$kUGXbas9`Q^O0G^+8O(elgh%r8 zOK2*Rj~q6s|2gpH{9w+`Dac*vNYXhm&KxyE=r0wF@oK8}>-^$O=NDSel$!j=lk*~G zT`9g-{M`5HC`3Kru`*}V&Pq1@>r|keiB)AI1jWT*UCpkYN%2`1k(5sxoOy%dXEWq& zIiJjOG6BWha`PNFXbBHsq*?vcilyV58)r5Lep*zz_Rlo;oF$c$EN#brc*PK#fRw{G z)_l-atbrQ`ugAf{FFvAEj1LkbMX$d?64wF}(d?=4|2he?0@X#xO&?~dHcl?|e!x4q z6Cz1Ms=DO{hEi%Cos*k8IV`S&)uB|HO?~15rh)htnT3vn-YXW zY)th3O$!zSn{|>oq0SmjjK=ifFcTg%%bN$qnh;y)kZsQ8ooYsl2ZkX6TApmaz;z&c3{m`zRLkRHC&0O4LQ(7PaP#zVddvuIQNyV8N8w6FCfyOCMrRZ3C6HQKFM8C zb)NO<)Szo&lVnd>-e};MiC`R;)5i6GJzZtZCUQ_Hy%)>ztyq?*sYFaVY}KMzXN*ru zRbJij;1%qPQ(~yeXc@!kz>Fl&l1@ISizFLe2+9?;f#q<{+ITT>Pg4BR4skbN1)@e``AM?*IS* literal 0 HcmV?d00001 diff --git a/code/recording/aruco_test.cpp b/code/recording/aruco_test.cpp new file mode 100644 index 0000000..4e63e52 --- /dev/null +++ b/code/recording/aruco_test.cpp @@ -0,0 +1,242 @@ +/***************************************************************************************** +Copyright 2011 Rafael Muñoz Salinas. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, this list of + conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list + of conditions and the following disclaimer in the documentation and/or other materials + provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY Rafael Muñoz Salinas ''AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Rafael Muñoz Salinas OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those of the +authors and should not be interpreted as representing official policies, either expressed +or implied, of Rafael Muñoz Salinas. 
+********************************************************************************************/ +#include +#include +#include +#include "aruco.h" +#include "cvdrawingutils.h" +#include +using namespace cv; +using namespace aruco; + +string TheInputVideo; +string TheIntrinsicFile; +float TheMarkerSize = -1; +int ThePyrDownLevel; +MarkerDetector MDetector; +VideoCapture TheVideoCapturer; +vector< Marker > TheMarkers; +Mat TheInputImage, TheInputImageCopy; +CameraParameters TheCameraParameters; +void cvTackBarEvents(int pos, void *); +bool readCameraParameters(string TheIntrinsicFile, CameraParameters &CP, Size size); + +pair< double, double > AvrgTime(0, 0); // determines the average time required for detection +double ThresParam1, ThresParam2; +int iThresParam1, iThresParam2; +int waitTime = 0; + +/************************************ + * + * + * + * + ************************************/ +bool readArguments(int argc, char **argv) { + if (argc < 2) { + cerr << "Invalid number of arguments" << endl; + cerr << "Usage: (in.avi|live[:idx_cam=0]) [intrinsics.yml] [size]" << endl; + return false; + } + TheInputVideo = argv[1]; + if (argc >= 3) + TheIntrinsicFile = argv[2]; + if (argc >= 4) + TheMarkerSize = atof(argv[3]); + + if (argc == 3) + cerr << "NOTE: You need makersize to see 3d info!!!!" 
<< endl; + return true; +} + +int findParam(std::string param, int argc, char *argv[]) { + for (int i = 0; i < argc; i++) + if (string(argv[i]) == param) + return i; + + return -1; +} +/************************************ + * + * + * + * + ************************************/ +int main(int argc, char **argv) { + try { + if (readArguments(argc, argv) == false) { + return 0; + } + // parse arguments + + // read from camera or from file + if (TheInputVideo.find("live") != string::npos) { + int vIdx = 0; + // check if the :idx is here + char cad[100]; + if (TheInputVideo.find(":") != string::npos) { + std::replace(TheInputVideo.begin(), TheInputVideo.end(), ':', ' '); + sscanf(TheInputVideo.c_str(), "%s %d", cad, &vIdx); + } + cout << "Opening camera index " << vIdx << endl; + TheVideoCapturer.open(vIdx); + waitTime = 10; + } else + TheVideoCapturer.open(TheInputVideo); + // check video is open + if (!TheVideoCapturer.isOpened()) { + cerr << "Could not open video" << endl; + return -1; + } + bool isVideoFile = false; + if (TheInputVideo.find(".avi") != std::string::npos || TheInputVideo.find("live") != string::npos) + isVideoFile = true; + // read first image to get the dimensions + TheVideoCapturer >> TheInputImage; + + // read camera parameters if passed + if (TheIntrinsicFile != "") { + TheCameraParameters.readFromXMLFile(TheIntrinsicFile); + TheCameraParameters.resize(TheInputImage.size()); + } + // Configure other parameters + if (ThePyrDownLevel > 0) + MDetector.pyrDown(ThePyrDownLevel); + + + // Create gui + + // cv::namedWindow("thres", 1); + // cv::namedWindow("in", 1); + + MDetector.setThresholdParams(7, 7); + MDetector.setThresholdParamRange(2, 0); + // MDetector.enableLockedCornersMethod(true); + // MDetector.setCornerRefinementMethod ( MarkerDetector::SUBPIX ); + MDetector.getThresholdParams(ThresParam1, ThresParam2); + iThresParam1 = ThresParam1; + iThresParam2 = ThresParam2; + // cv::createTrackbar("ThresParam1", "in", &iThresParam1, 25, 
cvTackBarEvents); + // cv::createTrackbar("ThresParam2", "in", &iThresParam2, 13, cvTackBarEvents); + + char key = 0; + int index = 0; + // capture until press ESC or until the end of the video + TheVideoCapturer.retrieve(TheInputImage); + + do { + + // copy image + + index++; // number of images captured + double tick = (double)getTickCount(); // for checking the speed + // Detection of markers in the image passed + MDetector.detect(TheInputImage, TheMarkers, TheCameraParameters, TheMarkerSize); + // chekc the speed by calculating the mean speed of all iterations + AvrgTime.first += ((double)getTickCount() - tick) / getTickFrequency(); + AvrgTime.second++; + cout << "\rTime detection=" << 1000 * AvrgTime.first / AvrgTime.second << " milliseconds nmarkers=" << TheMarkers.size() << std::flush; + + // print marker info and draw the markers in image + TheInputImage.copyTo(TheInputImageCopy); + + for (unsigned int i = 0; i < TheMarkers.size(); i++) { + cout << endl << TheMarkers[i]; + TheMarkers[i].draw(TheInputImageCopy, Scalar(0, 0, 255), 1); + } + if (TheMarkers.size() != 0) + cout << endl; + // print other rectangles that contains no valid markers + /** for (unsigned int i=0;i Collecting images for participant', p + d1 = os.path.join(ROOT, p) + d1 = os.path.join(d1, os.listdir(d1)[0]) # ../p_i/../ + for d2 in os.listdir(d1): + path = os.path.join(d1, d2) + print '> Processing', path + processPath(path, p, d2, cameraMatrix, distCoeffs) + processed+=1 + print '> Processed %s participants.' 
% processed + +def processPath(path = None, participant = None, experiment = None, cameraMatrix = None, distCoeffs = None): + if not path: + path = sys.argv[1] + raw_images_dir = os.path.join(DEST, 'ImagesRaw') + raw_images_dir = os.path.join(raw_images_dir, participant) + raw_images_dir = os.path.join(raw_images_dir, experiment) + undist_images_dir = os.path.join(DEST, 'ImagesUndist') + undist_images_dir = os.path.join(undist_images_dir, participant) + undist_images_dir = os.path.join(undist_images_dir, experiment) + if not os.path.exists(raw_images_dir): + os.makedirs(raw_images_dir) + else: + print '> Already processed.' + return + if not os.path.exists(undist_images_dir): + os.makedirs(undist_images_dir) + else: + print '> Already processed.' + return + # else: + # print '> Removing old images...' + # shutil.rmtree(raw_images_dir) + # return + # os.makedirs(raw_images_dir) + + p_frames = np.load(os.path.join(path, 'p_frames.npy')) + frames = sorted(reduce(lambda l1,l2: list(l1)+list(l2), p_frames)) + + cap = cv2.VideoCapture(os.path.join(path, 'eye0.mp4')) + fc_eye = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) + fps_eye = int(cap.get(cv2.cv.CV_CAP_PROP_FPS)) + + eye_frame = 0 + world_frame = 0 + status, img = cap.read() # extract the first frame + while status: + try: + if eye_frame == frames[world_frame]: + save_dir = os.path.join(raw_images_dir, 'img_%s.png' %(frames[world_frame])) + cv2.imwrite(save_dir, img) + + save_dir = os.path.join(undist_images_dir, 'img_%s.png' %(frames[world_frame])) + undistImg = cv2.undistort(img, cameraMatrix, distCoeffs) + cv2.imwrite(save_dir, undistImg) + world_frame+=1 + except: + break + eye_frame+=1 + status, img = cap.read() + cap.release() + # print '> Removing world video...' + # os.remove(world_video) + print "> Processed %d frames." 
% (world_frame) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/code/recording/data/.gitignore b/code/recording/data/.gitignore new file mode 100644 index 0000000..d1264d0 --- /dev/null +++ b/code/recording/data/.gitignore @@ -0,0 +1,3 @@ +*.jpg +*.JPG +*.mkv diff --git a/code/recording/data/display_setting.txt b/code/recording/data/display_setting.txt new file mode 100644 index 0000000..077d5bd --- /dev/null +++ b/code/recording/data/display_setting.txt @@ -0,0 +1,238 @@ +name of display: :0 +version number: 11.0 +vendor string: The X.Org Foundation +vendor release number: 11600000 +X.Org version: 1.16.0 +maximum request size: 16777212 bytes +motion buffer size: 256 +bitmap unit, bit order, padding: 32, LSBFirst, 32 +image byte order: LSBFirst +number of supported pixmap formats: 7 +supported pixmap formats: + depth 1, bits_per_pixel 1, scanline_pad 32 + depth 4, bits_per_pixel 8, scanline_pad 32 + depth 8, bits_per_pixel 8, scanline_pad 32 + depth 15, bits_per_pixel 16, scanline_pad 32 + depth 16, bits_per_pixel 16, scanline_pad 32 + depth 24, bits_per_pixel 32, scanline_pad 32 + depth 32, bits_per_pixel 32, scanline_pad 32 +keycode range: minimum 8, maximum 255 +focus: window 0x240000b, revert to Parent +number of extensions: 29 + BIG-REQUESTS + Composite + DAMAGE + DOUBLE-BUFFER + DPMS + DRI2 + DRI3 + GLX + Generic Event Extension + MIT-SCREEN-SAVER + MIT-SHM + Present + RANDR + RECORD + RENDER + SECURITY + SGI-GLX + SHAPE + SYNC + X-Resource + XC-MISC + XFIXES + XFree86-DGA + XFree86-VidModeExtension + XINERAMA + XInputExtension + XKEYBOARD + XTEST + XVideo +default screen number: 0 +number of screens: 1 + +screen #0: + dimensions: 3286x1080 pixels (869x286 millimeters) + resolution: 96x96 dots per inch + depths (7): 24, 1, 4, 8, 15, 16, 32 + root window id: 0xbd + depth of root window: 24 planes + number of colormaps: minimum 1, maximum 1 + default colormap: 0x42 + default number of colormap cells: 256 + preallocated pixels: 
black 0, white 16777215 + options: backing-store WHEN MAPPED, save-unders NO + largest cursor: 256x256 + current input event mask: 0xda4033 + KeyPressMask KeyReleaseMask EnterWindowMask + LeaveWindowMask KeymapStateMask StructureNotifyMask + SubstructureNotifyMask SubstructureRedirectMask PropertyChangeMask + ColormapChangeMask + number of visuals: 20 + default visual id: 0x40 + visual: + visual id: 0x40 + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0x41 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xab + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xac + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xad + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xae + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xaf + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb0 + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue 
masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb1 + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb2 + class: TrueColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb3 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb4 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb5 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb6 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb7 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb8 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xb9 + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 
bits + visual: + visual id: 0xba + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0xbb + class: DirectColor + depth: 24 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + visual: + visual id: 0x7e + class: TrueColor + depth: 32 planes + available colormap entries: 256 per subfield + red, green, blue masks: 0xff0000, 0xff00, 0xff + significant bits in color specification: 8 bits + + +Screen 0: minimum 8 x 8, current 3286 x 1080, maximum 32767 x 32767 +LVDS1 connected primary 1366x768+0+0 (normal left inverted right x axis y axis) 344mm x 194mm + 1366x768 60.0*+ + 1360x768 59.8 60.0 + 1024x768 60.0 + 800x600 60.3 56.2 + 640x480 59.9 +VGA1 connected 1920x1080+1366+0 (normal left inverted right x axis y axis) 533mm x 300mm + 1920x1080 60.0*+ + 1600x1200 60.0 + 1280x1024 60.0 + 1152x864 75.0 + 1024x768 60.0 + 800x600 60.3 + 640x480 60.0 59.9 +HDMI1 disconnected (normal left inverted right x axis y axis) +DP1 disconnected (normal left inverted right x axis y axis) +VIRTUAL1 disconnected (normal left inverted right x axis y axis) + + + +256 pixels for the marker, 1920px is the width of screen +and as I measurred it is 121cm in width +==> 256./1920 * 121 ~ 16cm is the width of our marker (also measured it, seems legit) \ No newline at end of file diff --git a/code/recording/data/eye_camera/2015_10_03/000/info.csv b/code/recording/data/eye_camera/2015_10_03/000/info.csv new file mode 100644 index 0000000..49193c3 --- /dev/null +++ b/code/recording/data/eye_camera/2015_10_03/000/info.csv @@ -0,0 +1,14 @@ +Recording Name 2015_10_03 +Start Date 03.10.2015 +Start Time 17:48:58 +Duration Time 00:03:00 +Eye Mode monocular +Duration Time 00:03:00 +World Camera Frames 4309 +World Camera Resolution 720x1280 +Capture 
Software Version 0.5.7 +User mmbrian +Platform Linux +Machine Brian +Release 3.16.0-49-generic +Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015 diff --git a/code/recording/data/eye_camera/2015_10_03/000/pupil_data b/code/recording/data/eye_camera/2015_10_03/000/pupil_data new file mode 100644 index 0000000000000000000000000000000000000000..c8be8d7a8d68b4ea19b07e62c06ff0ba89403b03 GIT binary patch literal 404245 zcmb^4dtA=<`v-7Ik{p^PX)I|*a$1s5DoK*lIx!?kDxtbnh{SxA=9Dxehma)4#EdW_ zX%3N)gps5f#f+q(-|PA~Z1(%}etrM=K6>c({JihWeZN2N_jO(G&*s;U4%6u5*(|^} z%HJn6WJXYUP{{NdVfDSkG&@CyHR$BoXnKfhn*1hA%h|5Tvd1s=>eX|$yVHD7bjQNz z*szA4jr~HVPY#;oKi$tiOxtgI#I(@a-TXpS{@tcUObrk6RjGVuH=g0=JJna!IIPjz z-#4BVK0DOEahQ&0{mIs@^;|XL+`<}na@F+Iuo*sl_>emMPx(LBdf^j28{Hde(O!4Y zJ-b-9FkR0E6Mbj+hc)pIYZ@Ka45OPAuzlCXDUh;|bLD)bgDkFxB7L z-fGqg<+ly}{HH4~4?bb@MBa-1sZxa_Kzx#m4hBf!p zn&vwt$2t6v0lj>HJwB{=nHjz(=P*-r~Qa_UZem^?SCT-iPXDv)A@hdnli(_7W=7=&&z73e}+1dpBWxABC#@hfodG|8au+_Cu(? zHr*f}CTDy9A4j#r5C0C8$@@^*Md;cqFM5Z^hIRN5s`uCXCsf0n`)yKQ?KWkP{I%Gy zF9SO`+rPW?$D#W2zoBw@_2%|}LiO(IA3`NBL8v+^p*sG>j-ltz=i>|bZQg_atNw@$ z>-0WUxxV2^+S|(HQ)MQh>Kq+r{!yr$e*IGh`cbIN{s~oiuL<(o52138{38^9fG^-u zUHRePq3ZNLRQ84w?`4Fo2*ZU__Lo2VmeYM+Y1Nk8~tZQHwgzD|3KMqyb z|AuNv_P&V!gzDYZKZHtNf>3o+LUrM*sQbzn@akW$a4lS{`}dzg{sGgP<><5@M~vt8lCVQ;^Hw(mnV{`!J1Cfrm))n7to7aeB*QK+20o+$(UC{+DFgv!Ow^cVT< zhfh`DzDsNH2hR2(S<9^P!@omi`#x0m-Cwm+Ui1!+4RiPqs`uCXCscOJm;R!>de~XZ zJNO|sY(Sucv;DhEe;ld-{|(jfrgy{s6RLMt{}3v92}0$lgsO7njvyse4C@Cd9=hJ-Yq7%vDYjszK3V&L4$pNdL9-F}{yN<@6y`p09e@%Wpq~>RY!d-{KGO zP8QFcfFJ%Hs)6rAWtn=eo${i0cx>3<521R0y?;XG{N?tyuXb+qmJD+V9E?xZ+e?2O zDwqF;%Dv5@=l==SyQ_Z)mAnL@8lr?MKljynWm+$3kY0%8ZfuzA`%w9Bo_X@6vl6PI z5~^X*VZ%QPl~eV3ysM8xHS|NMz9_!8Nq+kwR1??zEH8JqpK>5ue)xB&T;GSP*H^9E zDKC15$A*pg5UTgr`zKTr=e#5RlHQVGBLhb`+rPW?$Dtbe-%xq%dvWeRp?Y`q522Em zAXHx~p*lA#@$IyJui+ux2?v|RhPk~DRg=s{Q%>wDmrs?ugvuj2%=4pA4cfS46W-QG zpDOncq55V>fW7?oL#TZ9=DeNOCloE~f#3W)RBrD>)xY&4`CHER@9@|#uMeSmf4zS~ z<;KL>dKfm1}{AcgbH^s9x{wc<)?QHgSfHO{Q 
zCv|UsINoYIX-9(Lj91%9|8*W5AGMtw1~G6ZsO{`(e;DG|?S=7#tL>ESZ3`ztZKwRmVmOg%I~94dgPy6j^M^8T%u?HV zq|6($)pn}78RLCLsqH+OEC23u)OMaO?ha?J+Rn426gbgpJJol4!iC5dvo7)`DcWOH=;$Fj9qPC--G#k!RwH<@f8aT_; zb_}Z)!C9`hV^sSD&I+}iwu5CJOHkV}_NaifQftXA9UVv>sS{Ghhet!pKmHEKIO`mTesR&B>(u_v5lwH?c(Ksf8v zcC0qbyt-a($6A>;HmL3N{o4TJ*{HT-t33)%irS8yM-iM&YC8_TU%=U{w&OT$JDe?Q zJ5ERB{+_C~X4>ZQlgWHnpAM-CDp&SKArc_W+#jYCCR=<$IB# zw&RiH4rhnjj@M?X#qCtv8C?|*XP4T}n7_^8>{i?H)@}o5kJ^rpvfkLMw&Uw7--}GO z9lvRt;OtY|@jucEPL|qEK%OI<{c1Zwmu25{Ky7ELiR|BhRNI-}^*Nk_YCEBQ{(|$9 z+Ky`RbU269cEXi;jpJ%N3#b2x@%*B;v*?HyoD*t0amu`LQf(*xN;`}vM{Q@Ri8NZAQrlVHtp}W3 zwVi}Mui%_k+ewT&4JS`+C+U0XFZ@+)XZ5D9;ha(1SyS~XoU>{>$+b=4oKxG`(C`+V z^J+UO9=qV=tL)~8f+eyoo`)PsNPI}%rIG5CRGA_${wNPzmmx<(m zF01YA>ADBb6}6qrK62l=s_x4TWUKw)1;37o7zrpw&aa(tL@~SmUZ|YwVgAU zW&KmCwsYRZ9nM{~oeN!M-&CfyQ_xrPE%(%R3gZUDDOcOMvg!{w_tkcaHc5W0LT#tG zN)yiSYC9#h(!cRQZRb|QIyir*?cDZQ1?QpKPU!?$|2$ILDVt^wr&4XFJo_G;Dz%-8 zy!LP&tL;3vEc@IiYCDfitl<2qwo}#hAe^UaJ5Tx?hVz%&&eJ$qXFgNgd6p#WvFB<# z)hSXJu2$QrQTo1KsO{9&%J-s1ZRfRie>gAIcIrGCv0n{f9 zs!tZOMyeJJ%Jy6XP@giW0Ug?a(q~YEV$i0pYRRC6{O~I%0|qtxPb3djtr(R1P}I9s zh74-dq&uKmGpMo0zX4^$pe7ii(4+c{K~4I~6I2@p6}Uv^_O=Xa>Q?NaRi87c&^Ibj z#tbTAbZ1cQ7}V@Alq6Ip3@ZBUT~O^A)VCcBL76hBSlbt%zF<(_{jeHT2L`qL&vH;- zGN@IJ<(Ar!LH#gkF{n-qYTeH$1**&#RLZq6pgJ?CtrjRWsLUDE_9Y`hbzxAuw%UX0 z%AhjeAS0^k#-M&2jU=tAJA*o$H3U=-26gQ0QBYqosFNM!>ukZGPRAgzpz6t>&i=3k zlqG|@@YD}fF9vnlH5-%_gDOh-8B}iuRb06elr@7Yd3hOB9|m=+VOLOn8Psie*?IM2 zP^A;(J7~k8%BG^NLS@UK%CiGN^=D8Or%_x`*)gaGmp=t%&!8T)L+M22z@VzS;B>ia z0E2qc#}$+#gL)dL1!^FJdbUaykWLJ$dJ{UaRf8B*O{Gl8&J3#drA&Q;8Pw~B9YDD- zs5Kg`SGGP#?F$~Id>KagE8B_;lk{QRKI-Z_~QF${cGwB3Sjb~6@+UtYz zVNl(=NNr*QgX+=80+cU0Ts%i++xw{qY7hC9!YW^R52*86dO=87}V%W*#d+!s4*`S zKt(VpZ>`@zMKUNKWs;f6pnNAtrf3#}@|!CEg0mTvf3|E)q8L=b=`*0_FsLA9l9|h( zrnXDRsG=Fv^e!7f&0|oZ)-oZ-Fep`=ossQ)pjXf64)FJe$J9=$=uGN=U;bV0>2sD)FzgIdg>79Fky70;mJPWyuTjzPs2 zZUnW2K`m{!0Mt?jwY-abah5Tt1Z9$0&Y%+GWLdj{K_#uS0+qm^R;NhfcO`>bQ`rkt zB7;hPi341!RSar_)=p4K3@XK4M)f^|+U)ZH)M^HmI#oJSeqc~(hyMh%hC!vDb_BJS 
zL1ieDOfrMo)h+^~TF0RFDBGp=3@X$5F-En4L1o2efZE8Q4k(jM3WGYBG9IJa#GnpU zHV3ttL1n+(4QdO6I;M39R4Riy?!E%lRt9y#XFsSk29+}vCq`A<7*y_I>C8-LPP=&E+pmsB;E35i~+QXoVHp-~> zGN|H8N!@2MsFIh``>~Hf-O@@1mBpZLd)R^6&!9?uZh<<$pvr=!pztGuDnFbD>L7!v zIQ=E4pBU7G!ug;MF{nrF`hz;mpsKoffy!o3PppeT9br&UW0!(D%AlUD3IuhGK~<+T z2K6(8s(JJj)Nuw?`*I?vUl`PDtrSov7*w6IT{=mi`fK<|Pf-qo(hQcu`Y8sbrL1If z8I<;E*~Fb@P&$Q+K;<#0Chgoo{mP)4bqN7=hC%6BZwGajLA8jz25Na zm|S5{9Z$Ceb(KMx6~==qVo+U-?}ECcGb(LZHS0m>xv7lR5alvq7uP*ata%yR}cy~{9+s+vKCTKj`~!Jt&J@+5>B1{Izt z+ohKbDsp2ss9FX!>(Ni3{$@~7HS%bhR}3mzYaFQ83@XNbB&atGYJpE5P<0Gyp)$$T ztFQj^ulg@KJQt(VU{G8C3F1Q&74LYJ-*>ac#n&Qrx?NYRaHC`$$hw zGX|9!ydG3@295 zWyQ)PL<|_zfy7i$tr*n7jcY*}GN?n!B-5HfW!Ffa!H7W}(~^S1XAJ7NduLE>7}N=$ zHlW%vsGQ)-pgw0%xysokV+NI%n~PDkV^C)K8-M{ zFBnu|tT(6*4C+dv#Og~1RkTrVUL6@!@uN^sofuR}jZ{p`7}PDT5uiFVsN3$JfHG%L zr9NMP>cXJPf+In7Wl-gZr6Z*qgQ_@P4^(#s_280hmwGU$N5;!ReZ`=vx)g)5U{Fu2 zFM#UFpq|DifU;yz&l07ls278(-YBE8Vo)`Yq>j^@LDklr24&5lUTaCL`Y@eQTb8@?uc7S_?ppVo-K& z(wRA$K{3s4gnlv}J8C|?HUu~H_Pi44kXV?$7W3~KbFuRu*=P-AM+K>0H$Z> zx9Ok)7?kh$a!`Q`$}iX&R1kymKimq`6b2QLI}cPag9^GN-O*DS)Kufypr$dX>E;JP zO=nP{y`^Czgh8nmNfjWJL4~h;0V<3^MQ)VBx{5)~dL-G884N0_Mlwa=3@W;Ttgj*% zRE%33s7MC2VEnJ3W-_RS!BXy@#h?})k~1c=8C0Ay$wV=z_)D@Dox`A(8asoU%b=E< zZvYj|pc0gv%sd8_7`q0eieXSmE3bl@&!ARsbO5!0L9KZ-0Mxe(D!FDdsD%t_gEGnd zk3ppY&n$m%yM7Jxs)?Rx+sU8kvw28Pu@` z^3bYP4C=UBZ%|1L>cse4puT5NIl;|9t!7ZUht7lgfkEZvjs&%aL7llIRm-&u>b$Wd zsAL9p!F&d&bquPYcMYiZ460DsE^S~?S5{_XR2vyo(Z)`oQW#Y6BSTP|7*vU}lG)6l zZYkBJEez_mn=jsADuXH=FZrvj461C(98hTts{D}j$!ud#6}jC&r8B4p1u`LTXHbuf zTY}19P*vtLLG55rPkKwA%uWXNbfA2lcQL4EEAN8Z&7i6`N_X@g234~o3e;W(Ra+zf zf|(5Jbpt6V>|;=Mdaprc5h#0&@moOcXHc3`u7NthptNR6Ui3!>rF}~7&IcKk&ZRz} zeqvC%w~{~|Vo=S@{XiXNPMmzmF?0|24%2O7Td=dl;MtJpnhgh zMm67nI?kZlHW&r!7Y1dlcL&r724yln8q`S!WjaNkQH>Siv<%wHK)kKQLhoncTGj#ogPWl)wYQ$U?#P*xl3gF4TktanI% zdp?8eTcZMXfkD|ekZt%y24$z02C9HTIgFS4;3Wp-s7x}249aO{AVzhWK{=oL5!4k1 z<)Tb7R~eM+E%}HRF{t6@T|ix9P$PTq0#(eQ+#G)bb)7+ZtdtaM34`+5@Bq{e1~q!e 
z5>Pi8)R-D+T)f4gyc@Iz^&5lo(UaDQ+YHKg{8&(T7?j@>nfgi@l>bb*dEI4D0jD;B zDq~PVmxh44$Do3L>kX=$K}|O|26dl7h4z*@P6dNfIm$NtcLo){Qtmhp7*wRvjQ0nF znzdszM)i4gD;Nv1V}y4Ao6Z_tQA-PQ{M^%;XI9WPn6HVmq4${J8@ z8C3a9S!{pKpejyDiq)7wJt#O2svU!R_?s3e69!dfE)|pZ4C+bm_Ml7|)KkY$P+u^p zXDcQ9(Sbo#Z&(WIO9oYwA?5y#4663U4N#pJ)N4)I(VH=-Iz8DgbtX^_8sp{3c;*aB zQ)$NQ!l1MwXJb@d8I<;^AW+>Hlup5~pt>_C-CMHi>%pL!b(UiBR}4z8w`}4p7*q?# z08l*{l>SPom{>9>gAGzK>BXQ7cXS42#h{E{$lbX&gKDdJ36wR1GS)K%)rUcuj5h?; zmqD3Mk(^9F2Gt>QDkvKU)lu0l*)k}zf{hqee+FfK^LJ2o462*5lCft{J*?!G>cF5Z z90!0Jz@RLZm5d{UvfAK z%FR)Zt9;F%JXSUU<;I}Alu5>&L5xP{*jWu8pEIhPDw|~SOyiOOfusbRPaq%Gk7zo>E^OA8PA|Xd)tBX zVNfbZTTl}iRQSpfpnMrrUkva}~s(;!a5&X9|OgFOW@KFoRlh^BSnB3~G7j0#MT!RD#uE zP}3PyqGMN3Aq*-h;WtpB3~Kd;KA^%F)EZ?aqhe6WN;BRJ2DL#`)>q*SDy6yP`XU(A z=J8U;iDXczO2_X^29>5vGP4*|`ldb+di2n2DS61Y%S+7s6EOgGnYYSTFIfm zXa<$#s0(TygF28<0xE_<9o!&~#hA~a4rOcwwSYloznBQ>TLyJZQ+huZGN|LqcIkf% z>V)?ejA{{s%9+v;R4jwajqDF9jzQ(+yaKhDL7gcG2Nlnt&fSy(=ywe2Lgyo(mN2LS zE9paC%Ag9BNoE;?x{`1oqgu|OiZ(cbTEU=-GkifMFsPCj!$7TMP`5P0KqWG$+s$Q# zyoy1UdjA3{i9wZ3X#whc22~!}0n};+Rgv=q)DH~mL4h&O=b!7?h4O$)qzV-J5b;Wjljv)>+E3 z84OC#O173e7*vY^*Fo)MQ2Gf{EZ)VS3^sfQYBz&2%y0y?hd~*=FbB1lLABMC?&wSg zWvnL?@;(M-GQK&eECyv7Bp=cJ45~w?~RZ%Y5wPYkMC zX9rM+7*r1{IaY9(L0LG;Vmq5bStg`{I>Ml=lxWp5bQ(Wvdwo z>Ntb4(;E-!7Y60vy&BXB2IUx(2I?e(a*DVMDu+QipBfD66oYapFawp#pj>Y*1$CN1 z4evY`R33vGY2^XxR|e(gD5E;VpgfdG<}8EqTK^oQI>(?!Z+`{qJcAlj{Tft0gYs5N z*cTX-PjkujU1U(c-qD~67?fX745&*C%0EI{1Pd8dK#n}O_A-MCDv(Ot6$TZoOfpv) z)O52q7*!F23bmRC>KcPm4HyNgm_dcFoDJ$agNocB>-G``HEX+U!*4LCsA}nFxXGZR zH6?#_i$TRSm)pc|3~GV59FV-tpcV$n-T4lKS`={=R4Ids%XtRsE`y4{SPQC*K`psC z9n?JrwY+nEP~{9N!D=X|`wS{^z$s7_3@RyMJE-3o)av!p`|*H5t=V1y>JJ8$TzwDJ zLk6`$(*o2Z29?r008}M|+U&gmR273t4O$H9F@s8rkk*JN3@SZmJ*Yn!RK~?>P)`}u z&YLPwe=(>%W-_X03@X#g8`N_Kl{G*vKTWFsMV@rN8|pgUYVH z2&$Gr9n)+C>Td>hym<(yR}AWew-2b-3@Rr`9##5=LFGoss;`bg)c4UFm&234ixWLhw&C(0!ADT8`CKyF_84C+~eGpLpfs#@7D88E1t?WZxS zRt&1PIs=p;gL++G9`(?gLDe-M1Ima%Icj(>0`(b#(hQPHTpI?Z6|o#tTLz`AOfsJ{ 
zD4mOvjy7gcx;I{eYR90OnH>jZ!l3l5J_psFLA4k#9F!@8(pM&#FBp`;dOwV+1A{W$ zei+o349ci_A*hZFs%?Ggj_$;ujGIgL!;C?hcuQ|>X9i^&9|0hRKCN`A3Yy zsD?18fE?KbxH709<%r}^1{Hi`Ge$LxK}|PX32Hcl3hh+}Y6OE)4QK;uB!dc17!B%c z1{JAHGHwiN*7i*pl{?9t4q}U7K~$1%gw$A<;|cHdPzESJcCLc&>WNxgGx%s z2Q`5~tyVTnz6@$jMsJL2B7;h7njm_h9^3kNloL1p%m&E+%(l{LT+)N}@Q zAfW-M5C(N{T`j0k26brrG*Dp-D!W?JnJNZ#to|fWGZ@tIW)iD#26bZGPEZjHDktzV zs7MBt8<7rbCWFe$`2^G~26g6QD5%*C>fDWcprRPm1+yWb<}j#&UavsSWl)6!WYrhV zpspzOrFjghXx*O}RSbhF-Y%_+^BGi0wK1p#4C+>W`Qm)bpl&NUnS~6hbew!~{>Pxo zg0w&_Vo>D~R-j@TRK>|hpyC+RgNw4hTFjsxDl3_I232JifKh$Npq}(v1!@U{dg|B- z)KUiZY()jAWelo%otzF|&Y)_x%OQ*v463$T9`=yHpkCLH0JV}q)iuikl}MllYK)V% zsZ|V0Gf)b%NeoIW!WGo_3`+atZBVNjl+MLjpnhObx;Lbuat(uOW@Za&ErZgt`UX@o zgK9Bg0H}2gN`HkEX4f+)17(ugz@QAb55%Z8GAN_!#-LIdR9nqVP@5Q(aWl#FZDvp= z%1UMngE9@02|1NPb%<~TwUt42JXH@=8iO*sAnkbD7?k;qi=fgOR5vr(J8x%DJ*?!5 zlfj@Y9HeTwgF#s;lgv&AWuU(LD@Ac2bIa7 z9K7{G?PE}mfl@KaVo*-ue}UT1pqz8Q1a*Kxxm>6Q^&^9Fy&+Z0gA8i8nQTmcVo)P{ zN$cVv2IZ#o{2pdd9xJ}XsInQ9*E;#)9AQwSx3>g!ltGQDmPzIqgYvE~y&pd_D4%A2 zppG*r-*K`r`GrCG1xkzA2?pgK(E!v*1{I)emvR_Xkg}3F#h`+3bjKUaWl+=2q$A}t zg9`065>y_8QaM}&^(%u4Ul9iC41xz@Qe4lfCmr2DLCS2~+`tS`?lO>Jo#B%h3T<$e`jc>Vdk!vUe_KP^;I;Nw4b+YE60>s1gR1{5%iT4FYle~;U z?NLhD_ZU=WFC&bqoIzzd><4w9K^<6O0jh#Q9bC5z)b9-H&~~|bJz!AT&nrRw!Jv-S zcLw#4K^M4V|V5SY~F9uc6OBxrSF{na^bWqP3)Rh&oiK}K%MeEjrdcmNI(=ULkVNfN{r3&zp zLEWk^6LKwsy4}nP)ZYxMRGDO6F{rXYdyMKegDMZt2la+QRh(P_s*XWDxUdLRJuUU0 zfHm-8$!ky=463TrEl~9t)RSIPAJk+}PaU>^YQUhLt&so5krsohUN;w1Lk3loF5S`E z4663|K~Rkt)a&{kK96<%lsUc5= zR9gmRc0o=Fea@iFOYVR&W>DQaNv5bBgX+;sPOO_SC<}+KpxQGi%N5e;WXhnd)(rvm z1%t9q-wmn*gX;VI9;h!Fl&ywTOgb_syJq!4bz)EsW92C+W(>+Puo z1%T?wphl;!2W82i#_TTz)r&!SYe>b!ib44_l_t;L49eF?vTD{0$}dpLvV9noe|RjY zz6>g0zMKW<$Do3gNydgj1(%$}sB9V3v_%K}9K(%s>Vet+5@Wa$-<1P0xcG#Gn=!MS^l>PzwVj&oG!lEee-4 zgA0R-n=eN&hA^o33(`~M%Al5%NTz5ggIZP*0cse7O0bmPkKqg|(O$OUBN$YYvXU9e zpjNMwvh3FkYRz^dP;Lw=d4C-!cLudVV;(3E29=^rGM)@-vyn{5UJNQVKq@Aq7*v`v z$&6-D>GOjzs&5!n#)Yw<#xSUzB?+L$GN|46 
zhBp8ez@YNx_W%{hpw8q=B`%0Tox5QGY6^qOzyCL=UfeL3(Wr0gUMKGvx zWhE2Ipep9eM|38GdT>E%6SEl9!xCv+oXwyr?@Q`Fia|ZGJPT?LgL-QJ3#hpa>e(=9 zjfiGY)k@-b9)qe${|2LqVNkUPWE(!8LA}-(2WkO>s%v^2)VBm`kcN_zS;(L?19C8` z|1l`7@M)kHF(~c%(zqDQpmZ(_0u{%gbW45*wU|LQt!M-)oc{+m{X(?NP?F`CI zS;=HDD375xFsdC4$}9N@sGSUIboyXWyBO4%{cS+)W>DT5UxM1hpnR0X?_LJw+gfsc znGDJ=;2fxZ49b6oqz$teRKWZ`p!PGUp!}zx4ltW`42X%r$#l^@r{3L^l&%XsKhe0hV`3%%42DR*dCa7Enm0;-& z>NJB&w4V(sk3l6VE16#z)av9N7}XgDwI*GP#b+5*vNFk>V^AA3x?xo38B|JBDRbmA zsLieA>wJMhr3Oep!$k&_Hsc3S1q>=Z<^iZn3@Ri46sST5wX;MvahDm??)xdAt}v)f zOUZs*Wl&l6;h>5b)PbP~L0w}|2a{KVDrQiJ(mw}vok3-1Rf8&FP{->11?mQaI^NU) z)J+C;qO}~sxW%Ay0>1|J8-vQ7A@{-C3@R_C7StUEbtYdbCZ!DOT#0<0?=qZvlxJY!JLhT3CP&lyy8a&u7C460_EB9Xz(i1u7*vNDQWMue-w@Sq`%XrN^MGlD$BE!l10T$wOCKFsQy+S3!Nspls_M z2BpuS?3zfyu_c3YXf3_>1`NtEzy?$+2IVxv8I&P|a*inl)tW)MnH z%DbN22Td82Pt*T_`hr3E8p*S{Ixr}|083C`GAREUGWB(2PyzEl1=Wc`1?7(dWyYX_ zugllDGlQB|{t%Qog9`0=5>yukrLrFjsw;yEA1W=x-56A4vKy%G3~JW4O`v)(sHm*- zpuS>I(e+k=vS3g#O(n_GlR+(LeFKywgIYNGHK<+;YS9cwP*w~oE=DHg-V7=}e^SWe;1~aJLbOfth6 zRCd-c7}W>{b*x?lsF4inxKii+nn9gtEh$zv29-1Udr{nRBe`wY8r!jT~G21(-~A8%AhnS&j%I8ptMv{F;Oun?HI}R z&0tVE`BE$nXHdG=*Mf>*P)*Av*B8m4^m=-On#rJA*vS@P7K74PI@V@0Csj^*~&!9S}{=%phFsP0( zwxGUcP-f>PnYoZbnO~Po(f=4!*Ya3Uix^aoo?nBCWl$D&()$s|pe&W`(qaZ>rA#vM z49YsK3~%r|2Guu9Dsf8~lx@A1pq4TyyCz3KEn`p)hBcs;GbqQ&OF^w*P)@32pb{9A zb4)8xD;bo_`QJb#GAP&UlR>RwP{Wj!OcH|{+0zE2`kq0#*-4&ZHG}dPY7FWJ2IaN3 z64V+7H9GAYsI?4gOx6%k$qdT7o~*CdF({uV%RsGXP`-xyKy6@9ev_rsX(NO3S4qVr zg+T?xdijn}OQLpq7>lb%;T&uai8(VFs0=tYoqo)Mmp-jOqx3N}cQi>L`Os zQ%P6YF$R@B?+vJ*8C1skI#9Y2}56u>MVoGnJoPb z=NMG3N*3Ga8C2dp*@ovcs59sDL0w=_=ZepPy2zmN%UwYgFsOo_TA(g5s6snQW)?E2 zD?_C=ahX9Ct(82(6$VwDCdUe{GN_WQQ=p0%)UA3_0KLYbZtFe(Rm`AD4edc)XHaF6 zhk+_#Q01z5pl&dzih1RrZZfC`=cQ-+7K3_NoCWGP231+!8PshC^~B;Cs5=bmshv!H zr3~tsYY3>j461sqY)r}+R887?Q1=*AZB{!_ zL1|8w6zc;9rKS1-)E^8=JH`jpLk6XDzCWl(3`)0n52#88)wH}Ds451fXCbA=#|)~4 zoy6)1gVJ}M0_sl&Ww2KI8J;pI!?Yw&e=#VdtiGV0F{n1mcIi2TGFB#;Y6fLuD5r#8 
zFeuZ>4MEi~s1B+MP%jx&$9cy=)iNlv^YQ?SzZsOdGReGRP+iMgV^pshR1XWOn7m<7 z7Rq+1jzL+v%3`~ow))S%aB9>O3vdnVo=WWWX+(%pj^(k1=W~AxfXv9N|!+myY~WA69zT1 zr%WK+o&(B=K}{={T;FF5 zDzvAR8rv`^m7N?WZOfp-U8P0va|RWutYnNC)U32`FsgP8Dr(z zu51BJ8Pozp9Z+8|sD=I)L3LnIi&U}&_>w`z&HEKpM+Oyt-VjtL2DPL(0hAeoT6V7l zRA&a2U?Is&a|V@YX9cPYgGzF345}-GTD?{iR5u2-CM^n7cLtTLOfo$f)cQK<{rHMO zrRd6`KMMx6*-+|(JsDK0{}oV{3@S|}cjsOVDt+F0P*w~oN=gUYnn1gbBC%Cgf2)sI0P81fpF4TCzUOft3%>QI^|M%AA|W$&8}%8o%Dt!od; zou{LSJe{KKn9f;(+QLlgF18WD5yaU>Rj=7P|gf0 z|K1@`gBetT#S&01464vhww6N})RiI6LAf%hqBYf^hBBz)G)ZO-V^Agg7JwSgpl-gA zX1oy$>bCBDP$L;ssWQoY&7jKsC7J2QpvqN}BzI>}74zg+fd_+na9$6TCxd!e90JOV zK~>&c2Wk|9dSW4GOhz-Pr~MN_eZ!!h4f!3^7zS0nM(Q|Y8B~oj$&6!AwfmwY0>6k> zc{8v&-C(@M@dRp!Mys=+d>E9b|7K7V7?f6+oSg7wP}=k2K}}>(I_K_#@?%iC#a^H$ zF{q}>+~Cik^entFs>uwhMSm%D1TZLlWo`&$PzGy`U{pa2$}sJ7P*WI`k+RDPW>9V3 zT)?QNGALu+xuB*oD3ew{gPP8uOqEF{gh6!(E5fKk8C1t;S=fd#D6?~t8C5YT^J1x{ z&R|eo?@6&SoI&-lhyxYDpe*`FfQn>LmP2lWn#rK7)=2qf7K5@*lN{u12Gw_8e^5~j z%BD`HzBvraPRZBKWl#>SB#Ru)pd9^W3Q{JS1O^pc zEY;MN3~JiFX`m7rRH%jIAXhOcRe#ArCNZe+A$vf5&!8gLECRKfLCxBF8`KXBDr(;( zP-_^}+&alYu4Pa$x_v+;GpGfvc7s~SpceW|J$yZbS`;S7vo8x{*P}pUVW5 z!l0I1lc{eLgIacP9H`9s5M)q{IZQf zCGWEWmCm5nAM*sYok68EmPXwS2DQ1Btgm)3sMIgUg4)TT(!xfA+Qp#KqowI)H-pOf zE(p{f2DS6rOHg|m)b4wQpfVZM-an;EzmGv>^`8JLi$NW5Z2@XOgE}~B6{rIY>d@Ac z5rO7-1{MQ5`m+tjb&x?F*OfNGpBU7MR!=}3Vo*6>^a6F5LFFpDoNNY_7rhdrI>MmN z#7oBgD1$mzEM@Ft3@ZPg1*o4H)Ws(SppG-B!v2zj{DnbX8Il0%1cNH_dIjnvgDT#d z4JwC0m1OCII>n%F9yQQpc^%8@s zQJVM*8B}d%I!1MwLA^Sb1?mces?(K!!K(zyRil-xuZkFy<`-Xqy2hZi!sL!q%%HTR zB}I3gLFs(A5L5|+(!C~Uy>2k5rezO7-DFVBpGfoKEe6%XwhGj53`&1UC8*mB%3#!T zP$01p_H`L!HSe=w-7_eO$x$e_AEk;5U67?ef-CZH-Al;sd7P*n`d z%Ig5A#|+APYZ9m@465%wiPfJB%I4@DP)`|@UE^Dz{$fxLtz^6Oj6pe?&Ia|IK{lZBR7U*52>(ua3Kj#~c`X3r`jSEIR;HPb3~H}3&2(Z= zS++;<2F)1MfgzuP>dc@HDr*^Y26bqwY)!f_sO-!@P+b|+QDvIx#-NTjmJL&P26e)q z3{(#Wm18PJ?5`M9Zm3Lt77QvcdI+eV4C+k$8c>!D>fAM1VfA8A`O1dLia}j`ED7n} z464xf5GZQ~b!A91P<~Y<~t- zY9KWeI|fx|Djit%45~b|0+a)Ts)+6dY5;?J5HHVAa%4~sugMB)AcLwbD*@%ipdLTo 
z4{8vDdTJ|`9A^ggY=~sA1~aH?FFQ~!460`9Ku|*%RBh%cP_7K>)zJ;0hBBzS##=xQ zBTz#%4CG04!x@yO=?G9G7?f7%1yCayly-CJYjeln;aI7%hd_2@J|CUP=nS49fhPC8&uEs_Q)y zP<{-m`(xRLPhwCOw!eY$XHb?yG(yH7yHFsR^blFXdX zpr)052WkO>3V9q3>RSe-vX#5@LIxE+WCp1JF{ntdZlD%1s99SzK*chssLV#7;uzH2 zqY|sd3@WB^S5WZ`YC)@gpuS^J3)}w)SA?L7}fU-Dmhci{i_+& z`lGT9|A9fJ=yn3NhCyv^r3Gp&gGz0`6I3#TN>fVM>ljqJ^00^X3@RgDVzq%m?YuS- z)J6uiyG$x3DGX|_l9SoQpt6)nW;262;IbKSa0`Pv==lm%DuX(tB!0ItsO-HjFsd{L zbyUg8Y-3Qz8!y19(izkVgE^qKGpL;QPeElcsN7H$s2vO{FIp4SP6l-*em$sN4C-8w zR7`d=sQj|_p!P7Ri;v|H#$EmKzp(70H zVUaAhk20vrvM)g$V^EJD%W=}58Pro-Gf>AF)H4^k5B|cSs=Xw|dV)dKq{_pzPBN(4 zz4@SW7}P6eyL5^{)hUxqE`b`RVXz5r@HB(cY(ENA9)r>fjRf^8gVLV64b&M1rL*`o zsIv@8SE(+YV^B@+$~o-w466BKTTuB7s)el{s0$2A-$jy{7a5d+GRYJ$C_^PDbBRG2 z?UkHNA%kjj^fOSG8I-Y(9A&%0piB(BL0x4~rphE!#GpEaj>M?0F{qAn=YlF`P-cr| zg?ybsnHOCFRl=aU-j$y18w{%ZV>yR?lR;V7%8vdPgR*py{MBy^%F43{)NKZ3z10ZR z9R}5RZwjbV24!<}CaAj%%C52WGn6qX2ZPI??lCCG_EPgIXHZU|())3rK{+eO3Mv?s z%i@z5)$a`I?Gt4lFsNa7r3v8=1~uYwD^L#^l$*`p5rGzX1{MSJ@|33`R5Ga1sWzah z7}S`(Qs{WhpvE1!1L_Hb@@cFM>Q4scYap?D%Aow(j|KG?gYplRMv`X?DqwCrsOJnS zXt8_?s~J>qQ5L8d3~JilW1wmnRLEobT)kvaDx3SDY8h0xi#4df8C0aFG=RTiP_t5{ zr0|+SMJWrAHw9rd4qHEe-0$!IXBh3!o+s`?CSQK&TPYBH#} zxqhG;FsOKClF?#NON#bjR1F!_vb#Tm(q>RAs-A*s#Gn#wBnPR(ppsmiKs9Dit36jl z1opvGuozhK-gJzs34>aHa{03@UrCJYDTG26gmE9;h}9>bTBcP;D912?J^8`kX=Kv^NB0%%F0WNv0ix z%2Os869#o=@$Yzp?HSa$B1wmtGN}B!VW7TXP#3Enfa<`Y3YC*WUoxmGF4E}PkwF!C zN+Uui234FYUmPT)hLsUHG`_%I|`%f!=PRrIS8sRgR0Y+1*#u`8m`e&P72vDC{2?WpllhG zR!AYJ{tQZcZgWs}3`%FQ)V=H(lx|U9U8E(SH2K^ZG^gA0Q)X(@a9Aq>jYce90s-IsBt^G?L9N*$|AOB#sN_9=fLh3))+_zB|6@=oI_?ecgW6dn%i1LjYWLlipq4VIy;ag@v5Y}w z+57@(IfFVdSlR?vFsOqbvJFpQP=`|GWZX&ymA&ULP>Brc=#j5MtzuBem6c2qgF4Yt z2c!C)LFJgpCT=x@$_;4>>IVjuH)lAgH4N%Z+-Xp28PvI}(omVqpz`lZGuJu>b+O6~ z)OrS0Xd@qn4Gika;3J?mGN>YtO`uX3RPmPApf)k65@jW`nL*t=BK?M27}RZ@cA!!j zRB6k>ptdroGLtY+X$-17L>el$F{p|;*FmK-s0VS^Ky7DG53l|NDuY2)mOcZugF!v6 z`X1Cy2KCe?6VxsS^=xnmsND>z+Cx^zdl*zrs;n9IGN{@;2S8;qs8>g1Yq^g>)#*fo z$|6uBG+Nq#+RvagO{Cm^fI(@690TL&)JTl5*ILkz0v-Cdv# 
zGpOcOUxCVIP%UgMKpkOF`h$moI?A97JbnOmj6oT0kuJ!e8I;i;*;*cFP;HKk2lWes zGHxVwoD&Snq@@9s51=8LYZXFGAPTz))>_}24$s8GUpkT^%hyl1qNkvqz9;r z49c$2QBVa8%AsX5P?s2#qlt9y6f!8M5P59QWd`LuXFsSb49X?$GN`Kz%Jr%=;}tQe zVWmN!t}&<)l@hCB2IbaIwgA@|l!vqAWJ(y6mxtWEZZN3Pn;(I?$)LvUaRhaXL5({y z6Vz`E%BPV`eYY8uufF83?l34n6RBF3GARF$dZ6wysDL@MK$S75ptv=l?lGv~t1Cg3 zGpK2$CqUh2P$5;Hf~sIp_>ZiE`kg_A50<<00|pi8F%8ro3~JUE$um4;P*HmlK|Nwn zbF-!Ys**v)G?K-36@yx!FOPkG%%B#Uj05$AK`jcAvh1G>DsGO%>M4VYkIM%27lT@I zbuOr93~E`aA~;WmIn%RH{z+9ls<#HR4YxMEg94mXZhk7FsLGrk)T>JsNxBd zzcOS{C3|*)YR#Z-W^V&!#Gro5`vlZy460OrAE-7As!Ykrv}I7`(;s3~pEIb6Ig*Yx zW>62}%t5tdP!F%p0A<3UDwUN?dj|EmvKpf@Wl&H1<%9ZyK|OPp-j5Cps@kJHs4p2* z%@+AEbYxJqdnG5+i9x-}UJA;LLDe;qJ5FZ;HBzIc^oyD^D9v_q1fvUsdfO+{l|gAM zlT0@TrK7B5x-%%bj||h$e^q| zhJqT%pscq@gP9Y9>Z_En2Qerc!%0mYX;@voCC^@ zL3z192IbD6MsNNdlm~+vv&S8jCxaT7EoYa!7?e+=4xmOcC|~7K52G2BU%NvX)i(^v ze|j0HF$^jo>Q_)>8B|c5Jo0%Qg9^SXYX)xyHBDK`jAu|Gm2%F@he4_O%>y-oL4`Yq zfbwNfk?tj+CNij5TjD_ZF{r4$a+q`ygPNPY0hB+3ifPmr)MN&=KwlQy0SszkJNZvi z1~RBc(|3akVo-5WQU#d8pyK0nKm{|XC99%AO=VEaN~QN>8iQI@i%%IN1$vLlh26b+g6xP3EQ2C`jK`miW7b_)JOBvLq zmkFSjF{mrfQtn^Qpo-itfLg(ziYIghmB64%cK;1(C4;)DOfrcK>bKKNFsfAys#Jdp zs3Zne)@~iB?-^8i*Djz|GpLF=eL(%dpdQ4v0JVleJzOOp(X|Y!vQ##4$qeeTazJt& zgZlI36O3v-gL>vX9@GW~RqcKl)J6tXGa&#}3WKUus!N*~)T`{q7}aJ5_2#q%s4WEQ zYYnBkl**tqm16~48I)F+&3J=p3`#p{DyVG?N+q&bqMiCO_LnwmqEF%>Hz9AgBqr+ zWbzo)h)43p`ISL^{ZdwaXBd=+b0w&=49d%WFsO43YV-t6Q0E!cnB7tZ$Y)UFlu70S zgBpMObByXDgYwlk235eI{FG<>USd%GUB+Nkg$yb{S;<^xP(iV>7QMotf>%k^@+yOx zcIO$WA_f)m=qadc3@YrUJ*Z*^749r$+3O4{(p{Q7OBmEFpK4Gy7*y15IaY9!LCwvU zO5805HShE|P`@##1^VBBy3L>#Dl3^g3~EsqTa2odLB%N@zjql_e4Ko7${5s=L}@~} z$Do$o9SW+PL9I}7GWQwO%9lnMRRx1eQgSlCGpNe-&BL=lGcN3^e2DSOqrJ$-9RH|_usK*Q{&0MxiPZ(7CY-uq2lR;&~%CilhGN_%2 za+~;zLG8Y?5Y#gUwf9jdsOJo7-%A@%)eP!@b2+FN4Cd*vfLU_rbvUfX! 
zs%20|51#||H-q{)w*u5F26aMz4ye}*DyN-H$Zr@_ZWpOq)-kBOsQ+v2Zv1k*zc_#= z4@sJ(v_`5SNlQ|aBzdSbBgtBl(1SEf5>|;3F-ww%B(X%yDy<>a0INsWYg1XQXE6#h_}6 zB`OUDbzey`y&2R4BOOGg$)Fy6>Iv!t2K6M$2vi>i)v#$Zs1F%b4R$x*tLH zWl%4gZiCWhP|g4L1*OBFTAic^nkJU$1d%b?olOV#%ggL;!a9F!h|>Nv9w)W-~} zvse}v`xB^HU9{y8H-JHPHR=XRpFws1)Ed-42BjADAt(a|rM^iPHVhe*#txap88Il$ zx-3wiFsMFFg`fs8D6MwcBp=M6be!evJcL2%D$^xn2BkND7NQ!;p!zF4850Jje`X`1 z8pfauicf(uWl*1J%UE_egBomD4a$r`8Gkwe)Ta!}B=Q+3a|UI)Nsa&`7?jzLqo6Dp zlzE-}9||KGltt4PP?ikJvb`46CWJ$gR=3E1v6^~WjkNiDMvFXyX=0TY#5aN z8M(GOhCw+L9|mR1peE|b6ksfanrtXfXFCSvWcCKsI0oes`4=dA2Iaa*`d8x_)U+LP z&&z>9O|O$9zyt>6)|3LukwLk)O9yfygPQHU7t|yM<>?_Czmpl1*L)eQPhn8r**!ry zF({uiQfxajC|_l|_`GNok4}v$zx&$g9>em0OiJ@!rIS*n#rJ6IDZGqok6YgPyscIK}Gn=EY*WSMP=21 zn$4i1&x{521%p~!yb6>jgIcdGW7#hm)CNOYmG@#$v1Xm1<}j$uk#aEcW>8x;*@BwO zpyHHD#)m=0*I6K{c?>F{X)!2Y29?-;4AguEwaYmj)B*;TX6|MP(chT!|WcY#SH3bq#R6w8Pu^&-9dfD zpfY!?0kwodW!E-=`kF!IG|dMU!k}{7rBk$&L7j9C1r^Gm@@BPzTE?LAecM2VF{ray zzkyoLpw1~H>~IEEP`nmVtzb|WwB_l%l0jWGl&WtPgDN(Y6a6;~>T;ACs0apCvMB>p zB!ep5A=fse7*tv9pP*JVsPZODP|*zPX1lBiu3=EOoTZt$mO)j{lF#Z}236%NOP=c( zRCSgMsPzo$-kBkwVi;6SvGn>jFsS?5GBMf6pdJ{8fQn^MkIasP+QguqL}r58%%B=# zCR0gFx>nf=I3`);e z2I~hHRR64@D;K-q4Ok4!;CCsD(iqey+51Xw3SqQu*-$F?Tn6Q2whq)U49X=^nieM* zlq)q zg9?s)1?nP$T9SAY)FlQLQu_y}Vg?o3qyg%81{KyWbAZbXYK6-%P*)h#s#$wLl`yCX zU)eXj%AlgMB0-fhsObF5psq2fwZBWvP{yFv_m%(p;5viaU^oI)IfIH-cIa*}sLhdu zi0UST+7eq1>JJ7Lw?p1ATEU>=YZE}-Vo(W9GC;e{pc30<4p7OUb~%3v>JEcSnk6>~ zs~A+Wl4R~OsFW->L{-h8QuAkl`jbH&DE0z%k3k*mD^KUY7}Oy{`4_BVP#I?4plTV^ z(MVHJbqwm*rr$x`XHc1ma)_&EP}#LZK|NqlIm+buA%n_&Ejx6N7}QB8IhZ_VPkU!YzvsFK(@p#EV{rHLPdYGzPnwf#Z8WKiXevP0Lxpl-gFRiIV|b<0H#ajzIu zoz)5)M3Vr5GIo1kP?`)%JyzyRA22A5L}`=vVNjYiO`twxP<C8B(!tVUP3fg0~nM^#7$8849Yb2D^LR&lv$#T6bu-Yd95QTLk4BhC<)n! 
zL0PsB2K5PpvUX|!HHbmkxL*Y|m_gb4P69QALD?x?8Dj=zpYMsNhB7FJOY$NZ69zT0 zul#U^F{sG~l8{Xql#^K>P{SFNON3kwF=J4!N>}Dn1~n~lC89ECP}7yx?+6Cv)+lSu z77WV$^(#;#8Psei`45vU8I-4cJE&0%%1cQyRt(BJQ`(u<49e&98&IPeleh0|$Jjtpv*yKHSwWKa>ldY~pTsHn_(P?H%{biNE`r!c6smllF@ zVo>Y*+JbUsP#X;LLAfxf*x^zkPi0V>Bd&mQWl&p`B=Z@Aic1`hsHQQf_}cEEK4(w~ z%6w@$gGy}w08!0gP`jLDX5z-6l9bl(Oa_(gD^o9b29=We2dG&LDm8yUC=Uj8;L=P` zvl-MuWr+O+gF0j&eFjekl`&kFBfex%MDD@misWUlTI@b*E|N5=N<>jmqF$G%0YZSgF2g818M<-I+s5l)ItVTaLEFc zAA`EkSGvgl4C_M_myetat8IlU@fR{2KC5HQr`*&^&~>hUMm??!^T~pRxzl?#GatOVNlO&o`Q;C zP%o5K-AD%2tR$Hz2G!~$_W@Qjs5W=$j7Br4c3*Q)YZ%m<%pX9lWl$aY@>zY$pgJ$9 zgIY(RzEHVQ2x>ip>S_=SDuzLIA07{C1A|fD5n8ld(vC>wVzP$>+`c3wKDeGJMjb04Tw24#PGFR1+t z%HiUVpbjvoiCXey_<=!99w_~*GzR4~TrOxGWKb?Pt3ahQDA$d?pbjypX^Cq<9cEC| z_j`lNU{G$)Wcl<6gK~d82h>ppHM{H2pnhafp30)`F$U$OB$=NWl=oM%1)0g9d``;% zEsH_nR19b%b=o9 z%YUo!D}(xPz2zK(TCXKh{l=g+4158qfI-EYse(Gspf*Q1fGT8ATQ+_J>H>p`+kO&M z5rc}aNd|S1K_xtU59$(wN_^b~R563v)pY@=-x*YryNqQoGpOWw(r38Bpi;iF1y#bJ zQcp`%R~giSi?Vc6%AnG2$P&Ue26aeDGGz=ZL%G*@ok1N{Dw%Qyb!?*?Ol~l!%AGUWX0@H26b_uG&AopsAAJMp#EY|mu+T)s$ozi8@~Wm%b-fP zORukvL6xOefx6G2%Ad(vdOd@>*(SFE9x$j|UFAI~4;fUYyNntiF{r9}0iYf;sOsQw zP)``tJtfIJWl%L2e@9de463eNe$LMr)PsSQpc)y}qv7(gH!-LuHd8@8XHX3rtwH_G zpc=Pt1NDMIJx`UI;U5O|qR|{wGlObwy94SagKF(M4O9z*YMa>(s+B>t&vOIyib1^z zE(O)bpgK-FfqKoLIxiN1Y9~;hD&=jU{$)^Il_c|qL3JPQim2W)C^e;$>0nUm8)ZfC z9fQ)?F5A(a3`$c;GVd8wpJ%fBp`!W!zXQutt1S;y7Y3!HR5GdzN_S=xqUy?^^yW#g zuN#BvA1puT?hH!*^jJ_m7?i<9>Gi2GD5LVfLG@%%g9o;NQfE-c!-s+D#h^@V3_xix zDASG7pzF<`%(hE2QSG4wVlxI* ze+K2c;TWg^3~E}!6HxjLYI-YBnu8P1@VY_|ht#-Kt{e**O>g9?2nM*wpM71ky@+anm%imvh^V+#hgYUXuN zBNbB%dmJ7gF0~WPf(K?R9g8YP*WJxp@I3JoETJw={iu( z4C?6UtDsyM)G;N=Ol44++r1E#D}&13FYA<_F{qqpUxS*)pmN)?L4D4kPO8daeL926 zn<={=GZ<99uk6XVF{rb_azSz?gF2Vj3Cf*86)2U=ECy9ro`R@67}Ujqa-(WCgDN&n z2lWMmx@;p$o}LVIt0BRnC`tLtX@?}uf!MccQK7+cKCmSgX7*x$gNqq|$RNajsp!^urgMspN_GeI! 
zlx4gC2KB^74sn4Ds$oM0s6`B_ar^h6f*92E{W6JL%%EO8(*qUEpqksh0reGwYE?Z6 zY6*jCn;8Y_YX;RmPYxy_4C;;2_+84NI+RK#ltFbWjo)Pi>PwaK75J>e7*yARYe6k% zP~A=Mg9>L*YNMqcy@ElhZ}(oQ7?ibY z2&gR#%4ViCGrwn0wn~zTV^DUBWf^ZPgR;+)FGD1^AIcMfh9-b&NqpDV5Am3@ZAxd>Jws)Y^-OL1i(hbvH~wWizM^1ABrx z&Y)sVe+8Appf=mcPSMW{YRiU;pmG^hT*42aeqm7Y`=lpxfR zwW~`9s5}OhG}8doX$F<-GZ$1ogGyPv2-Fz{m72E<)L8~~phymJzcQ$_>(XL9$Dj@k z3f*o=psq2fVkOCxF{sOLO$JrokOb-v26gk* zLr@hA>XxdczFQ2ca^_S}w;5EGj}53w235T{8PpvHbuTXhR274&DU##TT?SQGF2|*6 z2K7My0;oS3)T3dNknb_5C!+&F{l%afHps=08V1#va0pZ_gL=MC78mOn)Qg5#Q1=;B z^Q(KH>KRn4s%$zvU{Gx{`+|DNpxS+;*nY&I-Yk}??=gew$eRf234`h^ItJ<~f$~x* z*8$bQpt|apfO^KDx|^zkYGhDqqvwHYVo>Tzl6lUcG!mro`!|Eq+_w?b3kKDvVF##x z7?jp4c@a}HgVIrT1ND+Y=_-xi76zr~V~(g=8C3tpt3kbDQ2KeZK(#R_gQ8uaUNb18 za#>t#XHbLnWi#_%24y^~4b&S3Wnz;C>MesZ-S8At2ZJ(8$O83_L76L!-%bW)(Xa+l zy=PFCucS$?^1=WA{wptQRr##CFesauT|lWaC|ji`)0IKlDV0n&24$}#neGhAp-6r> zJs6Z@xm0~>3~I8zG=6(BC@1B(q|TsRM$4W|F9zkhVE`x%1~n~VC#c>GYP!<+)nrg^ z4PPRv4;YmDtIMGJFsRvGT0nispgfi1k`{yVnk#R6>&u|L7jFfn&7gepfqRPY8vQ2iOyl7wxb1~90Qee%81 zXHcO^B{Psgg(=H;1`KM2>c{v44H?ubw{}oQ3@XCM1JoxBDk@0&SA!T-^l1%HgBjG? 
zqJ^M_FsOCc{{m&qpf>1xff~x7Vok?`GGS1gtvf&sV^CW*$P>quLB%D=g4u8e6~9le zV3;wegob=jpE9UKQ1)>_kppx9~fU;mv$#Wlr8p)ti7Q2G7WKgMjvMN7{ zK^-XC0LqF%r7716tQpiH{Z)u+G=s_*CYw$+4C<)$x1h!_sADnVpllgb=C4CU7*x(vNiyRYRPL*fK-n{>lU?L!Ii5k~xyeR~1B1$+D=(m!z@W|s6@YSNQ0Go< z0X3096%>sDHHkqLUbh1^nL%9~AT`4j230(4J}4&!b=i73C}##$5>pP!g+Y~WlON7h z233|KwWuqDDp!)sXAJ7*s~(7I8iTskMJ|(m&Y&vYl0i*pP*rmSK+RxK)r(C)xiP4F zr>=pT$)IYKO2(Z*)m@jGVHSgWpzj09gF!tSCJP(08PpSNsgS>5Pz}m)$&*1fCVYaZ zzGP6(_k9M+i$T3;kf`P`sODEvGk7zoR#o}$W9BlbHa972eHc{x+)PmO7}T4^y+HXg zsE$)o*3M^8okj9lEg(>HRIX0~wU9w|)t>;$k3n@GCRZ^08I+p!@1O!0l)7@QAdo?6 zY^z38ix`yVKACw1F{nOIUxQlAptN3jg9>I)I$dNn;wuKF>oy0}5(cF=_Z+CN8C3ru z*^UljQ2M84fLh9+42nJn70RHDuFH1xG6pqRe;}wZ24y@f0@QK_Wnz65R5*h&jrj%C z3I=7iO@25l8I<|HVW3tqD2t~ppuS;Hmais)ieOOIUF2FpB!jYX`vO!HgR-6b1E|#u z%1)UsMKdV-QzsDB8V2QXp%&Cy2IY8NAJn%DYO=nJ8rLx>r(rUdUC*FgtmO(u41;ow znGR|LgPOMOXHXj%)bxF_9UaS{+@98e+QgvTUs;3N%%Em>nGNbY2Ic8y0cs0_@|wE? z)b|X^J4i0x#xW?LQ`iW)E)-4%54g$WCj&6 zcMYh$3@S>QE~PN2=u`2CY9E7IdqHMisSIk}by<7c&!9F8kYf7)gNhv{#r6*jYO}Qr z6w(;f7NwFo$e`l3jY3rE3@Sb)8q^^MmGD$<2_0roiLH{5GZ@saE<-^bVNgkKAAmZ_ zppxf)4eCb*l@cUZFpe>()Klr8eqvAuF6;u8$)M88-h#?vP>1w;g34x48Nbhd~{Sk?zOO3@US56sTMVm7Q`6)GrJw=jl;UCm2-jD??Bx8Pv%xl8{d^s64ms zpz;`0zB0l-&7jT(?L}1i4C>q|nV6hmPz4vBf;!8f3a{&c`jtUl9B>BIIR;fcY&WRi z7}RB@CsV+nN@5lvs`Cu0blU|`g$(MyJ(&v(s=VPtL{-F~ZobL@b&)~cQYx8C464%Y zD@0YypsIZ22=F_Dst%Hx;WC4|rzDvx45~)y$&@gty0Y8&1Ftfu2Lt4XQ_7$o4buj7 zjX^!JwgOegpc>XUgSyV38n?+2pqxQHPq_l>27`L>GzrvA2G!gu&%r+!RIAF5peh(t z+YCQYw-{9WT-jp1&7j@{^#xVQpgK;OgSx|@IxoxvRYjn@Rjzx0y33%t4v=dF)eNe; z$z4!?GAK1`iRvDMQjZx4>MsVRp}e%JhCykj96(gH464slX=c_jD6Lj$N8e{qI$dl) z)iWsF8UKQMz@YS$B=e9#^$!X~RF4>x{;9E`9y2I|3$pw1gh3fypAYIOgBm=b7*qp; zGB#-d^^8H8SUZAhWKgE-%yR~1o+3xfzZsOp(*RH}7?fqJG&BEUP}W_v zKs7Tc8>J`nl0n(dmF=At24$xtnN|j6e`*M#dc~j|F38iljX^n<-30ZTK}{a82~<0S zax(b~)V~bM#kxPJHw?;keJ7~53~E|}jQcwn)btd&81jxmxjkJEs*^#vE7PU-3~F|l zeuzq?&;S4aD{s#kvS8MQL3w%CgHmNs-a#8db!AXKr}}{E#-Mza<5G79wV>>8MAd^q 
z`43nNN{vAUnw$XDlR*VptAkQ!P{GP^sTYGi3BL8Prj$2cS$C)G^0MP{SBh=C*O5Oc_*mid+mC&Y*IR$Q;Lv zLFKmkg8Gy}omBY&lsSXS)0CFQ2nLn!Ef*v$7}VJySrHt`pw5NIfU;yz1s4W^8pWUr z%Q8V(F{q-uPeEBTsA7{9phh#O%hq3jvSCmqj`u;0VNj*pCV;YKP-Q7bpvE$&@*|S^ z>=@L|*6E}yn#7>$tL;HeW>AkzWL16&gL+~uFZgm|Pz{ccK{+$1#%%*ZxiF~bDN^-KWl%4U z&H?4hpqg8y&+r+8YV9JQ)iegxra2bW=M1WSZW5^J4C>9II#4qhR7dz9pxhW#=LLBY z(@X+2SEWpLirg8LYPBJ#Sq!TCu+gAA7?he-E~wcIO5JfWs4p0lM!X#2JQ4(b|3||Ii za6xK@`3%aaOdr$&1~usJc2El$l(ESYP<{-`#OfF*e+FghD2s~$49YBCemH>)$~;8_ z)FK9DaYR-lf*6!#t2L;_49dDoZ&1Mu%BHt$?|j9eY`vwDS;C;~l%CAj49Y%yGNKA$ zP!7tqf~5?~u}q$Wp$ux$-MyfeF(@aK8KA-#l#5jbsO1dG)p0(ka0WFkekZ6E3~IX4 zlUd22+>S&cs#Ofiy)_inHweg)DvCjQFDd}FnnC%54+j;^ zpnNZ+f?C6%7L@6LTFapP?uLWBko9|83pgNpE$ouVxaDr!+4sP7q6v~paE zV^C`^XdOP>HRdg4)fXcB!lcmBgTudP^<3he0KKyMan(P$`Sfg4)ZVQp1Bm zr7)-i7t}%RV^C?=T0o^TsPwz8p!PGU3==(22N={*t29tQFsNe_>OrM3sLc34PzM=Q z_TC$y(iv3F5nWJ+7*uY{Lr{kq)JY}DWH6|_-qPzk!l3fKWnyxaL7fdc4(dk+b#D1S zP{$ZlL7_~SeqvCC*Y1MKWKczSWontlpo&ejL1i zL6z;j1}c|9m1lH-`h`K=Y?b>sCm7T%6{-48GN{Vlb3vVAP*vVGpz;`0^&**=oMuq> zmN$aRXHYeT(q}lspz5y4)A=les=w<9>Q@H!$Yd6%a}4Tfx5_`+Ipvhy2PN`y=6tPm_fZ+BnkO< z2Gy~=64Ye|)mbRJA6E#JkIFS!Fe_nDs&{*Uy2_xs4{ZQd%AnM&WWnqjgHoR`4OAI} z(uj8fb)7+J?mYpjoI&-;XaaSEL1`&T<|c#EQITc5KNys5Z+UoCFep85nV8&SQ2iIl z#N;-E(hm;+Rmq?X3cm$)hd~)#le`E%LD?wBrFsTst0b8R49aei zC;q^P49b4F984ZDD2GBnP>&guKTJ_o!|kg zkwHz1HvrYdpr-FV0O~n|a#NDb-wevVMQYI(3~IK@sFi`k@CGaf=Gj}OUd;^3YfcTQ zmki2#k?cpeFesnp)u37#lyBj5P_G!&f-;%pv@s~ZyBVNfGpN9!nV{MkRM4n)Q2#Qh z;0Z55ye&GpJR)t3auI`2W8F z%O_$^8K^D{Dr%AZa8wyo^m5rH@5-Ro7WN0#jX|xuwhdHw1{G6v4^$5Z6+1Kulp2HD zJnA1%JsH%N2{N}-XHaqRDxi8XsQA5djMHFH2^oh$^=43sEn7ipGN_&JcY^wWK_&H8 z1=WW^CC_;Q>O%&VvZxm*Ee4gU46*w%r~`#v5S2EAO1riOln#SRuaf=feheyO=oV1A z4C?48nVEdVppH$j0j0;FGPlZC^kW8~pQ{tPN7BN@~H29?{A2uhzpop|pBY9NEk z(`W-_z@YNylz=j1P-hoO>N8?c=avV9`h-Ch6v|+B5Q8ebCU=VmGpM2}srrU6sN$jW z6*Xp1mq)dN8p@zbCiDhn!k|i(uFNn7Rkqg|QJFHRawW+OXHYkl`H~rfs(3H=gg#|Z zl^SwfGG|a#bEKU)fzpzbZ732G#Rswo@{%926VU6cLjQ4FeHNitRp>XDLU 
ztQpjkQGenO9L=B_Ca8h3VNi`*pMV;}pq}s517*vgUSy<#8q1)XTfPBh$Dmr?$AB8g zpxQL z?n7sRn#`col;hGA2Bq%s7Ew7dD2;eoF>_{6ntRhhxiF|c8FC3@DudE$aRuedptRrr z4C*rmrK|BPsA&vJPf0SLGpPQHK0;K}8I-JHk_%SF~huetCpFvGil1u=Dn!dLyq6%bCZW-;M7BMLI7THJ%Vo)CM z7lK;MpgenD0Ts-kyynQ4;VTB^y-1ejmoO-wuntgPGbrElMW8|$)Pif1KrLlZes`tn z3uRD&L;XQ5V^Be(mVydnP{B$ivz$RK*}4Z&g)^uS<+!whL4{_-A*z)ODy(G*s8tLq z{Cyy(Zy3}njq{))7*vGP_>E*xQA*=Cia|w($uiz*2DMg6GSLib-8COXwT3~(RLOqy zS_T!XR5IT(sLiA1A*yu@YKy~3Q0o~~T>L6fF$^kxuLh_M3@RaGIjD^cD)D70s8|NI z^SxZc*u~%%GCz+ywO$>0jNXcsm|h-x>3%F~DkmBgU(=eU5{!=TOvO6p5yQ0KzVfZEHT3eL-@F@-@DDoJJ^gDR@Z zLsY2@s(9#JQ2QCwyd|3zT z5QD0CFF)tQ460H?zE>Fxs!B;RM;KIfpe&djWl;CRsE#qHx>Bk7eqvDd zRZ=r#GN?yGEkR{5s3)W3GGaD^YEY8QaR$}6^+!aN!=Rog%N5C=8Pto6DWGy0RP#$Y z0{p_DS~{g2eS$%?Y0L+8l0mi4F$Q&tLA_BbnLGy75w;dlon}y-=jH62PoR8Nlq7S8 zL8(?*BdW6us{2rLP`@%LwNbjD&M_!;hsB_NV^A7f$AKzfP@2io%skJa`W%)fc_D++ zdMQg&7Z{ZGdv#Dn3`$pH7^sU3N^j1Gpe`|}{(;|sDrQjnVOpSmXHW*`<%x5dK^c`E z0(FH!4XPRms)Ruq8@~s2l|h+UHi0T-P^J!Fg1W|_%(jjJRmPyqlTU!U&Y&y~%l1w= zgR*>?2-ru$n=+IxGP7Cxe={wI8T^3~GAv-=O|tP;MD= z;H_a$?k{DXvX()4bp8gajzM{97=gOapuFaE169wUyaNY=dcdH3!Zv|=$e?`B%faLk zgIaK{C#c5^%CBk>s3#06&^QOwQw9|@>MKwU3@X?`9$wEF)RH*qGc+=&kmN_8niy2* z;a#AfGpH~n$^6Zr!aHSR@`6FF(vV)?KMX2j&c~pd8B|ojD^M>PRCHJ{s1^pbRyi)U zGN^T>k%;OQgNmtA1=Yr&VvUnPy=G9GEo(uwGpH>Nrl9_1P;qgspx!X3_~b31-ZH3! 
z!>d7cFsQ_r-k{zwsGXg1qpFiZC24d8^`1c`dp!fCqV@m3|H?Ndpczyb29+8nsZW(b z9XKz|%&rV7tyF%_-56AQRbNou8B~U`42yd(sH2wuf>L8p$CM<~lR;%Fm5e%r%1)L( zLoWuEb9gf-4F;9_(ic>326dwI0w_%em8YQw>H`Lquk>X4FsQQuml4&64C-8%>}+c> zsDkrdK=ox%g{9wv(q>RaN+qMipo)#o1sy~Ce`BGk`HGn}?bS8k(XHb>B+CU9tP*q;i%rsz7)d8KL3>nnDFc~Nq zF{qmJhM+!SP<5r!%pAm^>hH(}$-xZjk+BD;Aq?t?l4Oh-RKxh!h-xTlnH}+ zp8Of8VGQcUVd+4cGN|U4=Rpl;P%WKuF~p2Pwe`9M>Qe^Q?sXHCIfHr=a2(VK2GtQZ z5R?Uj>O9{a)JOs~U!_#uTWiUnRPV@VHHtxXHQKP+Bi#)Hs$wX)CYav|~`Zy&4eJI0mKXB`*@UXHfkEegQR} zLH)NUqwD#9xOQ3W%o zsDLA&zG6_(%dUf3!l2d`$i(Dp2DMH}G9e5q=FUfmYAJ(?HP!3}3^bGK}>>tz}S0EvJC`mO&jG ze+bk%29+6i3e%Zpf)k6e6MMs zHZ!QR0qa10$DqzFlV0Bz231gy4C;FZRaklyR2+jUs+4j6Rt8l(;@+R30Q-lc-t#h@yC4FR>AK~;Sz zWo;6Jst%B+^Bx9uZ<*X`OlD9u1+pizmqFECmF7SSgQ~BT)rfrz>d_GCexx#}Czf)e z-_M{L#{U570E23bliiOW7}WDU5unl-)C;AOImn=zn|C9sbOzPZ83^hSgKE=|SCt-S zQ0-qMkj!e)ay%Ah(6WV-Ytfm)za+6~k(2BmsO9$r5&sP02#3XsX5 z{@atuVo>VJaVeWYX}JE2Kkzt%(%kbUs2m2>=g{wE`!p3R}1PF2Bq6; z1*j7YN>5j&mM0lh|A6tJPBAF`Wis>1V^9Wbq-AlMK^a|@gq+Wy236L9I>VrbJeUCL zEQ2z!Gz0Z3gEAdo1L_=uGIN#I{cjA)e9wGP1q{mKPywj(49fCnX-5|_D64leF}c8? zYb!#N-l#vR~E#s+d7JtQiLCcLwEn^&F_n3~JJyji9bDs3{L* zd#8j!xr}@T>MDbB9WRTEr3`ACYdNTE3~KtGOi*PE%I&Z`ov$+}_n*6hDrZn0om!x7 zFeuMnYM^d1C@)<(@czM|yp<$V!JvGW#UQF%49a(nC8*mBYQa_c;Z!mxzsifC?l7o; z`m3O-7*xe^I9)k)!v>Mc33@R+=HK-Z}75;7x zs9FZKO8o$+ItCS?yAafU1{LMs396n!MK6=69x$l2(TSiQGN^S|r6==;D68yHmF)aRg{F{tww+t#bCm&P? 
zgF5kUCa8A|DoHGh`|7yXxWlunLVNeCpGPP7?P=zHe zpt>@sqDt8*>c*fhJs1P3JA=ABG8a@2232DJ7L*!;Ds`2CLQe)&wnr{Vsxzo^rSaQ~ zLEX%e`-2(`s-jc2EP6AjO7$b4G#ONt?zf;mU{KZmk3jWdQ1_N;g8Gm_)vS^29W4e` zcXc?Zz6`3qQdT3h8PvlEpM%n2P*0Q#lKmJ|gZ+C%rOTiiU1i+=5rcZZ#|V@jgL-jD zYSE7wRCA8hSN$1O%ex;z4Pa1h>c4{0XHf0Ba%bu1eJ%8)^IMqdME zM4%R`T$P>ePZ*SHr937EF{p0!wV(zwD7BHY9X*6WsVhmwm_cc{%41?EgVIcr`pSes z^*Q7YY8Zpk%Gn0WltF2~+X-qogVI$`0cFOZ^!mwH^iu}a-(P;t<_t=I*-%g;7?eTu zdQcV&%INCHphhyNL6uuUSu&_0^?N{#Vo)X{WsYOTpiIY424&5l%v|M?@@NKSp7a=$ z4TG{cWD9BxgR;z#!MZJjvQn-UjAc+Z>hi;}V^FsJ9)cRjpzQqjgR*B(_R9u=8qc5{ zqAfu=Fepc*l9|AuCRI8iDn|x2rCuuJi44j`Y5Y!NP_Fj35!GY{HO;jLs3{C;x>CtF zF(|h~CWy+JLAmFw0p-G=Jl@H*f~gG3Q+*jIR|e(P?-Z!d7?ihvI;d$3%4eAzOg?8& zzR^QKO=nOGl*aE22IW`jim2QeRDe>+%w$kOBf}AuJA(?gm&>HH7}OG_lJQ_rAxTm* z%w|xbhoqVL1%nF9*$2v#L507Y3hGM+wMyL|lox}F=;sJ(4ugvF{|J;fgNhDK1vQsJ ztzDf6%7;O%EAav~k3q%UE(PVwpf=ub2Q{BTZ60X`Y5{}VV!r^?LIxE#RgRW^3@Sd! z8I(VRN=QElDu6*H=E#y~AcNZ3aR$^P29?xPj+Q|TD!HElsKpE_#eX!YUMDpxSR zVo(QG%h7TPgGwur2Hn>TD*g6pP$3NJaJ@|8mNKZLBV`H@%Ak(f%cj#Z29-HA6jT_4 z%1)BCx8)2fCtXrsID^W~krlxe4C=%?D^M#LRGzwAkX*%}^83kW^$mkM>wg(k1cN%a zOxE5a8C1dQ`=Fv2RAGsH8CEl>qDq;VL^G&M^|Iu-hCy8(DFx(O232D30qR=@RXX)N zsC5jgEGY@pdInXVeg{+xgSx3yG8-6FMaQ3rY9oWHRF`IEEQ6}*w;9wX2374J1Zp#b zx))jr>N^Hiv-%3CEexuzM9SLl8C3o4-Js$a)WiBQptdroCnK#v#WSdearL0KF{s9= zGEhihP|uU}Ky7DGFVbZ@I*~y&=kx@%gF&@)B!k+?pxS!g2DOVpwfB?vA@62TZ~UHv zN@7qQq4IRz!=O4>&j6K7p!`%yW`o+xpj2;f1eL;|y50W+)IJ8KW>Enul|iYGs{ys2 zL1|2tN!$SjrJ1Ay>IVkZCw&p9GzO)0ybRPq2BqDx3RF6S((QQ()FB3?*Y5*RhZ$6V zKUopXU{Ly@@^e1IpbS<^)pwLZ8I^1T^&^8CbX&Hgk1?nr_0o?1i9wktjo(ZLWomDX zsInN8+0>svWiu%Ar1_wZGboF6Y2D{AD9arAtbS%tRvo86R3>D49ZbSGN&2Tq}z3fDxX13sUHXG41;nRX#(mjgL1W( zuG+5*YT8tp51wOC)0OGcZw$&UJr_|GFevxqa(m_aQl2?O;zgYvsAuPVLFpaSaU;dO;U1&!1NRl=Zx z?d4SuR~gijspX(b8B|EpBv98FRA{<9CdwF8*l`&sTxU?>N|GsOP^)_Wji_!gsEB?R zpl&j#DE|SV{$Nnip=&`^FsQYwb3olo3N(QyDekiCr3~IB*J5W^& zYKy%MsJjd*ZmJ)sY6cabBnOi}8B{{Lq`rF$Dp5%?e=(??9rDAeVNglxT|w0{sN{Z9 zA=fdelmI7C_Zd`bs7y@i8PtK*U$0#J1>S(gz|wEuLtGCT)ZzL;pdK-(qZZO0c+8-V 
z+4lqWgh6Faoe1hFgUU|Q2i3r!a+DpqXACO$xV$o_kwKm4SPrU*LFFm^+UE=^zn|=8 z{>`Ay`m2I^!Jy8CT7mk9K^3gN3#yqx6_!i@^^!pq-Hr#U!*9@v`w;Xxf8B}??J*a;f)Xn2Ed49v7Dmvsm@s>eV_S^xggF#j4 z$PV2*2375M1XL%3y0=uCVec7KP1GY$D%$`58?gN9uDk%%g+bLT{aRH9^{`%6V!JY^ zCnLWA)r~?Y8KM8&}LBT<8Fe|VNe=VwL$e`P@20RgVJSCebT=H^$~;8I_?2Vk3ng7Oa=8Z zgVODp4yr$c($jebY5;@k@4pU|K7-O6f4Pj8`yJcI=m_b>j_X0JPL0KwE#)Lsxbxc82 z!x)rJ&o!V-8I-M#+))_LpzQqQhhxT|?3exy>Qe^guzD;ga|Y#jr3KUo1~uunG&3z2 z)Rg-%phhw%7bVG9GAP$^S%_*BgPJx~dVN+5YWi-ue`d{~+|s*%8qJ{GkK2K=VNf0& zR-nc(D9@gYLD@1WFP*2L#xf{xzi3c)49aKekD$geDBsm`%fy~REl`rocn0NnTlx$R z3@YHhtO8A7P(c>%pd1-guu{oPWKc_7su0yA1{I<-ekU`i(1Vu|)f5I5rX(3B1{L06 zfT)}q)T*8lpj;SKgi^^&Wl&Lm(r0jGP|-`1L4C%c)+$M68iQJQpi=zigYsffsY)d?hd~`!ZHlP88C2R8nV8IFQ0cd2 zht7vV9loCcY950+sw5d-26b#)2%?(LpfX)9f?B|!vXvyWkU`}fT#cyw7*wv(lksOz zCpzR17r>zMdVUQmkU`}uNoEm)I_tLuQ3WxmbIJ&NF@q|IdWNWi8C2nw3!uJYP(`<8 z-*5?ox^#aKsIM8+WeZuF3Sm$s<03&VWl*I`k_lx{Wy*AE8G|Z6xD4C-EJcTnFjsQ>n4A{bO%$#6s!$)M_$ zD@Rcb>fwFatzFHao>*)E70sX;>^}juhCwyDNcUqcgL=L@6V$g1>P5OOsC5jgIr}xJ z^$e<|L;mB(7zWkWGY8ZL2Gy=3OP(7U)Ei~G6w9DGmc}8fO$@3t>Kdrc1S&wKM1IcS zF(_3f$!uXz-R{ekqwg7%+K7js;uw_rI2TY`8PtETfr)2On!9C-bsK}~bMO~X2@Fc> z_{X5OGbrs2T~LV(N>{mZw1YwE>0}_PoeZkKpBkuL3`&2g?0)QKPzF(Q2Q-O68C}^1 zY7c`NbbAP>WCk@vY5eYGP$na!#hSvPOvlMsb{~T>bIAjh%Am}XWcOn~gR)S1G6xux zWwxwF{J@~B-d+cl#-MDJ%hLxLl&#KkM3v5eytm(Bxqm_a#2rGv^~P>xr2 zf;z&WCf%0L>L`PnQuhz29~qR(h-Og77?kTcNytAjsA(>8T_ux2O;;+JEC%IvP>z<_ z49Y#b9@KFL<)Li+<}fJFo<@l3X9ne^GY3>IgYsVZ2GlPM%4g~4piVF-UnR+$WKavP z$iC<)2IY54e$IIeDxmHGsM8E8Xha35d#pR3y2PMjZruk}%%C>bNrn77gW5cz3Djia>exqgGybR4eBO?IuI!d z`40w_c13nSDi~DyEt#0yVo-WZkFOdAb#~z+P_+!|+)_dA=xpjsJJgIzVKR}8ArMQ(bv zF{tOeyxP0sXh8C1J=0jLfJ^+riD?-*1^ z$P+}>$)Gx;WOea9feKW);sQ!V=l_2LR-o!FIn#GxP~GYhK&dh)wGm%|>dK(h?Jj`o z#-KD@dVuQApfq=tf$G7a`W!R?rN*GNvVB1HWKh~~z#>JqBf?mILZz24$-)6>@(D zWw%gvQU)+6`w$t->N6;ZC}U6q8I9d!LAi{OKEo#r%GK^S zP=grMG#6D+gBjHHUGj7u!l2v^_623kpxm?N5I2-TdAyZAg9(H3RFcdv2IZwxGNug5 z+er4KhchUjkUv0~F(}_C9Z;V#s0CN1fih=Mez#=jcLak9sEYz+!JvXhOb0cRK?U1M 
zLbha3OI!>=jbczCyX5wg6@v;*YX)V_pu)08ff~)A!p}&H)rLW>QjSYw7*vF|+*!9} zP*Fzm8yw4^{@awXV^C|AA@(>1wQiH#^s;AAF}DVQ8qc6M)-4C+z@RoY{R(OVgW6&z z+aHb$D$cn9)IGe%vP&?230?LU&C85`EPbP#xH9DV0R7)Au^I5VY7|Nhtq~(EH#-N(BT0n&{sFpL{pq4YJwjS?6g)^vj zZ5gbuU{G%i_kmi;pgNS7J*;9-osrKF)i(rckxJ~JpduKQYQ-Z^kqoL^o%9)^7*vlY zIWDbcQ0hugCYnKMIQt{2H4I8qNiu60RG+jAMD;C$(#n$Q(mDpEt@LEpGbmj(*|LaX zPIQ+@!k~sW z%>ea1gEF-X02Rle%$#LAdMkr6pEUtgJcF`G8wqL~gR;yv29>~|tnxcSZD&w6J)|d- z$e?Vsdx6@)pzI9gUvMXbvJa6y!!8Ep5V;1_ZU*HT`z@#>1~sWd?k??NP*ZB-KqWIM z=SJBi-^-v}$I1~Pg+WbolBo7EsOht&f=XpjZfQ-R_A@B=tl6LrFes0FnZ*6TpgeoX zbSaHNdG##wk|iVl%O+%F7jZKV8XJ0}>_x>&h_agssBRLI2S6ocAW zE05GX2DPba5~$M*YKxtewfPJx&RLoRXBbrctPP;fGN^Jo!G zTqm34#SH35lLM&V8PqYmZlEqRs7z=1;ap)**|VhASHhrj(qveCl|kiZ$z_L926ZA| z2J6=tR9+9+jxJ+R`F&+O`Z|L;YbcA0-7*s)|94&7$s6yqq^aq0~s+f>Sk6JsA>jP zk$(}?pA4$9haBSWF{rA(lKTE)P}PRRLDev*dtbMMs%20$k@CZ-V^DRm(!aXTpz14R zj#JN|9@Y&7^?*S=Zt?*2kU=$!eGTdngKBho1L`q@dafjyCk*OEn(Wp-Wl+soe}HOW zP%ZgKKs{qnZ9R5?YGhFDeM><#F{n36l6lUcI+RN0ZwA#FDa&{-2vm?t>}pW|FeufE zv7nk6RJYolpk6Yl9!+haS{Ri2*dIZ)GAIqF=b&COD9u?iwQOTheUwV(HG|U1+Jvau z8I*Ru^!ol~P`W+lf_lTC^!ok_>MeunZ}>5&4hE&KR5I@vltE-yMAgZljACVR@jZhY zRPiY&m45&K`>%qAC?o7H3~FeT8lqBVP^M#Lj?^3)r~=!&zb_NJA<ysWT|szOO*_Vo-L5k3nfLDEp8upn5YXhX{#ElR-Jg zZUFTGgPK$!7eo3ms42C}L4C-eoSWpe7+MUd&Bjma2mqz@U61{sN`X zpccf+V0|Ei@~apI%78%yC`rbUK`m;$iKvViRPb0YP@gcUB~G_M4PsCsv(i8fW>BF& zNHcQ?g9^)v0cFgf!q0pJYAAzR)gu;^34@Bz{s7c41{Gx}-z!rF75#MusNoE1ZA3jN zGX}LTR(gG(GN>5kT7fx(+E}NFs75fTO^s4BSTLw9V`YnVB!h}`mSM3agNmOe53f-S zDnUsyRtzdJOR7F=2DLNa6Vzx1mDIx=lnsMQRyKaeFsKv*87SB?sMN3j1T~gH9f&9d zWyhe>Vr6l09D_=)SOLnOK^?A@7VCHhb)->NBODmiv9ZCRCNQW>r=XRAD@Xhf3zHq> literal 0 HcmV?d00001 diff --git a/code/recording/data/eye_camera/2015_10_03/000/user_info.csv b/code/recording/data/eye_camera/2015_10_03/000/user_info.csv new file mode 100644 index 0000000..c868dc7 --- /dev/null +++ b/code/recording/data/eye_camera/2015_10_03/000/user_info.csv @@ -0,0 +1,2 @@ +name +additional_field change_me diff --git 
a/code/recording/data/eye_camera/2015_10_03/001/info.csv b/code/recording/data/eye_camera/2015_10_03/001/info.csv new file mode 100644 index 0000000..4705c6c --- /dev/null +++ b/code/recording/data/eye_camera/2015_10_03/001/info.csv @@ -0,0 +1,14 @@ +Recording Name 2015_10_03 +Start Date 03.10.2015 +Start Time 18:06:58 +Duration Time 00:03:37 +Eye Mode monocular +Duration Time 00:03:37 +World Camera Frames 5215 +World Camera Resolution 720x1280 +Capture Software Version 0.5.7 +User mmbrian +Platform Linux +Machine Brian +Release 3.16.0-49-generic +Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015 diff --git a/code/recording/data/eye_camera/2015_10_03/001/pupil_data b/code/recording/data/eye_camera/2015_10_03/001/pupil_data new file mode 100644 index 0000000000000000000000000000000000000000..cbecf1889a033cfb36d058dcda6725ab109de13a GIT binary patch literal 488935 zcmb^4dtA?F8wc=~N)AbmNi4}}B+aSPNFpR@lvqeYspQvz7)dOWX5_q7%$#BtiY3V@ zVV25iNg6rClAK1u^SQoGv**v>=lSD#y!3tE-@9wS-~GMs>$>jmA~ol!KI`b#VCuv< zlPAmw3h@i|3z{CH(j2F%)p4FmtD~Fl^q}BC`H4!~v0wR)vVh-zKj+x*el7F9@#TG^ zRXT2UCk0KP;^#AY`lQLK+LNY-1kb zKd zC)6)+a!BaJz!{GHFD`sDICJ(hH%&huCk@=e4?+KM_ppiox>jZI;p(t?szx1s<)2EM zAzhSzD#cpUztR5HGg{Tyx3;gYZ(TRtwK@&kzd2GSv1{Uss5PCZYW7Lg20gi4iZOo@ zwI&~;=F=!WTYmZ>YV-Ym498zM+W8H;;fp`~J8F&JN6kL1nTc}IJ3Ly|{6o~<-|u78 z1`WHKtz130drUI^5Un!wZSH9I?$)11&G285P~zb3y>`NXqW13YVH17dhDt6$s9GqY zx;Zy%%=+~k5VKX))h=7Aqg6)lL)Cj}LgJ2JZ%U|IN~nzIsaky!Du>DbCK%r*p=$Xd zRAYL3mC8>)gev@-iih|MN4u}m-W5N>Mep!vRqGF-dVjx zAX~ZG_4l`=>T}=Lj&|>E{b{H^|8J-~JS|TBCsgn5{vlLy2|{I}gzCoq+~1T?-CNwe zL&3a`(W*A@L$&RC#ll{zZcC_4B~)$asmwkNRqU_wYkd+b(+{B^h2m-ZOyo= z{9eOzTV>-9{|;4~_o1@QT$Zd{B~5yLY$# zG*s>XFI3)Z8vZ9#@9zFFRJa78>hShc^{{?@WIH^v>RFK8HqpqJ5(0$LuGsQs)=&ZJ3Ly|{X?kU z-|u6nzBz1|tXw@Z`7NpH;oBXbs<*fPG*mtQFI2uYm;Mu~cX$67DqMn4^;AN2eZ=By zxmmxzQ{T**H7h(?)$4tz#^j_sF7Km+s<(v7YM#pa(@>2XUW)(kC!y;7Ayl4b6C>rP zA3_zfa?}<4g`=JQn&bH2PeRq}eW<>Q>g1(dE{b{KB{9ma2hdTc!RPXNoF;ut&p|XAZ1^l9EVfXIc9sAwAn0n~Yq0DI2 
zSMNh*q22h4h9{L!^_5Wdo2Rn-G*o-OEX6=S302<@p&I9)Owb=gHS5WbgO%TFRQnne z{Ndl9s;}OM%KqiMiQ3^U9tqfKB%|fk}7-O{`dmEz4fP|vj4wO z1-b|RCsgn5{xMXz1fd$BgzE3f7Bi*%mL^1>({S~Ryn*6Rqp(TLu*}7=Bj}b zszLKqj-P~TP@8iY=cl0>_#srzGr!1|pMLmMEjaSkPW*+Vo$A~vPyFHEp>lX1s(y11 z$^Q$>q_=pqYVe0py}#ebP}w~$N>;A6Dh`wXTeND3?_fu}cenmDR73t7DqpP)UH%iQ zcX$5~D!Bxq8mffq{>X=KpMbxQn()2V%^#vwPVYnYZEMpvMn{!T)i4Q_^E}n?PeSF; z+f)Yn$){@Ahfodsp|kSSkD+QZGZdF2RNr;(fj|5^R8H?hW!v(iiE`09JX+=QAyn`0 z_c2tK-~SP*Ts^$gm}LARS~bGg1pQ<^t+v(yjWZ5$x|NBX(zJ4F70rtM7%0=(+XqEeiP`$t3$545gtuj%r_W0*5 zsT%F;?r8V!)}MxI^nXJYpx^A~e?s-{?jJ%WmmpLgN~rF&w~+`r_Pe#RtpWZMts3(_ zRNr3N-fGx1so0k+>XF$*5Oe z_`|0?_;REudl?#AU&JBB~{~n$2r=)yY;7`8vox= zg-3sL@jsz@clQsWl1mV(32*1BKWev7K2?`Kf4v0%iB@^N50&eIkP()flu%8SP zmCwgey}RGXP}$da#Kj1eibYFj9DBLW2j8)z7i_Gd8%ojgvw!3L?p)dNvM23 zgvzmZShD=|L#P(a-yV;@aJ2J}R<=C<4%O86p|bsHZlrS2J3LzD{~=WG@AolOR!_4G;clQsWl1mV(z_+1VRBNBISUB0& z-RkD|R?({I??YvBWTExyaOG1KB%zuyPo?@aRPpoeFuqSh74#ugo+p$A^uI#&Wz+~< z?r0Z$qOL5k{vE36??Ywpx86j#$l)y>tqT4as(1JM7^*?r+u>r6gOc5|@rP(th%a&l z?{58Rs6zf5s=3i)ZT=IgcX$5~D!C+VvYY;luo-><|H=r3j#GspBQz7?c%KnM<_5pf zzw$rGU3^H$;LmTf8UM|5q{QFDKZ;puJG+zj!Ur?$Oo{j1{ zLo^CQ;Uuc9A9ah^JXdVJ5OKr!|TK4Zp)OLmpyZ|R# zZO18IN)|`ecAS%5z&WP2_T&rE-oKV{trIQHfq}q;q4yWa#tNc7k?!!6{JNQC0r|r%-JtRA&&J3u-$v8|{E| zQEew&S#MlY+nE!Ph4EZg+nGCC;&Vl9CnD!GoIlleA`6?tDN@^sx*iPYs@l$X<~`sP ztL-edmio>$wVjxOxo}Fduh+o2 zr?&I6d4D+f)poX7OML!T+u1Sj3Y-eHos{?ua2}}b>`tnHQ>nJIXIDI&hiW^i)w0g4 zQrp?D(+kccwVm`vvd?|2wsX)+_DxUJb}|B_&iGVqCv&zvoM&n~Svj%}|3_^nyWlmP z=W090u8)B8LT%@yc{e!KYCETteacIVl3gsLvSGrAFvV3f5#$SG-1ps>Ps+0{ei{Vo=554?t-% zsFED4%!73pRB0hL(ZRJD)Q#)0pmZ73Epum3br{qg>$RZjGN|%_$3fL&Q1|0~K-FhZ z6-lo^=`pCvT}MGRU{F=ndZ6?f)Ds;;Pz@Q>vqnhq1sgD^=U&4>HDXZJ0S`enW>7WZ zCqOk}P_J`-1l5#44b@O4nPv=1^Ll%XsyTzwHg5~ckU`b9_5;;|LDdrszU{D>bdt+1`8C0i1lR$N1 zP@UtQKz+fW%#(1mCAc$#vXGiia2Ez;`Qkb#a|YEz2PXo8yE3R=ji!OJU{F?GjX`x| zP&NTuKv^;<+wd?@Uoxn^IdedDXHa$pzk}+*pzN>9HoPZ;8rTIDjo@Ak%F*gMsNM`} z$iTUvtQeG2ybCC62IVZ>t-&@7$|VH_#Na**%Jqdj2W=VDC~0#I{)$1lH?jlOmqB?< 
zK(QmZAA=ejh;szNb_~ih94Wrw{tRk-&TpXX8I)H64z>gjU{KyAPe3^^sL5TBb`BoM zpr%?&a$pdH@*9YvUa%vB@{jKeYA}NeOp;C95C#>LauU=~2Bmr_+a)Ik6{>w3)G!7$ zvk}S%!OjdSd_p;>;S6d{z(i0k3~H{jk{Q9EB64I4;L4yP3vPk>nn6XCpa2~_l0ki^ ztYk(psKwT!Fe*0&6*H(FD0c=GyYdaF(F`gsNs^f!3~KqVexSxMs1+|3f*Q-9R%xR_ zH250^wWiSzpgbAWx(R4!3LeLxHUyyKD|kGEN|-H4@(BzoF$Zv`U8Pv~RWD78fL2a{k1m(k^b_|RIHJL%BtgHkzg+c9>X1U<03~Emb`X7RQ8C2?v zF`)by)PC&}P}3MxdLyZ&`ZK756Qqt4z@Rb$BvTZ~pfbaqKuu>*SvgY231U#$1)D+5 zU{J@D?UIT?o$Mm%=wJqQ$_fWtf48$U3}sMf<0Y9H#-Pq6$rfNHgUU;hSj}Qk z`7azmg)^uN+CPDs&7dwBNV$IwgSs+dAE<8`R8hcSP;(hnaX8xTg6A=)k{rBIAvl6T zl@?3|HJ?GGO9HUs)@E#Ox7}}W(K98)-fo<30py}XHZ4~jX-T+P{zt6^CN?5 zouh$KB`_$H0@(s=WKgC`;x~~&wd=AGqe@~>9jxL(B{Qf_$|SRiL3LhPgi&o~Q07TP zK>fs^EK;ICZDCNBFJuewGlS}(?E`8ngX(1<`KxUV%4&l2er#t@HvV~_b}%U0@HL=z zGN`^MRL2;U>VOm!jx(rG?UA5PFsPZ0T7o*spu$@Z0F}d_=J+oJb&5gF z4Q~S~mqA4=8V~9;gNiJWJi{3V6;+Z9>MVm=bZ-x+-x<_mD?d=@7*veXjQ0nFiglG$ zUmk;sOKJ=1JcC-EB43<*2DL(&WC|G6D(%r2RUv~~qa=PWFsOB{r8VLrgWBMq0O}Hh zN(gTP>N0~$Tr?Kc6$X`DFagw`3~F;pDySj`wdI~{Ew3`DZB}zY6*H(E4xyl~F{l(* zQ&1%gYPYgoy3U~Xq)7i&DT7KqumaRy3~Il20H_-bD&0UTaW@&%!PdP%l`*Ic|6`zT zF{sR0e}cNrpt2T8uI~fI;P@NI{{JLFFHihu1>}bwTSfs450^$siKcBL;P)wGF7p464Xq z`jDS6sNz`ZT zFept1RqZcN=UNP^X4Yv?S`6y-qGq793Dhu+d|6-VFeuFu=_#tsptSDEHe8oM)wUV| zst$vy;}8z2E`zG)D#teKF(|#nBcSRtDE*x^p!67&!GZpu8Zf9PTK7QdGpJ?;--2q$ zpbT5d*3y7M8Tn5G)rdhE&q@N-m_fB(DCPbp49Y}V$uwn9rq|Lis%8wz?4Ap#<_xNX zRaa1k462iZYyny@sLrlNpo|!ld7@MSS~4h$oi{-lGbqagQdn=rpn52iOltr2IbLe04NIvHP+t_R5u3YIZG-gmJDkA!aPu4GAOTn>HX-=puDeL1=WK=`P}OS zswab*+FP~&y%?0AgX9@{GbsNNXFyposK7*LP}U48Xs7JxZ5Wg){Sl}>3@TJBACxVF znrR>t@>dKhT$yD0GN?KJ^2F)Kpytk60LqR*MJy}^)t^B{=1Ui=J%ft6mIZ16gIaXY z5|jgjT5Q!G)IbImGeF+GGl)UOjz|UN$e`j9gFp>tP|J7j0yTs|tw{e1)KCVsN-GnT z6N6e~ASbGZF{pJ)PR5x*ZSa>A>u?5@Fl!zt7Y3EM@E1@c7*ukB6!lyg)aGkBpuT2M zTgu;n8p)uxS;?WlQ4DH_!!uBB3@Sw_VY@S^-H9DBs?iK;&(1BNJQ!4}vRxX(p!RD? 
zGIK10N;hZ=>Kg`iu$AQcJQ-Aml9L(7pfYF4ggl->Wi4C|v3pat9&Y(*36F~(rs2kU0 z>YKr!%I?($rD9NbdP_e;FoP<0s0S*9LETq!GNBBrBC#$;6~>?{Q<{RB$)KtZ_=B3o zpq^-51QpJpo*66yHJd>_H$jd8C1=z6QJfYsMm`$LCqsj&Kmh~pduKQ z=Cy^O<})a*d+k6)GN{_Ue*?9ELDg|E0~N)f>Wz?9-$DkZm)HQ*A_k?uQ?{1hF(`xd z02tby6mo6%4BL2+4k|WKiZCt3bswD2tsDpjI&` z%LB3&UCp3+Xw3$-hC%f*kk>=4Wl&bFWYxEhLD@{p0JWY$*@hc}+Q6XtE*c2xM+Rl5 zY?l%kl>Ifyer#k=4)?wTmB^qRd;bb5i9rn+_!X#R2IVv&7}O>P<*aO%HZv%foqaH> zpBR*D`c_a|7}O~3zMy_)Q0@(nfZEESJdE#y+Qy*9PLr(Kb_V4+%LUX91~q=6oT%E# zpuF)Gh|)Q!ZP}-3)4~m26CYVNiYpUW3}hp!`Q@gZh<01#T<{mCB%k zcJ>CfmqDr0SA*KephC4CfZETXW;T>jr7@^*V>z~&&YyQ zJVuqvppwtu0(F`}ZN4TiVmia1wv=xGb(TSG>)is>?+j{(!)8$D7*vYOWl(=GsNEZ- zGc%7t?b#s}lk*HJHGKxCdg4ebME^iwr7rrgTSN zVo+HNWkSBppt6-o<_d#4cC9l;^(TWmQ7$=|A_jG;cRZ-84C?d%>5eXDP-k5VL0w}| z=MtxaDq&E0JLiD9&Y<$s=YlF_P#3i1u)|*r>Qcilpl&dzE3IVJcauRC`P+gjV^GC2 z<$&ZZ234|9Ms=G(mFCOW`3{4+p-eJ&8B|&MHyBkpgSw+kGWQr%`G5-;)qMta-{oge ze>13xjgl#@4ia|X$UJL3GgQ}h;$;`(L zs%B;;s3#2Sb<`D5PYKj;jq{RXea4_PuNi^*he2tT2Y`CcplbJ)_kO%!P;~}K{;HZm z)pOYm>Lr8H+qf504TI9(A(^6A49XyVD5%#As!6SLpx!X3W(}neS)-Qv@4p&uXe@om zpD`$-X|A9&8I-ZIlBva@T1TD5sI(ZA$@vse+6>CH_ys5(24z-05>#yl)uFfaGw3p? 
zP6Gyks>7ffdL7@eMa^5IMT#Xo%i!#ZyWKgbY2QVsQ1~sadYyny^DEEfVLA7R3 z9>$)aK4(y4r&)nAVNjkkvq80CP~)S*yT}oYjtnYrqdTZh3@T`c^dW!2pj2slKy_wNp|zqxbzx958|r{E zXHenB(tp*JLCu-=IVcMTHFu_*SntN5BBC~evSd(^=jVa?l0iik=Yi_Zpca*P0@Z^- zEmkI(o(w8RS;_QbP_Zu3f7P2o#ch;irWJ!)uB>FN8Ptk2>2$JTP^*;fQXdAjMqhUH zwhU^WvXc3VL2Z~OW!b(ADq-dVQ2iKGqOx7GV^GP;B-5WkZ7$BhFKEx8wv;ykHGn~F z>n#V_92nG&0kuF4WKbzCGeHevP`fvN1>HSJGOLm1TlS}Q>fWl-t* zPeC~`sDs7>Kn-J18Ge#yaAr`Mv!qx&oIzzp%>w1Zpt8?P#bgA7I#!$m%9TN#P$rqL z8Pus>PcW*H4C?d%V^E_Q)LEAXpxhYLxrFne+!<8f4oQ-aW>EQQ(!Jxspf1#E0cs3` zx}+~j^05r+im^NP_=u>`bx#1>e$Z!70jUOxr_r9!l3jvZUq&}p!9dBK!q_VgEUzo&ty8%4k{#P;(fR@l2_he9NF(NA(9amqD4Fw+1zjL75hR z3o3#^ncbECtN9G7L$6DqA{kUC`$M1>FsRNh-9be$DD#c-PjDfFve+SchD8j@GEG_| zzGF~5YWaeSW>CHKkAqswpsZTn2epJj*-UE-DuzMX&TI#2DTC@8<8+og33%2SzS)-$N_$|SRaL3y2*Waf_y z%Deb7s00S(bN44u8yVEpUWY*?GAKX$TcDB{l>hKoppqF>V1gv*HZiE69pgZ4W>Bg$ zDRcb9ph9cOj(!V+nyKFn)Xxkmyrq1uwlb(We(9jLF{ru9c4<3K6tTJNyNxJq#)?;Vh_M8PxI}lFUqH zP%F~pl+a!VwW^k!KHJBj*67Qt9`-Y+buEvAN@GwP{IWo$GpK~HY)}UnRAN+JPzM=Q za^7E{eq&IZi|s&VFsLnew}CpuptkiI1}c+5?NBC}!wf2AxUBlJ7}V|rNoF2lPL`QSUrWle#~4()Qe8UEpboZ7#;8s(s0=?THJ)TpnKQLO!8jtsB;PHLH)s? 
z@^%D*%41OZ%1Y)ugSwz83A%g+bxB{6nFS2$O3PiK3K>+9-!f1a7*uhXG%j9bP$dhh zKwV-`rFr{6U1m@>ikpGD!l25OmCTP|1|-YH^G<@N!ft}>|m!{t~(F@vf|kg4w) zgR0zK1FD2URi){Jy3U}UD3eSngLbpSR2vmzf*`1dLGc5*Xe^v5V+6>A;S;^=yD94`C&rq8|4Y3~tN|!-7 z4c`r_4uf(|SOlsrgL2tX2UI-<<+}eGsQL_Ql%}*(>M_&l%KQWhG<6pdzAV8{URNMdqymWy+wUiW`Ay%b*tBc>~IfK`rhnb)0q#DyIJd zQ0*C1?C^!4IxwiX1beJrw4=D zVJ}~to(w8Q$;tF$P`eYxV^qBv)Sm6ZpsW~F>i+YftQpk4qk}=&FsO8WIkDb{K^<&4 z8I&!9%IGi`)K?5DGwe%HeHm2N0x7KbV^G=O8-TK7P{)dGLG@=)C+^5%+nzz?Jdp`` z0E0ShUmKJIgE~80T2BWusB>dWKn-G0dD}feIWnmH{ZddE%%BR7nt~d_pf2gJ05z0B zU1>QAloNw0^1Tac7=tPflg347234{^PWKOIP^IV1K)Eoe8_ITR1cNHOBS~^s26d-r zDyXj+RJk(AjAT&vhg)G(qZm|0LJv@G461VbFi`Fcs%pP{L`O5ICraYigF!uOAZf!f z4C;By0#IWaRJHG3P~R}9nlO1IkSBwBytoWYV2`hh_Oj=2YF1%nDwCYhBCO0|C^ zMitMXLX?%vDh4%ES;?$sP~k>1@e8hDP;)v+W^^rsnj3lp)H((gv7ig6^$aR9?j)!U z3@YlXoXGi+K`puy1uB6-ML(9j+C~Ny(|Q(h)HvXa@wpmy}%fKlycP$|P@WAY1w+CAnUPU)P4q)(f$^wGzOI!x*t?JgUVVUQ{Mpw zl^rLsI>?}oU2O~MHwJa$jxVST29@*p7f^>7)am|m4m*=UogLl*)L{m7ZjAItXECU} zZC61ZVNm(|%|T@|s6u5WbCf|{Y9Rg5#~9QVBWcGw&Y+6gOV#oOgDMW)2I?e(Dp}AG zR1SkGjZ=X-#h`9n{R~ttgDSft$;{IX>h|N+pw2L;^8WH1JjM&NHZI4OW24XHd_LCW0zpP}S|_m~tV5stH{S>H>p$ z9a#nHB7yo^BW@I^OAJc0s1Vd;2BmeU7pN-?O6T!LP=7M0I(8306)~uK&Uv7&GAO+< zQcx&nQ2N^>&v1=F8SL*1s)Ru`J}PI{uQRA-4J1KV%AgF5JV5=$pp4o}>*5UtWgHp} z>L!C~9a#*jj6s>ijR19vL75hv0(F}~ncZmz>JEcy|5zSgcNtWt{*q@XXHcD;&w{$g zpv=dd1a+T5S!~+^>Td>Rxld|d6%4AovXXhgpn5eJf>Bj6C@Z6`pdKD3i<^1~s;QB1Wa5 zt^OOZzV-~2_0?w#YJB8VP?`+NE3Q4LS`5m&NWM5)49e$D3sBk&YRY4&TIw(;zy5g0@MqxGsZI?Yju79)k+W&H+`QLCtJ16_g%>3OD)!R09Sz zr+p|WeFil*OrFjS8B|2%1yBYIDstH!P>mQ=RM9a|jTzLU+fp%U!l0rbJA-Pntbfn=`1mG13}g$e@;QTMViNgIcjq&S4udsCZ>1(~?20X&~>$GiFfhj5I*C zVo)2}&j8h$K_!Ha0`)nAN{l=X%7j5BFOwa88wRzxND_3W3~I|AT~KWq)Yiw{K$$V9 z9sP|#wPR2z&f`I~XHdJxNOyDx2DN9KG);A6P^tSQ&(MiM?aOWi>I(*y-r#dkof*`@ z7WYAQVNe;$O2(W)WroVAx-zJ&1&u*jFsSThQuFG@ppIRg1WXr#pbvv8YCj31vSm=kp{GH8#h^-*>QY|@Rl00DM%9l&-6$FZ%8o&mDchy~4C?k{ zd3f0~sPg{OU^ak3-FKF!vjc;w7!w9+AcLyh)(6xe2355$7nCD|dYo+pYA}O(rYBW^ 
zAq?tyizlFlGN|hIQnhqqP&J|TK@DS2ua%BpX96`+V_7^#HJm|d7Ds__x&zZ%V; zn(3_v<-wo~TU-J)hCvy%?+I!wgE9{N64W;gs&!;KC{G4uvP`O$;~11_k>nZ1Gbpp$ zQY@appxQrL4a$o_b+VH@!$bzvd6?v{ycv}F7|CBvVo(-a4}Iswbw45< zzf%}gFFh$?Pi0V6EhdBVWl%QlHiPnGP_`i@pr$dXzLEVw`7iOpCzbJ1~u}? zbWmXo%Dw(0P%{~nM~e}lW-+L-?e>8RXHcFYQJ`issPXe(gPOyjykcuWeaoP{|CGZH za~YJ6vXYs{pr$;Mj+6)nA{kVmM<%ER3@T{r8BkFSN~Nr17BZ-i zBifg=mT#54^9lyFV(&{( zD;ZS$5!qVCGpIH7{{pp&L9J^s2h?f?wV~Y}P-_@eLWm!zwG1jT@+(m57*z5yIaaWq zL2WMT0BQq++HzYqCOD#R{-AzhPzPJ2f!e~LGTKS5?`H;;shnNf%Am3$<$ZnI7*zH$ z`CM&hP{)eo>AZtMolqv3oeV1Habt`sg+ZORy9R0(gF5S+0BSdbI;SLle_>F0+oWG~ z4};3zcMH_7465+RI#8($>XM#xX6|KBS6awn(tQl7sNDll`x#WRvRz7JP$l!_>734> zN|%{}I>4ZA{P_aZK?YTJI||fq4C?kHSu-p87 zP7shn8I+-sJO}d`lu^3_pb8k2afo#M7BZ;T z^EZRKz@SW)wE%UIL76HmnM(}H?DkI>)nx|N{?SHIR~S?$y8uvsGN{hOq;au`L796T z1$C7{StzgHEM`!ad*5JG*BDgyBSxS~7*sDkeNfjKlvN8^iNbNK^yn$5I}B=woh-KRGAO5Ea?Y!q zK{+ecrF#s@WvgV0?lUOYeezH6Zw58;h}2Rm7?itS7f=rv)Z0FpN(MEyT_Q&HkU@Ed z$P=fEL5-ik9@HZS77Q6_WKgs2T<}Q}1g~uNYK#i*cY{GpITDq(1nD zLCtrF0i~g%{`;>+EgUx*)MpH8ak``|G#S(~1KB&*Vo*PPp#w^bL9JRS+a+xVweFfc z=ye!W!ZVrrYBQ)!BjnSr%bmMt=hd1)mEDqp!siU?c9on3Fkw*T{VsxP!=UaDlYRzM234W# zCfYKn%B^!SDl-OEwO6*`?HJVKEGY@KXHd`TuLjkDK|ME=GjSamRCT)vpgJ+Anh<%H z#TN|f^?Ykkoe7kiMy!0Ux-clsKV^k%&Y-kzDeu2xP&!r8G-bh{>h#+OsvCosW466*)54}&sY`xhu%24!|jT5i5#Q0=P@f$GbkI=&tNsvm>u z>?9>2I|gO$A*1Tgpe%gOgR*B(mV50%4Pa2+v!o%`fkE{=+XU1=24!U^AJIV!%BEc> zP>u}B*3u5tUkzFs;UO%%%BFpZVzfW zgK|6!Q;>$<|<#tx;&Z8KVhv8vRZVYN{ zyFsAb8I-4`GpNxFYW)1>pgb6qSL_H-V;GdTGRcf(P(HVQ#i+hvP*bX;8t%!Ud|%7g zc^rfCce)E|JcA1K@B}r1K?V672j#_}RC^6UO=M6Z%1XwYL4}>E#Hc1QsBptnP(BQ5 zPP?9kT1?u1{JyVCMaJ96}48*@cJ>RMYlXaO=D2eRq{{JpFu5o zB`+ilU{JA6(nb=y_Nc|Jk3TnfewnsI2+&)`;&IRCcWL{woG`Y^|j37c;06x1^_N34_Y1 zngl9_LFK-V0JW4sogMZws8|Md&O;Z}G6t3BGZ0i9gUa74U!3n5RAJT>P|F$A#WQO` z{lK8E7)n`o1%oOwI{|7XgDUPOHLrLERWe@_)G7v58Y{`n)eP#!+C)%m7*yG8wU zdV$)?pmdZ;W*dXjeI*5j?F_1(GRf>L;sB_<49X;SFsOYD%5-fCsQnDe z>{e4yX$-1;l{=_(2G#M^c2EZxRA;AEpbj!9bEO&YHwI-fsT8BiU{IEO7l1m%pt@%@ 
z0F}w0dY+N0pYEYG&OFhY;2EU2|mBXN%oQ8lp#h{!==Yz^+P%e|sgF4NiTvKIPdxk-c zRH{p78I+rnllh%Nc_@?2IR-V>Y#w6u2ZQqLmIo@2L5+`?3+g+T|%(ofdfW^Rk z%A|*`fI&^E`U+GbgYtbP`PvH%%HPQi)I|mrIJyMXB?c9w>~bzMC{?Ot+^;aGkgP_a z{$x;LXXH^|#Gt|rrJ8z`LCrB64XT(y&F!`c)HMbb5fKWigh53vy$tF)gNj<)7gQ;O zT2yu#)L#rLx~dDP8w_g6t9Vd18C0y3EvPaE6*pRv7PlDG@<~rY-DXfL_O=6che5?3 zeg*0-gIay&BB*i(wa#!DsCx`*gV`WZ_Zd_|w;w?L&7cw^q?%g6ppuv7fO^27Hm~go zs**u%DUhvi)3KMX4EjPzzcXHW+XB?tL}L1mbkgQ{jwnce1tddZ-&A|wY{!=SR4%Gdc7 zgF3d>5Y%f1b)xJfs5cBMr%E4`Ms4-qfaRY1N><39F{ran(om_%pw5l{2~;fxl{aZ4 zC@lt+pDLr$W>AHPC5x=Xpe~*n0ID{Fx?*SmN|!+unVEp9!=Q@0g@CHdph_Z!f~v=$ zN|&AiRi8oKSR)PKdJL+pOp^Bv7}V{DuR-ZEsJpKmKs97g_noAgYQUf>Mn{8c#GooC z9RbytK~<&tf@;E`9v_w*WK#z9^h_&I%^1{k!#bdvGpK5_hM)`?R86-fpjt4f*AdpB zj0n_djiqutt0jZdTyqVSF@w@7lZMJx3`*zWGf=G=l=i$Iw&D1+1(P;D7hxi+SIx#4drN4ptf9|^C4=g&AA)$@0U7K{+`|)v^zRavt3elr4jDnY0zuR}9KE)df^v z1~u|<7N~v<%1wFD+c78)WjE2EL5*#D55J&2gYxVq3&;TsYP{0N>cF79mM+Jr1~Mq` zH77s~Vo*M1vJH1+P*bWTQ#6=C`M#2T`w#}@@8k_?D1!y8qJ_$&jo<;U{F7FkUh>A2DNH|B!0&-sC65vL4CuZ z5}y15%9BBD8ZN2(aSZC`sq&y7&!Bc3l`qZ&2K9@vYyrF&)ZQMQK}}>(>EAno@@7yO zZ6(hzi9uy{`y7-HgUXsGDb~phDtl=fs3{EU*cy2{Pi0Ui%H(t9%b;=|?gZt>pmJ;O zfSSgj&JHaE<i9MpFVN~=sh4ABfq=V1`2 z#SBWfMm8o(7*xHXA)sOyl%9J(P)iwP)$$I2K7CI zGHfmx$mI;msO>yZKQJg`i#$*(7*y+d<)BtFD3hhFLB%sD(={^ntzuASWoD=P z4hOY{L3ON=cTKHjP@RWXfLh0(%-v6eTF;;?yd{6Nfk9dRDrJrz8C3VfQUyq0P(4q} z3V9=gvTEKPR3d}2Y5P5>BnD+`kq#=ELG_&{d*@9I%5JG_0X8!z`!#(*{luUgZdQWY z!k`8{{0h|13~F%Abx>Owl+#ctRBdBW&hCa zl-p@1P`enEM{`q9yBXBjwxh#*Tj3p83~cc3@YsOm!Q%Z)U1p0u}Ei7bK1(6=m3M7 zYjF(JK?W6J8wlz*1{E2T11f_-MXfObb%;SN+B^kRCWDH8I26=j2DRjs1*j|r6|0*9 z>Ij31bB_X*&7hV~>H_L0gIW=E6x1;W6|d~>k29#%r(0oECm7V)i_*-0l0j`yCYc-t zm0+A|~O9JW)gW9rr8mO}jYU@KyP`@*%?KQhWonufb zx_dzV!Ju}#dx6ShP=J_a9J&4C=te6`(FKsEoE! 
z)49l?GA+)6y2PNeY;{3hW>DENk_oxOppLD60P0T$bz-yBzls=C&clwNt}>|Hn(3g5 z8Pr)_dr;RH)H!!)Brai4dEWA7lIsjAe};UnN*Pq);m<++#h@;p)&g~dL0!HiMfRHv zs;F%zP-P6N*g}fzw-{83t+b)uW>BRuQm44Xpl+=G4%A%+RkqmyR5^pXUAY<5JqC5R zMi10|26bOI9Ms0HI2{<$~= z)FTE}-BzABj~P^rl9PGDpkDWpB=A!LHAW+5A*g2zN^|u+Q2#I}t<94Cc+Q}7D$7B= zU{Ja>6G2rosCv4-pk6X4J$EVa)i5Z1?+c(_F(`u|X{~(Cpc-e&7T^toYMNUBN<&xu zcVUfbesLhE&lr?ZTiNhxGALt<`JiessMdXUfzo17CNc8G(PmJlt7U7c!=TJI%SW^} zgKDoNesvjC#~NAn)nQPbbw`7$%b?8N<-<^qL0L@d52`+cvJ9FDN{>Nx&&&eVfI;=l zl_LiF4666VAW#h%l#S^dPzDUj)?yi`MhvQNAK8XCW>9u9!$37*Q1)x;fojU295%@c zxfz2RRCxtda|SiICK!|y#dOEL3wtQBzYSKHNMYUP^JvZD`qgLwhYR9buK6~2IaHq zGN^V8YD%RX!fnr>d|%39y90yr*EI*#kwFEzOP-+dN8QyhmoLqGN>gl4}P-m+b?W>71pr-HI!Q1O|P z{piD>R_99o%9cT`y&x0vR}5-{sc+cij(8gu154;C8LWN`D$%9{lpTXgULqfd{tRmK z>e`^}8Pt|dOF#`^P+O~{9nXP5ZGV{tY9ND3(QOB65QEyS9JX_0PUeV>a1>GjLMBcopbvGlskjU^OjY^Xa?^%7;N!xW|K<%%CbKO4s%j z230kEF{r5w>T%{>P`(W6X|B{v{20_f7Y=}$#-OTAH-PeIP&HkvKm{I8vISVk zpe($*fm+0%ET>Cp{W}KLJ##&%Xa?0Y_a{(`8C35J<)D@@C>zrhP%#Y3R@p8sWl(*U zl}s#yvRfkM;$;lVesvR2aSX~~lT3ZzGpIq8!$B=)P=jBN1N8%ga;p71s1*##*=;|l zl?=+oyB?@`2IV@v8K_kZYGh^uP^%e~Tdu6H)-b5i7q)_0%b><8+og33%CoDS!(Pvz z#@k5$&IShMwL})%KQbuq)s~A4{ zKZ4rCpn{Z~%w`6q3Y2a5PYf#LkmO{xFsQKHKA?VPP_r(q0JW7t%`yEB)HVh+x9b#8 z+Zj}ZvXa@spdw?oU{pI9RMhHPpi&srqD>O3T?{Jv!D~>v8Pt-OgF*empkj4rfZD^L z;@llU{mP)0d&|=~l|iitd;)4OgNi>SZ@%BhpjPKf#biH&T6BetS3w!b9?P&Sy{;bL)dDU{IGY$Vsn4232G_ z8q@^_Rjh26E;6VRo9!6YB?eU*Gab}r26bcA15j5ORN1D^p#Ee~w=1RNw}?UAedz$| zDucRTTejiF464Gt6{u?rs&b;iR*Kz*aJWIm|d3`%pAlx6QQD6LIWo4CuMbRNj4${CdI zOKDuZ$Dr!z$R_SSgVJ-G2Jfu7R#q~P8B}Ww$vkIJ z9bal=R4*7*=i1+Zs%B8;$|UoWL0Kp(nHmOV8JLV;@D+pVen?WRuNhR&Q>Q__VNku5 z?UF_v_1}Q?jg6^%7(QcAwp|;6(qvG5Z6ZL`Vo-KVq>iJ-pzK#=gVJVD4#}@T=`g54 z59I7pZ3Z>CS}G>G49ZEFWa=;|XE#%fsxE_anJDY4dJM`nP#PEOGpLb=B$=tlpxkn0 zYuSK7jlM7mls|LKjf28Ppt8shE7upyqa!Er1Dwim>SesttpRTp~|rQw9~aN;YwA8PuXp zI-txLRCJ{^sCEo$i89HwXHc=V+h9~37*w2cte_)cpT{1jgbQ{DMKnD`%HF zGpND-4w9VnD-xGjUqXtNa5R}3oCdd&A~BzFL1&!BQD%0UfaP`T9>pd1*~Sskf)4P;Q~M*RkA5QEC|k`lHfgUSzh4r(xi 
zDm*01+93?;;;An{4P{W53nf-g463M&ENh1`sA6;Jj&^2HCCW->ID;x(ybz;uVNf?# z)dDqwL6s#-I@*;%-L8=J)z=K_ZnZp}M>44UI`Z#!7{#C}Mn!>gV^Eb|w?MfwsHy8ES5f* z@eE3HReews7?f7h3s7DRN~c23E=^=my4AA2@@7!=bk=~H#Gv$)Nydjk>3c>H)%}o^WoI&|o~JxP&0RSe7 zr%W<)8I=7hDfiD~P!7pfpduL5pa+Ja<};|l$|MuXpqy&Ub8rEJa#nIOQ4Gq(D;J|$ z$e>&Un}J%yphjjq1@#?+ayu0UDw;u!R<=uv8PwP|UKrI92IXn~1E?4VHQuHdsHF_b zYq9ix#4;%FRZT!GV^BWH--3!`P*WZlgZiF9`Bu*awVXlu*KP>v2L=`BW&mmhg9`GJ zjmb&|r3#d_Xgq@o$*2Ogia~{)Dgd>bLCq?Z?8h1gHK&a{o!2s`x#qusTF0OwtSdpS zXHbzbbwF)kP*L&sK>f&|7A3oaN?=gY70*CzWKc`0rByzWLB-ad0V;_>#f=ICmCT@) zd&w4H6N6fzBz`wDsQ5!67}ZYSqSEp^dB=wlb&$rG&kWK_yzt zguIe2}YRcyWpqdLi; zO02hl%3)BYOCgDOkz0qQh^x?Lgd-)9)q-Rjn$&N8U`IzNH>ok3NM z>J91~gR1lj2lWSostU*fmB*kSXB-E0oD`QY5i&H_}Vo;{>(V%WKD6^zeP6OV1gUla6dH zUoa@=kdc~mJa-=N#nn8^&kZt%I1~t~?Bq)u# z>c9VLoTqsMP@gfV@zzP8G#QlFVmDB=7?gK>Ur<^M$|orcls1E!Qn4A74ukT2Q3$Fw zgYwtu1WK1d1&+J}st$t+^6CkyE`w49$T_ci3@Ri;wgB}RR9KE|E%g}Gtb!|`8Zf9i zCQ>ocXHav^JA!J+pdzg0Y`6h~id-yDoJI^PN||IDGpI#LKVwu)7*uqHbfh$8P)lAs z1l5c|#p=kx+vW@^Zlrv13>nmNuc4q?FsKy)3qct%sQ3(TP%RnM>X&Cg88fK01&=_r zVo)1Qq@STRgG%Tkd4|s!RHF4RP$mp2d9f|1HVkTWyg4XS2DK$gYF=#_)Yb}V;5TDX z+h1G<)s8`>Xg>zko zsylZw8egAS+}m22~jT29z~}x|rh#%7#H* zE|7vk9|ra3_0FJd8B}qX*Py;)P$kMF)0aV&4wMygKL&LpUh)if45~~iVfSZHw|BL` zsO%Zk-D>$T3}8_Ab!5Zqz@RD`$*2Z0s7fz+N6H`uRTU^tXGaG0I6NQJU?>Apw?^)-X4r!94ykqk<&ksYW}3`*Z?5GXeWWe_kClskiJ94;w~(G03- z&X=G(7*z9ut)Rv*s1_x6L5*cl#!55ZHw>z^wKREpGANUQT|kXvP^R(HxHz6cnI)Y9 zHGx63PuT*>i$QgKA$f+0463ts3Mg*|W!|VIs7VaULOCGm!=Nk!I$%_j8C3UhDfdrd zP(5?L05z3C^)4_3<;$R~OO}K3V^FqTq*Z>4P_6;81qfwOBg1_` zg)u0%9O-YL$)H9TNNdC_1~sPSIjC?3<=JH!sM!o^ytO1V=P)R*f%>4nWl-MnwLr~f zP(DdAAc}}3@Rji zG^j-kDlDfdsP7omtO9pX(F|&K$z@QB8Pwb^OF%7QP!Y-`6T_e)mF?0}1{JkZp3boh zYEhDG;+8R}=oD$EjAKwsUQ7Y?J%ftXwgHm{U5 z!v+SmB}u+GKQgGTyXu2VU{Kp%%mlTOL8WMq1eM63b{o6^mBgU-Opr`bGJ{GD_zcu0 z2DL9-CgjZwD(&P=P(LxK1Ii?`g+cvRA_aw?8B}JMji9zNs4S~Xptdon?17q~wlk<> zD>FguU{EKLJ_ogvLFJ@)fl6Uexi6Z5+Qp#GYW)LhH-kFYNDhYl!l3e$?b03wl^ 
zQT@uG3TNE|mCB$lDwE7!26ee$C`Pr9LH${>5Y&DKRorC}s5AyuVig4{ok5j4RVp37Sq!Qw zKvsQ67}VogvJKB>P)~D4fI7;c{!vb0A7fB2l%WRUR%k=VsqY0D9evkXel;1^K8GbsHDvN1Ww zpbY-U+P(Poe1CBOC%H_KmLy3@EM1tT(hNx>myjflE*hy^lY~T)B-f-_x-d(UOOhlB zBWZ4lS(03mv|JLSlFRRPKKm1XuYchAIDLJ;@6WltKa_Im27}UCE^BWk3`+abEl@WZ zluqe;P^An?_rVKLw-{7Ub0<)@8C0(!vK{>wgEIWl7StUEWwaw0)ZYxsc#kuvG6vP} z&> z$;9LpgL1oN0;--txj*ay>NSI!X+9QI1B3EZGMP6F%3C>P-ZChk9nz|dr<8zt z$Dn)>)g9Ca1{F9@j=mNK6|{UUsE-UP z_>v~5PYh~hf*dlR8PqD}!qFE7wR*@XMAb^||9}6LSGa?0(6wez5j%bdrNW>hQyzh8 z!=R!vo`X_lP|-?_)0RPPYTF-CwPR3QG;2Y%XHeU`-hk@Bpkn9Ay@{_FRNQj;tU5BN z_)C(DsxhctrPn~IGpK|Ij-Wa*s3h~bpgJ?Clp#i-G#J$0A6J3u!l3rWbpfTxpi)!* z0rfS5N;@Cv)Dfk|>BgWky}a-T>M*FRc{@RMXHeN8dqC+j zs2t^x>A|3KZ%I9<$Dr~aSc2-wpw5`f%R2NK)Y&25f$GJe&O0mxWx${=>=+KpkU&V3lvRaNOg8Z)Ttng*cyGN=+|gRUQgDxGJCsJ>xPw?kS$ znJ}n3mzqHJXHaFgqy#W!Q00{mL76eA`)21rnKP&dLt25dU{H@7hJhNupsJJ$M*|sD zb&5=QzhzK088QtY#Gq=MszH6npq{JBR+uG&dZ{TZ&4U?Ky_fWmtr%3pJTp+%4C-yj z3Q#r->fNQOpoTE0#?o9+whXGdvI5li4CU8+cJ2F+`W&uS`z zvJ9C7%9TM`U9tf+jX~LzN{iKvLD^Qyb8tF?`oZisQ0@$BxQ#^h6N9pIFa$M&LD|Pj z?L3n~Ii!Sx@?cP-GiHMFWKfPx3qg4?sPU@DKzTDLr!Fr+&00B!PaC-l;Lo7E9omBmU{F4> zHK67(sM#qYpyo3u-wY|k7ceNlCTCC!8Pq&g=@czuPzy8%gIdg>7I}>Z7093h1EiU` zgh2&`NNE|wpn@;;0=1Mug_O#4DVRa6sJsGd8G~A7wg^-RgIaBK3)FH36+Y?%s1*z< zBK8ibl?*B}Mg9dt8B|op2vDmSRCJU4oWmH@Ce^K=Rx_wAU8EhohCyxfJP#_ILB$3X zfm+L;;zDHGFoHqF7s_;L9fR6cIv-RdgG#8BGJHLQN-~olP85Smu}KHDfkEvZ^%+z& zgW4A>?{wP8pi)ytf{I~KX-c`Yi9w||bwgB}8Pp-w9iX-_s3ToIf!fNTGCfm3ZDUYb z0cSyNXHeN8-+_u{P&tJ%$JxQ4a!WUYiepfDm06&6GN?0V^23Q|P-ks6f%=6(omVoM zT@30%?4OA0R|Zv>+yE+pK@};P%x(r%-1H5iN@P%1Ri#su#GtNs`3qDsgDUYH1S*9= zl`4nK9tL$gqywVb%b@NQ{tfCk231x%5!5~gRbJ@}>URcp-%K`AQW?|(n+Q<*8Puau zm7vlXRF#s+9AHq@$ew}c9H(oVFp$2DJ|9` z45}gEB&eed>TQTzgUV!3?+T^+ag0GVDw#|cgKDm{LsZ8Z)JHQtP}vOXvyI#UJwc#m zwHj3pDu+R-#L5=yNd~2wEIUQH460p*KB!X+szXypP58G8I*>{ z7f@#ylxBeR844JbmXgVwWl-9M(q}lwpmc8i4C*|C(yjai>JJ9h(@ffi7Z_A8n>nB^ zGAP4Q@^da^P)4zDL0w`{#>r`*iWpSCj832~GboeBT2RFd%1m_ts6QE$MHk7CuP~^A 
zo>ri)GN?fT`$1h}P?pO|L0xB1R>~oBgF)Hc%tcfs49d1r-p_E8LH%GR&%sg#HQdG@ z)GY>OH%flaw;7auY;RD1F(`**SzNrsphjoNYQ*0R%2CN=${5sm<&e3{pq#pVi$AcO zK~3_M!sH%^??JhO@svUNHOh^u8U{75txQawF{lMy%t6&M zs70P#LH)y^0t427s$)<=A$>tTXHda~GFX4Xph9lSxc?=CT5-P_)GG$H%2YN|>KWAP zA#w%dHG>Ktbq-VmgNlgl0_qKeicFSu%C`(E>fm!w|1zlPCP{1GF{n*SCi9*_ZRs)% zQ8hBCZA!V+#GqmWWMb0HpyHOvcJv1Z6<>G*R11UJbu$*!M+TLk95SC6RFdgkMD>|L zrPxT$@P$F`9VM#~tQk%2-x29@dA6I6Q!l@%ZplMW0jdzlOrzG6^0g>u=UBZJDl=?6-ULFHBU0j18M z&X_&|)rmo!wb2IEnL(W&RR~IhL0#Bh1F8#yDomaSN|Qkq9efDtYX()^cm$LdgSy&A zn&e#>)OC%EptKoOiHAHUx-qEI09Q~t4C?l>C{W!Q)Sbd*pmZ5j+08AWdN8Q+`xT({ z7}R}JDNK4Ys0TJip!6Bkqfv`N^~v5A7?f(V{B#B~sCEZegZh?1 z{kJJIh(W2fsX|oWF{n-&QixkJC=Cy3XAWjingN|bSurTBW!|8y8I*S6R8TeyO6R5p zs38nW_x@>6whXGLX*{U!8B{N8$*_hpD8o?`LH)p>jAFw;4P#Ko$=`w+&Y=2bsDc{7 zpiCM)LD?}Vvo_Kz8p)t6x=63cobAHnnBsz zbOAMnLD}Ayts+MT^@HhAP-7X?aO-EF#xW?nQL?%?o_xlGyxiP4jrs1HbGbm5%B2exO%6pV-jQ+%+e3Y)t3#-Ls6hXBpnMoq(6Rxber8a?g}p${ zW>6tFhl85KpjO=94$7B7tumbpYA%CXZG8ijAA<@XC2OAk3@T##9#8=cDl)kpsCf)3 z>Y#Mh<};}1#&19^U{IUd900YDL2c1E4r&pD+U6lMlf?`w*8dTxKn4}JOdr$|1{Gf@ zPn;kIwd-boP)iw9!u>U%f*Dkjsl4oA8G}l(&I1*~p!SYZ1GSt%?c06~)CvZbnk<(v zRx+rxgL6QIGN|;%ZlG2%s6%ZcK!q`=BO0<7wwgg@dMpLChCyZdOR^o#pt6@)gIdd= zata56ieON=H`{|+$Ds1=%O#9R26e_%b~D#AsI%4^Kt(aA^Y&jrZD3Frw#R{rW>AGm zFF|c&P(?~66T_g28|CQR#GtM!hse7{lcKCw#R_l#h|K_ zB-#FzLDd`_1uB6-)hgxEZU*(d%_KyX$e>oy;wL3TzR62v|kSnF-K?bGPMy^O^FsM!%dY}$5C=I>8K^%23HpsXU~L4TG(*(l}GIR<5We+i;G&!C3Z{sQU`1~pt+#=F3v?35AqMFwR*QOfW_ z2IY|a8>mYRYIOR)po$ojV~(^~FEgm|ZPY;(GbksGMWFsMDbB zStfIwYYfUYLN>{-Gbp!`JD_ebDEIrnf+}H9Gya(m>L!EoR9+=s%AmaMA0w(;49aKX z3sAQi)a)eLDf){+`5v4K>JEeQ%aNV!zZuj#mFu9&7}NrdbD-`rs6{>fL6tM8K>x9z z?lGwU_GBs;RPfr*i0VFr3Q>A8l?-adeJSD|FsRVlL!cfqsMXf;Wq8D(!tG^~{4s-y zm?)PWsu)ycQZlF~3@R#Jc8aPQRCLZbP)`}urZ%lW)i9_n8pfcWF{o{NGFY!=P_h1! 
zGyKD#;(|*-)iJ2}wUv2dN>wtMHw-E*U3QAzGN|+%`K9np|J!+QpmskaMMBZJEF zcLvqOpt6I{fNEw?Icw!I=?4atTe2He3xmq5Xa@C>LFL!Z1oeqQowarV^_fAPR}Psk z4C=x}8LYSJ^#8vB>*vCxd7xS|sG@Y)3R7WF#V6`OwP8?Kl}tvJL0#8SK~!xSREgen zQ0*90ss9jA?HSbVV0k)sU{H6~%69Zu463YTAE=HDs=Pv)&UPVxlRg+bM%KLn-8plVM@ukULH^<3pJC@lu{QbPk&R|ZwD zCp$&j464Du8>ns!>TR$*opl)0yR~Vcx-+Q85}D)ZGN|T?+n{~g(qK>q460pv0VqQT)!{@jsNM`pO+^Y5BL>x}vuynK zVNe=+13(!wC{2H9X7*)JTEXK%^Rz87rC000z}BT|TRU49et03aD=xl$na$!XCt+EIR)K z>N^HCP_Gv#O9nN_zXa4^24xxi9VjaXWwlmTBdi&eO^HkaY#5Yng=|L;VNgSB^+DM( zsNq&J$N8Q?*^QK=ZzzMZpC|)`9~hKFl1xm7F{sh$>7a%)D901AphhsL@hVZE>==}j zhP>cwB!ilyCzCjP2IcJE1Jo!6$HkQ0^5+5Y-q4HKVo* zC`Sh6X_W+OEQ9hM`3BTD2IVtR+R@_~)a;}LP!kxGZ~8V+P7KOVIbsCmjEGl@Yh z=zJW1;A951NKblw&I~Hh-yPHx1{Jil1(XYe3SKJ%g{cfGq(u4*t_*5Lh5VeSF{sd5 zRZwmWYPFTzcb?9m!bi%)#GOG!O#A`VPYfzDNm}2epgb7VCKXu` z^kh(5l*X?YgW9Gi?PzZX73<#?)GP)Sx3mV74}*$dE6MiH3~E=2%yDKjsDz5mpyn{B z#M=I#d>K@V)vuuDGN`>HpMvsZQ2Qo+4a%QEr6%cs3Sdxa>2e^?V^HZQ+(6A|P={0| zfm*E0OL;1cNHCkll}U z465Q;6{tuC^}y;0sPzo$(a39{q8L<_lZ^W}FsSOJX`rGRR89IEP#YOk?TOK#Vi?qO zm3mN{7}QH;y0n==)hmb076#SeCu7;I4C<|t$!ud#@79_ls_hJ_QEB|fGN|SX8O!cq zP%Y0^fQn;KpRMGFvy(v0X*E)|qvIKric>YHUl^2X;s;Q>7*xA-X-EIcpgNq8GCYAn zsi~NP+RdOkbq)fR$e=WO+yj-wpfvqXf=XskT1%gUN?}miYmGteVNg0Zq?x&wLFrZm zfclL=={-vXwU0sdvYG|zcLrrRQWh6e8I+MzGpPLx$~Z}$IB5*3-vPPjb$~&coUj0u z&Y;YcL*^iZvgrI1qRL=U1AF`p>JWn(VFqQnv=r1424%HY1JqFlWpm>ds7wZB zTd@$-F$OjC*>F%<3~IR51W?Brl-LP=hop=sZA%pTg@CT?%49f3> z6{sQxHLvwEP?s6hg3h*}iW$_R901{Jh)52&jQDmYxq@M{bznC@Y${AGL(ygHGF{t=(d3aSYs9iV8 zLEUFi2})0>ty=G8_iBmu|FsPyf zlJ&h|P{rB1LA_;AS6dGS^)G|E-gz*ncMPgTPaD*G234vYGK~!C_R^_{s)<3}QAXI! 
z45~~yWIiyc@_Vwl*utPHYTknS$eJD(z$Q?w8B}d{HYgPa^}O{xP;D60OXZMJWl;4!h02CQ0*Di zyYMZbIxwil8!JG4#h{w+y#v*eLA5;71*OKIJ`a{_1?mLKx0Q0pbYf5{P9cb@GlNo1 zl$~u22G#CBTTopXREO*^P?`)%t+kW@Uo)sqoiadaF({25qd;|KP?~caKxs25t){HdIc7_fd?14wuN*SpGAO6c?GV)<1~p0dZ&2ScDCfEIbhczr zF3R1d!3@eZJQ7h^F(|hiGV`)#Q118KLD?{<88tHI7{Z`D2TPy9mO*)s^aJ%hgYt1Y z2x=&Un!Woys2>=V?}1yOhA}9=6S7V@oI(A!Co_UUE$H+YqOxO9i*zeMjbu=PbLAqN zJ%b8bDhp<#7*w!w$T%>lkQ-7v|Hz5n3@Uc6 zJaL>ERNT_ppr$aW`0#0?tY4>eqvB*O1U(HL8WIeM^rNz)S=d=KzT5zBb}sE&g6~Lg1mFdzv26eTyd>Q65sO!ohvw%UBDCN>Z230!u zBL2Wd4C;1}cQY`pn@6HBRhGd zE@MztPTfF-FsN#!Tw2bcY7Uqpsuc{XHd}f!D;d=D*4Cgx8Pv;8GD}^>pz3uGg9>9% z4Rd8!yqZD14Uz@3H4N%q_*hWk465-)e^6@~RP#NV4@NMkmYN-))-kBhgC~QEBv5l( zDTmB@2Bk7V78j!!l1276zrYR6eV%3`#p(e$Lw%l#X%>dpm>Dz1JC0#WE~WxeWl(0VGbpQYTTm$s%H~FUP8ZMU|jxZ><>(@aYWl-+r??7cTs2Me~9es>J zc@EYDmBpaE?GAuC&Y*lINQRuvpl0uu&CC-F$~Ub7R1Smk%a(`NNd`5qwX~yi8PtMK z@&bxe3~G_CM3u*&0_RFI^E87B3X*aupFssH8^325RLJ#1h^l}=tthVrb(TSe){F;r zjzO)qlN11crc9TL8C2|C`HKF@pyGn$=)1z8;={E-U1dpwbTX19h80rDt~s^%sLW)OrA@ zI}GYbr$wOtW>A?*CR4_svgRfrs=Ev-Te-Vb&Y*JEd_Yw97*y`{H=rsQR9^W>Q1=;B zzH-P^GN`kbuMyP)26f(UDX51G>cRw9P>&c?q0*Ci%%FjvU0oIzpE9Tt-Jzgr7*y$8c{AQK26cO>9LTi{>dqSJU;V?N%C5`Aq>e$AmtO|; zoIzD6hs+BG^}w0UA-Y}@= zt*Su1Wl%5G?7D*b#Gq6r1O)qy!5gp`Si3Z-2){6>|JGYtY5f0h z!17aTm4~QWGpJ7L*`QPyl!mS;s5T5rb8b6Ostig?IUCwCDD5=~h^iff(zz}dq1rPj z-SYjQIxr|bG!53h6N@hu!9&dNQcdX-7cmGbl%; z%IU?R#R0a&nNqr(HLk2ae`!i6z8I-f{X;4NC$|XqF()%!||JGZK8I;>~dE)eC zQ118ShtrQi&8V3Q>Kg{-@#aTRCJf43sdD-=D4z+k(O}A;W>4P=%8Wtz9*`NYIfL>$ zUJA;BK?R&^4Qc>`TA-9m0~ypJ-Cl_5TLu-_PpaoZ3@RvS45;rIRPdT)P?iiTWYZs@ z1~aG?<=a77F{sd|lJ!|LsIWJ(1!=>e!bh}#8p5C=CTs>}%b+5a$@BLNDk^Ojq8iGe zqK}^k^#g;7Ia>{C7=zlP95TZh)HYpNY97I$VtwWEv>k(r3zEWQB!h}yV*<*aLG8M3 z0csS3N+_3Ussn>ce0l=Zj|?jL%`{M>8Pwhp@^l`MuNSCE3@STF`d5<~RL&Y{M>{j9 z+)W!mO<_=ZUK~EP+kn`j*`iE zGpMpns}a>K231~e2Fiy)RXmk{!JipaWrOS+&Sp@LM*IS54uh&vG8tb6RXx2gqMFN~ zYSQ|F@?%i7$EDBU&!Fngt^pOmpkAs=&M=Qb)hn6Id&Km`#f|5nN&vy?%pjK7Vjf*F+R^iiOe zF{pNFb3lbKs1C;^>s!vCI-Zr`%L)e7Nqq{al?+Ow`(99?3`(=FJ*ZU-N^6O9iozI_ 
z_G)=LuVzp>o3cQyVNkl|vak`(p!8}~L9Jy_`pO{_!JrIB_#vuw49aMNbUz{)l<{JHU>5Hsf=Z}GpJz=X`o^ml--CGpms1Q`|&b~i(^m@ z)5n9_$)HB3S%QjZP>xw&K>fm?#+_XVY8QiYQp*PQD}$QUeHo|(2IXult^3^!$|Xo0 zR3d|NT`e=OBnIWCluOAB%DucZqDo;~uRM3(E zpfVU#@akevhZt1ICK)IkW>72c${gnig9?2*6Vy=#71kgl>`Vp~K0-=>V+<-{f)A)H z1{LY{0Mu~?6}A5fsB8umef$8Z6AUWmtS6`(2DL?9R!>hdsBPWlzHBaoiZw0-b&5g7 zE%64G$Dra@9|d)qLG9XP1S+3FCER@k>I{QQd@3bC0fS0zumW|KLG2wO4Z3p-YM)Xr zoo7&~ZqE_b9}FsO{|itT7*u+eJe@BxsEo7HDJo=8N7UuZaEU=>c9-3cA_kRZd>+(g z29>QGGQ|ukXSI};e=?}t7|D>YFsQt{cR^icQ29?Mfx5<^3K~X&y3U}^kB|$JHyG4~ z@u8qf7*yeO=@i{$P(}OmK$SA6;w<^$++t8y&dSvCHiNpZ_8!z<463BN2dFy?s?=C^ zivDI$x0lGMv5Z08S-lU`T?SRQNp7r{GpO>rCqdm~P!&&QVWWaURW^(Rb)P{!8X?Jc zC4;J(unyD%230-16{v>{s%HOQP>&c?ZB`wq#|*0OtRAQ;2K7?yF{mdDs=m97`>PpL zgYg|uPZ`wPC31JEhC#hsEfbSx45~5aA*fmg)m;89sDBt#%TrBIbqwkg$|b*vcn1~% z3s4#V6XJTopj4-8gL=uJ+U=J$gjWozLzaBs>lsu>W%B%*L3L8kM^p_AN~8NgP;VHN zrtxS{ZyA)?u_6%xRg9@k) z49ae}tZaP6pzOy>veuD7Ik+W)Qe#k~_j?4-F$r6u&cGbAWGvK)L5(Z852`bRa#E9A zMuS03>Mo^t7Y5~QEcLS{gK}A73hHYH<+^$TllNKlU{JpM9YN_aD8HfP&MY^cmCw zwOCNS7}O%2kDv?~RA3)@d>JyRpui?jy%|*SYN_ds7*t4%v<~|)s1zAPVF{tq2=RtkLpd!XghGoK_BHgxv>d&B}_O}IP%AlgNWLRa!pkfMYL76kC zEo$dLSum(=I`VTKz@TFL)PNeupyC3h)9@{WieD}B)IkhtSIkOK-!Z6!yP=>g8B}7m z>_HA@P|2?^gR){!dzIOGv4Ad|NmDQ&b)Nlrs9ry>R5ezD4wH7El29+DL1k^|dm3Ma$D0>E# zuVgZ#7*xS)d3ZT6sPn^r0rew;x-kB0P@@@Cp_{yAV+?~TN_`E=kwFz_$y)PR26d%C zGURa#>bly`pvE((lJ31gO<+)^eX2k?F{sGe%xP&NDIiQ~qgYLB-EHJw4# z6|4c}&Y)gyodxPA2GvmeC#V?=>bc( zG(dSXD7CeZK+R%M8kO?I@nKL}fpRVXX9lGm_88P`2Bo9qqH`FO?%g4X%9lauRZj*r zmqF>jmKw*8K^YF0UY|dMG8!ijuK)&RJWVREc?_yws;nW*XHX``GC?h1P^JZ~K`mrZ z79Br>TEw6R>U09Nm_ZHdGaFPOgR%^?0kwodS%obH6~v%yV&q?NDTA^tlV$5*1~v4_ zEl|rC)Uem`f}s!wWjDMFsO1dGe%y9YD;Si+vK<#BvyJC8P`i(&)lx2Y0 z$Dk6cbwT~kppsw9XO+sJ_70Z|@cS9mzHz5Pr7@_~X>p(qFsQUtS$j)oQ0d27KpkXI z83o^h%3x4OI=%sQh(TrQ$b|PWgUagj3#cOuDm!ojsG|%jCrnmPGZ|EFjBL;yV^Dcz zGAzzwQ29@qKpkgL1+RYsmCc~e4}S~l1cSOTE&)^ygDRXB0qP`!DoR}jDwjbOA1eoS zia}jD^9fWQgSxI}0_rq_D$$Xrb3TJA?IT6p83uJ*>B$r@s5@aYF*(bi$~MX?qRug> 
z^0H)5=NVMR6XhLP465>V3ZlBepdJnH4eBC;su~vps*pkbw}fzsLDi&sAgUq;ReMZc zw0D_7)fEf|Rm`AXs!6BlPX<-595PoJR6`$GJ-y1H-Ub?hy2ha1g)Iklok2BjdK+SJ8TpiRc2Bk7C9Mo+Fr8;dTsJ|FgyHrO|cNkQM zESbdp&7e9eeTVCU_V#91&e`c_OZvGSk9od0>1-wk3ngNNm^9Fpma9M9N<2K z(k+wkdnJR?dy)w10fW+iJr~qN24$!uAdeW7(KshW^_W2!E0gCc2GuWB3h^fl%H)`A zj8-!!(*k*)#Zv}l(eWQpH4JK?jx4@CV^D*XL#CENSq5q&s(%>Nf9oxE49aF>7NUC2 zplt7&gL=WBhCbN?>Lr63_F9s)R}9K-_!&_349b3-9;nw0%3<1MPz?-fv{L1~VNj08 zbD!>@8X1(!V(CFPF(}ut^`M#=l-tHR zpgu4t_cF;DS{T%fCwZVgGANJNDxf|wDDPqCL49UWKFT5Ug+a}pHWg8|()|D5fi>SZ z^&3#F8I<3#7*HwC7K7T>%^g%%1{G^`2$VL1id*atsvCof zUu_IZhe7SyD9Ltr29;1Y43sW|N_-Lpst1GmZ@op2LG2xO3{mxDQ2WM~fYN7Bsne`M z^@(5oe_h|>?Vat9|o1xr#&cR29-TfZanv8 zP&rEDw;zMb-6#k0Hw-FI$z)6zRQ?mm`ua1dg8IKenKG#J!=wZ-V^9~yN(o@jpbDoA z1!cjYihN|FVE}_FJ|;)sKn8W?%t27!GN^0C&p{1hP$k`d0QDV%Dm9u9%926d9w@I0 z7|fvVgvo(y#h}VIT7t4>Q04J*U)F{}RXmYS(GUhzSzipwmO(xIXb0+h230jy?sE)f zP}S3PLH)p>YJ66K8pfb%k6C~k&YgPmzIAK)mQ?xKxOPv zP~#Yss_REk;~7*tpXHz?FsKg4x`J|IP#w=if||&n)QeAnn#7xiBc5jgo9nWl*~Day`qHLFp-n%rpk2|JoK&xiKh%mS>=*Gbp36 z@u1upl(BNi{KTO8`N$p684SwgSa(n}8IL#MlGPc z8I<&c@ppoTt?#l<-cYS?S(s`)ag5iL2O<}xVz zu?e937?gvnbRhj1)My{ckOLT$$ zf?C9&T)vG4wU|M;`HHgL03TfkF_2nxVX*VJU<1s6U6Of*F+8$FD#w zV^BV0!$E~GsM*RPvz$Ts`bgHdfLyQ|Fa{N9 z^b4rf3@T_~D5y0IDtMK=3n83Ag+$BZ;#vl^BK~(!5ezEyi8M3UF{rS5xo95Apw_f3 z1GSz(MU0g`LllFGbiDy;1A~g1^%+z&gNjzlrHu?K=8W{OVi?rs;_;w1F{o|IGTvqe z728K0QEg#RaRY~d+RC8fSIq^rjX~|&mmP+9Skb@_0NbZjzR5d zSpsS&gW5M%7R=%qRH|z_s9zXVnvd*X?P5^r$K;9gD}&0&uLYIBpbi(G0JWPzWp?`p zR3d}QG71Kj#GtYVd;*ospmLN$CWS%eMwcO~Jq#)@zCWnF3@ZPLDyZKWR6)IrW%n_t zb1gDh|D8cy7~23Ul|dD{%J$BF236#v1uBg}6=&9fI>4Z=;D08<8?YGEwPG1_9Ar=> z-ONB`FsM=^=`$Q+P`3xjXLXoC-C5-Y>Ij1>i)fRaq~e zRThJK*x~@{ID@JhI}KDegQ`{znG+1EX4X?gmBXNFGiCSVB!jBUm+k0W2KA!YAJi!Z zRj*_+c?_z-NIFHQ8PwYWvQw1Lpx&*L65tGjYK$%dRluN{<7H}jmO-^t-2!!vL4B$> z0d<~1eQA*^7=I9`g(_nustXKC)pa$fiwvrr(vvA z0n}3lHA!1afEotnY$Q$cXAH_^Kr^UX2Iaa+mZtt;P;SxEj;>=+?(theJ!eoes@{Tn z!Js@|JqGoXL3t_H3SKcNAIEb$ 
z45+UdRAN;+sE!OO`IRg#sxhcNEuBEAGpK!z_d#`HP^qrc%6%sllMqkJf~ok8VSjR2*~pbB1X1l5BNZaFmHjPX<-!D%;We4610BWXQc3RB@)< z!Zu(~SMuk9GGtKKiVHyXW>6(cCS$~)N_$J6p$~()J>Y9l#tiCCsH}hYWl&|&mZ17E zsPg#vpuS;H6;-`JnJ}ozSGz&=XHXA6NNH)xpsF0DnQ6wLs+E^knKP)GS#nFrfx`j$b}YfA|*h(R?N$Rsp?P=gs%V{`>5 zD+blPQx0Tn2Gvp}{{@f@gZlI;7Ss?1_2t72P__hWk&5F{P~S5sRo5|~hBByjvu1$$ zfkAaR+5&1AgX*YcGQ$~^`sGAKHG)BDbd#LHjzMYmmc_-93`)yFrc3q=N;~ups8I|` zCt8*e92k`D&OM-hWKeol@?{v!p!8qO1vQ328GLvM%8@}CDX-rg%b<*>$|P^24!Asji@FvsDauqKuuy$gL+>FHJL$K4rm9;nL$~F7K56? zplqTyfO26_wmT)NsSIjpl?NzS1~u%J6mioS)QArkK)Ep}d&j>(O=nOJuF{TnXHcV+ zp3F}S$}v+qMKc)Gxcrl#W-_P=mm@%VFsMn|0iZk?l(UhH`@I;Hi-q+1ycv{hXeFpw z49ZO@mwXtM`_2Q1>SqQuqsj)vPmAwptfndgIdL)VtYq{3S&@l$|19wLB+3< zcJvwswQEB)sBi}L-=54`29;R#6{3n@P|2_4hqI1B?fH-dDw09%bMyqYo*Fsbq8Pws+(?P{Bs7&o&L2Y7CS-s^J_GSi^Z4nP@3xmoD zT@Gq1gUa1d25K9F%G)WkiR}z3|FM(+u?(uqm=@i8=s0)rig4)TT3SA{{ zk7rOt-Wi~NVNk_KrFP!Mpst+$4C+?~b?tILPzemGq^taJb~C8b-eW)|GN{|iA(OR9^LPm_apeke#9<461pjKB%J%s-0FHol#lxFX~pz;`$ zmW3~<(+o;G^ctvq2Bi}%CBPX5rMpvZz7#Mhy~i@gIm@8*UrCev9D_3WUNRQ)I$a}d+I1qj~JA%w->0#49f4QG&8FhRKV$EP)``t{L8YqSk0goDUIK!3@Xs@ z4x*}IP(c<>pq??P;7}<6Y8h0>h6+&sFsK!AdqCAOsL;ntKs{$rVJ{0oyl>~*M?s~H8H4!xL{Dt3@TAMWIiycX)eu!X29>)Z1XOzll@}+yz77m3zsd;IR}8A)3xm2=^c+!XGN_WS zYe0R?ph^wpUr>ud-L@DBsw;!KqfD2y8B|$xD5C1dpvvQ<4A)^$6_4em3f&o0rIN|$ zGN^~mwTP+*gQ^-MV_7{0RqZ05RZj+0<2?nGK7*<~Dr4DR465$*PoNAK)C*<0WXPcE zmFZG%2GwAA0DqtngL-Sx4pbio^)7TSC}RfIxWO4zUk242C)ZW_F{qX*RZ!nBs827Y zqBmhsUz(4A>QA5oRmP-&GG$PzF7k9XV^Hn9<&BHx464IXbx;-zs^e)%wg)gM^`ab5 z0~wS?S6Q0+mO*J6$`5A{gVHjWy!|@{rM*(x(UuHKXTyF_gBg@=-1nfY7?j>)OHkGf zO8=!yOl%mGL9;Y|hcGB3<&d#uP{u9=i0XR=)z4dIsY4l*iE_yNz@SV|e~YMwF(~t* zJW#_K)WELts)rE_YLMXxP<9N;()>QCkqpWzv?nNg24%BBW?rKhlx>`B{5mkGp^v3) z_#=ZF_HsF>(F|%t^J!3H7?k~(cu?ATn^-g3@U!5>@zH4P`jd}tG1XyCG3zU zc_4#Ie6$YK5(bt0LiS{W7}Or+kXg#0_KlH=Nic&-RhIFVF{m`J^N1>hL8Tu_1ht$& zWt=_j2Q+hl0g-^M1We)po+ZmK}9jBV&#z8z@V<2 zmhI?h26e4S71TxsRigC{R1AYEHI#c^n;6t>^Ua_(GpIW&w}9Hhpvt0>Ky77Ew{mP)~mFZFfgKFq4N8fG+_1641P>Brc-O5r>Nerqnst8mvgKFMU 
z4=RO0wLD4%wTD4{dLhlsy$tG0^9E4A5vV09ZRIz(k3p%rNV5GqgKFoM3o4aCb(k+L zi~S6$W1cK7rZFh>OaFp8z@Rj=WD1bZpfnBpf;z~cw9LanWiTjh+YX=(F({oVXHbV3 zlNGV`Y!<0kjEQ1=+?2f3;F(`W_lR3|z99(2^@ec+yS{Y$qU{H?p zzd}?O8PquCkSSzP6E4XQ=MsaOSo#!H5rc9zJO%19gZghzrkFvw4v`hXKN*x;lq;w! z49b0nd{$Q()QmmffV#$@JYLAt`8tF0YIXp1gF*SUT@R{+LCtnC1$C1_`FcrIr3}h% zUK6NW3@RW`p3b)!)O_WT`HMjyR?iXS3V%V!K~ zSJYrowG1j@#|}{cFsQ_oN1*B$RPqbi{dmrx_B2V`@CAd~*LDo3mkcV^r4^`G3@Xh_ zj=p*Zl|HWm)N2Nnk(U6ffk7QsdNOYq)X`gQg6EioEqTkpvXo=yUj~(JF3S<`7*x&> z>Gr*6P`Ocdpc)xe-VXVaHZiFDlyjh(8B~E%FnwT9=bEY!RSSc<&~`kij|{3%$z?t< zs3I?0MD>|L70){e>I;Lqk|#I4T6O*Z-+;B`+NB&&tr^sfTkS!qFsM>Pc|NycP`Aw` zYfxoScZNtI-j+d?MfCyIjzN|0SPQB>gQ`f8)^7&}Rrx{&vtKc&hfT7E(2+q^wf!EH z8iT5yA{mxCgR1cw4XP7^s-1ThRA&ZNmnUVM27`KWDIHW72KB1+6(~&x)nHHq>T3q| z);t-M7K3^>L>E+72GtnV1C%y{YTn@osvCo9Ns-S=he3UM@fcKh2KA-s2q;|w6{Mp2 zJE$HEO4UXBA9@U`otGh~o(!tPyk9}-GpLSv(rM_$pwur(r@??hbuOI&%8)^68XN=F zn?Y%r>wq$1P})OAfa=4bbfP+gGGNNc>A#fL?>7v}K*?lG7?hD} z3!>`Jpp2);VAhmD^;60vGX`ZcPwtFoPPZWHMF^YS@eAh{~EljcAff7&Z*b zUR6d4Ll~696j?E|Wl*ENj6r?Rpd9DT0yUIDjZ+Sp9~jhxOE(bJFa|ZTRQgxL8I-d@ z6sQpl%EjzHC_4t_YV!=#NCxE=)eV$AgL2;?eTGpCYDS9m`WzUP#|tUqeq>NyO`)Jh zGbkTb*^eH>pk_}w49bx~`6`FZSO(=cPX-F(7*xQim!QTosQH(sf||gf7M7-ga$-<{ z1`9w9;nF-DrCJ}!f<9#D|Yk+HHATiri6oXVNhW&#(|p3pw=`6 zfO2I}5vu1wO=D1zQ+9!JV^C2_Pi8uUik>H1MeYnL=F|sJKQXAympnktU{G62w}6_- zpkfUIL3uE!I5U||crvJX<&g1WP`lQjL{#1kDq%-gP_r0RqSBM`VNl7>WeV^!gWA(1 zrR8h}wNLd=P;(em>Xap*d>K@lR~t}s8C3eb)}Z_tR7RdOGyNIV;Y$-h1u&?irAtB0 zV^CQJ2SCkdP}ydKK`mfVIX1UIEo4x+>t*+25rfLx(E-$A29>X5GJy=L;CUsYTEd{t zHTi-HVo(=Ue*v|WK^0C(1r^Moiag(gTE?J?10+KZVNh4{R6s3fP}eTW#AF47x^YV; zaVr^Asex4Vp$zJ_nKa2)F{nF3T7e2Q2dK<^~4!;*t@lXa@C4 zxmd7~K{Xi6L{u>h>aE#Npf)k6cQ(=_-^`#I*KY^4g+VpP%EV+VgK9}`2DObreR?iG z=j{yYOVbaaVhPkz71gbvb}%T_DJwz6F{pN)va`LDL3Icy0~ODpI-a@+>K6v3u3RkG z#h^NuO0Vx%2Bm4>4Jv^_X_@^9YBz(@wvp|fL(o2??h@>zm z{pY!$_An@erh%aLGAJWeIr@HMP{zu&f_)6CpQq&QzcVNkrCds7P^PD3jPxAhA_on}z(v3o$}GpHHKe}Ou~ zpgdm46rg}Xc{P3lb(TT-s9pzkjzP_yA`_GI49eG2x@vzgD8B&d^<7|40jFeGe33!T 
zFO&tdLI$<4)E?9&1{K(=08|l!3Q`W4%M2>mW&xrqW>6vPrJ4CBgIWsVbAx0y2hZ^G<5`Zok2yYT7$a5pdzPC235kKqCAI#y2+rTl|!bKLB*Vs1^!zM zYI9*0sM`!`>&-$?e=(@oUeaf{!=U0!rO)s;gNnDA1FDQc?OJaK>Mnyyh?UjTat4){ zJQvhG29^9=hQ$>OYEPrIqwh1QeX9DPDj8Jj6zLQ_U{GnE8lWCBsPuphpdK-(j8pPu zc+8*<7hVBX#h{KV<jXb<+7*y_hA5hO2R9@@>P_+yy zKY0qMe;8E3^WC887}U8&c?bV<26aKTKd2WBs!-|4ykt;C9+imd6@w~P4w-reb>-Ag zi0UhL7%mxN^qqIAyHw>z@m)u=?%b;#6hs?hW>W+={`ra|9vh|%oy=PG6N+#3D zpemB15mgg|s(iizR5OEm*!T$42L@H8stT%wK~+1;bMPaBs_~E#;1h$Y4F~}BnL*W^ zl6LeL2KAy)<~Xgi|Nr-2Eq!%U9$u{(R70=Xpi~&tTho6)wP8^2Y=(kTWl)XlzXsKo zK{dz9&$%6gYDo?U)t*6pdM=IM4h-r`;{{M(5vX95HZ7n!GALDNd8vXLgKFm~PiJ)o z)gizgR3`@2@zl?tIx{Hs!fl{57*yw*vV_ouL233f1*OTLv`pWD`kFy$+w=pa#h`Q| zWn$8mLFvXypFx{J=_PLl)r~>vKlcKq!=MZr4}$8>pp4pl0j0~JjGdQ(>cOD;dDMW? zV^Agmj-YxnDAQBMp!6A(dErV>y%^Mhn;StHFsMOFxn#(oEKTKA54{nD^SJ^YH0EzP<3s0oE0pez{F#G5iu7{H*Mm2znygK{yI)zfbol&g(4s6h{Q1%QeG~}r)HnvUxlkr1;~CV}n=?R7U{JBWCV_HdP;sV5K}}>(@isP~ zCNZd8ktLueGpK~^e}i&nP>IRIKuuv#$x0^U!l3pv%7HwULG4rC@$1T@?cOoHb$U4 z8C33iJy2c@DsTHSP~HqGKe;2QSq!S+xjiT!26e7UnwdW{s0(e*fSS#q3YBtc4udN4 zC`VMj45~PwHK@4^>PqfoP<{;RTA}QV`ZK5-HzmIcU{IyKMuD2gpl+K6ftt^t?pVu9 zs}?Y*vh@z27BZ-E<&as#pemBh5!GS_Raw^vDv&`vY?MyX5(ZV(M!xMq460f=WR^0h z8l_wcW>B>OvVXOVLDij-&CC!6^`h`!P|F$AtD6>}RuHIV9n&SMl?+O)qa4Vg3`)H) z2h=JC)%j*uP+<&8vzI!k)eK6@)DzSi2BmGC1S*_C=|t`XwU$BYZhsCcfu~JyPiQAseT74ia{AWTY}oap!#|20~O7nO#CZAZDdfUxhbGx7?ioP zxVVWy4Y(;w2%8zyAbrUhwlFA5Q)&EeWl&biA+wD^*+eEIs_hKQc6%bISOzsTNro>w z7}PN3kcneZBO0YAvy(yDw~>pt@eInrc@wB#7}RKwFi^V~l%v1&`hI0l<8o#9BY{Cp zP%@d_3~Hj1$s{r;XZ;8G1Ctn(i>Vc;WCrDG9S16fLAgb42DOJlxo-~!wUoFTt8iVrl&jNLTK?UT>ieNf} zntw4J)IkQdu;e|c3Z3@XUf1k_;$6>J>=>Ij1hSuf4ZqYP@rc7ITr3@S9~ zKB!|1Dy&ZK<76?YHH`tFjx(r;HrAlB8C2xtkDyL4s3;E^DC97xX#d|pon%lkxl2Li zGN{cL_kudbpthFOfy!f0vHH@EKFy%wltU(;LB(6oLR4oM)UI`(Kou~kgzbT#&N8UP zq>G@=F{tFa9iYxLs69$1^9O_4*G4wUFEFUo$qzwYWKd}y!$B1?sC55mP?s1~My}lR zDq>KFFG@4>GJ`r=QVFVI#F(vEBgcDuc>h*8u7ogUZ`3s}a{3 zRDP0dlHXuZ1$FB|l`yDt@2`To$)GN@(Faw^pb97726c-;6?w=e`E3SO?7t4wUkvI> 
zt}UoL4C>lN*^d63LER{M1*(iemFk}Yb(cZi?k^=kIfJ@mEd}pA2359BzM>Tjs(gEI zQ1=;BMN%@TN(NO~Hy_jk2KDg$1yBzeRF%pJP>&c?^<;_aF@vh{Fa%Y_plba?K|Ntm zb-Cj}RWqm;7wtejWl*n5TUn0plTV^J8OB1;6DtiQORWL7*zB2 zzKH5MgK9}i0QG`FeX3gw>Lr8v@?H+)R|G0VMP&u3dIqIB`3k7l462>dlWAa39sHdU z)f)!YF<0`dw+u@C;#Z*lWl)_CX9hLeqdh1M z2IUxF2C55#8mDA3nha{fMR{q}*9>Z+vW%z2pq%w(_oFL=a_OH9N}EBsDw#|-2IaO+ zE|cmoDEI9hKy_zOGm@mV)MZc}b@rfoFetD02SMpEC?Ayrpn5W>*&4$@=`$$bnWdn5 zF(^NOIb;kNR6y_{P=*X@{zZwZH-lPOBHKGg3~F)3Yfya{R8W7(uZ$T~u(ce>eHm1U zy%fCt7}SdGGBNpvL4_tshHS#1!qTNM>Cd3nyqD6_ltD$P#DOwnP?3{m)M(D2qLdeW zSum(*rCb`opki`=!XG%0L2bS$Z^rwUL2WIO$HX886{~*<)OQRjuD=|8mJBN1S`*Y@ z2DQsx1`1XTDq-7mP}U48F)0X?4TDOqyAEmygWB_6?s?fVsC_E_puT5Nsgw7D8p@#3 zX3ALh2L_ey*9>YHgUSe&O{d`u>hMKNP$L-B(GuB?wqsDoDvCgjWKh}tV?o(7s2nR< z@*KsWa_#$ra$rz-+hjZXM+TLjB$r7?GpK^}JWyj8)VcREmUU!M7gQF28q1&xJ2!wD z$DoR4%F}r~gDUp_8q@>^btQNbC?^JW?SlMpCNiiSB{IjE#Gr28Uju3~gSxF`GR_R@ zj@3g%HHATy*~`wh3xg`(CYMR4GN_8g&!AixRAstMc&9O_hwr6-<;I|@RK|mv&Y-G0 z%Rs@MLDkHZYnwkYs9Jxyu|9)A)dfo#K9fPcxY!z$2ZMT5VgbsNLA|c{1j>s+z3neM zMcxeRoz)RgvlvvPy>!)l7*zANbWlGtsFtKiP_r4-r-Qnn<}j!)?-zmcB~Z&%RAlBg zmqDp&7=ZF)Q0+YAE9%dnI{0@46~Lf626q89k3p$lr~oyeL3J*Xfx-d?)ulovybBqW zR{y!67BMJot4*L5Gbo*rQicaIDBW#|pq4Nwy(BeIK@3ViT|TR&49ehrH&DS0%19}f zmN6*f&Lnzb#GB-@n4Hs^Lq(ngXbwj?nl(oQ5ANs??!65rqZ`t&FKUjM-JaecOZuFrj6 z*LB~Ew@q9MLG?`b5H*XSdO1lZm7sbb@KIDR6I7o%E8V9NRA7&lqS6T}I5=HY20`@= z|6J571l7+;GP4P)f8O7UY7RjSC>|l|Re}m_va25E64b!vR%dvPpayxpENUJ>4IVpQ z)awK_#Hq>5C#Z-dEBwAeP{Yb?@4SGZh99_9)SCn~qRtu$3khme4{OV264dD6Cq!it z)R^!+qOu7pGTTz$B7%y}J1Xief*MzBeVn%mYJ5|*sKo>|vH3%y-XW;TUe<|QLQt_| z$B25DpyK20H@K9brX*R1cNsxVE#E9EhoGh%xL#B)K~1l-6IFQxmFOgy4Yx^|4+ttJ z+rEVB2`bNdp+6+36~#7&uz{df|ItU(M+CLDxs`rP2rA#p=E6z|s&K5$S!^Vz;%99w zvx%VAC6$Q!n4s2|TXx?}P#Y@kOw}g@RpP7~wh+{&9(|JIC+Q9>0^1U9#gwfCwKaRF zsLu#$Ti!uY+X!lV@hzf0C#W4wIij``RC#kdrSJto?ecnB)DD8$JvKztPJ-GKzf4pa zLG4Yl-g7xYRg_!j@=JoMto&BgE`mB(XC3jc2{+6|A*iEK_8Z(wP{)e?5w(w?jyGAWqk^DLy48x>Pf+zvZM2f0PCE;b0|a#@-s*h^ 
z3F>T;HTJ$GsB`7xL>(fi^Oa{s9VVy?M;{dR9YHnpu+HTXf@%u-OVsxSbt(L!s49ZG zoE;#lnxL-aeJ84hpsp5KfuNS4Tu#}d4xmz+yV-2tQG#mW^@6Bl1l4lveWHFKs8;c| zzB*1&t(}$3j|AmW9;2vE5R{j*%Q;Ce`9ANKk?P)+1~nsNi55AZ{e6zTwfL znh2_2w)Ip0B&hy*>qK25r~yS*qWg=WLYr(<_cB2Zbo)Wn-vl+t%l_K05Y*skTVMS{ zP(z%R%vFMlNP1pTT_dPr2r4qWg{c1!RCJ!b&MgUQT+y4Nt|zGRO^=9bMNku+F7FKlHQ7lrtqCf2 z>~(rTcY=zKx9^|_K}|`TBg&JYrk3{=BDeK!(Ra?l}B z?FedSxZQ@(o}f~)>^pc9L8UrLrUOBx71{dgW`fFSnlH+mpys%(7v)1xb3OkQ)sdj) zMV}OP3qj3~s~6RYpcW)e5Opg-EiAWIV`qZOs+=n7HiBB@Y?rza)M9_@i< zhOPv))JZbk2r4K0Mn!c8LFMHoiRw;JD~hb4a3?{nYO*eXFF~z!TOi7hpz=Nc5amx$ zh0!)4-GiWtog~wfpw=Z>L*XuhTJNl6dJ)uy$`2IP-2_!qYYDkGL2dH4jmbR(wb@BB zeF$nx*cC-}FF|e1vh`H}L2b*kgd9jv+l%a!LJ&diXzC>@n4rqtQbgTHP`fP?eRBiF%Nr4%U7lY5+kU_74^nLQqGX zLETV-stR){sxX48$+rGrI6>7dZ!c;fK^-d^BrI6++sv)d3JBdE(+Hjy%dpswUuTlR5+x>{si%aH`-YPv(zD1e&P+|AZkPY_fK zXC*V5pjtXf=1GET6}LhUIEJ8FCp8oG6hV2E{Uj=qpu8#{5EVsGZENl1L^MIQ_qXrh zSc2*hWOp=-BPj2%Bch%rsE%27Xly(|b;`50>;!`9Ty&eLX9%i`vyz!eP~F^Y)i;Tt zx_esJaxy{rMq8yQhM@f83Pi;cRL{iUMa2=+zjHG21l8NwExQy zJx5T%&Prw~LG=y$Tv0twQ2m_3?=*typKCKkFA&s#BKzkD(+MiH(Mqf{2x_3)2vG?H zHOR9jJz&P!rv(m^quE zCVSb(YYsuhMvoHpDnZ3N+oib#H6_vhu3jUksbyP5%_FF3m2E}6PEgZp?-w!lb}+v7K>U)P^r20;$#w3TA|hYvIr`p>3vbz1U1L4MARaJ zn(Mhi)LR5KFS@g+w+U)~oUQs66V!slpGCbxPz%f65w(P%vMPIsdY7OU)mp7@DM2mv z`$g0;f?5*vs;C@-S{iOiCYPXcveHH65ma988={sI)Cy-Mvx1;jHU6lmRua_OW;VFE zilFk_EEKhxpbDey>%4}bisNi>aVEcwrl}GmDIKrRY*{q z{OoluBB;$lRyHgqs4d|;MZHf@Tb=FFI)d8fB$*EgYI~8TzV!sPqj96C4+*N=EmYJ7 zg4*S*WIiIO-BG6%RS7}uiF;a9DM9T`{6^G9f~qLX5VeV*DxDbfc)R2PA+)icrVX!wDkdO2z4I6?Jxde1)+RG*sn^?)Y` zD$vhX$tMXa*l7#>L{NR5eNH_=^~+kX2Rubk{c{J4I!#am3awf7GeLzmE)jKxpawb> z+g}K3kY{^Eb(Ww8NB0o*D?ts3^AmNBpdu0{i29A7hLt@o>O4UWuk;Z0J3)AgD-ZRJW0!qI2!*+(c003Nu9gNl@b(V?|vesEN&N zXyY$}n(Wy})MbK-joL5jZ-R=CYc1*uK}|_~LexJ5HMQ)FsH+4uZT~7!*9dBQjdcQC z1eNG#{nTbY|Nk9WsmX!%vvMP-nPJC7H7BT)tXZP2BdAnoCDVeS(h9AG_8)@EXtXM1 zOM;r?RAsIwsJR|C;L?hq=0(|ya|1!mcaltNf?AN+TT!_a)Iw*w6|y%$6-L>iBOih)jPk@Ca_#IyH-g$;_>8DK2x>=T 
zkf`niRo=`J@|^^=%OgjWFG1~&N)hEpP-?M-}2R1boxD6>jYPlBr4Zxe@i z5!At&`J#Fe)M38?qV6WBBY~?$^(LsQP?xBC2&%?e$@C$py4>p()x89DtZ=lb0D?N+ zXmy4_f;!pE%F#gtRqwGyR4_rEj+!j$K7u+E+a#(lL7h!}R@D6jb*`+PsD1=>zQT?K zKR{3yYAp5jC#VMB3Q-RdR8!zEQ3D9-QmB>gLkQ|}X0fPHg1VA(QB)W~T`i~*6;4pD z1}o4F1gMwYTz2okLj={rBUscRf@&G%A?jg*Y87ku4h$x!)(PK>`Y%Cw>|8Et2tj#O z*u9yL5LDZm=At49s=eCj$14Ruds7^UnDSC{cIv13S z8bMH98on0wI6-xFRf-x(P~AOl7d48Ye51OGdV-+*V{L0WnxJ|nSkLQ8g6dUfU*|Cd z)w?1~)Kdi2r^XU;BtZrGS~)t3pn?PM5*1BQeM9dSHI|_IWtNBqNakP|vt*K6*MqP4?&|Y6d~YMxGUwKv40q7F8laO-ZN~l|)cecUpHT znV_asB#U~Hpr+TX5jB&b5`A}ydWoQt1FninA*h+oN@f;8rDS$cRH+1&nloP1%LJ8H zuv%0aL1i>rt1+FRX1f|hWf0U{k4ee#Gjt0Ufz5X=?w(Ch3ljd(BhDeHg-&jGm7uaJ z?0-VdC8$N!_7=WIP>cQS^EHp4mIS7XdYzz_hQ^DUPf$6TwoiS7pz@r~^8$ieQE*&Q zy-8538pA~`B&an`y*87e@;xk%W)W0j)VHFt392~OYTJtlYF)z5qTV8?^-jI^ZGzgc zf1sjTOi(2?qeZ<#P#aHKfoBOpZ4P`<)Vl<=CA3D=Qi9r=Y2|@s1hp+^rKlW&+FoE$ zPVoSeJLQQs!*HkDV1Mb-e7gLbp&<7sn>o$Q1u=*)V!XcPDk47{2@V|iHj1ofuPPh_1cdJ>fFvZ6;%mA zo!{R|R4GASsJ<*}BSAIzZWFbMpqiX|?Z*UlDfBT#wV9wUXLb|y2|-=Si59hmpsp4) ziu#nGTn!sUZ3U<_H&?N!&j_l8lVr9LRLe-KGki`^t(=w2c7kf1V3pA?2+G5$$?PB~ zFQ@aolc3rZ#B`9B?aiaDSl)sZ?DhR4)!eT|WpP+iBZxU5WP`&qG zC+Yw}^{K8Gb&#L}PFsofTY?G>u&53ZRNv4CL>(rmenV45eMeCJbF52$grEi#_80X% zL4}r{7ga@2;jX2kstIb4$8(};2x_oTf~Z=88WMZIs5*j*NbD%;C_xQ#lFTuJ8eTD3 zQT;$rBdYg_I!;g{PfZZ@BSDRJlFSK$`ggqLBtbNkR#Uj3P<^8}S}Dpb_(1eF}HT+{`Eni+b%s6PlQWvG>-FA`L0j=!h|f=Vm+P*fv9 zWt4s-s)?XxyXJ}dlc46hpAdD4pyv75rO$s6)chDreU}Mpfs&RBi;7mt*y-<^;8( zAY0UR1huO4Q&BAlYK`l8QU4*ReD?}bEeWd7$4;4CPf*1%*6wdbQ0o#b_1!>F>(dKG zwI-+y6?ckqC#aGdFHs%@weggPC{Kdg9Ox;^i=eiIejut1L2Vt{NmN^c+LrT@s2d4t zdqIY%b_BJf)VAU6398Ix2ZC=Rs9jD?rUOCk_PI?_-Aqt>VyZ=X6VzU37|(~GD$;G7 zvLiuNR@^P>7J@ohZ3Ez)2O92SL@l+chxV3F@?u^>OYbs53D;MEMfb*%@}Mz>lEL zIZ4K!pw3s=>_-oRx=?LLj(QT*#d;f!xQn2g0;Y)SMNpSQYDC>lP?v}H5!IWZt}MGK z>K=l+TJX52J_O||wZX-E0V>_i*)9bTR15dDiYkzxTKc4m3L>ahF<*-cCaBg4zM}3U zD36RMMfD{puL^5n-%n6&Yiw|_A3?RNx6i}_1l7SwGW`k4JH)1=A0()bLv2StfS@|% z*l#d|pgI@aA}W-ix|F7g3L~hlt{73_1l8UBl&FCOKgRkvg9xhUjK4%Z 
zOi;bjyNeo3P`xYcHjDofR3B#;ZwNsJ)Z4LwM+ho7V6>H5sBo7}M~@(=LGJeAJWf!9eXMIalAwmfEEF}0pdw~` zCF%)+8kTOOr=tmKxHF9RBtea+Zmp=s5Y))}8KRydsL=soq9O@uOh}ffD1wTNXc84o zP|?e*H!+r=#^s+AHIAUhmyQzkG(kP%vJ&fff|~3;UDO1EiuJbY$1?;KAM>23i3Bxe zMy04p1T{6yw&9ZrYT7=#GAD+hrdL~IJ(i#n>L-YbBdFwnKvD4oH8W&^sAmZ(CE`a> zQwS<`nYCq~BdE0e!=k1VR7UAQQO^_9Y*#Z;(+Fy=d$gz*2x^|U4K7Y6sQEG0$C*J; z3ugQvDuJLDrneE5NKje(T%wW)YEiWnGm{DG?RuNud6A%&1l%KPCP6I?u{y&`1eFu9 zQB(>+7T8*y| z)Vdh~qGl7+`ZViW&LOA``;Lfum7q$h?8TW&P#c{j^BO^IzSrsu^9X87h^?<)C#bCv zXGF~>sBOz^dgl#-+Md5!)B=LqQR*Y=O@b=BW^*zN32K+S)fqAgYPYu~^78BH=`i`RBA*dq(HX^u$psGT~ih7rz zYKA6?T1rrL%ia>TjG&I?TW=zVppH99CYPX2T(deu9zoT+4-mDSpiX;R;dcc=or&=m zwUVID&X^}^6+xX#w-xegf;zu1Pt+QMx=?+GsI>%jvA#sqdj!=KaI>gFvg zx*TEWyb1~G%Cf1ViU{heQ literal 0 HcmV?d00001 diff --git a/code/recording/data/eye_camera/2015_10_03/001/user_info.csv b/code/recording/data/eye_camera/2015_10_03/001/user_info.csv new file mode 100644 index 0000000..c868dc7 --- /dev/null +++ b/code/recording/data/eye_camera/2015_10_03/001/user_info.csv @@ -0,0 +1,2 @@ +name +additional_field change_me diff --git a/code/recording/data/eye_camera/eye_camera_RMS0p39.yml b/code/recording/data/eye_camera/eye_camera_RMS0p39.yml new file mode 100644 index 0000000..b51bcbc --- /dev/null +++ b/code/recording/data/eye_camera/eye_camera_RMS0p39.yml @@ -0,0 +1,20 @@ +%YAML:1.0 +calibration_time: "Sat 03 Oct 2015 18:42:34 CEST" +nframes: 30 +image_width: 640 +image_height: 360 +board_width: 8 +board_height: 6 +square_size: 0.00122 +flags: 0 +camera_matrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 678.75284504, 0. , 301.29715044, 0. , + 667.94939515, 209.10259404, 0. , 0. , 1. 
] +distortion_coefficients: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ 0.12812696, -0.13076179, 0.00631552, -0.01349366, -2.10210424] diff --git a/code/recording/data/frames_005/frame_300.txt b/code/recording/data/frames_005/frame_300.txt new file mode 100644 index 0000000..31295c7 --- /dev/null +++ b/code/recording/data/frames_005/frame_300.txt @@ -0,0 +1,9 @@ +18=(376.883,493.993) (367.418,432.916) (429.433,424.199) (438.674,484.834) Txyz=-0.138299 0.0442204 0.497911 Rxyz=-3.10292 0.221729 -0.0411571 +130=(557.896,467.496) (549.296,407.464) (609.848,398.895) (617.985,458.804) Txyz=-0.0245979 0.0282039 0.504007 Rxyz=-3.08427 0.211876 -0.115737 +301=(748.172,549.641) (740.68,491.47) (799.57,482.841) (806.625,541.024) Txyz=0.0988437 0.0832783 0.516703 Rxyz=-3.05697 0.208073 -0.108889 +351=(573.305,576.144) (564.956,517.414) (624.868,508.643) (632.845,566.939) Txyz=-0.0150596 0.0992645 0.509626 Rxyz=-3.04795 0.206556 -0.108844 +399=(720.388,332.456) (712.982,271.43) (773.556,263.268) (780.206,324.203) Txyz=0.0793988 -0.0583554 0.504764 Rxyz=-3.05028 0.207139 -0.10664 +456=(542.036,356.791) (533.264,295.4) (594.619,287.303) (602.898,348.62) Txyz=-0.0341846 -0.0425535 0.498523 Rxyz=-3.05347 0.204274 -0.0319069 +608=(358.957,381.543) (348.602,319.251) (411.724,311.228) (421.632,373.302) Txyz=-0.147483 -0.0265521 0.491548 Rxyz=-3.05284 0.204884 -0.0436 +659=(734.247,442.123) (726.628,382.526) (786.131,374.246) (793.406,433.659) Txyz=0.0889698 0.0122942 0.510193 Rxyz=-3.07904 0.209142 -0.116808 +707=(394.177,604.361) (384.954,544.505) (446.419,535.311) (455.177,594.624) Txyz=-0.129612 0.116083 0.506384 Rxyz=-3.09125 0.216361 -0.0913303 diff --git a/code/recording/data/frames_006/frame_300.txt b/code/recording/data/frames_006/frame_300.txt new file mode 100644 index 0000000..34f0b97 --- /dev/null +++ b/code/recording/data/frames_006/frame_300.txt @@ -0,0 +1,10 @@ +18=(337.292,468.882) (324.79,404.901) (390.651,394.692) (402.499,457.982) Txyz=-0.154857 0.0256425 
0.473357 Rxyz=-3.04908 0.244502 -0.076421 +130=(527.352,437.35) (516.93,374.763) (580.082,364.783) (589.941,426.958) Txyz=-0.0420971 0.00724924 0.483541 Rxyz=-3.06316 0.24193 -0.165829 +301=(725.485,517.338) (716.988,457.961) (777.014,447.674) (785.252,507.095) Txyz=0.0819062 0.0595391 0.502175 Rxyz=-3.02787 0.238118 -0.13582 +351=(546.056,549.115) (536.021,488.78) (598.013,478.114) (607.443,538.074) Txyz=-0.0314374 0.0779249 0.492233 Rxyz=-3.02323 0.23531 -0.158536 +399=(693.675,293.595) (685.114,229.493) (747.716,220.171) (755.538,283.927) Txyz=0.0601648 -0.081201 0.484901 Rxyz=-3.01722 0.233661 -0.14594 +456=(507.989,321.903) (497.092,257.244) (561.524,247.809) (571.784,312.326) Txyz=-0.052872 -0.0630324 0.476038 Rxyz=-3.02846 0.233055 -0.0858767 +608=(313.964,351.087) (300.582,284.895) (368.329,275.447) (380.611,341.093) Txyz=-0.165616 -0.0445972 0.465881 Rxyz=-3.02714 0.234292 -0.108089 +659=(709.625,407.24) (701.037,345.51) (762.366,335.935) (770.285,397.398) Txyz=0.0707768 -0.0109919 0.492042 Rxyz=-3.03724 0.23253 -0.158296 +707=(359.279,582.974) (347.403,521.27) (412.05,510.011) (423.197,571.361) Txyz=-0.145168 0.0967286 0.484355 Rxyz=-3.05352 0.247565 -0.117771 + diff --git a/code/recording/data/frames_007/frame_300.txt b/code/recording/data/frames_007/frame_300.txt new file mode 100644 index 0000000..dc14864 --- /dev/null +++ b/code/recording/data/frames_007/frame_300.txt @@ -0,0 +1,24 @@ +173=(699.455,499.98) (691.307,438.567) (752.606,429.954) (760.682,490.932) Txyz=0.0649598 0.047518 0.494322 Rxyz=-3.09118 0.21103 -0.110259 +243=(558.056,453.388) (549.028,391.358) (611.729,382.452) (620.144,444.44) Txyz=-0.0231608 0.0178109 0.487122 Rxyz=-3.06591 0.212071 -0.107365 +288=(509.858,595.993) (501.039,534.333) (563.524,524.831) (572.148,586.371) Txyz=-0.0533126 0.106812 0.490712 Rxyz=-3.10034 0.213607 -0.101361 +325=(690.555,434.231) (682.66,372.562) (744.469,363.995) (751.952,425.615) Txyz=0.0593093 0.0062092 0.491782 Rxyz=-3.06472 0.206759 -0.107539 
+347=(363.361,550.855) (353.573,487.91) (418.325,478.379) (427.426,540.869) Txyz=-0.141701 0.0769299 0.483147 Rxyz=-3.10662 0.220845 -0.0927639 +356=(480.526,396.484) (470.666,333.017) (534.73,324.316) (543.978,387.486) Txyz=-0.0699126 -0.0175944 0.480141 Rxyz=-3.06477 0.215313 -0.0848652 +358=(432.44,540.204) (423.087,477.693) (486.636,468.411) (495.612,530.387) Txyz=-0.10047 0.0711499 0.486519 Rxyz=-3.1106 0.220745 -0.0922127 +392=(442.289,606.858) (433.197,544.676) (496.376,535.089) (505.121,596.737) Txyz=-0.0950006 0.113034 0.489782 Rxyz=-3.11116 0.21787 -0.102892 +399=(708.4,565.16) (700.069,504.329) (761.353,495.298) (769.126,555.978) Txyz=0.0705462 0.0888017 0.496123 Rxyz=-3.07587 0.210512 -0.114342 +449=(548.411,386.917) (539.147,323.647) (602.773,315.279) (611.129,378.093) Txyz=-0.0286112 -0.0233864 0.481884 Rxyz=-3.01791 0.206092 -0.143664 +528=(624.563,443.811) (616.147,381.957) (678.347,373.238) (686.266,434.894) Txyz=0.0180922 0.0120028 0.488909 Rxyz=-3.05213 0.209138 -0.118358 +554=(490.591,463.222) (481.316,400.801) (544.603,391.939) (553.41,454.083) Txyz=-0.0645677 0.0236915 0.485569 Rxyz=-3.08837 0.214389 -0.108485 +559=(373.466,617.896) (364.062,555.286) (428.305,545.501) (437.448,607.679) Txyz=-0.136017 0.118491 0.485214 Rxyz=-3.10462 0.217903 -0.0966003 +571=(615.48,377.577) (607.189,314.515) (670.369,305.988) (677.674,368.791) Txyz=0.0125728 -0.0290879 0.482861 Rxyz=-2.98808 0.202386 -0.146367 +655=(342.345,415.599) (330.378,351.444) (396.27,342.763) (407.057,406.633) Txyz=-0.151935 -0.00585876 0.474021 Rxyz=-3.0196 0.207689 -0.0499907 +660=(500.258,529.687) (491.297,467.655) (554.109,458.558) (562.902,520.323) Txyz=-0.0588932 0.0651044 0.487798 Rxyz=-3.09854 0.214559 -0.0948563 +664=(576.63,585.616) (568.056,524.065) (629.927,514.768) (638.399,575.89) Txyz=-0.0118773 0.100667 0.49203 Rxyz=-3.0937 0.211299 -0.126647 +735=(352.832,483.454) (343.004,420.029) (407.747,411.093) (417.501,473.927) Txyz=-0.147205 0.0354299 0.480331 Rxyz=3.16704 
-0.223919 0.076249 +737=(633.714,509.651) (625.26,448.175) (686.896,439.242) (695.163,500.561) Txyz=0.0237445 0.0533222 0.49199 Rxyz=-3.09203 0.212088 -0.117316 +782=(642.802,575.179) (634.357,514.125) (695.692,504.957) (704.056,565.739) Txyz=0.0294044 0.0947937 0.494568 Rxyz=-3.08374 0.21306 -0.111104 +786=(412.026,405.993) (401.221,342.19) (465.902,333.555) (475.958,397.018) Txyz=-0.11102 -0.011715 0.477259 Rxyz=-3.03908 0.211348 -0.0405065 +787=(567.301,519.555) (558.638,457.877) (620.81,448.837) (629.291,510.389) Txyz=-0.0175444 0.0592119 0.489873 Rxyz=-3.09406 0.212163 -0.109216 +842=(422.538,473.23) (412.683,410.393) (476.666,401.431) (486.064,463.924) Txyz=-0.10576 0.0295403 0.482561 Rxyz=-3.08552 0.217952 -0.0668197 +914=(682.094,368.247) (674.609,305.389) (737.422,296.877) (743.864,359.666) Txyz=0.0536882 -0.0348577 0.485769 Rxyz=-2.99698 0.201008 -0.128513 diff --git a/code/recording/data/frames_010/frame_300.txt b/code/recording/data/frames_010/frame_300.txt new file mode 100644 index 0000000..b4f991c --- /dev/null +++ b/code/recording/data/frames_010/frame_300.txt @@ -0,0 +1,24 @@ +173=(723.927,447.07) (715.643,384.723) (778.576,375.426) (786.258,437.702) Txyz=0.0790166 0.0136108 0.484619 Rxyz=-3.04246 0.219174 -0.091972 +243=(579.827,400.658) (570.222,337.215) (634.345,327.765) (643.039,391.221) Txyz=-0.00925742 -0.0150964 0.476483 Rxyz=-3.00322 0.218584 -0.10575 +288=(532.679,544.107) (522.959,482.482) (585.978,472.648) (595.254,534.363) Txyz=-0.0388098 0.0736001 0.484942 Rxyz=-3.05784 0.224629 -0.0772962 +325=(715.033,380.428) (706.95,317.043) (770.634,307.64) (777.861,370.973) Txyz=0.0731347 -0.0274384 0.480342 Rxyz=-3.00927 0.218851 -0.0908707 +347=(384.833,499.337) (373.829,436.216) (438.669,426.422) (449.139,489.416) Txyz=-0.127388 0.0447468 0.477125 Rxyz=-3.06431 0.229277 -0.0535486 +356=(500.555,342.84) (489.461,277.411) (554.996,268.146) (565.07,333.476) Txyz=-0.0562687 -0.0497284 0.468424 Rxyz=-2.98365 0.21573 -0.033857 +358=(454.078,488.67) 
(443.545,425.754) (507.685,416.119) (517.61,478.697) Txyz=-0.0861319 0.0385554 0.479532 Rxyz=-3.06777 0.229777 -0.0678913 +392=(465.08,554.994) (454.885,493.098) (518.413,483.204) (527.941,544.889) Txyz=-0.0804108 0.0801372 0.484963 Rxyz=-3.0663 0.226716 -0.0831945 +399=(732.737,512.68) (724.51,451.377) (786.84,442.078) (794.647,503.35) Txyz=0.0850416 0.0548219 0.489455 Rxyz=-3.04315 0.219029 -0.0818131 +449=(569.515,332.769) (559.638,267.491) (624.982,258.069) (633.689,323.424) Txyz=-0.0150215 -0.0558136 0.469471 Rxyz=-2.95079 0.213833 -0.102914 +528=(647.503,390.54) (638.787,327.07) (702.535,317.713) (710.537,381.069) Txyz=0.0319074 -0.0212714 0.47804 Rxyz=-3.00741 0.218382 -0.102638 +554=(511.635,410.921) (501.203,347.341) (565.709,337.896) (575.31,401.427) Txyz=-0.0505453 -0.00890131 0.475441 Rxyz=-3.02306 0.221724 -0.0744128 +559=(396.416,566.132) (385.832,503.765) (449.873,493.88) (460.227,555.802) Txyz=-0.121709 0.0862522 0.482184 Rxyz=-3.06672 0.23014 -0.062468 +571=(638.16,322.645) (629.505,257.342) (694.553,247.905) (701.927,313.41) Txyz=0.0261123 -0.0619344 0.470728 Rxyz=-2.95023 0.212263 -0.114807 +655=(360.505,363.079) (347.015,297.287) (413.74,288.078) (426.117,353.694) Txyz=-0.138416 -0.0372608 0.46466 Rxyz=-2.97141 0.2152 -0.031309 +660=(522.304,477.958) (512.311,415.374) (575.931,405.843) (585.329,468.295) Txyz=-0.0447246 0.0322643 0.480759 Rxyz=-3.0578 0.225199 -0.0738372 +664=(599.766,533.608) (590.643,472.009) (653.283,462.292) (661.946,523.804) Txyz=0.00257891 0.0672197 0.485662 Rxyz=-3.04987 0.219942 -0.114197 +735=(373.089,431.793) (361.365,367.453) (426.782,358.194) (437.982,422.065) Txyz=-0.132966 0.00355214 0.471777 Rxyz=-3.03753 0.22429 -0.0346352 +737=(657.115,457.277) (648.23,394.901) (711.199,385.518) (719.5,447.765) Txyz=0.0378741 0.0197911 0.483178 Rxyz=-3.04552 0.22143 -0.0968667 +782=(666.4,523.065) (657.721,461.573) (720.035,452.19) (728.332,513.475) Txyz=0.0438073 0.0610251 0.487713 Rxyz=-3.04853 0.219505 -0.0967276 
+786=(430.999,352.987) (418.778,287.389) (484.649,278.121) (495.913,343.557) Txyz=-0.0973854 -0.0435011 0.466652 Rxyz=-2.98462 0.217406 -0.0168059 +787=(589.799,467.54) (580.54,405.102) (643.78,395.603) (652.536,457.897) Txyz=-0.00336534 0.0259621 0.481289 Rxyz=-3.03955 0.220231 -0.11357 +842=(442.786,421.342) (431.874,357.423) (496.653,347.995) (507.008,411.669) Txyz=-0.0918841 -0.0026965 0.474376 Rxyz=-3.05417 0.227679 -0.0500495 +914=(706.368,312.641) (699.076,247.258) (764.04,237.726) (770.078,303.24) Txyz=0.0671587 -0.0679941 0.472159 Rxyz=-2.93732 0.21098 -0.108571 diff --git a/code/recording/data/participants/.gitignore b/code/recording/data/participants/.gitignore new file mode 100644 index 0000000..d157418 --- /dev/null +++ b/code/recording/data/participants/.gitignore @@ -0,0 +1,2 @@ +/p* +/Hosna \ No newline at end of file diff --git a/code/recording/data/participants/notes.txt b/code/recording/data/participants/notes.txt new file mode 100644 index 0000000..2cbcb25 --- /dev/null +++ b/code/recording/data/participants/notes.txt @@ -0,0 +1,15 @@ +- for participants P1 to P11, eyetracker had to be readjusted during the experiment so that pupil could still be tracked with high confidence. eyetracker was rarely readjusted between two recordings at the same depth, mostly it was in between two depths. as a consequence, usage of different depth calibrations for a test recording would have another source of error caused by slighly repositioning the cameras. + +- p12 to p26 > no readjusting the tracker +- p23 > a minor adjustment for better pupil tracking confidence from 2nd depth +- p25 is myself. my head was slightly looking down or up in between recordings so even for a fixed depth the data could result in large errors! (not so useful). it's quite hard to do better all by myself. 
+ +- for p6, 001 is splited into 001 and 002 but the movement from test point 15 to 16 is lost, so from 001 only extact data for the first 15 points, and extract point 16 from the entire 002 +- for p9, depth 0, calibration video, the screen is a bit outside of the screen from left, therefore at least two marker points are lost (cannot be tracked by ArUco). it might be better not to use the data from this depth or at least not use the left edge of the grids. + + +- p1 to p11 + p23 had minor adjustments to eye camera (and in terms scene camera) in between recordings +- p12 to p22 + p24 and p26 didn't have any camera adjustments in between recordings +- p25 is data recorded from myself +- p6 and p9 are special cases to be handled separately +- data for Hosna is not useful as the eye camera wasn't robustly tracking the eye (less than 20% frames with nonzero confidence) \ No newline at end of file diff --git a/code/recording/data/scene_camera/2015_10_03/000/info.csv b/code/recording/data/scene_camera/2015_10_03/000/info.csv new file mode 100644 index 0000000..0add6b5 --- /dev/null +++ b/code/recording/data/scene_camera/2015_10_03/000/info.csv @@ -0,0 +1,14 @@ +Recording Name 2015_10_03 +Start Date 03.10.2015 +Start Time 20:15:10 +Duration Time 00:00:52 +Eye Mode monocular +Duration Time 00:00:52 +World Camera Frames 1265 +World Camera Resolution 720x1280 +Capture Software Version 0.5.7 +User mmbrian +Platform Linux +Machine Brian +Release 3.16.0-49-generic +Version #65~14.04.1-Ubuntu SMP Wed Sep 9 10:03:23 UTC 2015 diff --git a/code/recording/data/scene_camera/2015_10_03/000/pupil_data b/code/recording/data/scene_camera/2015_10_03/000/pupil_data new file mode 100644 index 0000000000000000000000000000000000000000..21e3485037c952931890905406638c9cf7196711 GIT binary patch literal 234804 zcmb^4X;g^c+6M5@pa^A1M3D@kkXfb35E5l3X+nlliIBvf5HgdL44H}|geanrIZ-GS z$~(0kpTFYou~`f}D;=dATxPkElb?`iKst1tGe)^>EAX>JR>T<7=(c+d6r^$GB+ 
z?lQXWoWMEWv;N=zR>Q?lwe4a*wYH;c`uO_Kmj7p{%21WC(OMq9K2yCty?i{phN_IN z>FMn@+iR|uKmL1lZ%_QE=3MXDUIBC6X3rU9`(U~Hjh($}1}^qfSL{R&b%3Lx*okqO z1*fKBXP57BIGT!`*rkPVv=lqLH&?)^rP$eXU@)B8ik-b@T;XUdcJ^IA2&ax>C$2Cb zPF=;${*NEw)KlyntkxHfj$-Ff{mF226+4GblHk-=?8NuD2d9BzC&B&~oQ8^>Ba?c= zX{6XW>N6gWo?<64bPt@yik+lQ*>Lm~JICU_!D*t{IeuCXPE*BBa<(ZP1I5mXg86Wo zDRxeN*bAq*V&_!lQ#gi-ozpsh;TS1)&a^Us(?YRxw)<>2EfqVdgV(|_R_vUca2`%8 z#m@QJt>Ks`cG8wu!D+47N#7U-r;TFg!oK}*+A4M~p1KdGonj~Bn#6~uik(Z3B|dDg z*tz^(zV{A_oy>n3a5^e>uIR|RYNpt^YHR|>T(NVln;o1^ik<91F>pF7cCL??@4bs+ z=f=!(I2MYXn;}EsbXDx!+TabRn_}no-c&f<6+3rMK7i9hv2*vTKAfJ4ot#HDaC#|r z?!A-my|-c~_wPG6eH1$n>Q#qhso2SD*$>X z2xov|=h>A(a0V)NiXLu-Gf1&h z{5Ap3V8zb!KY4KM6+18MwuLi9u~X7Q^4y_{ol=YSaE2*%UJW=6$3e04daSIg!xcMk zW>~;+RP4MBc7iiPvGXosJDib!5OF6`DWw@XS`zPdl!E=6BIi?>~6xDsMz^AM!xq+ik)(A z$x~buJHHl1!I`Ys`MvHe99P9o#qK(ArYLs)B+LHprr7zL=?2GLvGXra>Y^Tsoyyla za6FYdH&rTH!tqkh!% zaAqoYYA%$%)hxx1=32>{W-E5Ic3ptuqu8l+yfGYK#ZK+Z5=YHZ>}Wsmfa9mwsq-ov zj=y53?yoCw0u(zswWaQtar*e1H?I*lBXCHk`$Zou-#0o)1y%7~BtnvqZ7etTYQw zsA8vi`AaxU6+1??jNmL&?6hb$1iX9uXrf_yEc5H11!r7zP>F4AHXRl(Xzo+E6`xHBN z^JG3VPO&p!wI-bXik*Qwd%-!N*co(m6r6*KoxvBR&UQ$#W1l1SpTmlsAunFRiC63l z{b3I$L9yddQ|doQ6g$J49D#FGvE$f5`a6k=oe|b$aFP@|BS*TyIi}cg@>mY%xMIgS zQ06L=6+5F=eui^Gu`@cV1DunJoiRtGzjI2lGdA4^PKsh@+}(?CPAhiCKbPvEx2h z>Y^7FJ04+?a55A-o{`t!TvF_K#moHRWyQ|aH0gt6Dt4ybafXwn*qL4|=cKMEcD&1E zAH1sAnW-lG;5Eh0tj5FOWGi-Nn@YXzx?;!2Qoi>aiXGqKui@NO?96eKxc8P~$1gzY zqPG=0{wri3yrb9&*e?6vUB%Aa!!kdXqu2>N?+)jlVrSm%2spWlo%uy);oMj31bzJt z=Ye8pp;|LId5WDydWYfUD|Uk0Nj>4AVrOw5scSt_?1T(o0_U+}XUUYka0(PVq5je@ zC{*k$UC{{66UEN5?J`IGRI&5l;mvTKDR!2hlR8O}VrRuInfEJJ?1U9Hg!5dnv+}F# z7cUe$t5vtbd8yb5ZzOfm62;D%cJ<+uDt6ZPF@y6;v9r#>0nTg1PK0X$oHvS{^?nk! 
zyjARMST22#cZ!{j+a|(!uh`jiC2(u@gNk0L~A^PR!&II6oCTyZrjV zDOc>o{$~&8mttr4)=W6R6+3$lN`I$9v9mW-3(gDbmxaSK{)frU$kiVd6FsK9gX~(Q0fdSaSMv%{u&G_>A*rzH5t^gGdDqLGN|L%(cSRZVo=FXEI`#_P$xdk0acqp zovOASls1D(sgDkhe;o#Ox^)dubs5x|o+CijV^C*@go4sxP^puWKNBYG zOOY%2H(*d{n=wY>-;hD2?~eo3h(TRAjUdWjk3n6$j=HFSV+NH`h!Uc|K7+dSQQoU2 z3@THl3{+DFl~unDC<6v{#Uu(;GX{0F2PzEy%^B3SA-bRp8C3QpXHZ5A>blPwP%RkL zjnIdnS~94cn@~9QH)c?`_RA00ib35zEir=$gSwMl3aT}Ox?5-tsttq6`REF&ErZHc zL5sn^9fP{B`wNsQgL+_63sidsmDj@sR0jr?Z|@7LBZGQ45jVj2n=z(nkQ<3n#7>Avb;gLFsNGjiJ&GksM>GkWpHIs+J9uDpTeN(){(c}jX~9G zVFJpXLFrh?D(bO8T`S?HGdxl)x6FDP`(Vx&`3JG za~PD7MSDh(r~qSXwlcZKxm!x@yN_HR&Y7?hO}W`q6LGAQdV z81M97$DsP!Nsm5)LD`Ho1+|_**?P-|vw=bNTXYW8Mh4YCLI>0)24%M!gV6q)8PtH} zgP^uBsDYVhL2YGFgYqR^+Qy&;zi|e&ok7`G$cGcjpoVJ8RM-v%HO#OQR1|}9kc+|n zcQUBqcCzC{GbqO~k}kzCs1e>W+_H;7ja)PYR4jvXS|{b`-3-b(R?_f23~E$zZ%}&~ z)acChp!PASF?q7%#4)I`ZyZ7GXHerRR)RXfpeAS+gF48dCK`1Cb%;St>M|JAVFu-5 zmjo)FK}{a>2~+}ua`na?2>wSH)Rf?EppG&qw+M;)5*d{H?){*W7?ek{Oobg|P@Y-1 z8ruIjgYwFkpeUI^O?|T%)CmSPt>PJ|lMKqcPF+x^7}N|SYfvc+YG#)WpiVQWS#}pd zoncV3$Kr^(|5*m*by32fV_d zg2%{Q-&F>+c=~rx*BDgDqQ;=I8Pt+>(ha}Pph9DJfV#n;mL5+4b(29Y%hUvQi$VRD z*8$XR2DSXPC#X9NYQ^tpPjop%b@nY zDg*V7LGAn92Gn~7wZC>Ss1FS4K=W8o9~soaPIo|kVo-f}26ees6evvwmDx8m0$>A(*26b)fRZw*p zRCbV5w(Bye>ubh?s>h&i#4HD;!=P>^odKoGpl)S6167|v-M+65ssV$#Q!4pjLk4xX zd;_RP4CfVS!6=ukJoE8k~N$zk^Eg96)5}CR;W>C+5N*dmZ zK^1Ft24%vao*RUKYR#Ztn8}<>8wT~#<_V~_464Lgveb4As?bp>Hs~^_?hNYN_)1Vc7}WP!HlTVks2?Ftpn5T=pBwgp z>dm0a_sZtghe7>1DWxz=2KD>uI8as$s^XEHe6eOwf8I$ctS^K5`}Z9v8wOQbPX;z@ z3Dj+smQz6WV^Gz)t_0PeK~*341(Y3wsxe-QdaxF1~MqM#p^%~Vo>Vq(?AVo zP#SxsQe@AdYMzvXAww9H=9LUkLm8CT!w;Z_F{oPaq}}hpplbh>NvGiqs!rV|r5$)I!w%9iTHpmfI_1Le%1>d(jrHHtws2$qSI(G05L`bnV1FsMd* z0zi#rP0)uMuRw|Ga8C26h|ACsspqkZ@_sWGqH8-jN zHJL#fR^933%Akw}$b9=02GwG$G+*2pRLiQHTih9xaj*;mc`&F}5mG7gWKbqmC#t*{ zRO@63wx=?vHd%7=Wg3HOn=irkbOzP#jU;j249c|P3aA+js)KeRsF@6^qmlHTXE7+V zE>c6D&7jQffrJWd3Ia*EjsYCVH;PLic>1A`iM z$qUp*1~vM=6zDcFs4=BdzuL^8#+JVZwS_^AucZNMD}$QQ%p24;1~t)Kn#J20)Fj&+ 
zP>~GEWmI!eI~dgDsWQqH#h_e+4uaarpr(Xh0u{}m++uoxieXUhNm8KO#h^Sgq;wz4 zpgiwOonbeF@+y_y#2yAUwOnF`y$ou))@o4u7?ig`9H=-3HN(6bsQnCTrmX>}0}N`G zv;2Su8PsgAcuO6z` zucS4oGzPW&=W$T!3@S|X5vU6cYNdfGsEZ70m68NwYOZR z?jJL#I4wDfSiqq68{7s}$e<3GYk_*gpbpwPgL=xK4mpQ_dd8p*d&$yQ#GvAXI)W-@ zPzmAkUOi_}N22$FdcmNMCRT!a$)FN5BxWdKP)WH`DJo@9$4Z`qdc~lQm-htqnn9h= zas~BTFPVP@fo7YPe*n zpBdD-X!)ztUl`Q+q*I^E^EFF{vgSuZ;XQ;`b9ypufsWcf>o|i8uEe4f8|1GFm4C-OH8mQU~>QS@@ zC~XGyIB@}}It;4dVk)S*45~0!27&4^s3#?UpmZ42)2iNtE`uu4EXGsSXHdlk%|JC^ zP|wZe!)eH%Uf4u{YQ&&kI=us>$Dm5Q3_&$!P^I(b@Vh>PdbL^xHkvS~*E`RFYRaJA zByH9V_>>dK&0=S>3DjX|lclJZV>2BjYL8&nSlrEyf6dOaCb z%?px-_hL|*Iom+>W>8u$PJ-&gplbb42W82iv}+CoWyPTCG#LlVnnBg=kOHbNgQ{np z1ImU$>5Lo!%9cUtdaMG~k3rQBlpnA^gKDs<2`D=T)iA0vr~wSB(UF~?1~MqUbcsa= zF{s8lH9!q!Q2HSjbl*lHDu`<&!9T!%X>9}L3L~|@6|*GWo9Mg-;)@W`3Nc9 zyD+Fu?pB~CGpNpU!$G++s4iiWrA}c`7Lh+dxiP4&34KAiGpKH9BSCpEsP1ubydNHV;-=wlVl|l7ZmobuQ4609KsccVYP?n}2KzTDLD=TR=&R|g1j*^DYWKezG z*AqCO0&U*twmz6`2=d={uV49YG|7nC1^8gQo_D1Qbuuy{MD z00uRv>>;SR49Z?z&P@a|s3DErK+R)NLrvwqn$MtySw007#Go7;$AenHpoUki3=0{Q zV}P8SSj3=4glT~aW>6y|=Ym?ypq%0*i3?#+&S{-MEn!fj?hFJK%AiIUdx2WYpvII* z^JN)>8mBHzz5f{0_{IxCEoV>@OfP_1!JsBqtqfrdYLcTRo@yn7a&a32Y88W;93V}- z)eOotOuFIW3~EYbEl_J1lv})fIBOY{`}swn)-foL+m)aq7?fwRH1*aqD6cXZ1lqu$ zrm39)wUI$h*OTe!O$^H0bU3KZ3~GjDHmEHOYNn$FsI3fYmfJ2++ZfdBfMcMxGbo?1 zC7>c1lyBq;P&*jZoT_Xhib46M)xlHkWKjNhj6g**sDR>?pkf%*+%jqE?P5^#)Fc*- zWl-}Q%W00?3@XTUIjB7hYJsH;0_|l`3mr8S z3fcY@)IkQdBwioXAqEwCUP86Q3~K3ZS=-|o)Uu)iPzem`zp}QVjxeYdYEpqb%AmsZ zM?QsSbKBX8`GJ{$Z5D4l7gIc@dD5#SRYTfoH zpiVKUi1^l^QW(_w^Ac>IW>6b$OK;)~gW6am!S-1OwW&-hMX3yGOV!G7jzMkJllSU8 zgWA?k&hVx&sO^>}pwbysGJ*Z0zDth~LP?s50 z%;6AFnG9;zxk6A`3@Y}P)ETZYsNF>}>2#Gr?fDu5>KcRErrRQ!srpzboLgzcw6H&i~_O%F99)n6&leTO=gF2yS1L`4zI@xX}s7DOy zRG%e_$bLA_^C+1soGz1rdiECzP{kT;&|BZIn; zDr26X7}U+1QoR4npl&^ruGbd^b^D97^}aHwyEPVoDq~PN4W$P8jX~XOQv&KcgUao# z1L_BZx<5?9qMr=v!DLCq%NbPO92q|S#h~(+T?O@Zc z6DU;%^ZI;kobT6WQ13q(fvUrxK34MsRhL11s=onLJqGpJs)GQ1uy9*`&pw8ZfADK8HXxWKiEjC7)=-pnh!b4oZ(f{oH>XRAUBJep(W7eFpXG 
zx+AD24C;5`T2M_HRK>?6PzDU@uSyoEW(?|IeJTAmXHbJu-5YRRB#_{dPRF@sVKH3Zd)L8)!>1ZBdY)Z=8wY0aQCQe?+z!=P$r%cM+O z2BldbO`&!SO6!9MC{qSit8zW4_6$l}N6I@L7*w5B8lXBdsJh+dJck*Bsy8?ulsSXa znUDjj6NA#7^%qoU233E_5KvthRD+EYi&`+KhWkE)>dK%Ro%#W)8-vojCjId4465;C zNyB?EDE;?R+wRGrn*7TE)r&zH)O!P}H-l z)(on}OlkP{Wl$|c^g!7#DB}$hY}+!ZR(k_L^I_2|l-a-upoTIi^Kmj~F^oZV znj!sg2L{!7u>q*z464g|scbtkD2qM&L5*NgT~8E)8p)u#U6IK?CkEC1p$w8ZGpHVK z&x0DppnCrK25K~e>Rs0f)EEZUr$sENu?))6q5#x524ywiAE@yR%6gm(l1yMweP>8e zG?78s1WUR!i9y*$U)G)i7pr$b>hcPnQH=RKZ_ih8qn?X4) zl3)7_1~p>cPEa!$)W}%rrOskdPRGB3n$4h`Gh2f4VNj#;Y(e=lsL`)Cfttgh#{8Cp zP<{+*T}sf!qt`=v-w!3@gd{smBr8I)(KglZuS%IjAXP)iup zv|61(g)*q=&Gvy>%AmZ>B~)9+pk~-gv-m#-HPiVMsO1c5mY3u>D;U)5sz0n9#-Mz{ zhvBJKGAQ5ZX`ogys5yz@pjI;|zl+hJ!WoqRJ&9!2FsOhM9Z+i-)ZCxa!d}Oq=4s9a z6~Un9H(dj2J%bABm;!18gIds6CNnoOsD&e?p|FWTE%KCq!OaXRc%B)kEevY$Dye>K zWl$keGeK=*P)m-;%CMb5g{J3%ieykr@0Nht!JwACkd+~dLH+k#vecanYK2BWP|*x3 zOy3Js41-$PUQ&Qv3~H6tK~S*_YPF-3cXl(Va5sq=_Asb5bDM$M%b?bV$@I=X2DL6y z{srS0R7CtTQ2QCw`tw^r9bi!Zy9VYUgW6bh7Eg7EL2W8)3hFR}+M?D4R6K*)s%Hr* zfkAC+HwM%Z2DQD9H>jfwD$-#Os6+;}!}SrUBnB1bCppeB2DNi}Cs4;3RP?sqppqF> z%pnIBhEn}F%b@nR zk-5H926dp<6j0|F)WM-rRXfk14!OuJDQOJqu&?Ae=?p4<=}S--7*xXMN>CRW)R6=7 zUS%+-qi3X{aEU=BUYGHt%M2>1P$HR326gP?R!~_CDp^G$nJWzHgsvRzxyqnUnn;`D z8iP93L#k@o3@XKbIjHLl>h#1YP&XLV8K3r`ZZfE|p>k2cEe4gkxdy1)4C>r|$(`>o zsPm^!fV#_|(y9(;#`QdcdGAn@C+Wk3nVj zkio@#29-5L3)Djfb!Ad-P>&eYRiE*o9y6$Gq3b{uFsSTJWuOWf)b;)5pq?d}C zJ!Mcgv*o>d#-MH$T7W8IP`5v}0#(eQ?y5+6=Q)GQ(UqX+1%tZRO4jz53@W$BJWwSJ z>c0I{P^Ap&!NgcluNYL`?DwEvGpPI}QWt&0pdM~I2I?(?dK7mY)H??C__PM7_YA5a zTgFH}FsQ@s3#w!WAceXJ*|{1^)rJi((MZB3xg_dC3S|c4C;AzS^CNt)QiFA zKz(CSFDFQ-_MJhM%$5)52ZJhIBC+UC2K8!VC8%-+^*XLOs9y}~O^Ow$-wf*QH3^C; z7}UGRxuE_qsP`Z0g8Iv#K2(kX^^ZY)(g^@n$)G+Pp8=(!ru_b^yI;E9233tgeH|<< zq3R5(Y=X@7)nHKHW-SDz%Amf7$RxQMgZi;SmOga`^>d$O6B-Pv{FEHLt;wK%UEK~! 
zlR^D{R0v9oK~=n$@=h%V_2*v?P_-G8I%r#sxk9FP`V6Cb#W4?`V2~KLv2tE7?k>6Ss5BKD2;S4cgVGt- z7nC7`(wz|v%7{VL56%SDfvTWP;D8MK^++*X~&?N8Oga+QwG)CLYjK*8I<7wS5O@ol+oCopgJEObzxAgGi7D4U{G!Hr10C7LA8C; z5mYw@)vm%0RCfl|Ui$#39t^63VHT*K460)n2T;8jl$o6esNM|9d`v8;J`AeUbg5ri zGN{fAKZ3GiP+itZP-M-ZEMlc4)R#eZJ-z~z4TI`-Sq6b@8C3TNQfKJLpnANPQThH1 zs^{-vpzIh_@7fYG3}8@wn(qTOkU?2?k`HGPgR<%`q1s>uWj$JsLD@5?zSG=64Pj6= z3*_|KPzGhY<{7AA465HQ>3KOYsQ$+!n;6cZ?5YlQJ2I#N_wVAVMlh&>rDdQ-GN?hn zBxZ18Q1-Rvf^ue1Lz+o%VibcKYAy@%Xa+T`UoB8$7?i^(X$g&GP{XJCf*Qx59D`1P z8qc6cgv-h>fkBOokyhhG2IZ6_mF-Ck$~i+~1{Vf3Dp&4EnarR@myQDE%Am%S{|9Oc zgBqtL^(!|9HQwMID0c=m!F)6*4+b^Sb~PwZ1~tj~2q-TGw$vpIYJrUjs89yA&`I8_r3`A3r);Uq7*z1Qr=b30P>WZ~Q2KHP6|!?V zs1*!qNunyKFa{NR(Gt{32DS8_6R1@TYT3&;P^%f#e?L+{g)^uXnvFrNVNhXBWsGDk zgId|~GN^S7YL!iGP!SAjwNn#N>lswIXE3M@3~J511W+3p)Y?@qKy6}B>vqlrwV6Rh z91R1tg+Z;qAcd5z3~IwYsb6hlP#a(N2eq9+ZThhRR3wAiQd8cm9Smw~QyoxI3~F0P z>00h&P}}<+1QpGoB1c{a6~mx*cvc6si$O)ra|RX5pmwg3plCOPijISTv$piVQWQ`S=1 zKEt3=Mp}S6%b-qs1b|9qP-g;9f;z{b&aS)z>O6x=jWPn2#-Pp}nFK1GL7h*R(bEeI zD(!A1sEZ6Lz3Q^r3;`P?wst29?R6E_c`iDvLp7TFZxXg+XPF zC&M zAI?h#RWdgMR0)GB4U>6>QU>*EhxEK&F{sxGgFwAzP;b%>gL=cD-rns9>MetM_gs1t z?-ifz@puRDvA3Fwu`p%$!CdlaN4+d49CR^%H2KDQ%i2U$P`?;d z#W(qIelw`Q>T({Zf236U9IHYj>~RJQ*ysA`UqhF3DE>h5hpsi-Tz z|LR_ixuZZ;V^FGLvg1@|P->CIplUEE^>``os4^&xw8@~<7*x$W2SKSbD9z{c;b<@@ zt#8uqugRclt1keh$)L0wODw9zpz4?wfvUxz>ROEjRhvQ8bKDI|n?dQgT?SQ$LFvw| z392rGsvl+ysvd)C5GhBebr@8`cqydlGN?vrQlP8Pp!DuIgKEH_8W&5uzafLtFS`M% z5rb-~UJsNWgED9=^{d7Vs+s93Q2Gq2x#f9KO&FA+qs(kKWl%9U*BxY#EpiI@y zfHGlF?ez*kwPsKqOeKkH!=O4^%7WaML75HT0;(N@GM^&naZDLhrvMovY0scKub2U< z1B2?aU1Elg49eng1}HNI)%CnCD02qY?RIBSofuU2B01vPnL+jVDzRu62GvXTEhq~H z)mu;AtF8>HPdg7#-58W*pU0rOGbpR!%|Z2GP}WnVH_?+p_4StmT`vY@vpg46Zw6(% zU7E#x7*xN*gF#s`sQ%}qKxf6E>~6`2W6huj6iJ<-FM}HRRSGFK3~I1yI4D~NW#4E& zsD2D;NL$%b`!lGaeOiFBV^G5!d_WCgP!6t9pawFi;eNM44PsD^%MC#dW>6!xIfAlh zP$LiB12u#}Ii*S(K9oT@-;|YM7=s%1Y%?ea1~vMN#0&hVa1?`b@sqafXa+U;zpJ3eFeuk;CZNVLs40ge z4Ijs#+)_7!8qc8IZ%S`s0)z5+Dxum$2Icwr2dGI5YHAIAP%aE=T0=?0Co`z&ZO(ym 
zWl-L|WME?ogPJi++Wl?}YUbpApxhbMtU2pJc`&Hi%f5i}WKcd^JA(3JP`(Fcf^I5< znsat7sA&w!?}k($r!y%3r`14tGpK;iwxDJ(sKDw{ft<;p<~5L3<17X>zm1HZ&Sp?S zy{3TjVNeT($`9zvpccARf||pi7R@mO<;S3cm(2v_&!84>*#s(pL4_PR3u-QdT5?8$ z?LY<Bjq-2xTDpu&B=fm*_#)-0_LDwIL3-7LqTmNKYy``th-V^9&NcY*qk zL9Nf;2WmNk+E5@(y%h{<<45@y3}aB6RV0a9$)L9A$`HaT2DP>7&uXt`P}{o8juXzH zw%eeK)!&>dt@ zDdSW@9b!MmNou~ooD1$nmWeO^hL8aw) z0+qy|(%&ovb&Nq>sE~%jaR!y4E#;kL26f3$&JUhoP?tOZ0(Fu>W!g#M_Y{N58siNr zg+X1JF7@`)4C?Aaxi#VpgSxg>E)qY>pt57Tf=Xpj*N;yGb&f&ZxO^Pcc?Na!K|H85 z26gL|#G>g8>h|yEpe`_|yS3$H@kIueV^|MV27|iSc_gSy3@W$3WD}Pe)cw(iKxHzh z2h(Ih&SFq`3yMKqVNm&NrTTG|K|S0x4b(LT_2`&%;<6dk<4ey#U1v}Q52Su|gFzL( zstxKUgL?8yjzQgGP|s?~zu;{KRn#m5)Ex#@Y@Pz@E`xe*dlOU+gL>hd2kIV!dO6h? zR4#)m39<%tpFx#|&ja;itjYr4}%#kD3Xf3K`TVgVvy)FsRQR|AKnTpuX71hx3dDX% z{&dB22K9ZF%=NusP(OCc4g4<|)X$^MK$S44@(YsVlrpGaIkKg`Vo<+dECcnLK~;Qz z2fk9OpA%lw_8B}%mWKf?NRE@bZ zW$~Fosjh4S>I;KX+tCWtR|ch?5C*D@L20B(UGy7+s(B|9)OQA@`CJ9m4+f?6tu3gZ z463%eoDwQ$P}+^xfcnLt>X=>t^_xM}t-2nnfK}uu zA0{h9C4*`ZDZ#dihVuKbavR3Wk^E{5s!^J>gsL+rz1uQQS%X0}E|!joDudE5lMhFY zK{Zv&0j18M3>yCjN`pZ)Go1{oCWC5jDTin@8I<908C=w2P)2TbLDgbVEdr#4U7JC* zTp>A*HiI(Wegae-2G#2DJ5Y5Ql*xHRQ1uv8>)Ueojt+xrQxpwKmqE4t`WsYz24$)y zhiDowsP=kZpc*o$4()boY8wSy z-{>f)&J1cu+xwupFsPw@q_S@?-H2q49ao2G!%L;s1a3H zJ@jNyBM;5PQ}tp{PN_0r)|){&-~0%w4}%)@OcRtPgBtxs#*?fV)YuxbrCKwnaSii9 z^<_}w+ja(J!=NVg9t6skK}{ScTWUWBHEHq_Q2iN{%N#j^VaK2*|2F~D00!l{bv3Ag z3~I{3>!1cPD7RESP=gti`_1m4>=~5D(@0Q57?kH{*;0oxsQ+C8GmJq^Ybe=-1B060 zM&@LOGbry~QonL!P&0-}{b~e*nmO4S)JO(3YtBwkP7G@HGMV^wW>7v`8-p6fpnMOS zgBs1C=A4x@d<=u~yYT?jSO(?)R2S4Z1{Lsm2&nN4DzJJ8s0j>eUV}TJCNik`ZR&%X z#GrzDNlVCuK`j`%0n}s$wa_I2lq-W;G^aVJDGVxjnZyik3~KR~6`P#z3w z$=Rl$JQ-Bzjp?Ag7}U}yr$9|*P|H5u1vQO9Ew3&g&U6N~qJf-P_hwLGtq*{j!Jt<5 zd<<$PgIYDT4yaiSYPE~JSF;&ZxUclQd>GW4rG=n;8PwV>4M5FdQ0opz>E4e)MVyHN z<#hjLhC#(|ngwbtgGz`?2epnt9Z8YGZv=xnnr#AV zJ%dUtkOJKX29@+72-HRfb*%Cps7(wiS?2?&%?#>9tFfTAFsPH={XuPIP^SjV)crOF zl`=u*WVSP?)3aqkj$}}0mdGig9SrJh)x~j93@UZsaXi&d26Zk)15`AFI$w2hTnvLs 
zdo1;dGux`VKItt0Bul9b{0~Hb{DyQWcP{$b5y{-}z9cNIvgQRPj%%JX%?+xk% zgL*L22h>RhmA7~=s8b9ofBh{`DGci2UR_Y98Pubby+EB|P>-)jo#8BlDtIU{Ln?zR ze0vboIR^FQPdTXb4C+~3X$hqU~R4#-1d0ZCc`wXf)(<0ETEpEVKV88Ol;JNY`)bH0} zpz;}1#cxT(A2O)FweN#^#Gw8eb_eyCK~;8^aJzs&-B;-^XNwCNRJGB!K|Ntm)u&ej z^^`%?SSV4$GX|x)b~dOY2Bo&^0;pmJrG8BM;m;YA#$`27FBnwK2Tq_~GAPYg5;c@C zD6LLDlWI5!71-Rd3W|Q12L&&NMAh?-`Wt z0_nwlU{LkfNM-aRgK7}73e+bC)iCKIsLu?lQO0XfUl^3$eM!W>GN{I-vNDu0DE;yq zpuRDvrnL+}eP>Vx%|?Lw!JwL%&j9t4K{dCP_o|#h89KiL^@~9nO`QtrH-l;sv=~$c zgK8NrvFINLWgH_3(_aSFDoOg`{}_}>h9u&Z461dm1luY#mEVAMzfDOQsA>$VZFx^n z)ftqjmJBG>U{LK1j(}2SP#w%0fl^~o9c|?YRA*3T&dWe)Feq~`nfa~BpgPT$U|W+x zbq<&F99j&jOSA{5S`5k}Q3^b@8C2JcGH0R9pt{}r4XO@<>R!?iR9yzuKP1?0&Y%X@+yu&y zLD@HvmBENX4e20*BrO=!(7w`5ZONd9jg*zam_a#sB!X(ipoRzLfHGlFj;ky{wPsKw zqUL~V!=OeUi3Qb`K{=%tfojK~oO9}eGG$PsUYLPu&!9$sm-0>r1~pbg((sN9YFrZ? zP-YBjdq($GF{p_nr10CBK}~X(!fzJ_Fw=7SurSYE4fy|nnBHQybY=^gPQ3sm2DdaHEXVMIls6h1=pzImcyvAOjhA^o4rW-*G zWl%wurJ#l}s0EI7Kshj|g>C~u4QEh`0%XT=WKh8?WOQ)^gIc`37}Q7x6%sE6IwuCT z36)l=lo#Yqe*-2WCR7Y4Ov`7cnD8PwYCjX}9GsC9?EK}}&$5$ELm znHz&ze@lXGcLuegNE?&~gWCAD6DUsxwOMr*C@%)JrO^gZQyJ9Os*9+nF{o|5f8wd8 zGpOwj(o6MbP?4^CLCs)LJN#q_VJ3r$T0R-nEC#i6+jLN~8C3KkIU?!9pkh)b*!E>m zyKW5yHHSgPK9g+1k3sGJk_^h9LG7&}!FB+H+SkYu)LaG?*H(g}KnAtHw~Ud@V^9Yi zWO`>lgF5Ic^{XHTb;xfGs09q_@PCIvEo4yf+Zupc#Gn!m^#v8oppK*lfLhF;j^4}$ z6~dqrpMD0lgh3^Ju>}>%ppMt@1htewB{$3kwTwZXXd`1J|1qeOy<3A?&Y(^Wn+j?L zgG!ma0#q1-Iz30``c^WiGs_A=tzuATw|oV)nn9%=v;!5+pw69@`KvVy>ii9vzgo+n z(w<0VdmV#H|MU-31cSO*y+5e+3@W369Cp~ipf0su3Th*Ry4+KO?M)0SbBM&En;BG= zi&P-DFsLiOE}*tDsH;nNfZE2Ou5FfX_;v=BeV`hsNCtKNjD%`C7}SmHQuvKxP&W%@ z{%R+Ky7ln~sAvXtrYxI_l^+};6H5`%g%Q3}7u7}U$z zQfD~Mph}iVODLH^m2RpH>I8#&6(>Q_Ne1;gWgVzf4C>9bG*BrF>TQ7*sM8GU-G^?V z&M>I=m777GWl$e=Qb46Ls87ZpL7ihzpSw%_>O6z`GI$!OGzRsx>blu<230oe1)k~x zgZdUCS?Waw^?lq#1ZjY0i0)&P~wpenmb1@bz9dZ02$g6$g&s@nJjP&XM=^_hP`-C|HR zLYzR|W>Bgd7J$0Lpw#y60d|dE34?0ZwF#(F2Gx9^6{uGX%5YpDsMie2Xogg_-!P~a!I_}mGN_j8 zrGE8}K^gDq1nND5YIR~Fs1FRvBx@O{j|{4Hz63>|7*v}#QrZ5@pxXYCne8tO%Cyb| 
zP+u8Td!sN=WeloA7n$Dq#-KXdNgDp0L79!s1NDPJna{8U^^-w$3YJ1jIfLpP5d!KL zgX*&T4yfM@$|6~|)CvaGHOm6j9|qMee+;O<466H^C!qc@s2+dXgQ{duz3SM5Qqfd? z|J8%uRVza^2Gyr(WvI@eEbZ>%2du%Mtj0<=T$Mptdv5`y#-RExlJ`oTLD@uTgVJD7 zw!6E6s>z`GB`*f0$)NgY9tWkxpzQMNgQ~@#2D}*xsy2ffSm6Rnn?ViMJ`Ji4gR(b# z4yrDL8q#GqsCo=)sGUq%=rE{ZW2Eq_%b*;*C0(k|poUjnQQv?;Ij$?mQ#E8zBVu2J zYQ&&MCQD^ok3l(QdV^}rpq%rTgVJYEqh42nYQmsKSIFAlltGQv9s1=X5Cxj%Rasttqkcr9ZjZ5fp3@6n*zF{r7vWlJ?>P}7>r%Fv!cP4AQs zssn@a?k8DlM+P-xv`kr;F{qi-d_kErs96gn>g&XyX0Le$sxyP~*(Jet7Y5~fOjZU9 z1~unWB&eVQ6%%E1)we-8FsQZBvN8;3Q0o$9L3U(N5f|ry8o{8} z-;v^YO|(f6QdZ^mZnmm8_l4$ni+!{!=ScRT~I%kL2Y+h ziKiOJpdvjJL5*inJLXBcG=V`yt?mPAB7@qwQ<{2{7*zC8S^8WURLq3}P?H(dt{ge% z<;tL9Urquwg+cB95dg}KLG7)X1InF2?Q8N5lm~-~>nJOOCxhDGcMd2o26bShtPE2b z)IpCzP}3OHp?RvHrZcF+tE3z5&7k6=)`6PApc0NqmO7I`9l1~q)GP*dG)H35*$gW2 zg-mApFsP*O(hc`zP{*q-sGq~2lB+JL_hV2esxGMaXHX}tr{i}Oz@SczkSujBgGzC~ z3@VU8oenesHIG4^S!oAqK7%^DL+YYI3@SC@6{rOa>Rj4iPzxE<`MYC5En-k<&u4=Q zW>D$hE`VChpf0L^1Qo)dGW69zEn!fX+Dp$XltEp#lJTUa3@X#{3aDiaD$A_|)PD@> z%G`#amNTfUVN(5A!Jw{127wA=P}%VrpjI-d>*v3KTE(Dl+^Gd>HG{fYJPuSigSu5F zJ+Czk>W-R>C#_{rclF9atz%F*rc%F(U{Log7l2yNpmK*tg4)2K?z`!M+Q^_D1W18y z6NAcIAuGdX29>{EVumdY>fzygptdroN9Wss+Qy(B-yR5RJA*1H@&OgepbEcA%&>z& zJyn&JA&Nmg)01EOP6k!f&J|QNgDUP50xE_JWqa*igFRhZ)qTwz43{GpNtKWgs?zL46svAJh>B^>wmj6Gs_TnV(c36B*RE|3-pJ zVo=|=Mu0lTpne=Y4(d3A`k7iCR5F7qzu5rP2?q7+sT-)14C?pi<)BV6s6W*uiA!Nn ze;a%Tb(%r_Ya`=HXBbpvFDdVwB~W=PLt{XtGN@`UnV`-wsOr9w#GPkQHI_*dm&Tw} zw@4C~&Y;u|Na6PagHk_pAJjz#rExB@NZ3dOepla98 z2bIO3v`s2OU13mldg_C^%Ao2FnF;C|gQ_=4g6(VurQ;(#`s)lzcd0t48w{%c=I)?w zGN=ao7lOLQpcCg) zsh|oNRI8MKpb8n3Nw(}bPZ(6|f~laMGN?8mqCh=kP;D!-K@~A5Q(Z$)#SE%_D{D~C z8B~WJ67{`cP#x{9K)qy8W)mgYE@4pSv!y^+%Ah(e`2gw_gX+9VVusfYs!P>1)NdG+ zMT&elZy8kAYqF)jV^G~5e+TuRL3RJ&1nL8W>QT82)JF!@OJ@(LPYkNJ@gq>58C0Kc zl7@d_P?m!SfcnaytS0z@Dq~RAv*JK~V^DoVWHR$RgRs6Py9aJ?O%{xT^0meNc8$DoFEvjA1e zpoR{T1zAN)`TbXU!^TUnU5!CG%sd6EI)fS>BBlEp49ams7f`AUYQ$a#P-+Zn9eZ&!84gmu7JX2DNBm)%{ltDtKLgJe3)PS{&;R%A7%k9G5ynCkD0TvaAf98C2*4 
zS&+LhsHLxagR)>y%YKgs)s;alue}RYHwLw$`7Kc08B|y&nZN46pjP&81*#{5T2*y% zTrUQ-dYXi4y%|*af*+vzFsL)Jm7yPl+FWZcsQwIUOEa0wv|~_P&5J+{U{KroNz5>iL2Vzk5Y!+B z6**N3zk?anj-ZdA>={(l8XZtW7}U-fSs8{hsOY3kpoTH1n2fWa92nHD`!c;VoI%Bw z$|W$43~G0II;as0YOmHmP$L=CzGm8>oETJ`xtvRNW>EWWB`6xjpbm^`4r(-mIyiMJ zs4)!cP>`hIV;R)pHS#YwjzPu8bOJS=K_w&wf||gfj%3J(Gm$|Z&3y=J5`#)CZ2`)K zK_!)s1T~pK9oO0h%9TMS8whznaFNQWIs$VmgC5mk|ldn?aq=m1}%vFsQT=`EX`3sPu9> zP_r1+MJ*}a&t^~=2KPYuFsMuBhM;^I)MeX&pyn{BOlKJ*@ncX~UVA|KGpH-`<--YJ zP*=mH&M=ojU5lOoDv&{CCraUW9)r4m@j9sa4C+R%5vU*rb+cp&s09q_)=wW$3mMcM zO)0T1Vo-Mt8iERDP&sCjO)O?m_iQ963Sm&WPHCW)FsS=pvh;;As0Z_hf?CR;@>U-O zwTwaKN9%$5k3l_5l#a=A2KA`w8tN4c>hZmec&ac4Rq*l?sFe(=@TXk2vWh`H)tm@w zHG_KAbUUbU236GYDyTIKs@TQ^)LI7h+({~s>loAvPw7oWFsPUFq$RYTL6xkML7)u` zs&r>BP#YQ4tD~NvHZiEz7vy-|W(M^p=O(Bv4C?KRr=YensCPf4-M@`NeW>XHYCD7a z*hE%_NCx$(gB;1XSxJMdl*!Cj@(AFmqGn{AwkhT2KD>9jFH4Ks6QIg^V-j#{_4w) zbAUnp>tF@yAcLy3o&)L-fy!4IAr;8O462&@KTz=us(N5OPzemG#!9JdA7N0cJLK1X zltHN_908Tcpw!bHKqWCKjk~g?9%E27pWg;`oIz=RuLCNXLDkZ51$BZ!)ovnnhLa3R zyMr|KPBExD);XY37*yR6<)BV8sCph!zdFO9bOL2dJM0_rM*Y7v$K>KcP;8TkZMHiI%w=nLvP zgKCvF8q^I2WpZaPsGAI`b#Wf3TMVjA*)LGH8B{xUsX*RgP^OL7gSyM0+M7zSox`9y zSV^#bk3n^GGzXQ-pv>G>gSyY4%mdOuJz!9sR(uDQ$Dle#b_A8rpt{5l1ND$WS)7jn z^@u@ry?qAMV+Pf&=ntp@2GzaH7*rvH>Zv9@uO|$umtHQYrwpohyEmYoF{nP4?LZYV zD9ho4K@~G7t0^-NA6K@RR!07X~$ac_&a`8IDFsKQ`WXj?% zgPQ270qP%vn&j65R3(FQ`Og=WN-gF0U*%8UngFUAgK|B19aMD&H6^t^s2U8)?WQ#- zRR-n$ROV#V7?j6nnX*u4P+ryFfzn`5QyaDiRg*zYYvTk;lR-`I6$VO+L3s~745}7` zn&Bb^y4nnC=A7oBv>DW_W%i)zFsRvER)DI@pnMJ-0#%Pe`JR>0Qym61=Y|}R)MZe9 zPnLkH&!GH2NnNx7gPL1S>Y@!9RABwjpc*l#d98%k@}TBgIX}@ z5vV2%YN3zxQkycUMN4f#88E2e%@LrQF{s7+3qUnzP$6d|W-w$>ORhHqWyGLD3#D1y zfYI%K`%xuM=R+vcr%7j6M^^hH>HG^7de;HI82DNIElcXJ5m^21u!JxMGFa_0>L2a{_AFvyP+CK3CsO}6Z(npfG z9t>(nXeg+j3@U2VF;Kl2)Xx37pn5Z?=+hlQ^6lnCsMrtkURg7! 
zJu2%#^<_|db$5WWVNm-_rh~F&P;otGgX+hi_S;L;*PlThm?+tV9fLaPQw`Jr26ZTO z0;quu>hPwMpawCh__#Zu1~aIHlpmn%8Pt(%*>Q$2sG|joKn-P3iB*5@Y8Zn$rlNwU za$r!$buB>+XHdzl~X9jh8i8NnE zF{m?}WWa1RgE|{0S?U-Dm6{R^YAk~~SM}$v#xbb#k0o6i&!Eyi^a3@3L8VtpiFG1_ zx~Q`X)FcL#VSFBx3xm4UP3l*Z8Pw&$((ZRlGb5NcPD*KcxsQ+v2KK!v>!!Uq1B#P1`DoTZv5e-B|^E9-M#yOShsHB18 z740G_N=u<3DkD*}qh0Exfd-mJMq5-Osq6MHoa-O>eqQf~$ManG_56M=4C>A;$r#mkEsK=|NW#P`Co@{dk<-wqGqJu$sGN{~BGA#a=LFJ`ag7RWePxEA0 ztYA>j-g$xYW>C+6#(`SKpk8WNg7RTdueweG<;$S*Ef0cP&Y%j0OO0d&gDRX_0%|3L zDsnRbwTeN#UM1J-Rx_yL5UDh;VNfMe@u1c+sM3=+K&@j?W$7~1ThE~0|`p_a5Q~-ne*u@A`AcOj3A~GFacC7gKD+h2UHw` zY8`wUR6K)f6Dh}~qYSETvLmPj2G#C*7^q_ms{LbW&?Pb`&C*|>k{Fa$jr3%WGbn8} z3FuBRD4ou-*-mCq9nCU8r7);Y4zk%k$)Gy_DJ_dr3`%#t1iz;ll%8(^s5110b5Y#7?l2rMWD_zsBYH+L0w=_-5<%BP$u)_`ulYTW$IH1>IQ={3zA{u zO$KFN|H7+u2G!@dFFw^R24!(ovhCXp%JSiDQ2#Ngz9rp3-CR4#)WRy7S&9)lX8>JI8DgBq!G z5!5pVHLACS_s<#BAA>bOyRH8CpzR562^RCFIy34@yaO|tD$2IZt8o!>GBHAP#dUT+xG z)LzoNf6Jhz*-5?S9fO)ap$Dk<4C=4B6G44oP=70Av;C1l%?PXp^@%~vjL-yC&Y)%; zlivMj2IYLo4^#z%nw@zZR3(F&QzZ8}zA&h{Uv)rzWl%0nWzX=9LCxzRhqx*RHQzWD z)OQB8z)lTRHG^^;FVSxegIYKz9Mlg6<)%;pRm-3j1z=6vsv(2&DC`fa5rgvl5)7&_gYr_AO_36VQfNue(1byG8_UwC z%%GOp$|Js-GAJL%Ku{_S%6GObeX0yfBl1~+c2mNJ>_d}%b)@V zI)ZA)paRFq%Fv!cZFG(XrOBW+c}l#a#h^BCPzKe3K?UuRQ>r$D3Xb0kN{2yhNz(__ zkwI;}FX=}o1{IPozgL|Z)V2!wkE6?=b|}dOJ3R)qQ*#EWE(~gykt}^(8C0mvO;Gv_ zYWLW3P~8~R9%pIxb!Sjvo&lhGFsN{Ux$a=Vpdxllf5(tP?TwdXxDkWempUC(PX@LB zUIZv(26f<7I;dU@DzZWsRBr|qr6ike69yICehny726f0V36vRwI&3W~gE@mbGFBT@ z9|jdO%LV^AlpB)=NOpiYgx25K;aIz3a4 zOG6mc8F$IA>>1SA_5Pq77}U94v7m-BsPi#*Kn-J17tW6YHJm}EW_W-a!JyJ!lz|$_ Npe~lTQMf2fegoIPo&^8^ literal 0 HcmV?d00001 diff --git a/code/recording/data/scene_camera/2015_10_03/000/user_info.csv b/code/recording/data/scene_camera/2015_10_03/000/user_info.csv new file mode 100644 index 0000000..c868dc7 --- /dev/null +++ b/code/recording/data/scene_camera/2015_10_03/000/user_info.csv @@ -0,0 +1,2 @@ +name +additional_field change_me diff --git a/code/recording/data_grabber.py 
b/code/recording/data_grabber.py new file mode 100644 index 0000000..9a343a1 --- /dev/null +++ b/code/recording/data_grabber.py @@ -0,0 +1,61 @@ +import os +from tracker import performMarkerTracking + + +ROOT_DATA_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants' +SQUARE_SIZE = '0.16' + +def main(force = False): + ''' + Processes all the participants recordings and performs marker tracking on each video and stores marker data in a npy file near the video + ''' + c = 0 + for d1 in os.listdir(ROOT_DATA_DIR): + if d1.startswith('p'): # every participant + d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/ + d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../ + for d3 in os.listdir(d2): # every recording + d4 = os.path.join(d2, d3) # .../pi/../00X/ + print '> Processing', d4 + frames_dir = os.path.join(d4, '_aruco_frames.npy') + if os.path.isfile(frames_dir): + if not force: # already processed this recording + print '> Recording already processed...' + continue + else: # video processed, but forced to process again + # remove old file + print '> Reprocessing file:', frames_dir + print '> Removing old marker file...' + os.remove(frames_dir) + + world_path = os.path.join(d4, 'world.avi') + + log_dir = os.path.join(d4, '_ArUco.log') + read_log = False + if os.path.isfile(log_dir): + read_log = True + else: # no log available > process video + if not os.path.isfile(world_path): # .avi file exists + # Creating avi + print '> AVI video does not exists, generating it from mp4 source file...' + os.popen('avconv -i ' + os.path.join(d4, 'world.mp4') + ' -c:a copy -c:v copy ' + world_path + ' -v quiet') + + # Perform tracking... 
+ performMarkerTracking(read_from_log = read_log, log = True, log_dir = log_dir, + recording_video = world_path + ' ', frames_dir = frames_dir, + square_size = SQUARE_SIZE) # in case of relocating camera.yml input the new path as cam_math + if not read_log: + # Removing avi file + print '> Removing avi file:', world_path + os.remove(world_path) + c += 1 + print '> Processed %s recordings so far...' % c + # print '> Halting...' + # return + +if __name__ == '__main__': + main(force=False) + print 'Finished.' + + + diff --git a/code/recording/eye_world_correlation.py b/code/recording/eye_world_correlation.py new file mode 100644 index 0000000..888e235 --- /dev/null +++ b/code/recording/eye_world_correlation.py @@ -0,0 +1,85 @@ +''' +(*)~---------------------------------------------------------------------------------- + author: + Julian Steil + Master Thesis (2014): + Discovery of eye movement patterns in long-term human visual behaviour using topic models +----------------------------------------------------------------------------------~(*) +''' +import sys,os +import numpy as np + +PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25'] + +ROOT = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants' +timestamps_world_path = 'world_timestamps.npy' +timestamps_eye_path = 'eye0_timestamps.npy' + +def main(): + for p in os.listdir(ROOT): + if p in PARTICIPANTS: + print '> Correlating eye-world images for', p + d1 = os.path.join(ROOT, p) + d1 = os.path.join(d1, os.listdir(d1)[0]) # ../p_i/../ + for d2 in os.listdir(d1): + path = os.path.join(d1, d2) + print '> Processing', path + process(path) + print 'Done.' 
+ +def process(root): + timestamps_world = list(np.load(os.path.join(root, timestamps_world_path))) + timestamps_eye = list(np.load(os.path.join(root, timestamps_eye_path))) + + no_frames_eye = len(timestamps_eye) + no_frames_world = len(timestamps_world) + + # Detection of Synchronization-Matchings to initialize the correlation-matrix + frame_idx_world = 0 + frame_idx_eye = 0 + while (frame_idx_world < no_frames_world): + # if the current world_frame is before the mean of the current eye frame timestamp and the next eyeframe timestamp + if timestamps_world[frame_idx_world] <= (timestamps_eye[frame_idx_eye]+timestamps_eye[frame_idx_eye+1])/2.: + frame_idx_world+=1 + else: + if frame_idx_eye >= no_frames_eye-2: + break + frame_idx_eye+=1 + + + no_of_matched_frames = frame_idx_eye + print "no_of_matched_frames: ", no_of_matched_frames + + # Synchonizing eye and world cam + print no_frames_eye, no_frames_world + correlation = [] + for i in xrange(no_frames_world): + correlation.append([]) + for j in xrange(1): + correlation[i].append(float(0)) + + frame_idx_world = 0 + frame_idx_eye = 0 + while (frame_idx_world < no_frames_world): + # print frame_idx_world,frame_idx_eye + # if the current world_frame is before the mean of the current eye frame timestamp and the next eye timestamp + if timestamps_world[frame_idx_world] <= (timestamps_eye[frame_idx_eye]+timestamps_eye[frame_idx_eye+1])/2.: + correlation[frame_idx_world][0] = frame_idx_eye + frame_idx_world+=1 + else: + if frame_idx_eye >= no_frames_eye-2: + frame_idx_eye += 1 + while (frame_idx_world < no_frames_world): + correlation[frame_idx_world][1] = frame_idx_eye + frame_idx_world+=1 + break + frame_idx_eye+=1 + + correlation_list_path = "eye_world_correlation.npy" + correlation_list_csv_path = "eye_world_correlation.csv" + np.save(os.path.join(root, correlation_list_path),np.asarray(correlation)) + np.savetxt(os.path.join(root, correlation_list_csv_path),np.asarray(correlation), delimiter=",", fmt="%f") + + +if 
__name__ == '__main__': + main() diff --git a/code/recording/opencv/calibrate_and_save.py b/code/recording/opencv/calibrate_and_save.py new file mode 100644 index 0000000..9d774c6 --- /dev/null +++ b/code/recording/opencv/calibrate_and_save.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +''' +This is a modified version of opencv's calibrate.py that stores the camera +parameters in the YAML format required by ArUco. an example of this format +is given by: + +%YAML:1.0 +calibration_time: "Sa 08 Aug 2015 16:32:35 CEST" +nr_of_frames: 30 +image_width: 752 +image_height: 480 +board_width: 8 +board_height: 6 +square_size: 24. +fix_aspect_ratio: 1. +# flags: +fix_aspect_ratio +fix_principal_point +zero_tangent_dist +flags: 14 +camera_matrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 418.164617459, 0., 372.480325679, 0., 417.850564673, 229.985538918, 0., 0., 1.] +distortion_coefficients: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -0.3107371474, 0.1187673445, -0.0002552599, -0.0001158436, -0.0233324616] + +(Reference: https://wiki.mpi-inf.mpg.de/d2/ArUco) + +How to use: +- Copy and paste this in //samples/python2 +- Run it like + python calibrate_and_save.py --save "//camera.yml" "//*.png" +- Then you can use ArUco like + .//build/utils/aruco_test_gl live //camera.yml 0.039 + +More on calibration: http://www.janeriksolem.net/2014/05/how-to-calibrate-camera-with-opencv-and.html + +UPDATE: +Alternatively, you can follow the steps below +- After disabling integrated webcam: + echo "0" > /sys/bus/usb/devices/2-1.6/bConfigurationValue + (need to first find which usb device corresponds to your webcam) +- Plug in pupil tracker and use the official precompiled cpp script like: + //build/bin/cpp-example-calibration -w 8 -h 6 -s 0.039 -o camera.yml -op +''' + +import time + +import numpy as np +import cv2, cv +import os +from common import splitfn + + +def saveCameraParams(save_dir, nframes, w, h, bw, bh, square_size, + camera_matrix, distortion_coefficients, 
                     fix_aspect_ratio = None, flags = 0):
    # (continuation of saveCameraParams(save_dir, nframes, w, h, bw, bh,
    #  square_size, camera_matrix, distortion_coefficients, ...))
    # Serializes the calibration results into the ArUco-compatible YAML
    # layout shown in the module docstring.  Note: `save_dir` is a file
    # path, not a directory.
    time_str = time.strftime('%a %d %b %Y %H:%M:%S %Z')
    lines = []
    lines.append('%YAML:1.0')
    lines.append('calibration_time: "%s"' %time_str)
    lines.append('nframes: %s' %nframes)
    lines.append('image_width: %s' %w)
    lines.append('image_height: %s' %h)
    lines.append('board_width: %s' %bw)
    lines.append('board_height: %s' %bh)
    lines.append('square_size: %s' %square_size)
    # fix_aspect_ratio is optional; it is only written when supplied.
    if fix_aspect_ratio:
        lines.append('fix_aspect_ratio: %s' %fix_aspect_ratio)
    lines.append('flags: %s' %flags)
    lines.append('camera_matrix: !!opencv-matrix')
    lines.append(' rows: 3')
    lines.append(' cols: 3')
    lines.append(' dt: d')
    lines.append(' data: %s' %repr(camera_matrix.reshape(1,9)[0])[6:-1]) # [6:-1] removes "array(" and ")"
    lines.append('distortion_coefficients: !!opencv-matrix')
    lines.append(' rows: 5')
    lines.append(' cols: 1')
    lines.append(' dt: d')
    lines.append(' data: %s' %repr(distortion_coefficients)[6:-1])
    with open(save_dir, 'w') as f:
        f.writelines(map(lambda l: l+'\n', lines))


def readCameraParams(cam_mat = None):
    '''
    Reads an openCV camera.yml file and returns camera_matrix and distortion_coefficients

    cam_mat: path to the camera.yml file; when omitted, falls back to the
    module-level CAMERA_MATRIX constant (presumably defined elsewhere in
    this file -- TODO confirm).
    Returns (camera_matrix, dist_coeffs) as numpy arrays, or implicitly
    None on a parse failure.  NOTE(review): callers that unpack two values
    will then raise a TypeError; confirm this is the intended failure mode.
    '''
    if not cam_mat:
        cam_mat = CAMERA_MATRIX

    # Flatten the whole file into one lowercase string so the bracketed
    # matrix payloads can be located by plain substring search.
    data = ''.join(open(cam_mat.strip(), 'r').readlines()).replace('\n', '').lower()
    try:
        ind1 = data.index('[', data.index('camera_matrix'))
        ind2 = data.index(']', ind1)
        # NOTE(review): eval() on file contents -- only safe for trusted,
        # locally generated camera.yml files.
        camera_matrix = eval(data[ind1:ind2+1])
        # Reshape the flat 9-element list into a 3x3 matrix.
        camera_matrix = np.array([camera_matrix[:3],
                                  camera_matrix[3:6],
                                  camera_matrix[6:]])
        ind1 = data.index('[', data.index('distortion_coefficients'))
        ind2 = data.index(']', ind1)
        dist_coeffs = np.array(eval(data[ind1:ind2+1]))
        return camera_matrix, dist_coeffs
    except Exception:
        print 'Could not load camera parameters'
        print 'Invalid camera.yml file.'
if __name__ == '__main__':
    import sys, getopt
    from glob import glob

    # CLI: calibrate_and_save.py --save <out.yml> [--debug <dir>]
    #      [--square_size <size>] <image glob>
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    try: img_mask = img_mask[0]
    except: img_mask = '../cpp/left*.jpg'
    # print 'mask is', img_mask
    # img_mask = img_mask.replace('10.png', '*.png')
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))
    # NOTE(review): the command-line --square_size value is immediately
    # overridden by this hard-coded constant; remove this line to honor
    # the flag.
    square_size = 0.00122

    save_dir = args.get('--save')

    # Inner-corner count of the chessboard pattern (columns, rows).
    pattern_size = (8, 6)

    # Ideal board-plane 3D coordinates of each corner, scaled by square size.
    pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
    pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = 0, 0
    for fn in img_names:
        print 'processing %s...' % fn,
        img = cv2.imread(fn, 0)  # grayscale
        h, w = img.shape[:2]
        found, corners = cv2.findChessboardCorners(img, pattern_size)
        if found:
            # Refine detected corners to sub-pixel accuracy.
            term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
            if debug_dir:
                vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
                cv2.drawChessboardCorners(vis, pattern_size, corners, found)
                path, name, ext = splitfn(fn)
                cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print 'chessboard not found'
            continue
        img_points.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)

        print 'ok'

    # rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))

    # Seed the optimization with a previously stored intrinsic guess.
    # NOTE(review): root_dir is hard-coded to a local machine path.
    root_dir = '/home/mmbrian/Pictures/eye_camera_images/'
    cameraMatrixguess, distCoeffsguess = readCameraParams(os.path.join(root_dir,'_camera.yml'))
    print "cameraM: ", cameraMatrixguess
    print "dist: ", distCoeffsguess

    # Force fy = fx (unit aspect ratio) and pin the principal point at
    # (320, 180) -- consistent with the FIX_ASPECT_RATIO and
    # FIX_PRINCIPAL_POINT flags passed below.
    cameraMatrixguess[1][1] = cameraMatrixguess[0][0]
    cameraMatrixguess[0][2] = 320
    cameraMatrixguess[1][2] = 180

    # Calibrate camera intrinsics
    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w,h),
        cameraMatrixguess, distCoeffsguess, None, None, flags = \
        cv.CV_CALIB_USE_INTRINSIC_GUESS + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_FIX_ASPECT_RATIO)
    np.save(os.path.join(root_dir,'dist.npy'), dist_coefs)
    np.save(os.path.join(root_dir,'cameraMatrix.npy'), camera_matrix)
    np.savetxt(os.path.join(root_dir,'dist.csv'),np.asarray(dist_coefs), delimiter=";", fmt="%s")
    np.savetxt(os.path.join(root_dir,"cameraMatrix.csv"),np.asarray(camera_matrix), delimiter=";", fmt="%s")

    print "RMS:", rms
    print "camera matrix:\n", camera_matrix
    print "distortion coefficients: ", dist_coefs.ravel()

    print 'Width:', w, 'Height:', h
    print 'nframes:', len(img_names)
    print 'square_size:', square_size
    print 'board_width:', pattern_size[0]
    print 'board_height:', pattern_size[1]

    saveCameraParams(save_dir, len(img_names), w, h, pattern_size[0], pattern_size[1], square_size,
                     camera_matrix, dist_coefs.ravel())
    print "Saved camera matrix to", save_dir

    cv2.destroyAllWindows()


# ===========================================================================
# (patch boundary) new file: code/recording/process_recordings.py
# ===========================================================================
from __future__ import division
'''
For each experiment, this script tracks movement of the marker in the video from the information in aruco_frames.npy
It then correlates this information with gaze data from pupil_positions.npy
finally, for every target in the video (25 targets in calibration, 16 in test), it maps 3D marker position (mean position over the duration of pause)
to the gaze position (mean position over the pause duration) and stores this info together with the projected 2D marker position in a separate npy file.
the resulting file contains the ground truth data for this experiment.
+''' +import os, sys +import numpy as np +import matplotlib.pyplot as plt +from pylab import rcParams +from scipy.ndimage.filters import gaussian_filter1d as g1d +from scipy import signal + +from sklearn.neighbors import NearestNeighbors as knn + +# from sklearn import svm +from sklearn.cluster import AgglomerativeClustering + +from tracker import readCameraParams, Marker +from util.tools import is_outlier, moving_average + +sys.path.append('..') # so we can import from pupil +from pupil import player_methods +from vector import Vector as v + +import pdb + +ROOT_DATA_DIR = '/home/mmbrian/HiWi/etra2016_mohsen/code/recording/data/participants' + +def unifiy_markers_per_frame(marker_data): + ''' + Since ArUco sometimes detects a marker twice in a frame, we need to either ignore one or somehow compute their mean. + Also this method maps each final marker to its center's 3D and 2D position wrt scene camera + ''' + camera_matrix, dist_coeffs = readCameraParams() # in case of relocating camera.yml input the new path as cam_math + mdata, mrdata = [], [] + for fn in xrange(len(marker_data)): + if len(marker_data[fn]) > 0: + markers = map(lambda m: Marker.fromList(m), marker_data[fn]) + markers = map(lambda m: np.array([np.array(m.getCenter()), + np.array(m.getCenterInImage(camera_matrix, dist_coeffs))]), markers) + marker = sum(markers)/len(markers) + marker = [marker[0][0], marker[0][1], marker[0][2], marker[1][0], marker[1][1]] + # marker_data[fn] = marker + mdata.append(marker) + mrdata.append(marker) + else: # if marker is not detected, assign last detected position to this frame + # marker_data[fn] = marker_data[fn-1] + mdata.append(mdata[fn-1]) + mrdata.append([]) # this contains real marker information (all tracked positions) + # return marker_data + return np.array(mdata), mrdata + +def fix_labels(labels, window = 2, elements = [0, 1], outliers = []): + labels = list(labels) + for i in xrange(window, len(labels)-window): + neighborhood = labels[i-window:i+window+1] 
+ if outliers[i]: # removing this label from decision making + neighborhood = neighborhood[:i] + neighborhood[i+1:] + element_counts = [list(neighborhood).count(e) for e in elements] + dominant_element = elements[element_counts.index(max(element_counts))] + labels[i] = dominant_element + return labels + +def find_intervals(labels, mean, marker_speed): + ''' + Given the label information of frame to frame motion speed, this method returns the frame + intervals for which the marker is either "moving" or "not moving" + Notice that len(labels) equals the number of frames minus one + ''' + nm_label = labels[0] + intervals = [] + curr_label, start, end = -1, -1, -1 + not_moving = 0 + for i in xrange(len(labels)): + if curr_label < 0: # first label + curr_label = labels[i] + start = i + else: + if labels[i] != curr_label: # label changed + end = i + intervals.append([start, end, curr_label]) + if curr_label == nm_label: not_moving+=1 + curr_label = labels[i] + start = i+1 + end = len(labels) + intervals.append([start, end, curr_label]) + if curr_label == nm_label: not_moving+=1 + + # Now we do a post check to see if two non moving intervals are very close to each other, + # the middle interval is most likely a misclassification + # computing average interval length for moving intervals + if (len(intervals) > 49 and not_moving > 25) or (len(intervals)>31 and not_moving>16): + ret = merge_intervals(intervals, nm_label, mean, marker_speed, remove_outliers=True) + return ret, sum(1 for e in ret if e[2] == nm_label) + else: + return intervals, not_moving + + + +def merge_intervals(intervals, nm_label, mean, marker_speed, remove_outliers=True): + mlength = np.array([seg[1] - seg[0] for seg in intervals if seg[2] != nm_label]) + nmlength = np.array([seg[1] - seg[0] for seg in intervals if seg[2] == nm_label]) + if remove_outliers: + mlength_outliers = mlength[is_outlier(mlength, thresh=3.5)] + avg_m_length = (sum(mlength)-sum(mlength_outliers))/(mlength.size - 
mlength_outliers.size) + + nmlength_outliers = nmlength[is_outlier(nmlength, thresh=3.5)] + avg_nm_length = (sum(nmlength)-sum(nmlength_outliers))/(nmlength.size - nmlength_outliers.size) + else: + avg_m_length = sum(mlength)/mlength.size + avg_nm_length = sum(nmlength)/nmlength.size + + thresh = 3.5 # removes a moving interval if average length is at least this time larger than its length + i = 1 + ret = [] + ret.append(intervals[0]) + while i < len(intervals): + length = intervals[i][1] - intervals[i][0] + ratio, label = 1, intervals[i][2] + if label == nm_label: + ratio = avg_nm_length/length + else: + ratio = avg_m_length/length + if ratio>=thresh: # average length is at least 2 times larger than the length of this interval + # replace this interval by merge the two not moving intervals around it + # check if average of elements in this interval is greater than mean + if np.mean(marker_speed[intervals[i][0]:intervals[i][1]]) < mean: + last_intv = ret.pop() + ret.append([last_intv[0], intervals[i+1][1], 1-label]) + print 'Merged two intervals' + i+=2 + continue + else: + pass + ret.append(intervals[i]) + i+=1 + return ret + + +# def main(force=False): +# rcParams['figure.figsize'] = 15, 7 +# recordings_processed = 0 +# recordings_successful = 0 +# for d1 in os.listdir(ROOT_DATA_DIR): +# if d1.startswith('p'): # every participant +# d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/ +# d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../ +# for d3 in os.listdir(d2): # every recording +# d4 = os.path.join(d2, d3) # .../pi/../00X/ +# print '> Processing', d4 +# frames_dir = os.path.join(d4, '_aruco_frames.npy') +# if not os.path.isfile(frames_dir): # the recording is not yet processed for marker tracking +# print '> Recording does not contain marker data...' +# continue +# intervals_dir = os.path.join(d4, 'gaze_intervals.npy') +# if os.path.isfile(intervals_dir): +# print '> Recording already processed...' +# if force: +# print '> Processing again...' 
+# else: +# continue + +# marker_data = np.load(frames_dir) +# # marker_data includes data on tracked markers per frame +# # it's a list with as many entries as the number of video frames, each entry +# # has a list of tracked markers, each marker item has marker id, marker corners, Rvec, Tvec +# wt = np.load(os.path.join(d4, 'world_timestamps.npy')) +# # Processing pupil positions +# pp = np.load(os.path.join(d4, 'pupil_positions.npy')) # timestamp confidence id pos_x pos_y diameter +# # pos_x and pos_y are normalized (Origin 0,0 at the bottom left and 1,1 at the top right) +# # converting each element to dictionary for correlation +# pp = map(lambda e: dict(zip(['timestamp', 'conf', 'id', 'x', 'y', 'diam'], e)), pp) +# pp_by_frame = player_methods.correlate_data(pp, wt) + +# # Keeping only pupil positions with nonzero confidence +# pp_by_frame = map(lambda l: filter(lambda p: p['conf']>0, l), pp_by_frame) +# # Computing a single pupil position for the frame by taking mean of all detected pupil positions +# pp_by_frame = map(lambda data: +# sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame) +# # Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame + +# # Checking if timestamps, markers per frame and pupil positions per frame are correlated +# assert len(marker_data) == len(wt) == len(pp_by_frame) + +# # Good, now we need to find the frame ranges in which marker is not moving, for that we need the marker_data +# # and using the position info per frame, we can compute movement speed and detect when it is it almost zero + +# marker_data, mrdata = unifiy_markers_per_frame(marker_data) +# # Smoothing x and y coords +# marker_data[:, 3] = g1d(marker_data[:, 3], sigma=2) +# marker_data[:, 4] = g1d(marker_data[:, 4], sigma=2) + +# marker_speed = [] +# for fn, fnp1 in ((f, f+1) for f in xrange(len(marker_data)-1)): +# if marker_data[fnp1] != [] and marker_data[fn] != []: +# # dx = 
marker_data[fnp1][0] - marker_data[fn][0] +# # dy = marker_data[fnp1][1] - marker_data[fn][1] +# # dz = marker_data[fnp1][2] - marker_data[fn][2] +# # speed = np.sqrt(dx**2 + dy**2 + dz**2) * 100 + +# # print fn, fnp1, len(marker_data), marker_data[fnp1], marker_data[fn] +# dx = marker_data[fnp1][3] - marker_data[fn][3] +# dy = marker_data[fnp1][4] - marker_data[fn][4] +# speed = np.sqrt(dx**2 + dy**2) + +# # print 'marker speed:', speed +# marker_speed.append(speed) +# else: +# marker_speed.append(marker_speed[-1]) # set speed to last speed if marker could not be detected +# # Performing binary clustering on marker speed +# model = AgglomerativeClustering(n_clusters=2, linkage="ward", affinity="euclidean") +# marker_speed = np.array(marker_speed) +# # Checking for outliers based on "median absolute deviation" +# outliers = is_outlier(marker_speed, thresh=3.5) +# print sum(outliers == True), 'outliers detected' + +# # removing outliers +# outlier_inds = [i for i in xrange(outliers.size) if outliers[i]] +# marker_speed = list(np.delete(marker_speed, outlier_inds)) +# # replacing removed outliers by average of their neighbours +# outliers_inds = sorted(outlier_inds) +# window = 1 +# for ind in outlier_inds: +# start = max(ind-window, 0) +# neighbours = marker_speed[start:ind+window] +# new_val = sum(neighbours)/len(neighbours) +# marker_speed.insert(ind, new_val) +# marker_speed = np.array(marker_speed) + +# # smoothed_signal = marker_speed[:] +# smoothed_signal = signal.medfilt(marker_speed, 13) +# # smoothed_signal = g1d(marker_speed, sigma=2) +# # smoothed_signal = moving_average(smoothed_signal, 7) +# model.fit(map(lambda e: [e], smoothed_signal)) +# labels = fix_labels(model.labels_, window=1, outliers = outliers) +# outliers = map(lambda e: 10 if e else 5, outliers) + +# mean = np.mean(smoothed_signal) + +# intervals, nm = find_intervals(labels, mean, smoothed_signal) +# print '>', len(intervals), 'Intervals found in total.', nm, 'gaze intervals.' 
+# interval_display = [] +# for dur in intervals: +# interval_display.extend([dur[2]]*(dur[1]-dur[0]+1)) +# interval_display = interval_display[:-1] + +# print len(interval_display), len(marker_data)-1, intervals[-1][1]-intervals[0][0] +# # print intervals +# # print labels +# # return +# # print len(marker_data), len(marker_speed) +# plt.plot(range(len(marker_data)-1), marker_speed, 'b', +# # range(len(marker_data)-1), labels, 'r', +# range(len(marker_data)-1), smoothed_signal, 'g', +# range(len(marker_data)-1), interval_display, 'r') +# # plt.show() +# # plt.clf() +# # return +# # plt.clf() + + +# recordings_processed += 1 +# intervals_okay = True +# if not nm in [16, 25]: +# intervals_okay = False +# pdb.set_trace() +# print '> Storing odd figure...' +# plt.savefig('./temp/%s-%s__%snm.png' % (d1, d3, str(nm))) +# # print '> Entering manual override mode...' +# # print '> Enter halt to quit.' +# # # set manual_bypass to True in case you wanna discard changes in override mode +# # cmd = raw_input(':') +# # while cmd != 'halt' and cmd != 'pass': +# # exec cmd in globals(), locals() +# # cmd = raw_input(':') + +# if intervals_okay: +# print '> Intervals seem okay.' +# plt.savefig(os.path.join(d4, 'marker_motion.png')) +# recordings_successful += 1 +# # Store interval information +# # Use pp_by_frame and marker_data to compute gaze and target points corresponding to this interval +# gaze_intervals = intervals[::2] # starting from the first interval, gaze, moving, gaze, moving, gaze, ... 
#                     t2d, t3d, p = [], [], []
#                     for intv in gaze_intervals:
#                         s, e = intv[0], intv[1]
#                         null_gaze, null_marker = 0, 0
#                         gaze_point = np.array([0, 0])
#                         marker_3d_position = np.array([0, 0, 0])
#                         marker_2d_position = np.array([0, 0])
#                         for fn in xrange(s, e+1):
#                             if all(pp_by_frame[fn]==np.array([-1, -1])):
#                                 null_gaze += 1
#                             else:
#                                 gaze_point = gaze_point + pp_by_frame[fn]
#                             if mrdata[fn] == []:
#                                 null_marker += 1
#                             else:
#                                 marker_3d_position = marker_3d_position + np.array(mrdata[fn][:3])
#                                 marker_2d_position = marker_2d_position + np.array(mrdata[fn][3:])
#                         gaze_point = gaze_point/(e-s+1-null_gaze)
#                         marker_3d_position = marker_3d_position/(e-s+1-null_marker)
#                         marker_2d_position = marker_2d_position/(e-s+1-null_marker)
#                         t2d.append(marker_2d_position)
#                         t3d.append(marker_3d_position)
#                         p.append(gaze_point)
#                     print '> Storing intervals, gaze data, and marker data...'
#                     np.save(intervals_dir, np.array(gaze_intervals))
#                     np.save(os.path.join(d4, 'p.npy'), np.array(p))
#                     np.save(os.path.join(d4, 't2d.npy'), np.array(t2d))
#                     np.save(os.path.join(d4, 't3d.npy'), np.array(t3d))
#                     print '>', recordings_processed, 'recordings processed.', recordings_successful, 'successful.'
#                 plt.clf()


PARTICIPANTS = ['p10', 'p16', 'p13', 'p24', 'p5', 'p14', 'p26', 'p12', 'p20', 'p7', 'p15', 'p11', 'p21', 'p25']
def main(force=False):
    # For every whitelisted participant/recording: correlate pupil positions
    # with tracked marker data, pick a median-based pupil-marker
    # correspondence per gaze interval, and store the chosen eye-frame
    # numbers as p_frames.npy.
    recordings_processed = 0
    recordings_successful = 0
    for d1 in os.listdir(ROOT_DATA_DIR):
        if d1.startswith('p'): # every participant
            if not d1 in PARTICIPANTS:
                continue

            d2 = os.path.join(ROOT_DATA_DIR, d1) # .../pi/
            d2 = os.path.join(d2, os.listdir(d2)[0]) # .../pi/../
            for d3 in os.listdir(d2): # every recording
                d4 = os.path.join(d2, d3) # .../pi/../00X/
                print '> Processing', d4
                frames_dir = os.path.join(d4, '_aruco_frames.npy')
                if not os.path.isfile(frames_dir): # the recording is not yet processed for marker tracking
                    print '> Recording does not contain marker data...'
                    continue
                intervals_dir = os.path.join(d4, 'gaze_intervals.npy')
                if os.path.isfile(intervals_dir):
                    print '> Recording already processed...'
                    if force:
                        print '> Processing again...'
                    else:
                        continue

                marker_data = np.load(frames_dir)
                # marker_data includes data on tracked markers per frame
                # it's a list with as many entries as the number of video frames, each entry
                # has a list of tracked markers, each marker item has marker id, marker corners, Rvec, Tvec
                wt = np.load(os.path.join(d4, 'world_timestamps.npy'))
                # Processing pupil positions
                pp = np.load(os.path.join(d4, 'pupil_positions.npy')) # timestamp confidence id pos_x pos_y diameter
                # pos_x and pos_y are normalized (Origin 0,0 at the bottom left and 1,1 at the top right)
                # converting each element to dictionary for correlation
                pp = map(lambda e: dict(zip(['timestamp', 'conf', 'id', 'x', 'y', 'diam'], e)), pp)
                pp_by_frame = player_methods.correlate_data(pp, wt)

                # Keeping only pupil positions with nonzero confidence
                pp_by_frame = map(lambda l: filter(lambda p: p['conf']>0, l), pp_by_frame)
                # Computing a single pupil position for the frame by taking mean of all detected pupil positions
                pp_by_frame = map(lambda data:
                    sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame)
                # Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame

                # Checking if timestamps, markers per frame and pupil positions per frame are correlated
                assert len(marker_data) == len(wt) == len(pp_by_frame)

                marker_data, mrdata = unifiy_markers_per_frame(marker_data)

                # Gaze intervals were computed in a previous run (see the
                # commented-out legacy main above); here they are reused.
                gaze_intervals = np.load(intervals_dir)

                recordings_processed += 1
                intervals_okay = True

                if intervals_okay:
                    print '> Intervals seem okay.'
                    recordings_successful += 1

                    t2d, t3d, p = [], [], []
                    t2d_med, t3d_med, p_med, p_frames = [], [], [], []
                    for intv in gaze_intervals:
                        s, e = intv[0], intv[1]
                        null_gaze, null_marker = 0, 0
                        gaze_point = np.array([0, 0])
                        marker_3d_position = np.array([0, 0, 0])
                        marker_2d_position = np.array([0, 0])
                        gpts, m3ds, m2ds = [], [], []
                        valid_frames = []
                        for fn in xrange(s, e+1):
                            if all(pp_by_frame[fn]==np.array([-1, -1])) or mrdata[fn] == []:
                                # either pupil detection failed or marker detection
                                # the whole pupil-marker correspondence is invalid
                                # ignore this frame
                                pass
                            else:
                                gpts.append(pp_by_frame[fn])

                                marker_3d_position = marker_3d_position + np.array(mrdata[fn][:3])
                                marker_2d_position = marker_2d_position + np.array(mrdata[fn][3:])

                                m3ds.append(np.array(mrdata[fn][:3]))
                                m2ds.append(np.array(mrdata[fn][3:]))

                                valid_frames.append(fn)

                        if not len(valid_frames):
                            # this marker-pupil correspondece failed
                            print '> Failed to find reliable correspondece for a marker position...'
                            # In this case participant data should be completely ignored
                            # retrun
                            # NOTE(review): the typo'd "retrun" above suggests a
                            # return/continue was intended here; as written the
                            # code falls through and np.median([]) below yields
                            # NaN -- confirm intended behavior.

                        # Computing the median pupil position
                        final_p = np.median(gpts, axis=0)
                        p_med.append(final_p)
                        # Finding the closest pupil position to this median in the valid frames
                        dists = map(lambda pupil_position: (v(pupil_position)-v(final_p)).mag, gpts)
                        dists = zip(range(len(gpts)), dists)
                        closest = min(dists, key=lambda pair:pair[1])
                        # Getting the index for this position
                        ind = closest[0]
                        # Finding the k nearest pupil position to this one
                        # (shrink k until the fit succeeds; NOTE(review): if
                        # len(gpts) is 0 this retries forever -- tied to the
                        # missing return above)
                        k = 3
                        while True:
                            try:
                                nbrs = knn(n_neighbors=k, algorithm='ball_tree').fit(gpts)
                                dists, indices = nbrs.kneighbors(gpts)
                                break
                            except ValueError, err:
                                k-=1
                        nearest_ind = indices[ind]
                        frames_numbers = map(lambda i: valid_frames[i], nearest_ind)
                        p_frames.append(frames_numbers)
                        # Now we take eye images from these frames
                        # Also the pupil-marker correspondece is now final_p and m2ds[ind] m3d[ind]
                        t2d_med.append(m2ds[ind])
                        t3d_med.append(m3ds[ind])
                        # t2d_med.append(np.median(m2ds, axis=0))
                        # t3d_med.append(np.median(m3ds, axis=0))


                    print '> gaze and marker data...'
                    # np.save(intervals_dir, np.array(gaze_intervals))
                    np.save(os.path.join(d4, 'p_frames.npy'), np.array(p_frames))
                    # np.save(os.path.join(d4, 'p.npy'), np.array(p_med))
                    # np.save(os.path.join(d4, 't2d.npy'), np.array(t2d_med))
                    # np.save(os.path.join(d4, 't3d.npy'), np.array(t3d_med))
                    print '>', recordings_processed, 'recordings processed.', recordings_successful, 'successful.'
+if __name__ == '__main__': + main(force=True) \ No newline at end of file diff --git a/code/recording/retrieve.py b/code/recording/retrieve.py new file mode 100644 index 0000000..de874bc --- /dev/null +++ b/code/recording/retrieve.py @@ -0,0 +1,150 @@ +import sys +import numpy as np +import cv2 +import matplotlib.pyplot as plt + +sys.path.append('..') # so we can import from pupil +from pupil import player_methods + +# from tracker import processFrame + +DATA_DIR = '/home/mmbrian/HiWi/pupil_clone/pupil/recordings/2015_09_10/007/' +OUTPUT_DIR = DATA_DIR + 'pp.npy' + + +def capture(frame_number): + cap = cv2.VideoCapture(DATA_DIR + "world.mp4") + fc = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) + assert frame_number0, l), pp_by_frame) + # Computing a single pupil position for the frame by taking mean of all detected pupil positions + pp_by_frame = map(lambda data: + sum(np.array([pp['x'], pp['y']]) for pp in data)/len(data) if data else np.array([-1, -1]), pp_by_frame) + # Now each nonempty value of pp_by_frame is a tuple of (x, y) for pupil position in that frame + + # Next we need to associate each frame to a detected marker and by taking mean pupil point and + # mean 3D marker position over a series of frames corresponding to that marker find a 2D-3D + # mapping for calibration/test + + tdiff = map(lambda e: e-wt[0], wt) + # This time correspondence to each marker was coordinated using the GazeHelper android application + # for 005 > starting from 00:56, 3 seconds gaze, 1 second for saccade + # These parameters are specific to the experiment + # 005 > 56, 3, 1 + # 006 > 3, 3, 1 + # 007 > 7, 3, 1 (or 8) + # 010 > 3, 3, 1 + starting_point, gaze_duration, saccade_duration = 56, 3, 1 # these are specific to the experiment + # finding the starting frame + ind = 0 + while tdiff[ind] < starting_point: + ind+=1 + print ind + + data = [] + tstart = wt[ind] + for i in xrange(9): + print i + while ind= 0: + c+=1 + cp = cp + pp_by_frame[j] + 
all_corresponding_points.append(pp_by_frame[j]) + # print c + if c>0: + ret = cp/c + else: + ret = np.array([-1, -1]) # no detected pupil for this marker + p2d.append(ret) + data.append([ + np.array([starting_ind, ind-1]), # frame range + ret, # mean pupil position + all_corresponding_points]) # all pupil positions in range + + if ind Reading data from log file:', log_dir + with open(log_dir, 'r') as f: + for l in f: + if l.strip(): + data.append(l) + else: + if log: + print '> Performing marker tracking on file:', recording_video + print '> Writing data to log file:', log_dir + with open(log_dir, 'w') as f: + for l in os.popen(ARUCO_EXECUTABLE + recording_video + cam_mat + square_size): + l = l.strip() + if '\r' in l: + for line in l.split('\r'): + if line: + f.write(line + "\n") # logging to file + data.append(line) + else: + if l: + f.write(l + "\n") # logging to file + data.append(l) + + if log: + print '> Parsing marker data...' + frame_count = 0 + curr_frame = 0 + markers = {} + frames, frame = [], [] + visited_first_frame = False + for line in data: + line = line.strip().lower() + # print + # print repr(line) + # print + if line.startswith('time'): + if visited_first_frame: + frames.append(frame) + visited_first_frame = True + frame = [] + # if frame: + # frames.append(frame) + # frame = [] + + curr_frame=curr_frame+1 + nmarkers = int(line[line.index('nmarkers')+9:]) + + if 'txyz' in line: # This line holds information of a detected marker + ind = line.index('=') + _id = int(line[:ind]) + p = [] + for i in xrange(4): + pind = ind + ind = line.index(' ', pind+1) + p.append(line[pind+1:ind]) + pind = ind + ind = line.index('rxyz', pind) + T = line[pind+1+5:ind] + R = line[ind+5:] + + if not _id in markers: + markers[_id] = [] + curr_marker = Marker(_id, + map(lambda pstr: np.array(eval(pstr)), p), + np.array(eval('(' + R.strip().replace(' ', ',') + ')')), + np.array(eval('(' + T.strip().replace(' ', ',') + ')'))) + markers[_id].append(curr_marker) + 
frame.append(np.array([np.array([curr_marker.id]), curr_marker.p, curr_marker.Rvec, curr_marker.Tvec])) + # Last frame data + frames.append(frame) + + frames = np.array(frames) + if log: + print '> Saving marker data for all frames in:', frames_dir + np.save(frames_dir, frames) + if log: + print '> Successfully processed %s frames.' % curr_frame + if ret: + return markers + + + +if __name__ == '__main__': + camera_matrix, dist_coeffs = readCameraParams() + + markers = performMarkerTracking() + print '################################################' + print len(markers), 'markers detected' + for m in markers: + # print '################################################' + # print markers[m][0].__dict__ + # print '################################################' + print m, ':', len(markers[m]), 'instances' + c = markers[m][0].getCenter() + print c * 100, markers[m][0].getCenterInImage(camera_matrix, dist_coeffs) + + + # # TODO: investigate if this is how to get projections of unit vectors + # # originating from the camera onto the image plane + # # the trick is that we consider a marker with no R and zero T + # v = [(0.0, 0.0, 0.0), (1, 0, 0), (0, 1, 0), (0, 0, 1)] + # ret = cv2.projectPoints(np.array(v), np.eye(3), np.zeros(3), camera_matrix, dist_coeffs) + # p = ret[0] + + # for i, t in enumerate(v): + # print t, 'P->', p[i][0] \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.gitignore b/code/recording/util/GazeHelper/.gitignore new file mode 100644 index 0000000..0a45093 --- /dev/null +++ b/code/recording/util/GazeHelper/.gitignore @@ -0,0 +1,41 @@ +.gradle +/local.properties +/.idea/workspace.xml +/.idea/libraries +.DS_Store +/build +/captures + +### Android ### +# Built application files +*.apk +*.ap_ + +# Files for the Dalvik VM +*.dex + +# Java class files +*.class + +# Generated files +bin/ +gen/ + +# Gradle files +.gradle/ +build/ + +# Local configuration file (sdk path, etc) +local.properties + +# Proguard folder generated by 
Eclipse +proguard/ + +# Log Files +*.log + +# Android Studio Navigation editor temp files +.navigation/ + +### Android Patch ### +gen-external-apklibs \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/.name b/code/recording/util/GazeHelper/.idea/.name new file mode 100644 index 0000000..81d9152 --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/.name @@ -0,0 +1 @@ +GazeHelper \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/compiler.xml b/code/recording/util/GazeHelper/.idea/compiler.xml new file mode 100644 index 0000000..96cc43e --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/compiler.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/copyright/profiles_settings.xml b/code/recording/util/GazeHelper/.idea/copyright/profiles_settings.xml new file mode 100644 index 0000000..e7bedf3 --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/copyright/profiles_settings.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/gradle.xml b/code/recording/util/GazeHelper/.idea/gradle.xml new file mode 100644 index 0000000..c595ad9 --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/gradle.xml @@ -0,0 +1,19 @@ + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/misc.xml b/code/recording/util/GazeHelper/.idea/misc.xml new file mode 100644 index 0000000..e284b1d --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/misc.xml @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/.idea/modules.xml b/code/recording/util/GazeHelper/.idea/modules.xml new file mode 100644 index 0000000..834102f --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/modules.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git 
a/code/recording/util/GazeHelper/.idea/vcs.xml b/code/recording/util/GazeHelper/.idea/vcs.xml new file mode 100644 index 0000000..6564d52 --- /dev/null +++ b/code/recording/util/GazeHelper/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/GazeHelper.iml b/code/recording/util/GazeHelper/GazeHelper.iml new file mode 100644 index 0000000..6c192c3 --- /dev/null +++ b/code/recording/util/GazeHelper/GazeHelper.iml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/app/.gitignore b/code/recording/util/GazeHelper/app/.gitignore new file mode 100644 index 0000000..796b96d --- /dev/null +++ b/code/recording/util/GazeHelper/app/.gitignore @@ -0,0 +1 @@ +/build diff --git a/code/recording/util/GazeHelper/app/app.iml b/code/recording/util/GazeHelper/app/app.iml new file mode 100644 index 0000000..8addfe8 --- /dev/null +++ b/code/recording/util/GazeHelper/app/app.iml @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/code/recording/util/GazeHelper/app/build.gradle b/code/recording/util/GazeHelper/app/build.gradle new file mode 100644 index 0000000..44e4f59 --- /dev/null +++ b/code/recording/util/GazeHelper/app/build.gradle @@ -0,0 +1,25 @@ +apply plugin: 'com.android.application' + +android { + compileSdkVersion 23 + buildToolsVersion "22.0.1" + + defaultConfig { + applicationId "gazehelper.android.mmbrian.com.gazehelper" + minSdkVersion 14 + targetSdkVersion 23 + versionCode 1 + versionName "1.0" + } + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' + } + } +} + +dependencies { + compile fileTree(dir: 'libs', include: ['*.jar']) + compile 'com.android.support:appcompat-v7:23.0.1' +} diff --git 
a/code/recording/util/GazeHelper/app/proguard-rules.pro b/code/recording/util/GazeHelper/app/proguard-rules.pro new file mode 100644 index 0000000..42406d2 --- /dev/null +++ b/code/recording/util/GazeHelper/app/proguard-rules.pro @@ -0,0 +1,17 @@ +# Add project specific ProGuard rules here. +# By default, the flags in this file are appended to flags specified +# in /home/mmbrian/Android/Sdk/tools/proguard/proguard-android.txt +# You can edit the include path and order by changing the proguardFiles +# directive in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# Add any project specific keep options here: + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} diff --git a/code/recording/util/GazeHelper/app/src/main/AndroidManifest.xml b/code/recording/util/GazeHelper/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000..57628b8 --- /dev/null +++ b/code/recording/util/GazeHelper/app/src/main/AndroidManifest.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + diff --git a/code/recording/util/GazeHelper/app/src/main/java/gazehelper/android/mmbrian/com/gazehelper/MainActivity.java b/code/recording/util/GazeHelper/app/src/main/java/gazehelper/android/mmbrian/com/gazehelper/MainActivity.java new file mode 100644 index 0000000..3059a7b --- /dev/null +++ b/code/recording/util/GazeHelper/app/src/main/java/gazehelper/android/mmbrian/com/gazehelper/MainActivity.java @@ -0,0 +1,108 @@ +package gazehelper.android.mmbrian.com.gazehelper; + +import android.app.Activity; +import android.media.AudioManager; +import android.media.ToneGenerator; +import android.os.Bundle; +import android.os.Handler; +import android.util.Log; +import android.view.View; +import android.widget.Button; +import android.widget.EditText; +import 
android.widget.TextView; +import android.widget.ViewFlipper; + +import java.util.Timer; +import java.util.TimerTask; + + +public class MainActivity extends Activity { + + public static final String CLASS_TAG = "GazeHelper"; + + ViewFlipper flipper; + Button btn_start; + EditText txt_gaze_dur, txt_bgaze_dur, txt_ngaze; + TextView txt_stats; + + int gaze_dur, bgaze_dur, ngaze, cgaze; + long total_dur, start_time; + + static Timer timer; + ToneGenerator toneG; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + setContentView(R.layout.activity_main); + + flipper = (ViewFlipper)findViewById(R.id.flipper); + + txt_gaze_dur = (EditText)findViewById(R.id.txt_gaze_dur); + txt_bgaze_dur = (EditText)findViewById(R.id.txt_bgaze_dur); + txt_ngaze = (EditText)findViewById(R.id.txt_ngaze); + + txt_stats = (TextView)findViewById(R.id.txt_stats); + + timer = new Timer("Gaze Timer"); + toneG = new ToneGenerator(AudioManager.STREAM_MUSIC, 100); + + btn_start = (Button)findViewById(R.id.btn_start); + btn_start.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View v) { + txt_stats.setText("Starting..."); + + gaze_dur = Integer.valueOf(txt_gaze_dur.getText().toString()); + bgaze_dur = Integer.valueOf(txt_bgaze_dur.getText().toString()); + ngaze = Integer.valueOf(txt_ngaze.getText().toString()); + total_dur = (ngaze * gaze_dur + (ngaze - 1) * bgaze_dur) * 1000; // in milliseconds + + flipper.showNext(); + + cgaze = 0; + start_time = -1; + Log.d(CLASS_TAG, "Started..."); + timer = new Timer("Gaze Timer"); + timer.schedule(new TimerTask() { + @Override + public void run() { + runOnUiThread(new Runnable() { + @Override + public void run() { + updateUI(); + } + }); + } + }, bgaze_dur * 1000, (gaze_dur + bgaze_dur) * 1000); // initial delay, recall delay + // first gaze is after 2 bgaze durations, later gazes start after a bgaze + } + }); + } + + public void updateUI() { + Log.d(CLASS_TAG, cgaze + "/" + 
ngaze); + if (cgaze++ >= ngaze) { + txt_stats.setText("Finished :)"); + new Handler().postDelayed(new Runnable() { + @Override + public void run() { + runOnUiThread(new Runnable() { + @Override + public void run() { + flipper.showPrevious(); + } + }); + } + }, gaze_dur * 1000); + timer.cancel(); + return; + } + toneG.startTone(ToneGenerator.TONE_CDMA_ALERT_CALL_GUARD, 200); + +// if (start_time < 0) +// start_time = System.currentTimeMillis(); +// txt_stats.setText((System.currentTimeMillis()-start_time)/1000 + ""); + txt_stats.setText("Gaze at Target #" + cgaze); + } +} diff --git a/code/recording/util/GazeHelper/app/src/main/res/layout/activity_main.xml b/code/recording/util/GazeHelper/app/src/main/res/layout/activity_main.xml new file mode 100644 index 0000000..4657034 --- /dev/null +++ b/code/recording/util/GazeHelper/app/src/main/res/layout/activity_main.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + +