From 343784a6a2b6685b46b4e07bc4422205ea474d3d Mon Sep 17 00:00:00 2001
From: s2000431 <s200431@student.dtu.dk>
Date: Mon, 20 Mar 2023 14:12:49 +0100
Subject: [PATCH] Local changes to scripts for full pipeline usage

---
 FeatureEstimation.py                         |   39 +-
 MachineLearning_2.py                         |   72 +-
 Main.py                                      |    5 +-
 Preamble.py                                  |    2 +-
 __pycache__/Preamble.cpython-310.pyc         |  Bin 5511 -> 5532 bytes
 __pycache__/eeg_microstates.cpython-310.pyc  |  Bin 0 -> 42755 bytes
 __pycache__/eeg_microstates3.cpython-310.pyc |  Bin 44415 -> 44375 bytes
 __pycache__/feature_select.cpython-310.pyc   |  Bin 1623 -> 1583 bytes
 eeg_microstates.py                           | 1720 ++++++++++++++++++
 9 files changed, 1785 insertions(+), 53 deletions(-)
 create mode 100644 __pycache__/eeg_microstates.cpython-310.pyc
 create mode 100644 eeg_microstates.py

diff --git a/FeatureEstimation.py b/FeatureEstimation.py
index 4c599d4..3a97aa3 100644
--- a/FeatureEstimation.py
+++ b/FeatureEstimation.py
@@ -43,7 +43,7 @@ os.chdir(wkdir)
 from Preamble import *
 
 # %% Load preprocessed epochs and questionnaire data
-load_path = "home/s200431/PreprocessedData"
+load_path = "/home/s200431/PreprocessedData"
 
 # Get filenames
 files = []
@@ -297,7 +297,7 @@ Drop_epochs_df = Drop_epochs_df.sort_values(by=["Subject_ID"]).reset_index(drop=
 ### Load questionnaire data
 # ISAF
 qdf_ISAF7 = pd.read_csv("/data/raw/FOR_DTU/Questionnaires_for_DTU.csv",
-                  sep=";", na_values=' ')
+                   na_values=' ')
 # Rename Subject_ID column
 qdf_ISAF7.rename({"ID_number": "Subject_ID"}, axis=1, inplace=True)
 # Sort Subject_id to match Subject_id for files
@@ -363,8 +363,15 @@ final_qdf2 = final_qdf2.fillna(final_qdf2.mean())
 final_qdf0.columns = final_qdf1.columns # fix colnames with t7
 final_qdf = pd.concat([final_qdf0,final_qdf1,final_qdf2], ignore_index=True)
 
+# Define folder for saving features
+Feature_savepath = "./Features/"
+Stat_savepath = "./Statistics/"
+Model_savepath = "./Model/"
+
 # Ensure all columns are integers
 final_qdf = final_qdf.astype("int")
+final_qdf.to_pickle(os.path.join(Feature_savepath,"final_qdf.pkl"))
+
 
 # Define cases as >= 44 total PCL
 # Type: numpy array with subject id
@@ -409,10 +416,6 @@ test_d3 = qdf_base["Subject_ID"][qdf_base["Subject_ID"].isin(Subject_id)]
 test_d4 = np.concatenate([test_d1.to_numpy(),test_d2.to_numpy(),test_d3.to_numpy()])
 assert all(np.equal(Subject_id,test_d4))
 
-# Define folder for saving features
-Feature_savepath = "./Features/"
-Stat_savepath = "./Statistics/"
-Model_savepath = "./Model/"
 
 # %% Power spectrum features
 Freq_Bands = {"delta": [1.25, 4.0],
@@ -472,6 +475,7 @@ with concurrent.futures.ProcessPoolExecutor() as executor:
     for n, result in executor.map(power_band_estimation, range(len(final_epochs))): # Function and arguments
         power_bands[n] = result
 """
+"""
 
 for i in range(len(power_bands)):
     n, results = power_band_estimation(i)
@@ -540,15 +544,15 @@ power_df = pd.DataFrame(data = {"Subject_ID": [ele for ele in Subject_id for i i
 # Fix Freq_band categorical order
 power_df["Freq_band"] = power_df["Freq_band"].astype("category").\
             cat.reorder_categories(list(Freq_Bands.keys()), ordered=True)
-"""
+
 # Fix Brain_region categorical order
 power_df["Brain_region"] = power_df["Brain_region"].astype("category").\
             cat.reorder_categories(Brain_region_labels, ordered=True)
 """
 # Save the dataframe
-power_df.to_pickle(os.path.join(Feature_savepath,"Power_df.pkl"))
-
+# power_df.to_pickle(os.path.join(Feature_savepath,"Power_df.pkl"))
 
+"""
 # %% Theta-beta ratio
 # Frontal theta/beta ratio has been implicated in cognitive control of attention
 power_df = pd.read_pickle(os.path.join(Feature_savepath,"Power_df.pkl"))
@@ -629,7 +633,7 @@ frontal_theta_beta_ratio.insert(3, "Measurement", dummy_variable )
 
 # frontal_theta_beta_ratio.to_pickle(os.path.join(Feature_savepath,"fTBR_df.pkl"))
 
-"""
+
 # %% Frequency bands asymmetry
 # Defined as ln(right) - ln(left)
 # Thus we should only work with the absolute values and undo the dB transformation
@@ -729,7 +733,7 @@ assert asymmetry[i,e,f,r] == asymmetry_df.iloc[random_point]["Asymmetry_score"]
 asymmetry_df.to_pickle(os.path.join(Feature_savepath,"asymmetry_df.pkl"))
 
 """
-
+"""
 # %% Using FOOOF
 # Peak alpha frequency (PAF) and 1/f exponent (OOF)
 # Using the FOOOF algorithm (Fitting oscillations and one over f)
@@ -1004,7 +1008,7 @@ PTF_data_df.to_pickle(os.path.join(Feature_savepath,"PTF_data_FOOOF_df.pkl"))
 PTF_data_df_global.to_pickle(os.path.join(Feature_savepath,"PTF_data_FOOOF_global_df.pkl"))
 PBF_data_df.to_pickle(os.path.join(Feature_savepath,"PBF_data_FOOOF_df.pkl"))
 PBF_data_df_global.to_pickle(os.path.join(Feature_savepath,"PBF_data_FOOOF_global_df.pkl"))
-
+"""
 # # Convert to Pandas dataframe (only keep exponent parameter for OOF)
 # # The dimensions will each be a column with numbers and the last column will be the actual values
 # ori = OOF_data[:,:,:,1]
@@ -1073,6 +1077,7 @@ micro_data_all = np.vstack(micro_data)
 
 # Determine the number of clusters
 # I use a slightly modified kmeans function which returns the cv_min
+"""
 global_gev = []
 cv_criterion = []
 for n_maps in range(2,7):
@@ -1093,6 +1098,7 @@ plt.plot(np.linspace(2,6,len(cv_criterion)),(cv_criterion/np.sum(cv_criterion)),
 plt.plot(np.linspace(2,6,len(cv_criterion)),(global_gev/np.sum(global_gev)), label="GEV")
 plt.legend()
 plt.ylabel("Normalized to total")
+"""
 # The lower CV the better.
 # But the higher GEV the better.
 # Based on the plots and the recommendation by von Wegner & Laufs 2018
@@ -1117,6 +1123,11 @@ microstate_cluster_results = []
 c_time1 = time.localtime()
 c_time1 = time.strftime("%a %d %b %Y %H:%M:%S", c_time1)
 print(c_time1)
+# Cast the epoch lists to object arrays to work around a computational-resource error during clustering
+EC_down = np.array(EC_micro_data, dtype = object)
+#EC_down = EC_down.astype('float32')
+EO_down = np.array(EO_micro_data, dtype = object)
+#EO_down = EO_down.astype('float32')
 
 for r in range(n_run):
     maps = [0]*2
@@ -1126,7 +1137,7 @@ for r in range(n_run):
     # Eyes closed
     counter = 0
     maps_, x_, gfp_peaks_, gev_ = clustering(
-        np.vstack(np.array(EC_micro_data)), sfreq, ch_names, locs, mode, n_maps, doplot=False) # doplot=True is bugged
+        np.vstack(EC_down), sfreq, ch_names, locs, mode, n_maps, doplot=False) # doplot=True is bugged
     maps[counter] = maps_
     m_labels[counter] = x_
     gfp_peaks[counter] = gfp_peaks_
@@ -1134,7 +1145,7 @@ for r in range(n_run):
     counter += 1
     # Eyes open
     maps_, x_, gfp_peaks_, gev_ = clustering(
-        np.vstack(np.array(EO_micro_data)), sfreq, ch_names, locs, mode, n_maps, doplot=False) # doplot=True is bugged
+        np.vstack(EO_down), sfreq, ch_names, locs, mode, n_maps, doplot=False) # doplot=True is bugged
     maps[counter] = maps_
     m_labels[counter] = x_
     gfp_peaks[counter] = gfp_peaks_
diff --git a/MachineLearning_2.py b/MachineLearning_2.py
index 3c934ab..b7d411c 100644
--- a/MachineLearning_2.py
+++ b/MachineLearning_2.py
@@ -1,32 +1,25 @@
 # Set working directory
 import os
-wkdir = "/Users/benj3542/Desktop/Uni/Noter/Semester_6/Bachelor/resting-state-eeg-analysis"
+wkdir = "/home/s200431"
 os.chdir(wkdir)
 
 # Load all libraries from the Preamble
 from Preamble import *
 
+# Define paths to data
+Feature_savepath = "./Features/"
+
 ### Load questionnaire data
 # For the purposes of this demonstration I will make a dummy dataframe
 # The original code imported csv files with questionnaire data and group status
-final_qdf = pd.DataFrame({"Subject_ID":[1,2],
-                          "Age":[23,26],
-                          "Gender":[0,0],
-                          "Group_status":[0,1],
-                          "PCL_total":[33,56],
-                          "Q1":[1.2, 2.3],
-                          "Q2":[1.7, 1.5],
-                          "Qn":[2.1,1.0]})
+final_qdf = pd.read_pickle(Feature_savepath+"final_qdf.pkl")
 
 # Define cases as >= 44 total PCL
 # Type: numpy array with subject id
 cases = np.array(final_qdf["Subject_ID"][final_qdf["PCL_total"]>=44])
 n_groups = 2
 Groups = ["CTRL", "PTSD"]
-n_subjects = 2
-
-# Define paths to data
-Feature_savepath = "./Features/"
+n_subjects = 203
 
 # Make function to load EEG features
 def load_features_df():
@@ -171,12 +164,13 @@ def load_features_df():
 Eyes_Cloased, Eyes_Open, Eyes_closed_PTSD, Eyes_closed_CTRL, Eyes_open_PSTD, Eyes_open_CTRL = load_features_df()
 
 # Get the data ready for classification and other analysis (extract target and features)
-Eyes_Cloased_y = Eyes_Cloased[0]
-Eyes_Cloased_X = np.transpose(Eyes_Cloased[1:])
+Eyes_Closed_y = Eyes_Cloased[0]
+Eyes_Closed_X = np.transpose(Eyes_Cloased[1:])
 
 Eyes_Open_y = Eyes_Open[0]
 Eyes_Open_X = np.transpose(Eyes_Open[1:])
 
+print(Eyes_Closed_X, Eyes_Closed_y, len(Eyes_Closed_X), len(Eyes_Closed_y))
 
 # Do PCA 
 import numpy as np
@@ -188,39 +182,41 @@ from sklearn import datasets
 
 # unused but required import for doing 3d projections with matplotlib < 3.2
 import mpl_toolkits.mplot3d  # noqa: F401
+from mpl_toolkits.mplot3d import Axes3D
+from sklearn.preprocessing import StandardScaler
 
 # import some data to play with
 np.random.seed(5)
 
 def PCA(X, y, n_components):
-    fig = plt.figure(1, figsize=(4, 3))
+    fig = plt.figure(1, figsize=(6, 6))
     plt.clf()
 
-    ax = fig.add_subplot(111, projection="3d", elev=48, azim=134)
-    ax.set_position([0, 0, 0.95, 1])
-
+    # Create the 3D axes after clearing the figure; calling clf() after
+    # add_subplot would otherwise discard the axes before anything is drawn
+    ax = fig.add_subplot(111, projection="3d")
 
     plt.cla()
+    scaler = StandardScaler()
+    X = scaler.fit_transform(X)
     pca = decomposition.PCA(n_components=n_components)
     pca.fit(X)
     X = pca.transform(X)
 
     for name, label in [("CTRL", 0), ("PTSD", 1)]:
-        ax.text3D(
-            X[y == label, 0].mean(),
-            X[y == label, 1].mean(),
-            name,
-            horizontalalignment="center",
-            bbox=dict(alpha=0.5, edgecolor="w", facecolor="w"),
+        plt.scatter(
+            X[y == label, 0],
+            X[y == label, 1],
+            X[y == label, 2],
+            label = name,
+            alpha = 0.5,
         )
     # Reorder the labels to have colors matching the cluster results
-    y = np.choose(y, [1, 2, 0]).astype(float)
-    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral, edgecolor="k")
-
-    ax.xaxis.set_ticklabels([])
-    ax.yaxis.set_ticklabels([])
-    ax.zaxis.set_ticklabels([])
-
+    #plt.legend(loc = "best", shadow = False, scatterpoints = 1)
+    ax.set_xlabel("PCA Component 1")
+    ax.set_ylabel("PCA Component 2")
+    ax.set_zlabel("PCA Component 3")
+    ax.set_title("PCA 3D")
+    
+    ax.legend()
     plt.show()
 
     # Safe plot 
@@ -229,6 +225,8 @@ def PCA(X, y, n_components):
 
     return "PCA was complete without errors - check the plot in your chosen path"
 
+PCA(Eyes_Closed_X, Eyes_Closed_y, None)
+PCA(Eyes_Open_X, Eyes_Open_y, None)
 # PCA(Eyes_Cloased_X, Eyes_Cloased_y, 3)
 # PCA(Eyes_Open_X, Eyes_Open_y, 3)
 
@@ -258,18 +256,20 @@ from numpy import std
 from sklearn.svm import SVC
 
 # Input of kernel, c and gamma should be a list
+C = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
+gamma = ["scale", "auto"]
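+# Minimal sketch of how these grids feed the nested CV below (assumes
+# scikit-learn's GridSearchCV for the inner search; names are hypothetical):
+#   from sklearn.model_selection import GridSearchCV
+#   inner = GridSearchCV(SVC(kernel="rbf"), {"C": C, "gamma": gamma}, cv=3)
+#   inner.fit(X_train, y_train)  # refit inside each outer fold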
 
-def Nested_cross_validation(X, y, kernel, C, gamma, n_splits):
+def Nested_cross_validation(X, y, C, gamma, n_splits):
     # Create a classifier: a support vector classifier with "rbf" kernel
     svm = SVC(kernel="rbf")
 
     # Perform nested cross validation
     # Set the parameters by cross-validation
-    tuned_parameters = {"kernel": kernel, "C": C, "gamma": gamma}
+    tuned_parameters = {"C": C, "gamma": gamma}
     n_splits = n_splits
 
     # configure the cross-validation procedure
-    cv_outer = KFold(n_splits=10, shuffle=True, random_state=1)
+    cv_outer = KFold(n_splits=n_splits, shuffle=True, random_state=1)
 
     # enumerate splits
     outer_results = list()
@@ -326,6 +326,8 @@ def Nested_cross_validation(X, y, kernel, C, gamma, n_splits):
 
     return "Nested cross validation was complete without errors - check the printed results"
 
+#Nested_cross_validation(Eyes_Closed_X, Eyes_Closed_y, C, gamma, 10)
+#Nested_cross_validation(Eyes_Open_X, Eyes_Open_y, C, gamma, 10)
 
 from scipy.stats import permutation_test
 
diff --git a/Main.py b/Main.py
index 0a02e51..8de314d 100755
--- a/Main.py
+++ b/Main.py
@@ -63,7 +63,7 @@ from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
 
 from nitime.viz import drawmatrix_channels
 
-from eeg_microstates3 import * # import downloaded package for microstates [von Wegner & Lauf, 2018]
+from eeg_microstates import * # import downloaded package for microstates [von Wegner & Laufs, 2018]
 
 #from EEG_functions_toolbox import * # compilation of useful/modified functions
 
@@ -1160,7 +1160,6 @@ for n in range(len(final_epochs)):
     final_epochs[n].save(fname = os.path.join(save_path,str("{}_HELSE_corrected".format(Subject_id[n]) + "-epo.fif")),
                     overwrite=True, verbose=0)
 
-"""
 # %% Load Baseline data
 data_path = "/data/sep2020/"
 
@@ -1835,7 +1834,7 @@ len(list(All_drop_epoch_df2.loc[All_drop_epoch_df2["Number_drop_epoch"]>30,"Subj
 
 # Manually check the dropped subjects
 # final_epochs[83].plot(scalings=100e-6, n_channels=19, n_epochs=15)
-
+"""
 # %% Load the pre-processed epochs
 load_path = "/home/s200431/PreprocessedData"
 
diff --git a/Preamble.py b/Preamble.py
index 5dcd7a5..bfc399a 100644
--- a/Preamble.py
+++ b/Preamble.py
@@ -77,7 +77,7 @@ from mpl_toolkits.mplot3d import Axes3D # registers 3D projections
 
 # Non-library scripts
 # EEG microstate package by von Wegner & Laufs, 2018
-from eeg_microstates3 import * # downloadeded from https://github.com/Frederic-vW/eeg_microstates
+from eeg_microstates import * # downloaded from https://github.com/Frederic-vW/eeg_microstates
 # minimum Redundancy Maximum Relevance script by Kiran Karra
 from feature_select import * # downloaded from https://github.com/stochasticresearch/featureselect/blob/master/python/feature_select.py
 
diff --git a/__pycache__/Preamble.cpython-310.pyc b/__pycache__/Preamble.cpython-310.pyc
index 696c247a4cf74f8aa174bc61fa5eb7cb3fcad907..bf3ebd9a6b008a6cb00104d14be979f5d8fc00ab 100644
Binary files a/__pycache__/Preamble.cpython-310.pyc and b/__pycache__/Preamble.cpython-310.pyc differ

diff --git a/__pycache__/eeg_microstates.cpython-310.pyc b/__pycache__/eeg_microstates.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9202115fb15b51e969c36c4c6ca36cc0e96ca4e5
Binary files /dev/null and b/__pycache__/eeg_microstates.cpython-310.pyc differ

diff --git a/__pycache__/eeg_microstates3.cpython-310.pyc b/__pycache__/eeg_microstates3.cpython-310.pyc
index e6ab8e955a07d094f80dfcb7e608c83ede141781..859d550ef7e81b55bbfcbb20d5c4952bfeaa8d5f 100644
Binary files a/__pycache__/eeg_microstates3.cpython-310.pyc and b/__pycache__/eeg_microstates3.cpython-310.pyc differ

diff --git a/__pycache__/feature_select.cpython-310.pyc b/__pycache__/feature_select.cpython-310.pyc
index 855de3ce368628fbaa1525ebb9c50a56b96b9d3b..2e6f5c5a2cc60f28920837f2cdc5fcc91e61ad45 100644
Binary files a/__pycache__/feature_select.cpython-310.pyc and b/__pycache__/feature_select.cpython-310.pyc differ

diff --git a/eeg_microstates.py b/eeg_microstates.py
new file mode 100644
index 0000000..2a3d2ff
--- /dev/null
+++ b/eeg_microstates.py
@@ -0,0 +1,1720 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Information-theoretic analysis of EEG microstates
+# Ref.: von Wegner et al., NeuroImage 2017, 158:99-111
+# FvW (12/2014)
+
+import argparse, os, sys, time
+from sys import stdout
+import matplotlib.pyplot as plt
+import numpy as np
+from scipy.interpolate import griddata
+from scipy.signal import butter, filtfilt, welch
+from scipy.stats import chi2, chi2_contingency
+from sklearn.decomposition import PCA, FastICA, TruncatedSVD
+from statsmodels.stats.multitest import multipletests
+# in case you have a compiled Cython implementation:
+# from mutinf.mutinf_cy import mutinf_cy_c_wrapper, mutinf_cy
+
+def read_xyz(filename):
+    """Read EEG electrode locations in xyz format
+
+    Args:
+        filename: full path to the '.xyz' file
+    Returns:
+        locs: n_channels x 3 (numpy.array)
+    """
+    ch_names = []
+    locs = []
+    with open(filename, 'r') as f:
+        l = f.readline()  # header line
+        while l:
+            l = f.readline().strip().split("\t")
+            if (l != ['']):
+                ch_names.append(l[0])
+                locs.append([float(l[1]), float(l[2]), float(l[3])])
+            else:
+                l = None
+    return ch_names, np.array(locs)
+
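+# Usage sketch for read_xyz (hypothetical "cap.xyz" with tab-separated
+# name/x/y/z rows, matching the parser above):
+#   ch_names, locs = read_xyz("cap.xyz")
+#   print(ch_names[0], locs[0])  # first electrode label and 3D position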
+
+def read_edf(filename):
+    """Basic EDF file format reader
+
+    EDF specifications: http://www.edfplus.info/specs/edf.html
+
+    Args:
+        filename: full path to the '.edf' file
+    Returns:
+        chs: list of channel names
+        fs: sampling frequency in [Hz]
+        data: EEG data as numpy.array (samples x channels)
+    """
+
+    def readn(n):
+        """read n bytes."""
+        return np.fromfile(fp, sep='', dtype=np.int8, count=n)
+
+    def bytestr(byte_arr, i):
+        """convert a byte-array slice to a bytes object."""
+        return np.array([byte_arr[k] for k in range(i*8, (i+1)*8)]).tobytes()
+
+    fp = open(filename, 'rb')  # EDF is a binary format
+    x = np.fromfile(fp, sep='', dtype=np.uint8, count=256).tobytes()
+    header = {}
+    header['version'] = x[0:8]
+    header['patientID'] = x[8:88]
+    header['recordingID'] = x[88:168]
+    header['startdate'] = x[168:176]
+    header['starttime'] = x[176:184]
+    header['length'] = int(x[184:192]) # header length (bytes)
+    header['reserved'] = x[192:236]
+    header['records'] = int(x[236:244]) # number of records
+    header['duration'] = float(x[244:252]) # duration of each record [sec]
+    header['channels'] = int(x[252:256]) # ns - number of signals
+    n_ch = header['channels']  # number of EEG channels
+    header['channelname'] = (readn(16*n_ch)).tobytes()
+    header['transducer'] = (readn(80*n_ch)).tobytes().split()
+    header['physdime'] = (readn(8*n_ch)).tobytes().split()
+    header['physmin'] = []
+    b = readn(8*n_ch)
+    for i in range(n_ch): header['physmin'].append(float(bytestr(b, i)))
+    header['physmax'] = []
+    b = readn(8*n_ch)
+    for i in range(n_ch): header['physmax'].append(float(bytestr(b, i)))
+    header['digimin'] = []
+    b = readn(8*n_ch)
+    for i in range(n_ch): header['digimin'].append(int(bytestr(b, i)))
+    header['digimax'] = []
+    b = readn(8*n_ch)
+    for i in range(n_ch): header['digimax'].append(int(bytestr(b, i)))
+    header['prefilt'] = (readn(80*n_ch)).tobytes().split()
+    header['samples_per_record'] = []
+    b = readn(8*n_ch)
+    for i in range(n_ch): header['samples_per_record'].append(float(bytestr(b, i)))
+    nr = header['records']
+    n_per_rec = int(header['samples_per_record'][0])
+    n_total = int(nr*n_per_rec*n_ch)
+    fp.seek(header['length'],os.SEEK_SET)  # header end = data start
+    data = np.fromfile(fp, sep='', dtype=np.int16, count=n_total)  # count=-1
+    fp.close()
+
+    # re-order
+    data = np.reshape(data,(n_per_rec,n_ch,nr),order='F')
+    data = np.transpose(data,(0,2,1))
+    data = np.reshape(data,(n_per_rec*nr,n_ch),order='F')
+
+    # convert to physical dimensions (as float, so the scaled values are not
+    # truncated back into the int16 buffer)
+    data = data.astype(np.float64)
+    for k in range(data.shape[1]):
+        d_min = float(header['digimin'][k])
+        d_max = float(header['digimax'][k])
+        p_min = float(header['physmin'][k])
+        p_max = float(header['physmax'][k])
+        if ((d_max-d_min) > 0):
+            data[:,k] = p_min+(data[:,k]-d_min)/(d_max-d_min)*(p_max-p_min)
+
+    print(header)
+    return header['channelname'].split(),\
+           header['samples_per_record'][0]/header['duration'],\
+           data
+
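+# Usage sketch for read_edf (hypothetical file name):
+#   chs, fs, data = read_edf("recording.edf")
+#   # chs: channel labels, fs: sampling rate [Hz], data: samples x channels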
+
+def bp_filter(data, f_lo, f_hi, fs):
+    """Digital band pass filter (6-th order Butterworth)
+
+    Args:
+        data: numpy.array, time along axis 0
+        (f_lo, f_hi): frequency band to extract [Hz]
+        fs: sampling frequency [Hz]
+    Returns:
+        data_filt: band-pass filtered data, same shape as data
+    """
+    data_filt = np.zeros_like(data)
+    f_ny = fs/2.  # Nyquist frequency
+    b_lo = f_lo/f_ny  # normalized frequency [0..1]
+    b_hi = f_hi/f_ny  # normalized frequency [0..1]
+    p_lp = {"N":6, "Wn":b_hi, "btype":"lowpass", "analog":False, "output":"ba"}
+    p_hp = {"N":6, "Wn":b_lo, "btype":"highpass", "analog":False, "output":"ba"}
+    bp_b1, bp_a1 = butter(**p_lp)
+    bp_b2, bp_a2 = butter(**p_hp)
+    data_filt = filtfilt(bp_b1, bp_a1, data, axis=0)
+    data_filt = filtfilt(bp_b2, bp_a2, data_filt, axis=0)
+    return data_filt
+
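+# Usage sketch (assumed sampling rate): alpha = bp_filter(eeg, 8., 12., 256.)
+# The cascaded 6th-order Butterworth passes (low-pass at f_hi, then
+# high-pass at f_lo) applied with filtfilt give zero-phase band-pass output.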
+
+def plot_data(data, fs):
+    """Plot the data
+
+    Args:
+        data: numpy.array
+        fs: sampling frequency [Hz]
+    """
+    t = np.arange(len(data))/fs # time axis in seconds
+    fig = plt.figure(1, figsize=(20,4))
+    fig.patch.set_facecolor('white')
+    plt.plot(t, data, '-k', linewidth=1)
+    plt.xlabel("time [s]", fontsize=24)
+    plt.ylabel(r"potential [$\mu$V]", fontsize=24)
+    plt.tight_layout()
+    plt.show()
+
+
+def plot_psd(data, fs, n_seg=1024):
+    """Plot the power spectral density (Welch's method)
+
+    Args:
+        data: numpy.array
+        fs: sampling frequency [Hz]
+        n_seg: samples per segment, default=1024
+    """
+    freqs, psd = welch(data, fs, nperseg=n_seg)
+    fig = plt.figure(1, figsize=(16,8))
+    fig.patch.set_facecolor('white')
+    plt.semilogy(freqs, psd, 'k', linewidth=3)
+    #plt.loglog(freqs, psd, 'k', linewidth=3)
+    #plt.xlim(freqs.min(), freqs.max())
+    #plt.ylim(psd.min(), psd.max())
+    plt.title("Power spectral density (Welch's method, n={:d})".format(n_seg))
+    plt.tight_layout()
+    plt.show()
+
+
+def topo(data, n_grid=64):
+    """Interpolate EEG topography onto a regularly spaced grid
+
+    Args:
+        data: numpy.array, size = number of EEG channels
+        n_grid: integer, interpolate to n_grid x n_grid array, default=64
+    Returns:
+        data_interpol: cubic interpolation of EEG topography, n_grid x n_grid
+                       contains nan values
+    """
+    channels, locs = read_xyz('cap.xyz')
+    n_channels = len(channels)
+    #locs /= np.sqrt(np.sum(locs**2,axis=1))[:,np.newaxis]
+    locs /= np.linalg.norm(locs, 2, axis=1, keepdims=True)
+    c = findstr('Cz', channels)[0]
+    # print 'center electrode for interpolation: ' + channels[c]
+    #w = np.sqrt(np.sum((locs-locs[c])**2, axis=1))
+    w = np.linalg.norm(locs-locs[c], 2, axis=1)
+    #arclen = 2*np.arcsin(w/2)
+    arclen = np.arcsin(w/2.*np.sqrt(4.-w*w))
+    phi = np.angle(np.array(list(map(complex, locs[:,0]-locs[c][0], locs[:,1]-locs[c][1]))))
+    X = arclen*np.real(np.exp(1j*phi))
+    Y = arclen*np.imag(np.exp(1j*phi))
+    r = max([max(X),max(Y)])
+    Xi = np.linspace(-r,r,n_grid)
+    Yi = np.linspace(-r,r,n_grid)
+    data_ip = griddata((X, Y), data, (Xi[None,:], Yi[:,None]), method='cubic')
+    return data_ip
+
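+# Note on 'arclen' above: for unit-sphere points separated by chord length w,
+# the great-circle arc is theta = 2*arcsin(w/2). Since sin(theta) =
+# w*sqrt(1 - w**2/4) = (w/2)*sqrt(4 - w**2), the formula
+# arcsin(w/2*sqrt(4 - w**2)) recovers theta for theta <= pi/2.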
+
+def eeg2map(data):
+    """Interpolate and normalize EEG topography, ignoring nan values
+
+    Args:
+        data: numpy.array, size = number of EEG channels
+        n_grid: integer, interpolate to n_grid x n_grid array, default=64
+    Returns:
+        top_norm: normalized topography, n_grid x n_grid
+    """
+    n_grid = 64
+    top = topo(data, n_grid)
+    mn = np.nanmin(top)
+    mx = np.nanmax(top)
+    top_norm = (top-mn)/(mx-mn)
+    return top_norm
+
+
+def clustering(data, fs, chs, locs, mode, n_clusters, doplot=False):
+    """EEG microstate clustering algorithms.
+
+    Args:
+        data: numpy.array (n_t, n_ch)
+        fs  : sampling frequency [Hz]
+        chs : list of channel name strings
+        locs: numpy.array (n_ch, 2) of electrode locations (x, y)
+        mode: clustering algorithm
+        n_clusters: number of clusters
+        doplot: boolean
+    Returns:
+        maps: n_maps x n_channels NumPy array
+        L: microstate sequence (integers)
+        gfp_peaks: locations of local GFP maxima
+        gev: global explained variance
+    """
+    #print("\t[+] Clustering algorithm: {:s}".format(mode))
+
+    # --- number of EEG channels ---
+    n_ch = data.shape[1]
+    #print("\t[+] EEG channels: n_ch = {:d}".format(n_ch))
+
+    # --- normalized data ---
+    data_norm = data - data.mean(axis=1, keepdims=True)
+    data_norm /= data_norm.std(axis=1, keepdims=True)
+
+    # --- GFP peaks ---
+    gfp = np.nanstd(data, axis=1)
+    gfp2 = np.sum(gfp**2) # normalizing constant
+    gfp_peaks = locmax(gfp)
+    data_cluster = data[gfp_peaks,:]
+    #data_cluster = data_cluster[:100,:]
+    data_cluster_norm = data_cluster - data_cluster.mean(axis=1, keepdims=True)
+    data_cluster_norm /= data_cluster_norm.std(axis=1, keepdims=True)
+    print("\t[+] Data format for clustering [GFP peaks, channels]: {:d} x {:d}"\
+         .format(data_cluster.shape[0], data_cluster.shape[1]))
+
+    start = time.time()
+
+    if (mode == "aahc"):
+        print("\n\t[+] Clustering algorithm: AAHC.")
+        maps = aahc(data, n_clusters, doplot=False)
+
+    if (mode == "kmeans"):
+        print("\n\t[+] Clustering algorithm: mod. K-MEANS.")
+        maps = kmeans(data, n_maps=n_clusters, n_runs=5, doplot=False)
+
+    if (mode == "kmedoids"):
+        print("\n\t[+] Clustering algorithm: K-MEDOIDS.")
+        C = np.corrcoef(data_cluster_norm)
+        C = C**2 # ignore EEG polarity
+        kmed_maps = kmedoids(S=C, K=n_clusters, nruns=10, maxits=500)
+        maps = np.array([data_cluster[int(kmed_maps[k]),:] for k in range(n_clusters)])
+        del C, kmed_maps
+
+    if (mode == "pca"):
+        print("\n\t[+] Clustering algorithm: PCA.")
+        params = {"n_components": n_clusters, "copy": True, "whiten": True, "svd_solver": "auto"}
+        pca = PCA(**params) # SKLEARN
+        pca.fit(data_cluster_norm)
+        maps = np.array([pca.components_[k,:] for k in range(n_clusters)])
+        #print "PCA - explained variance: " + str(pca.explained_variance_ratio_)
+        #print "PCA - total explained variance: " + str(np.sum(pca.explained_variance_ratio_))
+        del pca, params
+
+        # SKLEARN: TruncatedSVD(n_components=2, algorithm='randomized', n_iter=5, random_state=None, tol=0.0)
+        #params = {"n_components": n_clusters, "algorithm": "randomized", "n_iter": 5, "random_state": None, "tol": 0.0}
+        #svd = TruncatedSVD(**params)
+        #svd.fit(data_cluster_norm)
+        #maps = svd.components_
+        #print "explained variance (%): "
+        #print explained_variance_ratio_
+        ##print "explained variance: "
+        #print explained_variance_
+        #del svd, params
+
+    if (mode == "ica"):
+        print("\n\t[+] Clustering algorithm: Fast-ICA.")
+        # Fast-ICA options: algorithm: parallel|deflation, fun: logcosh|exp|cube
+        params = {"n_components": n_clusters, "algorithm": "parallel", "whiten": True, "fun": "exp", "max_iter": 200}
+        ica = FastICA(**params) # SKLEARN
+        S_ = ica.fit_transform(data_cluster_norm)  # reconstructed signals
+        A_ = ica.mixing_  # estimated mixing matrix
+        IC_ = ica.components_
+        print("data: " + str(data_cluster_norm.shape))
+        print("ICs: " + str(IC_.shape))
+        print("mixing matrix: " + str(A_.shape))
+        maps = np.array([ica.components_[k,:] for k in range(n_clusters)])
+        del ica, params
+
+    end = time.time()
+    delta_t = end - start
+    print("\t[+] Computation time: {:.2f}".format(delta_t))
+
+    # --- microstate sequence ---
+    C = np.dot(data_norm, maps.T)/n_ch
+    L = np.argmax(C**2, axis=1)
+    del C
+
+    # --- GEV ---
+    maps_norm = maps - maps.mean(axis=1, keepdims=True)
+    maps_norm /= maps_norm.std(axis=1, keepdims=True)
+
+    # --- correlation data, maps ---
+    C = np.dot(data_norm, maps_norm.T)/n_ch
+    #print("\tC.shape: " + str(C.shape))
+    #print("\tC.min: {:.2f}   Cmax: {:.2f}".format(C.min(), C.max()))
+
+    # --- GEV_k & GEV ---
+    gev = np.zeros(n_clusters)
+    for k in range(n_clusters):
+        r = L==k
+        gev[k] = np.sum(gfp[r]**2 * C[r,k]**2)/gfp2
+    print("\n\t[+] Global explained variance GEV = {:.3f}".format(gev.sum()))
+    for k in range(n_clusters): print("\t\tGEV_{:d}: {:.3f}".format(k, gev[k]))
+
+    if doplot:
+        plt.ion()
+        # matplotlib's perceptually uniform sequential colormaps:
+        # magma, inferno, plasma, viridis
+        cm = plt.cm.magma
+        fig, axarr = plt.subplots(1, n_clusters, figsize=(20,5))
+        fig.patch.set_facecolor('white')
+        for imap in range(n_clusters):
+            axarr[imap].imshow(eeg2map(maps[imap, :]), cmap=cm, origin='lower')
+            axarr[imap].set_xticks([])
+            axarr[imap].set_xticklabels([])
+            axarr[imap].set_yticks([])
+            axarr[imap].set_yticklabels([])
+        title = "Microstate maps ({:s})".format(mode.upper())
+        axarr[0].set_title(title, fontsize=16, fontweight="bold")
+        plt.show()
+
+        # --- assign map labels manually ---
+        order_str = input("\n\t\tAssign map labels (e.g. 0, 2, 1, 3): ")
+        order_str = order_str.replace(",", "")
+        order_str = order_str.replace(" ", "")
+        if (len(order_str) != n_clusters):
+            if (len(order_str)==0):
+                print("\t\tEmpty input string.")
+            else:
+                print("\t\tParsed manual input: {:s}".format(", ".join(order_str)))
+                print("\t\tNumber of labels does not equal number of clusters.")
+            print("\t\tContinue using the original assignment...\n")
+        else:
+            order = np.zeros(n_clusters, dtype=int)
+            for i, s in enumerate(order_str):
+                order[i] = int(s)
+            print("\t\tRe-ordered labels: {:s}".format(", ".join(order_str)))
+            # re-order return variables
+            maps = maps[order,:]
+            for i in range(len(L)):
+                L[i] = order[L[i]]
+            gev = gev[order]
+            # Figure
+            fig, axarr = plt.subplots(1, n_clusters, figsize=(20,5))
+            fig.patch.set_facecolor('white')
+            for imap in range(n_clusters):
+                axarr[imap].imshow(eeg2map(maps[imap, :]), cmap=cm, origin='lower')
+                axarr[imap].set_xticks([])
+                axarr[imap].set_xticklabels([])
+                axarr[imap].set_yticks([])
+                axarr[imap].set_yticklabels([])
+            title = "re-ordered microstate maps"
+            axarr[0].set_title(title, fontsize=16, fontweight="bold")
+            plt.show()
+            plt.ioff()
+
+    return maps, L, gfp_peaks, gev
+
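+# Minimal usage sketch (quoted, not executed): full pipeline from an .edf file
+# to four microstate maps, using read_edf() and bp_filter() as in main() below;
+# "test.edf" is a placeholder file name.
+'''
+chs, fs, data_raw = read_edf("test.edf")
+data = bp_filter(data_raw, (1, 35), fs)      # 1-35 Hz band-pass
+maps, L, gfp_peaks, gev = clustering(data, fs, chs, locs=[], mode="kmeans",
+                                     n_clusters=4, doplot=False)
+print(maps.shape)                            # (4, n_ch)
+'''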
+
+def aahc(data, N_clusters, doplot=False):
+    """Atomize and Agglomerative Hierarchical Clustering Algorithm
+    AAHC (Murray et al., Brain Topography, 2008)
+
+    Args:
+        data: EEG data to cluster, numpy.array (n_samples, n_channels)
+        N_clusters: desired number of clusters
+        doplot: boolean, plot maps
+    Returns:
+        maps: n_maps x n_channels (numpy.array)
+    """
+
+    def extract_row(A, k):
+        v = A[k,:]
+        A_ = np.vstack((A[:k,:],A[k+1:,:]))
+        return A_, v
+
+    def extract_item(A, k):
+        a = A[k]
+        A_ = A[:k] + A[k+1:]
+        return A_, a
+
+    #print("\n\t--- AAHC ---")
+    nt, nch = data.shape
+
+    # --- get GFP peaks ---
+    gfp = data.std(axis=1)
+    gfp_peaks = locmax(gfp)
+    #gfp_peaks = gfp_peaks[:100]
+    #n_gfp = gfp_peaks.shape[0]
+    gfp2 = np.sum(gfp**2) # normalizing constant in GEV
+
+    # --- initialize clusters ---
+    maps = data[gfp_peaks,:]
+    # --- store original gfp peaks and indices ---
+    cluster_data = data[gfp_peaks,:]
+    #n_maps = n_gfp
+    n_maps = maps.shape[0]
+    print("\t[+] Initial number of clusters: {:d}\n".format(n_maps))
+
+    # --- cluster indices w.r.t. original size, normalized GFP peak data ---
+    Ci = [[k] for k in range(n_maps)]
+
+    # --- main loop: atomize + agglomerate ---
+    while (n_maps > N_clusters):
+        s = "\r{:s}\r\t\tAAHC > n: {:d} => {:d}".format(80*" ", n_maps, n_maps-1)
+        stdout.write(s); stdout.flush()
+        #print("\n\tAAHC > n: {:d} => {:d}".format(n_maps, n_maps-1))
+
+        # --- correlations of the data sequence with each cluster ---
+        m_x, s_x = data.mean(axis=1, keepdims=True), data.std(axis=1)
+        m_y, s_y = maps.mean(axis=1, keepdims=True), maps.std(axis=1)
+        s_xy = 1.*nch*np.outer(s_x, s_y)
+        C = np.dot(data-m_x, np.transpose(maps-m_y)) / s_xy
+
+        # --- microstate sequence, ignore polarity ---
+        L = np.argmax(C**2, axis=1)
+
+        # --- GEV (global explained variance) of cluster k ---
+        gev = np.zeros(n_maps)
+        for k in range(n_maps):
+            r = L==k
+            gev[k] = np.sum(gfp[r]**2 * C[r,k]**2)/gfp2
+
+        # --- merge cluster with the minimum GEV ---
+        imin = np.argmin(gev)
+        #print("\tre-cluster: {:d}".format(imin))
+
+        # --- N => N-1 ---
+        maps, _ = extract_row(maps, imin)
+        Ci, reC = extract_item(Ci, imin)
+        re_cluster = []  # indices of updated clusters
+        #C_sgn = np.zeros(nt)
+        for k in reC:  # map index to re-assign
+            c = cluster_data[k,:]
+            m_x, s_x = maps.mean(axis=1, keepdims=True), maps.std(axis=1)
+            m_y, s_y = c.mean(), c.std()
+            s_xy = 1.*nch*s_x*s_y
+            C = np.dot(maps-m_x, c-m_y)/s_xy
+            inew = np.argmax(C**2) # ignore polarity
+            #C_sgn[k] = C[inew]
+            re_cluster.append(inew)
+            Ci[inew].append(k)
+        n_maps = len(Ci)
+
+        # --- update clusters ---
+        re_cluster = list(set(re_cluster)) # unique list of updated clusters
+
+        ''' re-clustering by modified mean
+        for i in re_cluster:
+            idx = Ci[i]
+            c = np.zeros(nch) # new cluster average
+            for k in idx: # add to new cluster, polarity according to corr. sign
+                if (C_sgn[k] >= 0):
+                    c += cluster_data[k,:]
+                else:
+                    c -= cluster_data[k,:]
+            c /= len(idx)
+            maps[i] = c
+            #maps[i] = (c-np.mean(c))/np.std(c) # normalize the new cluster
+        del C_sgn
+        '''
+
+        # re-clustering by eigenvector method
+        for i in re_cluster:
+            idx = Ci[i]
+            Vt = cluster_data[idx,:]
+            Sk = np.dot(Vt.T, Vt)
+            evals, evecs = np.linalg.eig(Sk)
+            c = evecs[:, np.argmax(np.abs(evals))]
+            c = np.real(c)
+            maps[i] = c/np.sqrt(np.sum(c**2))
+
+    print()
+    return maps
+
+
+def kmeans(data, n_maps, n_runs=10, maxerr=1e-6, maxiter=500, doplot=False):
+    """Modified K-means clustering as detailed in:
+    [1] Pascual-Marqui et al., IEEE TBME (1995) 42(7):658--665
+    [2] Murray et al., Brain Topography (2008) 20:249--264.
+    Variables named as in [1], step numbering as in Table I.
+
+    Args:
+        data: numpy.array (n_t, n_ch) of EEG data
+        n_maps: number of microstate maps
+        n_runs: number of K-means runs (optional)
+        maxerr: maximum error for convergence (optional)
+        maxiter: maximum number of iterations (optional)
+        doplot: plot the results, default=False (optional)
+    Returns:
+        maps: microstate maps (number of maps x number of channels)
+        Note: the label sequence, GFP peak indices, GEV values and the
+        cross-validation criterion are computed internally, but only maps
+        is returned (see the commented-out return statement at the end).
+    """
+    n_t = data.shape[0]
+    n_ch = data.shape[1]
+    data = data - data.mean(axis=1, keepdims=True)
+
+    # GFP peaks
+    gfp = np.std(data, axis=1)
+    gfp_peaks = locmax(gfp)
+    gfp_values = gfp[gfp_peaks]
+    gfp2 = np.sum(gfp_values**2) # normalizing constant in GEV
+    n_gfp = gfp_peaks.shape[0]
+
+    # clustering of GFP peak maps only
+    V = data[gfp_peaks, :]
+    sumV2 = np.sum(V**2)
+
+    # store results for each k-means run
+    cv_list =   []  # cross-validation criterion for each k-means run
+    gev_list =  []  # GEV of each map for each k-means run
+    gevT_list = []  # total GEV values for each k-means run
+    maps_list = []  # microstate maps for each k-means run
+    L_list =    []  # microstate label sequence for each k-means run
+    for run in range(n_runs):
+        # initialize random cluster centroids (indices w.r.t. n_gfp)
+        rndi = np.random.permutation(n_gfp)[:n_maps]
+        maps = V[rndi, :]
+        # normalize row-wise (across EEG channels)
+        maps /= np.sqrt(np.sum(maps**2, axis=1, keepdims=True))
+        # initialize
+        n_iter = 0
+        var0 = 1.0
+        var1 = 0.0
+        # convergence criterion: variance estimate (step 6)
+        while ( (np.abs((var0-var1)/var0) > maxerr) & (n_iter < maxiter) ):
+            # (step 3) microstate sequence (= current cluster assignment)
+            C = np.dot(V, maps.T)
+            C /= (n_ch*np.outer(gfp[gfp_peaks], np.std(maps, axis=1)))
+            L = np.argmax(C**2, axis=1)
+            # (step 4)
+            for k in range(n_maps):
+                Vt = V[L==k, :]
+                # (step 4a)
+                Sk = np.dot(Vt.T, Vt)
+                # (step 4b)
+                evals, evecs = np.linalg.eig(Sk)
+                v = evecs[:, np.argmax(np.abs(evals))]
+                maps[k, :] = v/np.sqrt(np.sum(v**2))
+            # (step 5)
+            var1 = var0
+            var0 = sumV2 - np.sum(np.sum(maps[L, :]*V, axis=1)**2)
+            var0 /= (n_gfp*(n_ch-1))
+            n_iter += 1
+        if (n_iter < maxiter):
+            print("\t\tK-means run {:d}/{:d} converged after {:d} iterations.".format(run+1, n_runs, n_iter))
+        else:
+            print("\t\tK-means run {:d}/{:d} did NOT converge after {:d} iterations.".format(run+1, n_runs, maxiter))
+
+        # CROSS-VALIDATION criterion for this run (step 8)
+        C_ = np.dot(data, maps.T)
+        C_ /= (n_ch*np.outer(gfp, np.std(maps, axis=1)))
+        L_ = np.argmax(C_**2, axis=1)
+        var = np.sum(data**2) - np.sum(np.sum(maps[L_, :]*data, axis=1)**2)
+        var /= (n_t*(n_ch-1))
+        cv = var * (n_ch-1)**2/(n_ch-n_maps-1.)**2
+
+        # GEV (global explained variance) of cluster k
+        gev = np.zeros(n_maps)
+        for k in range(n_maps):
+            r = L==k
+            gev[k] = np.sum(gfp_values[r]**2 * C[r,k]**2)/gfp2
+        gev_total = np.sum(gev)
+
+        # store
+        cv_list.append(cv)
+        gev_list.append(gev)
+        gevT_list.append(gev_total)
+        maps_list.append(maps)
+        L_list.append(L_)
+
+    # select best run
+    k_opt = np.argmin(cv_list)
+    #k_opt = np.argmax(gevT_list)
+    maps = maps_list[k_opt]
+    # ms_gfp = ms_list[k_opt] # microstate sequence at GFP peaks
+    gev = gev_list[k_opt]
+    L_ = L_list[k_opt]
+
+    if doplot:
+        plt.ion()
+        # matplotlib's perceptually uniform sequential colormaps:
+        # magma, inferno, plasma, viridis
+        cm = plt.cm.magma
+        fig, axarr = plt.subplots(1, n_maps, figsize=(20,5))
+        fig.patch.set_facecolor('white')
+        for imap in range(n_maps):
+            axarr[imap].imshow(eeg2map(maps[imap, :]), cmap=cm, origin='lower')
+            axarr[imap].set_xticks([])
+            axarr[imap].set_xticklabels([])
+            axarr[imap].set_yticks([])
+            axarr[imap].set_yticklabels([])
+        title = "K-means cluster centroids"
+        axarr[0].set_title(title, fontsize=16, fontweight="bold")
+        plt.show()
+
+        # --- assign map labels manually ---
+        order_str = input("\n\t\tAssign map labels (e.g. 0, 2, 1, 3): ")
+        order_str = order_str.replace(",", "")
+        order_str = order_str.replace(" ", "")
+        if (len(order_str) != n_maps):
+            if (len(order_str)==0):
+                print("\t\tEmpty input string.")
+            else:
+                print("\t\tParsed manual input: {:s}".format(", ".join(order_str)))
+                print("\t\tNumber of labels does not equal number of clusters.")
+            print("\t\tContinue using the original assignment...\n")
+        else:
+            order = np.zeros(n_maps, dtype=int)
+            for i, s in enumerate(order_str):
+                order[i] = int(s)
+            print("\t\tRe-ordered labels: {:s}".format(", ".join(order_str)))
+            # re-order return variables
+            maps = maps[order,:]
+            for i in range(len(L)):
+                L[i] = order[L[i]]
+            gev = gev[order]
+            # Figure
+            fig, axarr = plt.subplots(1, n_maps, figsize=(20,5))
+            fig.patch.set_facecolor('white')
+            for imap in range(n_maps):
+                axarr[imap].imshow(eeg2map(maps[imap, :]), cmap=cm, origin='lower')
+                axarr[imap].set_xticks([])
+                axarr[imap].set_xticklabels([])
+                axarr[imap].set_yticks([])
+                axarr[imap].set_yticklabels([])
+            title = "re-ordered K-means cluster centroids"
+            axarr[0].set_title(title, fontsize=16, fontweight="bold")
+            plt.show()
+            plt.ioff()
+    #return maps, L_, gfp_peaks, gev, cv
+    return maps
+
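+# Minimal usage sketch (quoted, not executed): run the modified K-means directly
+# on a placeholder (n_t, n_ch) array; real use would pass band-passed EEG data.
+'''
+data = np.random.randn(1000, 30)             # placeholder for (n_t, n_ch) EEG
+maps = kmeans(data, n_maps=4, n_runs=5)
+print(maps.shape)                            # (4, 30), rows have unit norm
+'''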
+
+def kmedoids(S, K, nruns, maxits):
+    """K-medoids clustering of a similarity matrix.
+    Octave/Matlab: Copyright Brendan J. Frey and Delbert Dueck, Aug 2006
+    http://www.psi.toronto.edu/~frey/apm/kcc.m
+    Simplified by Kevin Murphy
+    Python 2.7.x - FvW, 02/2014
+
+    Args:
+        S: similarity matrix (n x n numpy.array)
+        K: number of clusters
+        nruns: number of runs with random initialization
+        maxits: maximum number of iterations per run
+    Returns:
+        unique medoid indices into the rows of S (numpy.array)
+    """
+    n = S.shape[0]
+    dpsim = np.zeros((maxits+1,nruns)) # row 0 unused; t runs from 1 to maxits
+    idx = np.zeros((n,nruns))
+    for rep in range(nruns):
+        tmp = np.random.permutation(range(n))
+        mu = tmp[:K]
+        t = 0
+        done = (t==maxits)
+        while ( not done ):
+            t += 1
+            muold = mu.copy() # copy, not a view, so the convergence test below is meaningful
+            dpsim[t,rep] = 0
+            # Find class assignments
+            cl = np.argmax(S[:,mu], axis=1) # max pos. of each row
+            # Set assignments of exemplars to themselves
+            cl[mu] = range(K)
+            for j in range(K): # For each class, find new exemplar
+                I = np.where(cl==j)[0]
+                S_I_rowsum = np.sum(S[I][:,I],axis=0)
+                Scl = max(S_I_rowsum)
+                ii = np.argmax(S_I_rowsum)
+                dpsim[t,rep] = dpsim[t,rep] + Scl
+                mu[j] = I[ii]
+            if all(muold==mu) or (t==maxits):
+                done = True
+        idx[:,rep] = mu[cl]
+        dpsim[t+1:,rep] = dpsim[t,rep]
+    return np.unique(idx)
+
+
+def p_empirical(data, n_clusters):
+    """Empirical symbol distribution
+
+    Args:
+        data: numpy.array, size = length of microstate sequence
+        n_clusters: number of microstate clusters
+    Returns:
+        p: empirical distribution
+    """
+    p = np.zeros(n_clusters)
+    n = len(data)
+    for i in range(n):
+        p[data[i]] += 1.0
+    p /= n
+    return p
+
+
+def T_empirical(data, n_clusters):
+    """Empirical transition matrix
+
+    Args:
+        data: numpy.array, size = length of microstate sequence
+        n_clusters: number of microstate clusters
+    Returns:
+        T: empirical transition matrix
+    """
+    T = np.zeros((n_clusters, n_clusters))
+    n = len(data)
+    for i in range(n-1):
+        T[data[i], data[i+1]] += 1.0
+    p_row = np.sum(T, axis=1)
+    for i in range(n_clusters):
+        if ( p_row[i] != 0.0 ):
+            for j in range(n_clusters):
+                T[i,j] /= p_row[i]  # normalize row sums to 1.0
+    return T
+
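+# Worked toy example (quoted, not executed): for the sequence 0,0,1,2,1,0,
+# p_empirical gives [1/2, 1/3, 1/6] and row i of T_empirical holds the relative
+# frequencies of the observed transitions i -> j.
+'''
+x = np.array([0, 0, 1, 2, 1, 0])
+p = p_empirical(x, n_clusters=3)             # [0.5, 0.333, 0.167]
+T = T_empirical(x, n_clusters=3)             # e.g. T[1] = [0.5, 0.0, 0.5]
+'''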
+
+def p_equilibrium(T):
+    '''Get the equilibrium (stationary) distribution of transition matrix T,
+    i.e. the left eigenvector of T for eigenvalue lambda = 1.'''
+    evals, evecs = np.linalg.eig(T.transpose())
+    i = np.where(np.isclose(evals, 1.0, atol=1e-6))[0][0] # locate max eigenval.
+    p_eq = np.abs(evecs[:,i]) # make eigenvec. to max. eigenval. non-negative
+    p_eq /= p_eq.sum() # normalized eigenvec. to max. eigenval.
+    return p_eq # stationary distribution
+
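+# Worked toy example (quoted, not executed): for a two-state chain with
+# T = [[0.9, 0.1], [0.2, 0.8]], solving p = pT gives p_eq = [2/3, 1/3].
+'''
+T = np.array([[0.9, 0.1], [0.2, 0.8]])
+p_eq = p_equilibrium(T)                      # -> [0.667, 0.333]
+'''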
+
+def max_entropy(n_clusters):
+    """Maximum Shannon entropy of a sequence with n_clusters
+
+    Args:
+        n_clusters: number of microstate clusters
+    Returns:
+        h_max: maximum Shannon entropy
+    """
+    h_max = np.log(float(n_clusters))
+    return h_max
+
+
+def shuffle(data):
+    """Randomly shuffled copy of data (i.i.d. surrogate)
+
+    Args:
+        data: numpy array, 1D
+    Returns:
+        data_copy: shuffled data copy
+    """
+    data_c = data.copy()
+    np.random.shuffle(data_c)
+    return data_c
+
+
+def surrogate_mc(p, T, n_clusters, n):
+    """Surrogate Markov chain with symbol distribution p and
+    transition matrix T
+
+    Args:
+        p: empirical symbol distribution
+        T: empirical transition matrix
+        n_clusters: number of clusters/symbols
+        n: length of surrogate microstate sequence
+    Returns:
+        mc: surrogate Markov chain
+    """
+
+    # NumPy vectorized code
+    psum = np.cumsum(p)
+    Tsum = np.cumsum(T, axis=1)
+    # initial state according to p:
+    mc = [np.min(np.argwhere(np.random.rand() < psum))]
+    # next state according to T:
+    for i in range(1, n):
+        mc.append(np.min(np.argwhere(np.random.rand() < Tsum[mc[i-1]])))
+
+    ''' alternative implementation using loops
+    r = np.random.rand() # ~U[0,1], random threshold
+    s = 0
+    y = p[s]
+    while (y < r):
+        s += 1
+        y += p[s]
+    mc = [s] # initial state according to p
+
+    # iterate ...
+    for i in range(1, n):
+        r = np.random.rand() # ~U[0,1], random threshold
+        s = mc[i-1] # current state
+        t = 0 # trial state
+        y = T[s][t] # transition rate to trial state
+        while ( y < r ):
+            t += 1 # next trial state
+            y += T[s][t]
+        mc.append(t) # store current state
+    '''
+
+    return np.array(mc)
+
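+# Minimal usage sketch (quoted, not executed): build a first-order Markov
+# surrogate matched to an empirical microstate sequence x (here with 4 maps).
+'''
+p_hat = p_empirical(x, n_clusters=4)
+T_hat = T_empirical(x, n_clusters=4)
+x_mc = surrogate_mc(p_hat, T_hat, n_clusters=4, n=len(x))
+'''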
+
+def mutinf(x, ns, lmax):
+    """Time-lagged mutual information of symbolic sequence x with
+    ns different symbols, up to maximum lag lmax.
+    *** Symbols must be 0, 1, 2, ... to use as indices directly! ***
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        lmax: maximum time lag
+    Returns:
+        mi: time-lagged mutual information
+    """
+
+    n = len(x)
+    mi = np.zeros(lmax)
+    for l in range(lmax):
+        if ((l+1)%10 == 0):
+            s = "\r\t\tmutual information lag: {:d}".format(l+1)
+            sys.stdout.write(s)
+            sys.stdout.flush()
+        nmax = n-l
+        h1 = H_1(x[:nmax], ns)
+        h2 = H_1(x[l:l+nmax], ns)
+        h12 = H_2(x[:nmax], x[l:l+nmax], ns)
+        mi[l] = h1 + h2 - h12
+    return mi
+
+
+def mutinf_i(x, ns, lmax):
+    """Time-lagged mutual information for each symbol of the symbolic
+    sequence x with  ns different symbols, up to maximum lag lmax.
+    *** Symbols must be 0, 1, 2, ... to use as indices directly! ***
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        lmax: maximum time lag
+    Returns:
+        mi: time-lagged mutual informations for each symbol, ns x lmax
+    """
+
+    n = len(x)
+    mi = np.zeros((ns,lmax))
+    for l in range(lmax):
+        if (l%10 == 0):
+            s = "\r\t\tmutual information lag: {:d}".format(l)
+            sys.stdout.write(s)
+            sys.stdout.flush()
+        nmax = n - l
+
+        # compute distributions
+        p1 = np.zeros(ns)
+        p2 = np.zeros(ns)
+        p12 = np.zeros((ns,ns))
+        for i in range(nmax):
+            i1 = int(x[i])
+            i2 = int(x[i+l])
+            p1[i1] += 1.0 # p( X_t = i1 )
+            p2[i2] += 1.0 # p( X_t+l = i2 )
+            p12[i1,i2] += 1.0 # p( X_t+l=i2 , X_t=i1 )
+        p1 /= nmax
+        p2 /= nmax
+
+        # normalize the transition matrix p( X_t+l=i2 | X_t=i1 )
+        rowsum = np.sum(p12, axis=1)
+        for i, j in np.ndindex(p12.shape):
+            # note: assumes every symbol occurs, otherwise rowsum[i] can be zero
+            p12[i,j] /= rowsum[i]
+
+        # compute entropies
+        H2 = np.sum(p2[p2>0] * np.log(p2[p2>0]))
+        for i in range(ns):
+            H12 = 0.0
+            for j in range(ns):
+                if ( p12[i,j] > 0.0 ):
+                    H12 += ( p12[i,j] * np.log( p12[i,j] ) )
+            mi[i,l] = -H2 + p1[i] * H12
+    return mi
+
+
+def mutinf_CI(p, T, n, alpha, nrep, lmax):
+    """Return an array for the computation of confidence intervals (CI) for
+    the time-lagged mutual information of length lmax.
+    Null Hypothesis: Markov Chain of length n, equilibrium distribution p,
+    transition matrix T, using nrep repetitions."""
+
+    ns = len(p)
+    mi_array = np.zeros((nrep,lmax))
+    for r in range(nrep):
+        print("\n\t\tsurrogate MC # {:d} / {:d}".format(r+1, nrep))
+        x_mc = surrogate_mc(p, T, ns, n)
+        mi_mc = mutinf(x_mc, ns, lmax)
+        #mi_mc = mutinf_cy(X_mc, ns, lmax)
+        mi_array[r,:] = mi_mc
+    return mi_array
+
+
+def excess_entropy_rate(x, ns, kmax, doplot=False):
+    # y = ax+b: line fit to joint entropy for range of histories k
+    # a = entropy rate (slope)
+    # b = excess entropy (intersect.)
+    h_ = np.zeros(kmax)
+    for k in range(kmax): h_[k] = H_k(x, ns, k+1)
+    ks = np.arange(1,kmax+1)
+    a, b = np.polyfit(ks, h_, 1)
+    # --- Figure ---
+    if doplot:
+        plt.figure(figsize=(6,6))
+        plt.plot(ks, h_, '-sk')
+        plt.plot(ks, a*ks+b, '-b')
+        plt.xlabel("k")
+        plt.ylabel("$H_k$")
+        plt.title("Entropy rate")
+        plt.tight_layout()
+        plt.show()
+    return (a, b)
+
+
+def mc_entropy_rate(p, T):
+    """Markov chain entropy rate.
+    h = -sum_i sum_j p_i T_ij log(T_ij)
+    """
+    h = 0.
+    for i, j in np.ndindex(T.shape):
+        if (T[i,j] > 0):
+            h -= ( p[i]*T[i,j]*np.log(T[i,j]) )
+    return h
+
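+# Worked toy example (quoted, not executed): for the two-state chain with
+# T = [[0.9, 0.1], [0.2, 0.8]] and stationary p = [2/3, 1/3], h ~= 0.384 nats.
+'''
+p = np.array([2./3, 1./3])
+T = np.array([[0.9, 0.1], [0.2, 0.8]])
+h = mc_entropy_rate(p, T)                    # ~0.384
+'''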
+
+def aif_peak1(mi, fs, doplot=False):
+    '''compute time-lagged mut. inf. (AIF) and 1st peak.'''
+    dt = 1000./fs # sampling interval [ms]
+    mi_filt = np.convolve(mi, np.ones(3)/3., mode='same')
+    #mi_filt = np.convolve(mi, np.ones(5)/5., mode='same')
+    mx0 = 8 # 8
+    jmax = mx0 + locmax(mi_filt[mx0:])[0]
+    mx_mi = dt*jmax
+    if doplot:
+        offset = 5
+        tmax = 100
+        fig = plt.figure(1, figsize=(22,4))
+        fig.patch.set_facecolor('white')
+        t = dt*np.arange(tmax)
+        plt.plot(t[offset:tmax], mi[offset:tmax], '-ok', label='AIF')
+        plt.plot(t[offset:tmax], mi_filt[offset:tmax], '-b', label='smoothed AIF')
+        plt.plot(mx_mi, mi[jmax], 'or', markersize=15, label='peak-1')
+        plt.xlabel("time lag [ms]")
+        plt.ylabel("mut. inf. [bits]")
+        plt.legend(loc=0)
+        #plt.title("mutual information of map sequence")
+        #plt.title(s, fontsize=16, fontweight='bold')
+        plt.show()
+        input()
+
+    return jmax, mx_mi
+
+
+def H_1(x, ns):
+    """Shannon entropy of the symbolic sequence x with ns symbols.
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+    Returns:
+        h: Shannon entropy of x
+    """
+
+    n = len(x)
+    p = np.zeros(ns) # symbol distribution
+    for t in range(n):
+        p[x[t]] += 1.0
+    p /= n
+    h = -np.sum(p[p>0]*np.log(p[p>0]))
+    return h
+
+
+def H_2(x, y, ns):
+    """Joint Shannon entropy of the symbolic sequences X, Y with ns symbols.
+
+    Args:
+        x, y: symbolic sequences, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+    Returns:
+        h: joint Shannon entropy of (x, y)
+    """
+
+    if (len(x) != len(y)):
+        print("H_2 warning : sequences of different lengths, using the shorter...")
+    n = min([len(x), len(y)])
+    p = np.zeros((ns, ns)) # joint distribution
+    for t in range(n):
+        p[x[t],y[t]] += 1.0
+    p /= n
+    h = -np.sum(p[p>0]*np.log(p[p>0]))
+    return h
+
+
+def H_k(x, ns, k):
+    """Shannon's joint entropy from x[n+p:n-m]
+    x: symbolic time series
+    ns: number of symbols
+    k: length of k-history
+    """
+
+    N = len(x)
+    f = np.zeros(tuple(k*[ns]))
+    for t in range(N-k): f[tuple(x[t:t+k])] += 1.0
+    f /= (N-k) # normalize distribution
+    hk = -np.sum(f[f>0]*np.log(f[f>0]))
+    #m = np.sum(f>0)
+    #hk = hk + (m-1)/(2*N) # Miller-Madow bias correction
+    return hk
+
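+# Worked toy example (quoted, not executed): a uniform i.i.d. binary sequence
+# has H_1 close to log(2) ~= 0.693 nats and H_k close to k*log(2) for small k.
+'''
+x = np.random.randint(2, size=10000)
+print(H_1(x, ns=2))                          # ~0.693
+print(H_2(x, x, ns=2))                       # equals H_1(x, 2) for y = x
+print(H_k(x, ns=2, k=3))                     # ~2.079 = 3*log(2)
+'''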
+
+def testMarkov0(x, ns, alpha, verbose=True):
+    """Test zero-order Markovianity of symbolic sequence x with ns symbols.
+    Null hypothesis: zero-order MC (iid) <=>
+    p(X[t]), p(X[t+1]) independent
+    cf. Kullback, Technometrics (1962)
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        alpha: significance level
+    Returns:
+        p: p-value of the Chi2 test for independence
+    """
+
+    if verbose:
+        print( "\t\tZERO-ORDER MARKOVIANITY:" )
+    n = len(x)
+    f_ij = np.zeros((ns,ns))
+    f_i = np.zeros(ns)
+    f_j = np.zeros(ns)
+    # calculate f_ij = f( x[t]=i, x[t+1]=j ) and the marginals f_i, f_j
+    for t in range(n-1):
+        i = x[t]
+        j = x[t+1]
+        f_ij[i,j] += 1.0
+        f_i[i] += 1.0
+        f_j[j] += 1.0
+    T = 0.0 # statistic
+    for i, j in np.ndindex(f_ij.shape):
+        f = f_ij[i,j]*f_i[i]*f_j[j]
+        if (f > 0):
+            T += (f_ij[i,j] * np.log((n*f_ij[i,j])/(f_i[i]*f_j[j])))
+    T *= 2.0
+    df = (ns-1.0) * (ns-1.0)
+    #p = chi2test(T, df, alpha)
+    p = chi2.sf(T, df, loc=0, scale=1)
+    if verbose:
+        print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p, T, df))
+    return p
+
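+# Minimal usage sketch (quoted, not executed): an i.i.d. sequence is a
+# zero-order Markov chain, so the test should yield a large p-value
+# (no rejection at level alpha).
+'''
+x = np.random.randint(4, size=5000)
+p0 = testMarkov0(x, ns=4, alpha=0.01)
+'''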
+
+def testMarkov1(X, ns, alpha, verbose=True):
+    """Test first-order Markovianity of symbolic sequence X with ns symbols.
+    Null hypothesis:
+    first-order MC <=>
+    p(X[t+1] | X[t]) = p(X[t+1] | X[t], X[t-1])
+    cf. Kullback, Technometrics (1962), Tables 8.1, 8.2, 8.6.
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        alpha: significance level
+    Returns:
+        p: p-value of the Chi2 test for independence
+    """
+
+    if verbose:
+        print( "\n\t\tFIRST-ORDER MARKOVIANITY:" )
+    n = len(X)
+    f_ijk = np.zeros((ns,ns,ns))
+    f_ij = np.zeros((ns,ns))
+    f_jk = np.zeros((ns,ns))
+    f_j = np.zeros(ns)
+    for t in range(n-2):
+        i = X[t]
+        j = X[t+1]
+        k = X[t+2]
+        f_ijk[i,j,k] += 1.0
+        f_ij[i,j] += 1.0
+        f_jk[j,k] += 1.0
+        f_j[j] += 1.0
+    T = 0.0
+    for i, j, k in np.ndindex(f_ijk.shape):
+        f = f_ijk[i][j][k]*f_j[j]*f_ij[i][j]*f_jk[j][k]
+        if (f > 0):
+            T += (f_ijk[i,j,k]*np.log((f_ijk[i,j,k]*f_j[j])/(f_ij[i,j]*f_jk[j,k])))
+    T *= 2.0
+    df = ns*(ns-1)*(ns-1)
+    #p = chi2test(T, df, alpha)
+    p = chi2.sf(T, df, loc=0, scale=1)
+    if verbose:
+        print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p, T, df))
+    return p
+
+
+def testMarkov2(X, ns, alpha, verbose=True):
+    """Test second-order Markovianity of symbolic sequence X with ns symbols.
+    Null hypothesis:
+    second-order MC <=>
+    p(X[t+1] | X[t], X[t-1]) = p(X[t+1] | X[t], X[t-1], X[t-2])
+    cf. Kullback, Technometrics (1962), Table 10.2.
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        alpha: significance level
+    Returns:
+        p: p-value of the Chi2 test for independence
+    """
+
+    if verbose:
+        print( "\n\t\tSECOND-ORDER MARKOVIANITY:" )
+    n = len(X)
+    f_ijkl = np.zeros((ns,ns,ns,ns))
+    f_ijk = np.zeros((ns,ns,ns))
+    f_jkl = np.zeros((ns,ns,ns))
+    f_jk = np.zeros((ns,ns))
+    for t in range(n-3):
+        i = X[t]
+        j = X[t+1]
+        k = X[t+2]
+        l = X[t+3]
+        f_ijkl[i,j,k,l] += 1.0
+        f_ijk[i,j,k] += 1.0
+        f_jkl[j,k,l] += 1.0
+        f_jk[j,k] += 1.0
+    T = 0.0
+    for i, j, k, l in np.ndindex(f_ijkl.shape):
+        f = f_ijkl[i,j,k,l]*f_ijk[i,j,k]*f_jkl[j,k,l]*f_jk[j,k]
+        if (f > 0):
+            T += (f_ijkl[i,j,k,l]*np.log((f_ijkl[i,j,k,l]*f_jk[j,k])/(f_ijk[i,j,k]*f_jkl[j,k,l])))
+    T *= 2.0
+    df = ns*ns*(ns-1)*(ns-1)
+    #p = chi2test(T, df, alpha)
+    p = chi2.sf(T, df, loc=0, scale=1)
+    if verbose:
+        print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p, T, df))
+    return p
+
+
+def conditionalHomogeneityTest(X, ns, l, alpha, verbose=True):
+    """Test conditional homogeneity of non-overlapping blocks of
+    length l of symbolic sequence X with ns symbols
+    cf. Kullback, Technometrics (1962), Table 9.1.
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        l: split x into non-overlapping blocks of size l
+        alpha: significance level
+    Returns:
+        p: p-value of the Chi2 test for independence
+    """
+
+    if verbose:
+        print("\n\t\tCONDITIONAL HOMOGENEITY (three-way table):")
+    n = len(X)
+    r = int(np.floor(float(n)/float(l))) # number of blocks
+    nl = r*l
+    if verbose:
+        print("\t\tSplit data in r = {:d} blocks of length {:d}.".format(r,l))
+    f_ijk = np.zeros((r,ns,ns))
+    f_ij = np.zeros((r,ns))
+    f_jk = np.zeros((ns,ns))
+    f_i = np.zeros(r)
+    f_j = np.zeros(ns)
+
+    # calculate f_ijk (time / block dep. transition matrix)
+    for i in  range(r): # block index
+        for ii in range(l-1): # pos. inside the current block
+            j = X[i*l + ii]
+            k = X[i*l + ii + 1]
+            f_ijk[i,j,k] += 1.0
+            f_ij[i,j] += 1.0
+            f_jk[j,k] += 1.0
+            f_i[i] += 1.0
+            f_j[j] += 1.0
+
+    # conditional homogeneity (stationarity of the Markov chain)
+    T = 0.0
+    for i, j, k in np.ndindex(f_ijk.shape):
+        # conditional homogeneity
+        f = f_ijk[i,j,k]*f_j[j]*f_ij[i,j]*f_jk[j,k]
+        if (f > 0):
+            T += (f_ijk[i,j,k]*np.log((f_ijk[i,j,k]*f_j[j])/(f_ij[i,j]*f_jk[j,k])))
+    T *= 2.0
+    df = (r-1)*(ns-1)*ns
+    #p = chi2test(T, df, alpha)
+    p = chi2.sf(T, df, loc=0, scale=1)
+    if verbose:
+        print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p, T, df))
+    return p
+
+
+def symmetryTest(X, ns, alpha, verbose=True):
+    """Test symmetry of the transition matrix of symbolic sequence X with
+    ns symbols
+    cf. Kullback, Technometrics (1962)
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        alpha: significance level
+    Returns:
+        p: p-value of the Chi2 test for independence
+    """
+
+    if verbose:
+        print( "\n\t\tSYMMETRY:" )
+    n = len(X)
+    f_ij = np.zeros((ns,ns))
+    for t in range(n-1):
+        i = X[t]
+        j = X[t+1]
+        f_ij[i,j] += 1.0
+    T = 0.0
+    for i, j in np.ndindex(f_ij.shape):
+        if (i != j):
+            f = f_ij[i,j]*f_ij[j,i]
+            if (f > 0):
+                T += (f_ij[i,j]*np.log((2.*f_ij[i,j])/(f_ij[i,j]+f_ij[j,i])))
+    T *= 2.0
+    df = ns*(ns-1)/2
+    #p = chi2test(T, df, alpha)
+    p = chi2.sf(T, df, loc=0, scale=1)
+    if verbose:
+        print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p, T, df))
+    return p
+
+
+def geoTest(X, ns, dt, alpha, verbose=True):
+    """Test the geometric distribution of lifetime distributions.
+
+    Args:
+        X: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+        dt: time step in [ms]
+        alpha: significance level
+    Returns:
+        p_values: for each symbol
+    """
+
+    if verbose:
+        print("\n\t\tGEOMETRIC DISTRIBUTION of lifetimes:\n")
+    tauDist = lifetimes(X, ns)
+    T = T_empirical(X, ns)
+    p_values = np.zeros(ns)
+    for s in range(ns): # test for each symbol
+        if verbose:
+            print( "\n\t\tTesting the distribution of symbol # {:d}".format(s) )
+        # m = max_tau:
+        m = len(tauDist[s])
+        # observed lifetime distribution:
+        q_obs = np.zeros(m)
+        # theoretical lifetime distribution:
+        q_exp = np.zeros(m)
+        for j in range(m):
+            # observed frequency of lifetime j+1 for state s
+            q_obs[j] = tauDist[s][j]
+            # expected frequency
+            q_exp[j] = (1.0-T[s][s]) * (T[s][s]**j)
+        q_exp *= sum(q_obs)
+
+        t = 0.0 # chi2 statistic
+        for j in range(m):
+            if ((q_obs[j] > 0) & (q_exp[j] > 0)):
+                t += (q_obs[j]*np.log(q_obs[j]/q_exp[j]))
+        t *= 2.0
+        df = m-1
+        #p0 = chi2test(t, df, alpha)
+        p0 = chi2.sf(t, df, loc=0, scale=1)
+        if verbose:
+            print("\t\tp: {:.2e} | t: {:.3f} | df: {:.1f}".format(p0,t,df))
+        p_values[s] = p0
+
+        g1, p1, dof1, expctd1 = chi2_contingency( np.vstack((q_obs,q_exp)), lambda_='log-likelihood' )
+        if verbose:
+            print("\t\tG-test (log-likelihood) p: {:.2e}, g: {:.3f}, df: {:.1f}".format(p1,g1,dof1))
+
+        g2, p2, dof2, expctd2 = chi2_contingency( np.vstack((q_obs,q_exp)) ) # Pearson's Chi2 test
+        if verbose:
+            print("\t\tChi2 test (Pearson) p: {:.2e}, chi2: {:.3f}, df: {:.1f}".format(p2,g2,dof2))
+
+        p_ = tauDist[s]/np.sum(tauDist[s])
+        tau_mean = 0.0
+        tau_max = len(tauDist[s])*dt
+        for j in range(len(p_)): tau_mean += (p_[j] * j)
+        tau_mean *= dt
+        if verbose:
+            pass
+            #print("\t\tmean dwell time: {:.2f} [ms]".format(tau_mean))
+            #print("\t\tmax. dwell time: {:.2f} [ms]\n\n".format(tau_max))
+    return p_values
+
+
+def lifetimes(X, ns):
+    """Compute the lifetime distributions for each symbol
+    in a symbolic sequence X with ns symbols.
+
+    Args:
+        x: symbolic sequence, symbols = [0, 1, 2, ...]
+        ns: number of symbols
+    Returns:
+        tauDist: list of lists, lifetime distributions for each symbol
+    """
+
+    n = len(X)
+    tauList = [[] for i in range(ns)] # unsorted lifetimes, [[],[],[],[]]
+    i = 0 # current index
+    s = X[i] # current symbol
+    tau = 1.0 # current lifetime
+    while (i < n-1):
+        i += 1
+        if ( X[i] == s ):
+            tau += 1.0
+        else:
+            tauList[int(s)].append(tau)
+            s = X[i]
+            tau = 1.0
+    tauList[int(s)].append(tau) # last state
+    # find max lifetime for each symbol
+    tau_max = [max(L) for L in tauList]
+    #print( "max lifetime for each symbol : " + str(tau_max) )
+    tauDist = [] # empty histograms for each symbol
+    for i in range(ns): tauDist.append( [0.]*int(tau_max[i]) )
+    # lifetime distributions
+    for s in range(ns):
+        for j in range(len(tauList[s])):
+            tau = tauList[s][j]
+            tauDist[s][int(tau)-1] += 1.0
+    return tauDist
+
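+# Worked toy example (quoted, not executed): in 0,0,1,1,1,0 symbol 0 has runs
+# of lengths 2 and 1, symbol 1 a single run of length 3.
+'''
+x = np.array([0, 0, 1, 1, 1, 0])
+tauDist = lifetimes(x, ns=2)
+# tauDist[0] = [1.0, 1.0]       (one run of length 1, one of length 2)
+# tauDist[1] = [0.0, 0.0, 1.0]  (one run of length 3)
+'''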
+
+def multiple_comparisons(p_values, method):
+    """Apply multiple comparisons correction code using the statsmodels package.
+    Input: array p-values.
+    'b': 'Bonferroni'
+    's': 'Sidak'
+    'h': 'Holm'
+    'hs': 'Holm-Sidak'
+    'sh': 'Simes-Hochberg'
+    'ho': 'Hommel'
+    'fdr_bh': 'FDR Benjamini-Hochberg'
+    'fdr_by': 'FDR Benjamini-Yekutieli'
+    'fdr_tsbh': 'FDR 2-stage Benjamini-Hochberg'
+    'fdr_tsbky': 'FDR 2-stage Benjamini-Krieger-Yekutieli'
+    'fdr_gbs': 'FDR adaptive Gavrilov-Benjamini-Sarkar'
+
+    Args:
+        p_values: uncorrected p-values
+        method: string, one of the methods given above
+    Returns:
+        p_values_corrected: corrected p-values
+    """
+    reject, pvals_corrected, alphacSidak, alphacBonf = multipletests(p_values, method=method)
+    return pvals_corrected
+
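+# Minimal usage sketch (quoted, not executed): Bonferroni ('b') multiplies each
+# p-value by the number of tests (clipped at 1), via statsmodels' multipletests.
+'''
+pvals = np.array([0.01, 0.04, 0.30])
+p_corr = multiple_comparisons(pvals, method='b')   # -> [0.03, 0.12, 0.90]
+'''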
+
+def locmax(x):
+    """Get local maxima of 1D-array
+
+    Args:
+        x: numeric sequence
+    Returns:
+        m: list, 1D-indices of local maxima
+    """
+
+    dx = np.diff(x) # discrete 1st derivative
+    zc = np.diff(np.sign(dx)) # zero-crossings of dx
+    m = 1 + np.where(zc == -2)[0] # indices of local max.
+    return m
+
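+# Worked toy example (quoted, not executed): local maxima are where the sign of
+# the discrete derivative flips from positive to negative.
+'''
+x = np.array([0, 1, 0, 2, 3, 1, 0])
+print(locmax(x))                             # -> [1 4]
+'''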
+
+def findstr(s, L):
+    """Find string in list of strings, returns indices.
+
+    Args:
+        s: query string
+        L: list of strings to search
+    Returns:
+        x: list of indices where s is found in L
+    """
+
+    x = [i for i, l in enumerate(L) if (l==s)]
+    return x
+
+
+def print_matrix(T):
+    """Console-friendly output of the matrix T.
+
+    Args:
+        T: matrix to print
+    """
+
+    for i, j in np.ndindex(T.shape):
+        if (j == 0):
+            print("\t\t|{:.3f} ".format(T[i,j]), end='')
+        elif (j == T.shape[1]-1):
+            print("{:.3f}|".format(T[i,j]))
+        else:
+            print("{:.3f} ".format(T[i,j]), end='')
+
+
+def main():
+    """Test module functions."""
+
+    os.system("clear")
+    print("\n\n\t\t--- EEG microstate analysis ---\n\n")
+    parser = argparse.ArgumentParser(description='EEG microstate analysis')
+    parser.add_argument('-i','--input', help='.edf file name', required=False)
+    parser.add_argument('-f','--filelist', help='.edf file list', required=False)
+    parser.add_argument('-d', '--directory', \
+                        help='process all .edf files in this directory', \
+                        required=False)
+    parser.add_argument('-m', '--markovsurrogates', \
+                        help='number of Markov surrogates', required=False)
+    args = parser.parse_args()
+    argsdict = vars(args)
+    print("\n\tTutorial: press <Enter> to proceed to the next step\n")
+
+    # (0) make file list
+    if argsdict['input']:
+        filenames = [argsdict['input']]
+    elif argsdict['filelist']:
+        with open(argsdict['filelist'], 'r') as f:
+            filenames = [l.strip() for l in f]
+    elif argsdict['directory']:
+        filenames = []
+        for f1 in os.listdir(argsdict['directory']):
+            f2 = os.path.join(argsdict['directory'], f1)
+            if os.path.isfile(f2):
+                if (f2[-4:] == ".edf"):
+                    filenames.append(f2)
+    else:
+        filenames = ["test.edf"]
+
+    print("\tList of file names generated:")
+    print("\t" + str(filenames))
+    input("\n\t\tpress ENTER to continue...")
+    # define lists that hold p-values for multiple comparisons
+    p_Markov0 = []
+    p_Markov1 = []
+    p_Markov2 = []
+    p_geo = []
+    p_stationarity = []
+    p_symmetry = []
+
+    # process files
+    for filename in filenames:
+
+        # (1) load edf file
+        os.system("clear")
+        print("\n\t[1] Load EEG file:")
+        chs, fs, data_raw = read_edf(filename)
+        print("\n\tFile: {}".format(filename))
+        print("\tSampling rate: {:.2f} Hz".format(fs))
+        print("\tData shape: {:d} samples x {:d} channels".\
+        format(data_raw.shape[0], data_raw.shape[1]))
+        print("\tChannels:")
+        for ch in chs:
+            print ("\t{:s}".format(ch))
+        input("\n\n\n\t\tpress ENTER to continue...")
+
+        # (2) band-pass filter 1-35 Hz (6-th order Butterworth)
+        os.system("clear")
+        print("\n\t[2] Apply band-pass filter...")
+        data = bp_filter(data_raw, (1, 35), fs) # (1, 35)
+        #data = np.copy(data_raw)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        #''' (3) visualize data
+        os.system("clear")
+        print("\n\t[3] Visualize data (PCA-1):")
+        pca = PCA(copy=True, n_components=1, whiten=False)
+        pca1 = pca.fit_transform(data)[:,0]
+        del pca
+        plot_data(pca1, fs)
+        #raw_input("\n\n\t\tpress ENTER to continue...")
+        #'''
+
+        # (4) microstate clustering
+        os.system("clear")
+        print("\n\t[4] Clustering EEG topographies at GFP peaks:")
+        n_maps = 4
+        #maps, x, gfp_peaks, gev, cv = kmeans(data, n_maps=n_maps, n_runs=5, \
+        #                                      doplot=True)
+        locs = []
+
+        # choose clustering method
+        mode = ["aahc", "kmeans", "kmedoids", "pca", "ica"][4]
+        maps, x, gfp_peaks, gev = clustering(data, fs, chs, locs, mode, n_maps, \
+                                             doplot=True)
+        #print("\t\tK-means clustering of: n = {:d} EEG maps".format(len(gfp_peaks)))
+        #print("\t\tMicrostates: {:d} maps x {:d} channels".format(maps.shape[0],maps.shape[1]))
+        #print("\t\tGlobal explained variance GEV = {:.2f}".format(gev.sum()))
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (5) basic microstate statistics
+        os.system("clear")
+        print("\n\t[5] Basic microstate statistics:")
+        p_hat = p_empirical(x, n_maps)
+        T_hat = T_empirical(x, n_maps)
+        print("\n\t\tEmpirical symbol distribution (=RTT):\n")
+        for i in range(n_maps):
+            print("\t\tp_{:d} = {:.3f}".format(i, p_hat[i]))
+        print("\n\t\tEmpirical transition matrix:\n")
+        print_matrix(T_hat)
+
+        # PPS (peaks per second)
+        pps = len(gfp_peaks) / (len(x)/fs)
+        print("\n\t\tGFP peaks per sec.: {:.2f}".format(pps))
+
+        # GEV (global explained variance A-D)
+        print("\n\t\tGlobal explained variance (GEV) per map:")
+        print("\t\t" + str(gev))
+        print("\n\t\tTotal GEV: {:.2f}".format(gev.sum()))
+        #raw_input()
+
+        # Cross-correlation between maps
+        print("\n\t\tCross-correlation between microstate maps:")
+        C = np.corrcoef(maps)
+        C_max = np.max(np.abs(C-np.array(np.diag(C)*np.eye(n_maps))))
+        print_matrix(C)
+        print ("\n\t\tCmax: {:.2f}".format(C_max))
+
+        # (6) entropy of the microstate sequence
+        #os.system("clear")
+        print("\n\t[6a] Shannon entropy of the microstate sequence:")
+        h_hat = H_1(x, n_maps)
+        h_max = max_entropy(n_maps)
+        print("\n\t\tEmpirical entropy H = {:.2f} (max. entropy: {:.2f})\n".\
+              format(h_hat,h_max))
+        #raw_input("\n\n\t\tpress ENTER to continue...")
+
+        # (6b) entropy rate of the microstate sequence
+        #os.system("clear")
+        print("\n\t[6b] Entropy rate of the microstate sequence:")
+        kmax = 6
+        h_rate, _ = excess_entropy_rate(x, n_maps, kmax, doplot=True)
+        h_mc = mc_entropy_rate(p_hat, T_hat)
+        print("\n\t\tEmpirical entropy rate h = {:.2f}".format(h_rate))
+        print("\t\tTheoretical MC entropy rate h = {:.2f}".format(h_mc))
+        input("\n\n\t\tpress ENTER to continue...")
+
+        #''' (7) Markov tests
+        os.system("clear")
+        print("\n\t[7] Markovianity tests:\n")
+        alpha = 0.01
+        p0 = testMarkov0(x, n_maps, alpha)
+        p1 = testMarkov1(x, n_maps, alpha)
+        p2 = testMarkov2(x, n_maps, alpha)
+        p_geo_vals = geoTest(x, n_maps, 1000./fs, alpha)
+        p_Markov0.append(p0)
+        p_Markov1.append(p1)
+        p_Markov2.append(p2)
+        #p_geo.append(p_geo_vals)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (8) stationarity test
+        os.system("clear")
+        print("\n\t[8] Stationarity test:")
+        try:
+            L = int(input("\t\tenter block size: "))
+        except ValueError:
+            L = 5000
+        p3 = conditionalHomogeneityTest(x, n_maps, L, alpha)
+        p_stationarity.append(p3)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (9) symmetry test
+        os.system("clear")
+        print("\n\t[9] Symmetry test:")
+        p4 = symmetryTest(x, n_maps, alpha)
+        p_symmetry.append(p4)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (10) Markov surrogate example
+        os.system("clear")
+        print("\n\t[10] Synthesize a surrogate Markov chain:")
+        x_mc = surrogate_mc(p_hat, T_hat, n_maps, len(x))
+        p_surr = p_empirical(x_mc, n_maps)
+        T_surr = T_empirical(x_mc, n_maps)
+        print("\n\t\tSurrogate symbol distribution:\n")
+        for i in range(n_maps):
+            print("\t\tp_{:d} = {:.3f}".format(i, p_surr[i]))
+        print( "\n\t\tSurrogate transition matrix:\n" )
+        print_matrix(T_surr)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (11) Markov tests for surrogate (sanity check)
+        os.system("clear")
+        print("\n\t[11] Markovianity tests for surrogate MC:\n")
+        p0_ = testMarkov0(x_mc, n_maps, alpha)
+        p1_ = testMarkov1(x_mc, n_maps, alpha)
+        p2_ = testMarkov2(x_mc, n_maps, alpha)
+        p_geo_vals_ = geoTest(x_mc, n_maps, 1000./fs, alpha)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (12) Stationarity test for surrogate MC
+        os.system("clear")
+        print("\n\t[12] Stationarity test for surrogate MC:")
+        try:
+            L = int(input("\t\tenter block size: "))
+        except ValueError:
+            L = 5000
+        p3_ = conditionalHomogeneityTest(x_mc, n_maps, L, alpha)
+        input("\n\n\t\tpress ENTER to continue...")
+
+        # (13) Symmetry test for surrogate (sanity check)
+        os.system("clear")
+        print("\n\t[13] Symmetry test for surrogate MC:")
+        p4_ = symmetryTest(x_mc, n_maps, alpha)
+        input("\n\n\t\tpress ENTER to continue...")
+        #'''
+
+        #''' (14) time-lagged mutual information (AIF) empirical vs. Markov model
+        os.system("clear")
+        print("\n\t[14] Mutual information of EEG microstates and surrogate MC:\n")
+        l_max = 100
+        #print(argsdict)
+        if argsdict['markovsurrogates']:
+            n_mc = int(argsdict['markovsurrogates'])
+        else:
+            n_mc = 10
+        aif = mutinf(x, n_maps, l_max)
+        jmax, mx_mi = aif_peak1(aif, fs, doplot=True)
+        print("\n\t\tAIF 1st peak: index {:d} = {:.2f} ms".format(jmax, mx_mi))
+
+        #print("\n\n\t\tComputing n = {:d} Markov surrogates...".format(n_mc))
+        #aif_array = mutinf_CI(p_hat, T_hat, len(x), alpha, n_mc, l_max)
+        #pct = np.percentile(aif_array,[100.*alpha/2.,100.*(1-alpha/2.)],axis=0)
+        print("\n\n")
+
+        '''
+        plt.ioff()
+        fig = plt.figure(1, figsize=(20,5))
+        fig.patch.set_facecolor('white')
+        t = np.arange(l_max)*1000./fs
+        plt.semilogy(t, aif, '-sk', linewidth=3, label='AIF (EEG)')
+        #plt.semilogy(t, pct[0,:], '-k', linewidth=1)
+        #plt.semilogy(t, pct[1,:], '-k', linewidth=1)
+        #plt.fill_between(t, pct[0,:], pct[1,:], facecolor='gray', alpha=0.5, label='AIF (Markov)')
+        plt.xlabel("time lag [ms]", fontsize=24, fontweight="bold")
+        plt.ylabel("AIF [nats]", fontsize=24, fontweight="bold")
+        plt.legend(fontsize=22)
+        ax = plt.gca()
+        ax.set_xticklabels(ax.get_xticks(), fontsize=24, fontweight='normal')
+        ax.set_yticklabels(ax.get_yticks(), fontsize=24, fontweight='normal')
+        plt.tight_layout()
+        plt.show()
+        '''
+
+    if (len(filenames) > 1):
+        os.system("clear")
+        print("\n\t[15] Multiple comparisons correction")
+        mc_method = 'b'
+        if (len(p_Markov0) > 1):
+            p_Markov0_mc = multiple_comparisons(p_Markov0, mc_method)
+            print("\n\tZero-order Markov test: " + str(p_Markov0_mc))
+        if (len(p_Markov1) > 1):
+            p_Markov1_mc = multiple_comparisons(p_Markov1, mc_method)
+            print("\n\tFirst-order Markov test: " + str(p_Markov1_mc))
+        if (len(p_Markov2) > 1):
+            p_Markov2_mc = multiple_comparisons(p_Markov2, mc_method)
+            print("\n\tZero-order Markov test: " + str(p_Markov0_mc))
+        if (len(p_stationarity) > 1):
+            p_stationarity_mc = multiple_comparisons(p_stationarity, mc_method)
+            print("\n\tStationarity test: " + str(p_stationarity_mc))
+        if (len(p_symmetry) > 1):
+            p_symmetry_mc = multiple_comparisons(p_symmetry, mc_method)
+            print("\n\tSymmetry test: " + str(p_symmetry_mc))
+
+    print("\n\n\t\t--- Tutorial completed ---")
+
+
+if __name__ == '__main__':
+    np.set_printoptions(precision=3, suppress=True)
+    main()
-- 
GitLab