    # -*- coding: utf-8 -*-
    """
    Updated Oct 18 2022
    
    @author: Qianliang Li (glia@dtu.dk)
    
    This script contains the code to estimate the following EEG features:
        1. Power Spectral Density
        2. Frontal Theta/Beta Ratio
        3. Asymmetry
        4. Peak Alpha Frequency
        5. 1/f Exponents
        6. Microstates
        7. Long-Range Temporal Correlation (DFA Exponent)
    Source localization and functional connectivity
        8. Imaginary part of Coherence
        9. Weighted Phase Lag Index
        10. (Orthogonalized) Power Envelope Correlations
        11. Granger Causality
    
    It should be run after Preprocessing.py
    
    All features are saved in pandas DataFrame format for the machine learning
    pipeline
    
    Note that the code has not been changed to fit the demonstration dataset,
    so running it as-is may produce errors.
    The code is provided to show how we performed the feature estimation.
    If you are adapting the code, you should check that it fits your dataset,
    e.g. the questionnaire data, sensors and source parcellation etc.
    
    The script was written in Spyder. The outline panel can be used to navigate
    the different parts easier (Default shortcut: Ctrl + Shift + O)
    """
    
    
    # Set working directory
    
    import numpy as np
    
    import os
    
    wkdir = "/home/s200431"
    
    os.chdir(wkdir)
    
    # Load all libraries from the Preamble
    from Preamble import *
    
    # %% Load preprocessed epochs and questionnaire data
    
    load_path = "/home/s200431/PreprocessedData"
    
    
    # Get filenames
    files = []
    for r, d, f in os.walk(load_path):
        for file in f:
            if ".fif" in file:
                files.append(os.path.join(r, file))
    files.sort()
    
    
    # Subject IDs
    Subject_id = [0] * len(files)
    for i in range(len(files)):
    
        temp = files[i].split("/")
        temp = temp[-1].split(".")
        temp = temp[0].split("_")
        Subject_id[i] = int(temp[0])
    
    # Should exclude subject 100326 due to 7 bad channels
    # Exclude 200013 and 200015 due to too many dropped epochs
    # (200001, 200004, 200053 and 200072 were already excluded prior to preprocessing)
    # Exclude 302215, 302224, 302227, 302233, 302264, 302268, 302275 due to too many dropped epochs
    # 13 subjects excluded in total + 4 I did not receive because they were marked as bad from Helse
    bad_subjects = [100326, 200013, 200015, 302224, 302227, 302233, 302264, 302268, 302275]
    good_subject_idx = [not i in bad_subjects for i in Subject_id]
    
    Subject_id = list(np.array(Subject_id)[good_subject_idx])
    files = list(np.array(files)[good_subject_idx])
    
    
    n_subjects = len(Subject_id)
    
    
    # Load ISAF
    n_ISAF = 51-1
    ISAF7_final_epochs = [0]*n_ISAF
    for n in range(len(ISAF7_final_epochs)):
        ISAF7_final_epochs[n] = mne.read_epochs(fname = os.path.join(files[n]),
                        verbose=0)
    
    # Load HELSE
    n_HELSE = 70-2
    HELSE_final_epochs = [0]*n_HELSE
    for n in range(len(HELSE_final_epochs)):
        HELSE_final_epochs[n] = mne.read_epochs(fname = os.path.join(files[n+n_ISAF]),
                        verbose=0)
    
        # Rename channels to match ISAF (Using 10-20 MCN)
        mne.rename_channels(HELSE_final_epochs[n].info, {"T3":"T7",
                                                         "T4":"T8",
                                                         "T5":"P7",
                                                         "T6":"P8"})
    # Warning about chronological order is due to interleaved EO->EC->EO->EC being concatenated as 5xEC->5xEO
    
    # Load Baseline
    n_Base = 91-6
    Base_final_epochs = [0]*n_Base
    for n in range(len(Base_final_epochs)):
        Base_final_epochs[n] = mne.read_epochs(fname = os.path.join(files[n+n_ISAF+n_HELSE]),
                        verbose=0)
    
    # I will use the union of the channels in both datasets (except mastoids)
    # This means I add empty channels and interpolate where a channel is missing
    # The Helsefond montage has wrong calibration: the head sizes are too big,
    # so I need to re-calibrate by comparing with ISAF7
    # Notice I already used the final dig montage for the Baseline data
    
    # Get channel names
    ISAF7_chs = ISAF7_final_epochs[0].copy().info["ch_names"]
    Helse_chs = HELSE_final_epochs[0].copy().info["ch_names"]
    Base_chs = Base_final_epochs[0].copy().info["ch_names"]
    
    # Get intersection of channels
    intersect_ch = list(set(Helse_chs) & set(ISAF7_chs))
    intersect_ch_ratio = [0]*len(intersect_ch)
    for i in range(len(intersect_ch)):
        # Get channel name
        ch_name0 = intersect_ch[i]
        # Get index and electrode location
        ISAF7_ch_idx = np.where(np.array(ISAF7_chs) == ch_name0)[0]
        ISAF7_ch_loc = ISAF7_final_epochs[0].info["chs"][int(ISAF7_ch_idx)]["loc"]
        Helse_ch_idx = np.where(np.array(Helse_chs) == ch_name0)[0]
        Helse_ch_loc = HELSE_final_epochs[0].info["chs"][int(Helse_ch_idx)]["loc"]
        # Calculate ratio
        intersect_ch_ratio[i] = [ch_name0,ISAF7_ch_loc[0:3]/Helse_ch_loc[0:3]]
    # Most of the ratios are either around 0.095, or 0/Inf when dividing by 0
    # We also see that Cz for ISAF is defined as (0,0,0.095m), while for Helse Cz is (0,0,1m)
    Helse_to_ISAF7_cal = 0.095
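
    # Illustrative sanity check, assuming "Cz" is among the intersecting channels:
    # after scaling by Helse_to_ISAF7_cal, homologous locations should roughly agree
    # Helse_Cz = HELSE_final_epochs[0].info["chs"][Helse_chs.index("Cz")]["loc"][0:3]
    # ISAF7_Cz = ISAF7_final_epochs[0].info["chs"][ISAF7_chs.index("Cz")]["loc"][0:3]
    # assert np.allclose(Helse_Cz*Helse_to_ISAF7_cal, ISAF7_Cz, atol=1e-3)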
    
    # Now we are ready to make a combined montage (info["dig"])
    # Get list of channels to add
    ISAF7_add_ch_list = list(set(Helse_chs)-set(ISAF7_chs))
    Helse_add_ch_list = list(set(ISAF7_chs)-set(Helse_chs))
    combined_ch_list = Helse_chs + ISAF7_chs
    # Remove duplicates while maintaining A-P order
    duplicate = set()
    final_ch_list = [x for x in combined_ch_list if not (x in duplicate or duplicate.add(x))]
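    # (set.add returns None, so the filter expression is falsy the first time a
    # channel is seen and truthy afterwards, keeping only first occurrences in order)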
    # Move AFz and POz to correct position
    final_ch_list.insert(3,final_ch_list.pop(-2)) # from second last to 3
    final_ch_list.insert(-5,final_ch_list.pop(-1)) # from last to seventh from last
    
    # The order of DigPoints in info is based on sorted ch names
    Helse_dig = HELSE_final_epochs[0].copy().info["dig"]
    Helse_chs_sorted = Helse_chs.copy()
    Helse_chs_sorted.sort()
    ISAF7_dig = ISAF7_final_epochs[0].copy().info["dig"]
    ISAF7_chs_sorted = ISAF7_chs.copy()
    ISAF7_chs_sorted.sort()
    
    # Make one list with DigPoints from Helse + unique channels from ISAF7
    ch_idx = [i for i, item in enumerate(ISAF7_chs_sorted) if item in set(Helse_add_ch_list)] # indices of unique channels
    final_ch_list_sorted = final_ch_list.copy()
    final_ch_list_sorted.sort()
    dig_insert_idx = [i for i, item in enumerate(final_ch_list_sorted) if item in set(Helse_add_ch_list)] # find where ISAF7 channels should be inserted
    
    # Prepare combined dig montage
    final_dig = Helse_dig.copy()
    # Calibrate Helse digpoints
    for i in range(len(final_dig)):
        final_dig[i]["r"] = final_dig[i]["r"]*Helse_to_ISAF7_cal
    # Insert ISAF7 digpoints
    for i in range(len(ch_idx)):
        final_dig.insert(dig_insert_idx[i],ISAF7_dig[ch_idx[i]])
    
    # Remove mastoids
    Mastoid_ch = ["M1", "M2"]
    M_idx = [i for i, item in enumerate(final_ch_list_sorted) if item in set(Mastoid_ch)] # find mastoids ch
    M_idx2 = [i for i, item in enumerate(final_ch_list) if item in set(Mastoid_ch)] # find mastoids ch
    M_idx3 = [i for i, item in enumerate(Helse_add_ch_list) if item in set(Mastoid_ch)] # find mastoids ch
    for i in reversed(range(len(M_idx))):
        del final_ch_list_sorted[M_idx[i]]
        del final_dig[M_idx[i]]
        # Mastoids are placed at the back of final_ch_list and Helse_add_ch_list and are also removed
        del final_ch_list[M_idx2[i]]
        del Helse_add_ch_list[M_idx3[i]]
    
    # T7, T8, Pz, P8 and P7 are placed wrongly (probably due to renaming)
    # This is fixed manually
    final_dig.insert(-7,final_dig.pop(-4)) # swap between P8 and T7
    final_dig.insert(-6,final_dig.pop(-3)) # swap between T7 and T8
    final_dig.insert(-4,final_dig.pop(-4)) # swap between Pz and T7
    final_dig.insert(-5,final_dig.pop(-5)) # swap between Pz and POz
    
    # Update EEG identity number
    for i in range(len(final_dig)):
        final_dig[i]["ident"] = i+1
    
    # Make final digital montage
    final_digmon = mne.channels.DigMontage(dig=final_dig, ch_names=final_ch_list_sorted)
    # final_digmon.plot() # visually inspect topographical positions
    # final_digmon.plot(kind="3d") # visually inspect 3D positions
    # final_digmon.save("final_digmon.fif") # Save digital montage
    with open("final_digmon_ch_names.pkl", "wb") as filehandle:
        # The data is stored as a binary data stream
        pickle.dump(final_digmon.ch_names, filehandle)
    
    # Remove mastoids from ISAF, add channels from Helse and interpolate
    for n in range(len(ISAF7_final_epochs)):
        # Remove mastoid channels
        ISAF7_final_epochs[n].drop_channels(Mastoid_ch)
        # Add empty channels to interpolate - notice that the locations are set to 0
        mne.add_reference_channels(ISAF7_final_epochs[n],ISAF7_add_ch_list,copy=False)
        # Fix channel info (both after removal of mastoids and newly added chs)
        # The loc arrays are linked (shared) between all reference channels, so they are copied to break the link
        for c in range(ISAF7_final_epochs[n].info["nchan"]):
            ISAF7_final_epochs[n].info["chs"][c]["loc"] = ISAF7_final_epochs[n].info["chs"][c]["loc"].copy()
            ISAF7_final_epochs[n].info["chs"][c]["scanno"] = c+1
            ISAF7_final_epochs[n].info["chs"][c]["logno"] = c+1
        # Set new combined montage
        ISAF7_final_epochs[n].set_montage(final_digmon)
        # Set newly added channels as "bad" and interpolate
        ISAF7_final_epochs[n].info["bads"] = ISAF7_add_ch_list
        ISAF7_final_epochs[n].interpolate_bads(reset_bads=True)
        # Fix "picks" in order to reorder channels
        ISAF7_final_epochs[n].picks = np.array(range(ISAF7_final_epochs[n].info["nchan"]))
        # Reorder channel
        ISAF7_final_epochs[n].reorder_channels(final_ch_list)
    
    # Add channels from ISAF to Helse and interpolate
    for n in range(len(HELSE_final_epochs)):
        # Add empty channels to interpolate
        mne.add_reference_channels(HELSE_final_epochs[n],Helse_add_ch_list,copy=False)
        # Fix channel info (both after removal of mastoids and newly added chs)
        # The loc arrays are linked (shared) between all reference channels, so they are copied to break the link
        for c in range(HELSE_final_epochs[n].info["nchan"]):
            HELSE_final_epochs[n].info["chs"][c]["loc"] = HELSE_final_epochs[n].info["chs"][c]["loc"].copy()
            HELSE_final_epochs[n].info["chs"][c]["scanno"] = c+1
            HELSE_final_epochs[n].info["chs"][c]["logno"] = c+1
        # Set new combined montage
        HELSE_final_epochs[n].set_montage(final_digmon)
        # Set newly added channels as "bad" and interpolate
        HELSE_final_epochs[n].info["bads"] = Helse_add_ch_list
        HELSE_final_epochs[n].interpolate_bads(reset_bads=True)
        # Fix "picks" in order to reorder channels
        HELSE_final_epochs[n].picks = np.array(range(HELSE_final_epochs[n].info["nchan"]))
        # Reorder channels
        HELSE_final_epochs[n].reorder_channels(final_ch_list)
    
    # Add missing channels to Baseline and interpolate
    Base_add_ch_list = list(set(final_ch_list)-set(Base_chs))
    for n in range(len(Base_final_epochs)):
        # Add empty channels to interpolate
        mne.add_reference_channels(Base_final_epochs[n],Base_add_ch_list,copy=False)
        # Fix channel info (both after removal of mastoids and newly added chs)
        # The loc arrays are linked (shared) between all reference channels, so they are copied to break the link
        for c in range(Base_final_epochs[n].info["nchan"]):
            Base_final_epochs[n].info["chs"][c]["loc"] = Base_final_epochs[n].info["chs"][c]["loc"].copy()
            Base_final_epochs[n].info["chs"][c]["scanno"] = c+1
            Base_final_epochs[n].info["chs"][c]["logno"] = c+1
        # Set new combined montage
        Base_final_epochs[n].set_montage(final_digmon)
        # Set newly added channels as "bad" and interpolate
        Base_final_epochs[n].info["bads"] = Base_add_ch_list
        Base_final_epochs[n].interpolate_bads(reset_bads=True)
        # Fix "picks" in order to reorder channels
        Base_final_epochs[n].picks = np.array(range(Base_final_epochs[n].info["nchan"]))
        # Reorder channels
        Base_final_epochs[n].reorder_channels(final_ch_list)    
    
    # Combine all three datasets in one list
    final_epochs = ISAF7_final_epochs+HELSE_final_epochs+Base_final_epochs
    # Check number of epochs
    file_lengths = [0]*len(final_epochs)
    for i in range(len(final_epochs)):
        file_lengths[i] = len(final_epochs[i])
    # sns.distplot(file_lengths) # visualize
    np.min(file_lengths)/150*100 # lowest remaining percentage of the original 150 epochs; subjects with more than 20% dropped epochs were excluded
    n_subjects = len(final_epochs)
    
    # Re-sample files to 200Hz (Data was already lowpass filtered at 100, so above 200 is oversampling)
    for i in range(len(final_epochs)):
        final_epochs[i].resample(sfreq=200, verbose=2)
    
    # # Save final epochs data
    # save_path = "/home/glia/Analysis/Final_epochs_data"
    # for n in range(len(final_epochs)):
    #     # Mark the internal times array as read-only before saving
    #     final_epochs[n]._times_readonly.flags["WRITEABLE"] = False
    #     # Save file
    #     final_epochs[n].save(fname = os.path.join(save_path,str("{}_final_epoch".format(Subject_id[n]) + "-epo.fif")),
    #                     overwrite=True, verbose=0)
    
    
    # Load dropped epochs - used for gap idx in microstates
    
    ISAF7_dropped_epochs_df = pd.read_pickle("ISAF7_dropped_epochs.pkl")
    Helse_dropped_epochs_df = pd.read_pickle("HELSE_dropped_epochs.pkl")
    Base_dropped_epochs_df = pd.read_pickle("Base_dropped_epochs.pkl")
    
    
    
    Drop_epochs_df = pd.concat([ISAF7_dropped_epochs_df,Helse_dropped_epochs_df,
                                Base_dropped_epochs_df]).reset_index(drop=True)
    
    good_subject_df_idx = [not i in bad_subjects for i in Drop_epochs_df["Subject_ID"]]
    Drop_epochs_df = Drop_epochs_df.loc[good_subject_df_idx,:]
    Drop_epochs_df = Drop_epochs_df.sort_values(by=["Subject_ID"]).reset_index(drop=True)
    
    ### Load questionnaire data
    
    # ISAF
    qdf_ISAF7 = pd.read_csv("/data/raw/FOR_DTU/Questionnaires_for_DTU.csv",
                       na_values=' ')
    
    # Rename Subject_ID column
    qdf_ISAF7.rename({"ID_number": "Subject_ID"}, axis=1, inplace=True)
    # Sort Subject_id to match Subject_id for files
    qdf_ISAF7 = qdf_ISAF7.sort_values(by=["Subject_ID"], ignore_index=True)
    # Get column idx for PCL_t7 columns
    PCL_idx = qdf_ISAF7.columns.str.contains("PCL") & np.invert(qdf_ISAF7.columns.str.contains("PCL_"))
    # Keep subject id
    PCL_idx[qdf_ISAF7.columns=="Subject_ID"] = True
    
    # Make a final df and exclude dropped subjects
    final_qdf0 = qdf_ISAF7.loc[qdf_ISAF7["Subject_ID"].isin(Subject_id),PCL_idx].reset_index(drop=True)
    # Make column that is sum of all PCL
    final_qdf0.insert(len(final_qdf0.columns),"PCL_total",np.sum(final_qdf0.iloc[:,1:],axis=1))
    
    # Helse
    qdf_helse = pd.read_csv("/data/may2020/Questionnaires/HelsfondenQuestData_nytLbn.csv",
                      sep=",", na_values=' ')
    # Rename subject ID column
    qdf_helse.rename(columns={"Nyt_lbn":"Subject_ID"}, inplace=True)
    Helse_ID_modifier = 200000
    # Add 200000 to id
    qdf_helse["Subject_ID"] += Helse_ID_modifier
    # Sort Subject_id to match Subject_id for files
    qdf_helse = qdf_helse.sort_values(by=["Subject_ID"], ignore_index=True)
    # Get column idx for PCL columns (don't use summarized columns with _)
    PCL_idx = qdf_helse.columns.str.contains("PCL") & np.invert(qdf_helse.columns.str.contains("PCL_"))
    # Keep subject id
    PCL_idx[qdf_helse.columns=="Subject_ID"] = True
    
    # Make a final df and exclude dropped subjects
    final_qdf1 = qdf_helse.loc[qdf_helse["Subject_ID"].isin(Subject_id),PCL_idx].reset_index(drop=True)
    # Make column that is sum of all PCL
    final_qdf1.insert(len(final_qdf1.columns),"PCL_total",np.sum(final_qdf1.iloc[:,1:],axis=1))
    
    # Baseline
    # antal_børm renamed to antal_boern
    qdf_base = pd.read_csv("/data/sep2020/BaselineForLi.csv", sep=",", na_values=' ')
    
    # Rename subject ID column
    qdf_base.rename(columns={"LbnRand":"Subject_ID"}, inplace=True)
    Base_ID_modifier = 300000
    # Add 300000 to id
    qdf_base["Subject_ID"] += Base_ID_modifier
    # Sort Subject_id to match Subject_id for files
    qdf_base = qdf_base.sort_values(by=["Subject_ID"], ignore_index=True)
    # Get column idx for PCL columns (don't use summarized columns with _)
    PCL_idx = qdf_base.columns.str.contains("PCL") & np.invert(qdf_base.columns.str.contains("PCL_"))
    # Keep subject id
    PCL_idx[qdf_base.columns=="Subject_ID"] = True
    
    # Make a final df and exclude dropped subjects
    final_qdf2 = qdf_base.loc[qdf_base["Subject_ID"].isin(Subject_id),PCL_idx].reset_index(drop=True)
    # Make column that is sum of all PCL
    final_qdf2.insert(len(final_qdf2.columns),"PCL_total",np.sum(final_qdf2.iloc[:,1:],axis=1))
    
    # Find NaN
    nan_idx = np.where(final_qdf2.isna()==True)
    final_qdf2.iloc[nan_idx[0],np.concatenate([np.array([0]),nan_idx[1]])] # subject 2252 has NaN for PCL items 3 and 12
    # Interpolate with mean of column
    final_qdf2 = final_qdf2.fillna(final_qdf2.mean())
    
    # Combine the 3 datasets
    final_qdf0.columns = final_qdf1.columns # fix colnames with t7
    final_qdf = pd.concat([final_qdf0,final_qdf1,final_qdf2], ignore_index=True)
    
    
    # Define folder for saving features
    Feature_savepath = "./Features/"
    Stat_savepath = "./Statistics/"
    Model_savepath = "./Model/"
    
    
    # Ensure all columns are integers
    final_qdf = final_qdf.astype("int")
    
    final_qdf.to_pickle(os.path.join(Feature_savepath,"final_qdf.pkl"))
    
    
    
    # Define cases as >= 44 total PCL
    # Type: numpy array with subject id
    
    cases = np.array(final_qdf["Subject_ID"][final_qdf["PCL_total"]>=44])
    
    n_groups = 2
    Groups = ["CTRL", "PTSD"]
    
    
    # Check percentage of cases in each dataset
    len(np.where((cases>100000)&(cases<200000))[0])/n_ISAF # around 32%
    len(np.where((cases>200000)&(cases<300000))[0])/n_HELSE # around 51%
    len(np.where((cases>300000)&(cases<400000))[0])/n_Base # around 66%
    # There is clearly class imbalance between studies!
    
    ### Get depression scores as binary above threshold
    # BDI >= 20 is moderate depression
    # DASS-42 >= 14 is moderate depression for depression subscale
    dep_cases = np.concatenate([np.array(qdf_ISAF7["Subject_ID"][qdf_ISAF7["BDI_t7"] >= 20]),
                               np.array(qdf_helse["Subject_ID"][qdf_helse["DASS_D_t0"] >= 14]),
                               np.array(qdf_base["Subject_ID"][qdf_base["DASS_D_t0"] >= 14])])
    dep_cases.sort()
    dep_cases = dep_cases[np.isin(dep_cases,Subject_id)] # only keep subjects that we received and that were not excluded
    
    # Check percentage of dep cases in each dataset
    len(np.where((dep_cases>100000)&(dep_cases<200000))[0])/n_ISAF # around 34%
    len(np.where((dep_cases>200000)&(dep_cases<300000))[0])/n_HELSE # around 53%
    len(np.where((dep_cases>300000)&(dep_cases<400000))[0])/n_Base # around 72%
    
    # Normalize to the maximum depression score to combine scores from the two scales
    # Relative scores seem to be consistent between BDI-II, DASS-42 and the clinical labels
    max_BDI = 63
    max_DASS = 42
    # Get normalized depression scores for each dataset
    ISAF7_dep_score = qdf_ISAF7["BDI_t7"][qdf_ISAF7["Subject_ID"].isin(Subject_id)]/max_BDI
    Helse_dep_score = qdf_helse["DASS_D_t0"][qdf_helse["Subject_ID"].isin(Subject_id)]/max_DASS
    Base_dep_score = qdf_base["DASS_D_t0"][qdf_base["Subject_ID"].isin(Subject_id)]/max_DASS
    Norm_dep_score = np.concatenate([ISAF7_dep_score.to_numpy(),Helse_dep_score.to_numpy(),Base_dep_score.to_numpy()])
    
    # Check that subject IDs match when using concat
    test_d1 = qdf_ISAF7["Subject_ID"][qdf_ISAF7["Subject_ID"].isin(Subject_id)]
    test_d2 = qdf_helse["Subject_ID"][qdf_helse["Subject_ID"].isin(Subject_id)]
    test_d3 = qdf_base["Subject_ID"][qdf_base["Subject_ID"].isin(Subject_id)]
    test_d4 = np.concatenate([test_d1.to_numpy(),test_d2.to_numpy(),test_d3.to_numpy()])
    assert all(np.equal(Subject_id,test_d4))
    
    
    
    # %% Power spectrum features
    Freq_Bands = {"delta": [1.25, 4.0],
                  "theta": [4.0, 8.0],
                  "alpha": [8.0, 13.0],
                  "beta": [13.0, 30.0],
                  "gamma": [30.0, 49.0]}
    ch_names = final_epochs[0].info["ch_names"]
    n_channels = final_epochs[0].info["nchan"]
    
    # Pre-allocate memory
    power_bands = [0]*len(final_epochs)
    
    def power_band_estimation(n):
        # Get index for eyes open and eyes closed
        EC_index = final_epochs[n].events[:,2] == 1
        EO_index = final_epochs[n].events[:,2] == 2
        
        # Calculate the power spectral density
        psds, freqs = psd_multitaper(final_epochs[n], fmin = 1, fmax = 50) # output (epochs, channels, freqs)
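        # Note: psd_multitaper is assumed to come from the Preamble import
        # (mne.time_frequency); in newer MNE (>= 1.2) the equivalent is
        # final_epochs[n].compute_psd(method="multitaper", fmin=1, fmax=50)
        # followed by .get_data(return_freqs=True)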
        
        temp_power_band = []
        
        for fmin, fmax in Freq_Bands.values():
            # Calculate the power in each frequency band
            psds_band = psds[:, :, (freqs >= fmin) & (freqs < fmax)].sum(axis=-1)
            # Calculate the mean for each eye status
            psds_band_eye = np.array([psds_band[EC_index,:].mean(axis=0),
                                          psds_band[EO_index,:].mean(axis=0)])
            # Append for each freq band
            temp_power_band.append(psds_band_eye)
            # Output: List with the 5 bands, and each element is a 2D array with eye status as 1st dimension and channels as 2nd dimension
        
        # The list is reshaped and absolute and relative power are calculated
        abs_power_band = np.reshape(temp_power_band, (5, 2, n_channels))
        abs_power_band = 10.*np.log10(abs_power_band) # Convert to decibel scale
        
        rel_power_band = np.reshape(temp_power_band, (5, 2, n_channels))
        rel_power_band = rel_power_band/np.sum(rel_power_band, axis=0, keepdims=True)
        # each eye condition and channel have been normalized to power in all 5 frequencies for that given eye condition and channel
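        # Illustrative sanity check: relative power sums to 1 across the 5 bands
        # assert np.allclose(np.sum(rel_power_band, axis=0), 1.0)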
        
        # Make one list in 1 dimension
        abs_power_values = np.concatenate(np.concatenate(abs_power_band, axis=0), axis=0)
        rel_power_values = np.concatenate(np.concatenate(rel_power_band, axis=0), axis=0)
        ## Output: First the channels, then the eye status and finally the frequency bands are concatenated
        ## E.g. element 26 is 3rd channel, eyes open, first frequency band
        #assert abs_power_values[26] == abs_power_band[0,1,2]
        #assert abs_power_values[47] == abs_power_band[0,1,23] # +21 channels to last
        #assert abs_power_values[50] == abs_power_band[1,0,2] # once all channels have been changed then the freq is changed and eye status
        
        # Get result
        res = np.concatenate([abs_power_values,rel_power_values],axis=0)
        return n, res
    
    
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for n, result in executor.map(power_band_estimation, range(len(final_epochs))): # Function and arguments
            power_bands[n] = result
    
    
    # Serial fallback (equivalent to the parallel execution above, but slower):
    # for i in range(len(power_bands)):
    #     n, results = power_band_estimation(i)
    #     power_bands[i] = results
    
    
    # Combine all data into one dataframe
    # First the columns are prepared
    n_subjects = len(Subject_id)
    
    # The group status (PTSD/CTRL) is made using the information about the cases
    Group_status = np.array(["CTRL"]*n_subjects)
    Group_status[np.array([i in cases for i in Subject_id])] = "PTSD"
    
    # A variable that codes the channels based on A/P localization is also made
    Frontal_chs = ["Fp1", "Fpz", "Fp2", "AFz", "Fz", "F3", "F4", "F7", "F8"]
    Central_chs = ["Cz", "C3", "C4", "T7", "T8", "FT7", "FC3", "FCz", "FC4", "FT8", "TP7", "CP3", "CPz", "CP4", "TP8"]
    Posterior_chs = ["Pz", "P3", "P4", "P7", "P8", "POz", "O1", "O2", "Oz"]
    
    Parietal_chs = ["TP7", "CP3", "CPz", "CP4", "TP8", "P7", "P3", "Pz", "P4", "P8", "POz"]
    
    
    
    Brain_region_labels = ["Frontal","Central","Posterior","Parietal"]
    
    Brain_region = np.array(ch_names, dtype = "<U9")
    Brain_region[np.array([i in Frontal_chs for i in ch_names])] = Brain_region_labels[0]
    Brain_region[np.array([i in Central_chs for i in ch_names])] = Brain_region_labels[1]
    Brain_region[np.array([i in Posterior_chs for i in ch_names])] = Brain_region_labels[2]
    
    Brain_region[np.array([i in Parietal_chs for i in ch_names])] = Brain_region_labels[3]
    
    
    # A variable that codes the channels based on M/L localization
    Left_chs = ["Fp1", "F3", "F7", "FC3", "FT7", "C3", "T7", "CP3", "TP7", "P3", "P7", "O1"]
    Right_chs = ["Fp2", "F4", "F8", "FC4", "FT8", "C4", "T8", "CP4", "TP8", "P4", "P8", "O2"]
    Mid_chs = ["Fpz", "AFz", "Fz", "FCz", "Cz", "CPz", "Pz", "POz", "Oz"]
    
    Brain_side = np.array(ch_names, dtype = "<U5")
    Brain_side[np.array([i in Left_chs for i in ch_names])] = "Left"
    Brain_side[np.array([i in Right_chs for i in ch_names])] = "Right"
    Brain_side[np.array([i in Mid_chs for i in ch_names])] = "Mid"
    
    # Eye status is added
    eye_status = list(final_epochs[0].event_id.keys())
    n_eye_status = len(eye_status)
    
    # Frequency bands
    freq_bands_name = list(Freq_Bands.keys())
    n_freq_bands = len(freq_bands_name)
    
    # Quantification (Abs/Rel)
    quant_status = ["Absolute", "Relative"]
    n_quant_status = len(quant_status)
    
    # The dataframe is made by combining all the unlisted pds values
    # Each row corresponds to a different channel. It is reset after all channel names have been used
    # Each eye status element is repeated n_channel times, before it is reset
    # Each freq_band element is repeated n_channel * n_eye_status times, before it is reset
    # Each quantification status element is repeated n_channel * n_eye_status * n_freq_bands times, before it is reset
    power_df = pd.DataFrame(data = {"Subject_ID": [ele for ele in Subject_id for i in range(n_eye_status*n_quant_status*n_freq_bands*n_channels)],
                                    "Group_status": [ele for ele in Group_status for i in range(n_eye_status*n_quant_status*n_freq_bands*n_channels)],
                                    "Channel": ch_names*(n_eye_status*n_quant_status*n_freq_bands*n_subjects),
                                    "Brain_region": list(Brain_region)*(n_eye_status*n_quant_status*n_freq_bands*n_subjects),
                                    "Brain_side": list(Brain_side)*(n_eye_status*n_quant_status*n_freq_bands*n_subjects),
                                    "Eye_status": [ele for ele in eye_status for i in range(n_channels)]*n_quant_status*n_freq_bands*n_subjects,
                                    "Freq_band": [ele for ele in freq_bands_name for i in range(n_channels*n_eye_status)]*n_quant_status*n_subjects,
                                    "Quant_status": [ele for ele in quant_status for i in range(n_channels*n_eye_status*n_freq_bands)]*n_subjects,
                                    "PSD": list(np.concatenate(power_bands, axis=0))
                                    })
    # Absolute power is in decibels (10*log10(power))
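
    # Spot-check (illustrative) that the long format matches the flattened arrays:
    # row 0 is the first subject's absolute delta power for the first eye status and channel,
    # assuming the event_id ordering used in power_band_estimation (EC before EO)
    # assert power_df["PSD"][0] == power_bands[0][0]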
    
    # Fix Freq_band categorical order
    power_df["Freq_band"] = power_df["Freq_band"].astype("category").\
                cat.reorder_categories(list(Freq_Bands.keys()), ordered=True)
    
    # Fix Brain_region categorical order
    power_df["Brain_region"] = power_df["Brain_region"].astype("category").\
                cat.reorder_categories(Brain_region_labels, ordered=True)
    
    # Save the dataframe
    
    # power_df.to_pickle(os.path.join(Feature_savepath,"Power_df.pkl"))
    
    
    
    # %% Theta-beta ratio
    # Frontal theta/beta ratio has been implicated in cognitive control of attention
    power_df = pd.read_pickle(os.path.join(Feature_savepath,"Power_df.pkl"))
    
    eye_status = list(final_epochs[0].event_id)
    n_eye_status = len(eye_status)
    
    # Subset frontal absolute power
    power_df_sub1 = power_df[(power_df["Quant_status"] == "Absolute")&
                             (power_df["Brain_region"] == "Frontal")]
    
    # Subset frontal, midline absolute power
    power_df_sub2 = power_df[(power_df["Quant_status"] == "Absolute")&
                                (power_df["Brain_region"] == "Frontal")&
                                (power_df["Brain_side"] == "Mid")]
    
    # Subset posterior absolute power
    power_df_sub3 = power_df[(power_df["Quant_status"] == "Absolute")&
                                (power_df["Brain_region"] == "Posterior")]
    
    
    
    # Calculate average frontal theta power
    
    frontal_theta_mean_subject = power_df_sub1[power_df_sub1["Freq_band"] == "theta"].\
        groupby(["Subject_ID","Group_status","Eye_status"]).mean().reset_index()
    
    
    # Calculate average frontal beta power
    
    frontal_beta_mean_subject = power_df_sub1[power_df_sub1["Freq_band"] == "beta"].\
        groupby(["Subject_ID","Group_status","Eye_status"]).mean().reset_index()
    
    # Extract all values (copy to avoid SettingWithCopyWarning when converting from dB below)
    frontal_beta_subject_values = power_df_sub1[power_df_sub1["Freq_band"] == "beta"].copy()
    
    
    
    # Calculate average frontal, midline power theta
    frontal_midline_theta_mean_subject = power_df_sub2[power_df_sub2["Freq_band"] == "theta"].\
        groupby(["Subject_ID","Group_status","Eye_status"]).mean().reset_index()
    
    # Extract all values (copy to avoid SettingWithCopyWarning when converting from dB below)
    frontal_midline_theta_subject_values = power_df_sub2[power_df_sub2["Freq_band"] == "theta"].copy()
    
    # Calculate average parietal alpha power
    parietal_alpha_mean_subject = power_df_sub3[power_df_sub3["Freq_band"] == "alpha"].\
        groupby(["Subject_ID","Group_status","Eye_status"]).mean().reset_index()
    # Extract all values (copy to avoid SettingWithCopyWarning when converting from dB below)
    parietal_alpha_subject_values = power_df_sub3[power_df_sub3["Freq_band"] == "alpha"].copy()
    
    
    # Convert from dB to raw power
    frontal_theta_mean_subject["PSD"] = 10**(frontal_theta_mean_subject["PSD"]/10)
    frontal_beta_mean_subject["PSD"] = 10**(frontal_beta_mean_subject["PSD"]/10)
    
    frontal_midline_theta_mean_subject["PSD"] = 10**(frontal_midline_theta_mean_subject["PSD"]/10)
    
    frontal_beta_subject_values["PSD"] = 10**(frontal_beta_subject_values["PSD"]/10)
    frontal_midline_theta_subject_values["PSD"] = 10**(frontal_midline_theta_subject_values["PSD"]/10)
    
    parietal_alpha_mean_subject["PSD"] = 10**(parietal_alpha_mean_subject["PSD"]/10)
    parietal_alpha_subject_values["PSD"] = 10**(parietal_alpha_subject_values["PSD"]/10)
    
    # Save values
    
    frontal_beta_mean_subject.to_pickle(os.path.join(Feature_savepath,"fBMS_df.pkl"))
    frontal_midline_theta_mean_subject.to_pickle(os.path.join(Feature_savepath,"fMTMS_df.pkl"))
    frontal_beta_subject_values.to_pickle(os.path.join(Feature_savepath,"fBSV_df.pkl"))
    frontal_midline_theta_subject_values.to_pickle(os.path.join(Feature_savepath,"fMTSV_df.pkl"))
    
    parietal_alpha_mean_subject.to_pickle(os.path.join(Feature_savepath,"pAMS_df.pkl"))
    parietal_alpha_subject_values.to_pickle(os.path.join(Feature_savepath,"pASV_df.pkl"))
    
    
    # Calculate mean for each group and take ratio for whole group
    # To confirm trend observed in PSD plots
    mean_group_f_theta = frontal_theta_mean_subject.iloc[:,1:].groupby(["Group_status","Eye_status"]).mean()
    mean_group_f_beta = frontal_beta_mean_subject.iloc[:,1:].groupby(["Group_status","Eye_status"]).mean()
    mean_group_f_theta_beta_ratio = mean_group_f_theta/mean_group_f_beta
    
    # Calculate ratio for each subject
    frontal_theta_beta_ratio = frontal_theta_mean_subject.copy()
    frontal_theta_beta_ratio["PSD"] = frontal_theta_mean_subject["PSD"]/frontal_beta_mean_subject["PSD"]
    
    # Take the natural log of the ratio
    frontal_theta_beta_ratio["PSD"] = np.log(frontal_theta_beta_ratio["PSD"])
    
    # Rename and save feature
    frontal_theta_beta_ratio.rename(columns={"PSD":"TBR"},inplace=True)
    # Add dummy variable for re-using plot code
    dummy_variable = ["Frontal Theta Beta Ratio"]*frontal_theta_beta_ratio.shape[0]
    frontal_theta_beta_ratio.insert(3, "Measurement", dummy_variable )
    
    
    # frontal_theta_beta_ratio.to_pickle(os.path.join(Feature_savepath,"fTBR_df.pkl"))
    
    
    
    # %% Frequency bands asymmetry
    # Defined as ln(right) - ln(left)
    # Thus we should only work with the absolute values and undo the dB transformation
    # Here I avg over all areas. I.e. mean((ln(F4)-ln(F3),(ln(F8)-ln(F7),(ln(Fp2)-ln(Fp1))) for frontal
    ROI = ["Frontal", "Central", "Posterior"]
    qq = "Absolute" # only calculate asymmetry for absolute
    # Pre-allocate memory
    asymmetry = np.zeros(shape=(len(np.unique(power_df["Subject_ID"])),
                                 len(np.unique(power_df["Eye_status"])),
                                 len(list(Freq_Bands.keys())),
                                 len(ROI)))
    
    def calculate_asymmetry(i):
        ii = np.unique(power_df["Subject_ID"])[i]
        temp_asymmetry = np.zeros(shape=(len(np.unique(power_df["Eye_status"])),
                                 len(list(Freq_Bands.keys())),
                                 len(ROI)))
        for e in range(len(np.unique(power_df["Eye_status"]))):
            ee = np.unique(power_df["Eye_status"])[e]
            for f in range(len(list(Freq_Bands.keys()))):
                ff = list(Freq_Bands.keys())[f]
                
                # Get the specific part of the df
                temp_power_df = power_df[(power_df["Quant_status"] == qq) &
                                         (power_df["Eye_status"] == ee) &
                                         (power_df["Subject_ID"] == ii) &
                                         (power_df["Freq_band"] == ff)].copy()
                
                # Convert from dB to raw power
                temp_power_df.loc[:,"PSD"] = np.array(10**(temp_power_df["PSD"]/10))
                
                # Calculate the power asymmetry
                for r in range(len(ROI)):
                    rr = ROI[r]
                    temp_power_roi_df = temp_power_df[(temp_power_df["Brain_region"] == rr)&
                                                      ~(temp_power_df["Brain_side"] == "Mid")]
                    # Sort using channel names to make sure F8-F7 and not F4-F7 etc.
                    temp_power_roi_df = temp_power_roi_df.sort_values("Channel").reset_index(drop=True)
                    # Get the log power
                    R_power = temp_power_roi_df[(temp_power_roi_df["Brain_side"] == "Right")]["PSD"]
                    ln_R_power = np.log(R_power) # get log power
                    L_power = temp_power_roi_df[(temp_power_roi_df["Brain_side"] == "Left")]["PSD"]
                    ln_L_power = np.log(L_power) # get log power
                    # Pairwise subtraction followed by averaging
                    asymmetry_value = np.mean(np.array(ln_R_power) - np.array(ln_L_power))
                    # Save it to the array
                    temp_asymmetry[e,f,r] = asymmetry_value
        # Print progress
        print("{} out of {} finished testing".format(i+1,n_subjects))
        return i, temp_asymmetry
    
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for i, res in executor.map(calculate_asymmetry, range(len(np.unique(power_df["Subject_ID"])))): # Function and arguments
            asymmetry[i,:,:,:] = res
    
    # Prepare conversion of array to df using flatten
    n_subjects = len(Subject_id)
    
    # The group status (PTSD/CTRL) is made using the information about the cases
    Group_status = np.array(["CTRL"]*n_subjects)
    Group_status[np.array([i in cases for i in Subject_id])] = "PTSD"
    
    # Eye status is added
    eye_status = list(final_epochs[0].event_id.keys())
    n_eye_status = len(eye_status)
    
    # Frequency bands
    freq_bands_name = list(Freq_Bands.keys())
    n_freq_bands = len(freq_bands_name)
    
    # ROIs
    n_ROI = len(ROI)
    
    # Make the dataframe                
    asymmetry_df = pd.DataFrame(data = {"Subject_ID": [ele for ele in Subject_id for i in range(n_eye_status*n_freq_bands*n_ROI)],
                                         "Group_status": [ele for ele in Group_status for i in range(n_eye_status*n_freq_bands*n_ROI)],
                                         "Eye_status": [ele for ele in eye_status for i in range(n_freq_bands*n_ROI)]*(n_subjects),
                                         "Freq_band": [ele for ele in freq_bands_name for i in range(n_ROI)]*(n_subjects*n_eye_status),
                                         "ROI": list(ROI)*(n_subjects*n_eye_status*n_freq_bands),
                                         "Asymmetry_score": asymmetry.flatten(order="C")
                                         })
    # Flatten with order=C means that it first goes through last axis,
    # then repeat along 2nd last axis, and then repeat along 3rd last etc
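    # E.g. for shape (2,2,2), order="C" visits (0,0,0),(0,0,1),(0,1,0),(0,1,1),(1,0,0),...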
    
    # Asymmetry numpy to pandas conversion check
    random_point=321
    asymmetry_df.iloc[random_point]
    
    i = np.where(np.unique(power_df["Subject_ID"]) == asymmetry_df.iloc[random_point]["Subject_ID"])[0]
    e = np.where(np.unique(power_df["Eye_status"]) == asymmetry_df.iloc[random_point]["Eye_status"])[0]
    f = np.where(np.array(list(Freq_Bands.keys())) == asymmetry_df.iloc[random_point]["Freq_band"])[0]
    r = np.where(np.array(ROI) == asymmetry_df.iloc[random_point]["ROI"])[0]
    
    assert asymmetry[i,e,f,r] == asymmetry_df.iloc[random_point]["Asymmetry_score"]
    
    # Save the dataframe
    asymmetry_df.to_pickle(os.path.join(Feature_savepath,"asymmetry_df.pkl"))
    
    
    # %% Using FOOOF
    # Peak alpha frequency (PAF) and 1/f exponent (OOF)
    # Using the FOOOF algorithm (Fitting oscillations and one over f)
    # Published by Donoghue et al, 2020 in Nature Neuroscience
    # To start, FOOOF takes the freqs and power spectra as input
    n_channels = final_epochs[0].info["nchan"]
    ch_names = final_epochs[0].info["ch_names"]
    sfreq = final_epochs[0].info["sfreq"]
    Freq_Bands = {"delta": [1.25, 4.0],
                  "theta": [4.0, 8.0],
                  "alpha": [8.0, 13.0],
                  "beta": [13.0, 30.0],
                  "gamma": [30.0, 49.0]}
    n_freq_bands = len(Freq_Bands)
    
    # From visual inspection there seems to be a problem if the PSD is too steep at the start
    # To overcome this problem, we try multiple start freq
    OOF_r2_thres = 0.95 # a high threshold as we allow for overfitting
    PAF_r2_thres = 0.90 # a more lenient threshold for PAF, as it is usually still captured even if fit for 1/f is not perfect
    
    PTF_r2_thres = 0.90 # a more lenient threshold for PTF, as it is usually still captured even if fit for 1/f is not perfect
    PBF_r2_thres = 0.90 # a more lenient threshold for PBF, as it is usually still captured even if fit for 1/f is not perfect
    
    freq_start_it_range = [2,3,4,5,6]
    freq_end = 40 # Stop freq at 40Hz to not be influenced by the Notch Filter
    
    eye_status = list(final_epochs[0].event_id.keys())
    n_eye_status = len(eye_status)
    
    PAF_data = np.zeros((n_subjects,n_eye_status,n_channels,3)) # CF, power, band_width
    
    PTF_data = np.zeros((n_subjects,n_eye_status,n_channels,3)) # CF, power, band_width
    PBF_data = np.zeros((n_subjects,n_eye_status,n_channels,3)) # CF, power, band_width
    
    OOF_data = np.zeros((n_subjects,n_eye_status,n_channels,2)) # offset and exponent
    
    def FOOOF_estimation(i):
        PAF_data0 = np.zeros((n_eye_status,n_channels,3)) # CF, power, band_width
    
        PTF_data0 = np.zeros((n_eye_status,n_channels,3)) # CF, power, band_width
        PBF_data0 = np.zeros((n_eye_status,n_channels,3)) # CF, power, band_width
    
        OOF_data0 = np.zeros((n_eye_status,n_channels,2)) # offset and exponent
        # Get Eye status
        eye_idx = [final_epochs[i].events[:,2] == 1, final_epochs[i].events[:,2] == 2] # EC and EO
        # Calculate the power spectral density
        psd, freqs = psd_multitaper(final_epochs[i], fmin = 1, fmax = 50) # output (epochs, channels, freqs)
        # Retrieve psds for the 2 conditions and calculate mean across epochs
        psds = []
        for e in range(n_eye_status):
            # Get the epochs for specific eye condition
            temp_psd = psd[eye_idx[e],:,:]
            # Calculate the mean across epochs
            temp_psd = np.mean(temp_psd, axis=0)
            # Save
            psds.append(temp_psd)
        # Try multiple start freq
        PAF_data00 = np.zeros((n_eye_status,n_channels,len(freq_start_it_range),3)) # CF, power, band_width
    
        PTF_data00 = np.zeros((n_eye_status,n_channels,len(freq_start_it_range),3)) # CF, power, band_width
        PBF_data00 = np.zeros((n_eye_status,n_channels,len(freq_start_it_range),3)) # CF, power, band_width
    
        OOF_data00 = np.zeros((n_eye_status,n_channels,len(freq_start_it_range),2)) # offset and exponent
        r2s00 = np.zeros((n_eye_status,n_channels,len(freq_start_it_range)))
        for e in range(n_eye_status):
            psds_avg = psds[e]
            for f in range(len(freq_start_it_range)):
                # Initiate FOOOF group for analysis of multiple PSD
                fg = fooof.FOOOFGroup()
                # Set the frequency range to fit the model
                freq_range = [freq_start_it_range[f], freq_end] # variable freq start to 40Hz
                # Fit each channel's PSD separately (n_jobs=1: serial within each call)
                fg.fit(freqs,psds_avg,freq_range,n_jobs=1)
                # Extract aperiodic parameters
                aps = fg.get_params('aperiodic_params')
                # Extract peak parameters
                peaks = fg.get_params('peak_params')
                # Extract goodness-of-fit metrics
                r2s = fg.get_params('r_squared')
                # Save OOF and r2s
                OOF_data00[e,:,f] = aps
                r2s00[e,:,f] = r2s
                # Find the alpha peak with greatest power
                for c in range(n_channels):
                    peaks0 = peaks[peaks[:,3] == c]
                    # Subset the peaks within the alpha band
                    in_alpha_band = (peaks0[:,0] >= Freq_Bands["alpha"][0]) & (peaks0[:,0] <= Freq_Bands["alpha"][1])
                    if sum(in_alpha_band) > 0: # Any alpha peaks?
                        # Choose the peak with the highest power
                        max_alpha_idx = np.argmax(peaks0[in_alpha_band,1])
                        # Save results
                        PAF_data00[e,c,f] = peaks0[in_alpha_band][max_alpha_idx,:-1]
                    else:
                        # No alpha peaks
                        PAF_data00[e,c,f] = [np.nan]*3
    
                # Find the theta peak with greatest power
                for c in range(n_channels):
                    peaks0 = peaks[peaks[:,3] == c]
                    # Subset the peaks within the theta band
                    in_theta_band = (peaks0[:,0] >= Freq_Bands["theta"][0]) & (peaks0[:,0] <= Freq_Bands["theta"][1])
                    if sum(in_theta_band) > 0:
                        # Choose the peak with the highest power
                        max_theta_idx = np.argmax(peaks0[in_theta_band,1])
                        # Save results
                        PTF_data00[e,c,f] = peaks0[in_theta_band][max_theta_idx,:-1]
                    else:
                        # No theta peaks
                        PTF_data00[e,c,f] = [np.nan]*3
                # Find the beta peak with greatest power 
                for c in range(n_channels):
                    peaks0 = peaks[peaks[:,3] == c]
                    # Subset the peaks within the beta band
                    in_beta_band = (peaks0[:,0] >= Freq_Bands["beta"][0]) & (peaks0[:,0] <= Freq_Bands["beta"][1])
                    if sum(in_beta_band) > 0:
                        # Choose the peak with the highest power
                        max_beta_idx = np.argmax(peaks0[in_beta_band,1])
                        # Save results
                        PBF_data00[e,c,f] = peaks0[in_beta_band][max_beta_idx,:-1]
                    else:
                        # No beta peaks
                        PBF_data00[e,c,f] = [np.nan]*3
    
        # Check criteria
        good_fits_OOF = (r2s00 > OOF_r2_thres) & (OOF_data00[:,:,:,1] > 0) # r^2 > 0.95 and exponent > 0
        good_fits_PAF = (r2s00 > PAF_r2_thres) & (np.isfinite(PAF_data00[:,:,:,0])) # r^2 > 0.90 and detected peak in alpha band
    
        good_fits_PTF = (r2s00 > PTF_r2_thres) & (np.isfinite(PTF_data00[:,:,:,0])) # r^2 > 0.90 and detected peak in theta band
        good_fits_PBF = (r2s00 > PBF_r2_thres) & (np.isfinite(PBF_data00[:,:,:,0])) # r^2 > 0.90 and detected peak in beta band
    
        # Save the data, or NaN if the criteria were not fulfilled
        # (np.where gives indices on the start-frequency axis, so the argmax over
        # the good fits selects the correct entry in the unfiltered arrays)
        for e in range(n_eye_status):
            for c in range(n_channels):
                if sum(good_fits_OOF[e,c]) == 0: # no good OOF estimation
                    OOF_data0[e,c] = [np.nan]*2
                else: # Save OOF associated with greatest r^2 among fits that fulfilled the criteria
                    good_idx = np.where(good_fits_OOF[e,c])[0]
                    OOF_data0[e,c] = OOF_data00[e,c,good_idx[np.argmax(r2s00[e,c,good_idx])]]
                if sum(good_fits_PAF[e,c]) == 0: # no good PAF estimation
                    PAF_data0[e,c] = [np.nan]*3
                else: # Save PAF associated with greatest r^2 among fits that fulfilled the criteria
                    good_idx = np.where(good_fits_PAF[e,c])[0]
                    PAF_data0[e,c] = PAF_data00[e,c,good_idx[np.argmax(r2s00[e,c,good_idx])]]
    
                if sum(good_fits_PTF[e,c]) == 0: # no good PTF estimation
                    PTF_data0[e,c] = [np.nan]*3
                else: # Save PTF associated with greatest r^2 among fits that fulfilled the criteria
                    good_idx = np.where(good_fits_PTF[e,c])[0]
                    PTF_data0[e,c] = PTF_data00[e,c,good_idx[np.argmax(r2s00[e,c,good_idx])]]
                if sum(good_fits_PBF[e,c]) == 0: # no good PBF estimation
                    PBF_data0[e,c] = [np.nan]*3
                else: # Save PBF associated with greatest r^2 among fits that fulfilled the criteria
                    good_idx = np.where(good_fits_PBF[e,c])[0]
                    PBF_data0[e,c] = PBF_data00[e,c,good_idx[np.argmax(r2s00[e,c,good_idx])]]
    
        print("Finished {} out of {} subjects".format(i+1,n_subjects))
    
        return i, PAF_data0, OOF_data0, PTF_data0, PBF_data0
    
    
    # Get current time
    c_time1 = time.localtime()
    c_time1 = time.strftime("%a %d %b %Y %H:%M:%S", c_time1)
    print(c_time1)
    
    
    # with concurrent.futures.ProcessPoolExecutor() as executor:
    #     for i, PAF_result, OOF_result, PTF_result, PBF_result in executor.map(FOOOF_estimation, range(n_subjects)): # Function and arguments
    #         PAF_data[i] = PAF_result
    #         OOF_data[i] = OOF_result
    #         PTF_data[i] = PTF_result
    #         PBF_data[i] = PBF_result
    
    for i in range(n_subjects):
        j, PAF_result, OOF_result, PTF_result, PBF_result = FOOOF_estimation(i) # Function and arguments
        PAF_data[i] = PAF_result
        OOF_data[i] = OOF_result
        PTF_data[i] = PTF_result
        PBF_data[i] = PBF_result
    
    
    # Save data
    with open(Feature_savepath+"PAF_data_arr.pkl", "wb") as file:
        pickle.dump(PAF_data, file)
    
    with open(Feature_savepath+"PTF_data_arr.pkl", "wb") as file:
        pickle.dump(PTF_data, file)
    with open(Feature_savepath+"PBF_data_arr.pkl", "wb") as file:
        pickle.dump(PBF_data, file)
    # with open(Feature_savepath+"OOF_data_arr.pkl", "wb") as file:
    #     pickle.dump(OOF_data, file)
    
    
    # Get current time
    c_time2 = time.localtime()
    c_time2 = time.strftime("%a %d %b %Y %H:%M:%S", c_time2)
    print("Started", c_time1, "\nFinished",c_time2)
    
    # Convert to Pandas dataframe (only keep the center frequency parameter for PAF)
    # The dimensions will each be a column with numbers and the last column will be the actual values
    ori = PAF_data[:,:,:,0]
    
    ori_2 = PTF_data[:,:,:,0]
    ori_3 = PBF_data[:,:,:,0]
    
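    # np.meshgrid over np.arange of each dimension enumerates every
    # (subject, eye status, channel) index combination; ravel + column_stack
    # turns these index grids into long-format columns, with the values in the last column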
    arr = np.column_stack(list(map(np.ravel, np.meshgrid(*map(np.arange, ori.shape), indexing="ij"))) + [ori.ravel()])
    
    arr_2 = np.column_stack(list(map(np.ravel, np.meshgrid(*map(np.arange, ori_2.shape), indexing="ij"))) + [ori_2.ravel()])
    arr_3 = np.column_stack(list(map(np.ravel, np.meshgrid(*map(np.arange, ori_3.shape), indexing="ij"))) + [ori_3.ravel()])
    
    PAF_data_df = pd.DataFrame(arr, columns = ["Subject_ID", "Eye_status", "Channel", "Value"])
    
    PTF_data_df = pd.DataFrame(arr_2, columns = ["Subject_ID", "Eye_status", "Channel", "Value"])
    PBF_data_df = pd.DataFrame(arr_3, columns = ["Subject_ID", "Eye_status", "Channel", "Value"])
    
    # Change from numerical coding to actual values
    
    index_values = [Subject_id,eye_status,ch_names]
    temp_df = PAF_data_df.copy() # work on a copy so already-recoded values are not sequentially overwritten
    
    temp_df_2 = PTF_data_df.copy() # work on a copy so already-recoded values are not sequentially overwritten
    temp_df_3 = PBF_data_df.copy() # work on a copy so already-recoded values are not sequentially overwritten
    
    for col in range(len(index_values)):
        col_name = PAF_data_df.columns[col]
    
        col_name_2 = PTF_data_df.columns[col]
        col_name_3 = PBF_data_df.columns[col]
    
        for shape in range(ori.shape[col]):
            temp_df.loc[PAF_data_df.iloc[:,col] == shape,col_name]\
            = index_values[col][shape]
    
            temp_df_2.loc[PTF_data_df.iloc[:,col] == shape,col_name_2]\
            = index_values[col][shape]
            temp_df_3.loc[PBF_data_df.iloc[:,col] == shape,col_name_3]\
            = index_values[col][shape]
    
    PAF_data_df = temp_df # replace original df 
    
    PTF_data_df = temp_df_2 # replace original df
    PBF_data_df = temp_df_3 # replace original df
    
    
    # Add group status
    Group_status = np.array(["CTRL"]*len(PAF_data_df["Subject_ID"]))
    Group_status[np.array([i in cases for i in PAF_data_df["Subject_ID"]])] = "PTSD"
    # Add to dataframe
    PAF_data_df.insert(3, "Group_status", Group_status)
    
    PTF_data_df.insert(3, "Group_status", Group_status)
    PBF_data_df.insert(3, "Group_status", Group_status)
    
    
    # Global peak alpha
    PAF_data_df_global = PAF_data_df.groupby(["Subject_ID", "Group_status", "Eye_status"]).mean().reset_index() # by default pandas mean skip nan
    
    PTF_data_df_global = PTF_data_df.groupby(["Subject_ID", "Group_status", "Eye_status"]).mean().reset_index() # by default pandas mean skip nan
    PBF_data_df_global = PBF_data_df.groupby(["Subject_ID", "Group_status", "Eye_status"]).mean().reset_index() # by default pandas mean skip nan
    
    
    # Add dummy variable for re-using plot code
    dummy_variable = ["Global Peak Alpha Frequency"]*PAF_data_df_global.shape[0]
    
    dummy_variable_2 = ["Global Peak Theta Frequency"]*PTF_data_df_global.shape[0]
    dummy_variable_3 = ["Global Peak Beta Frequency"]*PBF_data_df_global.shape[0]
    
    PAF_data_df_global.insert(3, "Measurement", dummy_variable )
    
    PTF_data_df_global.insert(3, "Measurement", dummy_variable_2 )
    PBF_data_df_global.insert(3, "Measurement", dummy_variable_3 )
    
    
    # Regional peak alpha
    # A variable that codes the channels based on A/P localization is also made
    Frontal_chs = ["Fp1", "Fpz", "Fp2", "AFz", "Fz", "F3", "F4", "F7", "F8"]
    Central_chs = ["Cz", "C3", "C4", "T7", "T8", "FT7", "FC3", "FCz", "FC4", "FT8", "TP7", "CP3", "CPz", "CP4", "TP8"]
    Posterior_chs = ["Pz", "P3", "P4", "P7", "P8", "POz", "O1", "O2", "Oz"]
    
    Parietal_chs = ["TP7", "CP3", "CPz", "CP4", "TP8", "P7", "P3", "Pz", "P4", "P8", "POz"]
    
    
    
    Brain_region_labels = ["Frontal","Central","Posterior","Parietal"]
    
    Brain_region = np.array(ch_names, dtype = "<U9")
    
    Brain_region[np.array([i in Frontal_chs for i in ch_names])] = Brain_region_labels[0]
    Brain_region[np.array([i in Central_chs for i in ch_names])] = Brain_region_labels[1]
    Brain_region[np.array([i in Posterior_chs for i in ch_names])] = Brain_region_labels[2]
    Brain_region[np.array([i in Parietal_chs for i in ch_names])] = Brain_region_labels[3]
    
    
    
    # Insert region type into dataframe
    
    PAF_data_df.insert(4, "Brain_region", list(Brain_region)*int(PAF_data_df.shape[0]/len(Brain_region)))
    
    PTF_data_df.insert(4, "Brain_region", list(Brain_region)*int(PTF_data_df.shape[0]/len(Brain_region)))
    PBF_data_df.insert(4, "Brain_region", list(Brain_region)*int(PBF_data_df.shape[0]/len(Brain_region)))
    
    
    
    # A variable that codes the channels based on M/L localization
    Left_chs = ["Fp1", "F3", "F7", "FC3", "FT7", "C3", "T7", "CP3", "TP7", "P3", "P7", "O1"]
    Right_chs = ["Fp2", "F4", "F8", "FC4", "FT8", "C4", "T8", "CP4", "TP8", "P4", "P8", "O2"]
    Mid_chs = ["Fpz", "AFz", "Fz", "FCz", "Cz", "CPz", "Pz", "POz", "Oz"]
    
    
    
    Brain_side = np.array(ch_names, dtype = "<U5")
    Brain_side[np.array([i in Left_chs for i in ch_names])] = "Left"
    Brain_side[np.array([i in Right_chs for i in ch_names])] = "Right"
    Brain_side[np.array([i in Mid_chs for i in ch_names])] = "Mid"
    
    
    
    # Insert side type into dataframe: 
    PAF_data_df.insert(5, "Brain_side", list(Brain_side)*int(PAF_data_df.shape[0]/len(Brain_side)))
    PTF_data_df.insert(5, "Brain_side", list(Brain_side)*int(PTF_data_df.shape[0]/len(Brain_side)))
    PBF_data_df.insert(5, "Brain_side", list(Brain_side)*int(PBF_data_df.shape[0]/len(Brain_side)))
    
    # Define region of interest before saving
    PAF_data_df = PAF_data_df[(PAF_data_df["Brain_region"] == "Parietal")] # Parietal region for peak alpha frequencies
    PTF_data_df = PTF_data_df[(PTF_data_df["Brain_region"] == "Frontal") &
                              (PTF_data_df["Brain_side"] == "Mid")] # Frontal midline theta peak frequencies