Commit d911db2c authored by ofplarsen

Added some docs

parent 05f675c9
@@ -123,14 +123,11 @@ while True:
buffer_eeg = []
triggered = False
start_time = None
scan_values = False
count = False
i = 0
print("Sleep done")
while not triggered:
# Gets data from the Eye Tracker LSL stream, and the EEG LSL stream
sample, timestamp = inlet.pull_sample()
sample_eeg, timestamp_eeg = inlet_2.pull_sample()
#print(timestamp)
buffer.append(sample)
buffer_eeg.append(sample_eeg)
@@ -140,6 +137,8 @@ while True:
buffer.pop(0)
buffer_eeg.pop(0)
# If the buffer is filled with data ready to be compared in CCA, and the start of the buffer
# is the start of the eye-tracking data (eye-tracking trigger)
if (len(buffer) == fragment_samples) and buffer[0][0] == 1:
print(len(buffer))
fragment = np.array(buffer[:fragment_samples])
@@ -147,26 +146,35 @@ while True:
triggered = True
print("Fragment: found")
# Merges both streams into a single dataframe
df = pd.concat([pd.DataFrame(np.array(fragment)), pd.DataFrame(np.array(fragment_eeg))], axis=1, join='inner')
df.columns = ['N'] + channels
print(df['N'].tolist())
# N = np.arange(1, len(df['O1']) + 1)
# If any delay is added, shift the signal accordingly
df['N'] = df['N'].shift(round(delay * fs))
df = df.iloc[round(delay * fs):]
# Reset the index
df = df.reset_index(drop=True)
N = df['N']
print(df.shape)
df = pd.concat([df, get_freqs(N)], axis=1, join='inner')
print(df.shape)
print([(index, row['O1']) for index, row in df.iterrows() if pd.isna(row['O1'])])
N = df['N']
frs = get_freqs(N)
X = df[:][occ_channels]
freqs = []
h = 0
# CCA between the target frequencies and the occipital channels
for y in range(0, len(frequencies), 6):
h = h + 1
Y = frs[:][frequencies[y:6 * h]]
ca = CCA(n_components=2)
ca.fit(X, Y)
X_c, Y_c = ca.transform(X, Y)
# Uses two coefficients: pk = sqrt(p1**2 + p2**2)
p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
freqs.append(np.sqrt(p1 ** 2 + p2 ** 2))
@@ -174,8 +182,7 @@ while True:
# print("CCA single: " + str(perform_cca(df,1)))
print(cca)
index = np.argmax(cca)
print(index)
print("Looking at: " + str(frequencies_main[index]) + "Hz")
# Sends result in LSL stream
return_index(index, info, outlet)
print("Sleep")
#time.sleep(fragment_duration)
@@ -6,14 +6,22 @@ from numpy.random import rand
from pylsl import StreamInlet, resolve_stream, StreamInfo, StreamOutlet, pylsl, local_clock
from scipy import signal
from sklearn.cross_decomposition import CCA
# EEG channels used
channels = ['Fp1', 'Fz', 'F3', 'F7', 'F9', 'FC5', 'FC1', 'C3', 'T7', 'CP5', 'CP1', 'Pz', 'P3', 'P7'
, 'P9', 'O1', 'Oz', 'O2', 'P10', 'P8', 'P4', 'CP2', 'CP6', 'T8', 'C4', 'Cz'
, 'FC2', 'FC6', 'F10', 'F8', 'F4', 'Fp2', 'ACC_X', 'ACC_Y', 'ACC_Z']
# Channels where electrodes are removed from EEG
removed_channels = ['Fp1', 'F8', 'F7', 'Fp2', 'F3', 'F4']
# The frequencies used for the SSVEP speller
frequencies_main = [4,5,6,7,9,11]
# The channels used for the BCI Speller combined with CCA
occ_channels = ['O1', 'O2', 'Oz', 'P3', 'P4', 'Pz', 'P7', 'P8']
# Names of all frequencies with harmonics being used
frequencies = ['8.18_sin_h1', '8.18_cos_h1', '8.18_sin_h2', '8.18_cos_h2', '8.18_sin_h3', '8.18_cos_h3',
'9_sin_h1', '9_cos_h1', '9_sin_h2', '9_cos_h2', '9_sin_h3', '9_cos_h3',
'10_sin_h1', '10_cos_h1', '10_sin_h2', '10_cos_h2', '10_sin_h3', '10_cos_h3',
@@ -22,14 +30,18 @@ frequencies = ['8.18_sin_h1', '8.18_cos_h1', '8.18_sin_h2', '8.18_cos_h2', '8.18
'15_sin_h1', '15_cos_h1', '15_sin_h2', '15_cos_h2', '15_sin_h3', '15_cos_h3'
]
"""
Method to normalise data, to better fit plotting
"""
def normalize_data(data, lower_bound=-1, upper_bound=1): def normalize_data(data, lower_bound=-1, upper_bound=1):
min_value = data.min() min_value = data.min()
max_value = data.max() max_value = data.max()
normalized_data = lower_bound + (data - min_value) * (upper_bound - lower_bound) / (max_value - min_value) normalized_data = lower_bound + (data - min_value) * (upper_bound - lower_bound) / (max_value - min_value)
return normalized_data return normalized_data
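# --- Illustrative sketch, not part of this commit: a quick check of the min-max scaling
# above. With the default bounds the minimum maps to -1, the maximum to 1, and everything
# in between is scaled linearly. Assumes numpy is imported as np, as elsewhere in this file.
def _demo_normalize_data():
    data = np.array([0.0, 5.0, 10.0])
    print(normalize_data(data))  # expected: [-1.  0.  1.]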
"""
Method to plot a single EEG channel
"""
def plot_single(df, column): def plot_single(df, column):
t = np.arange(0, 10, 1 / fs) t = np.arange(0, 10, 1 / fs)
#df[column] = normalize_data(df[column]) #df[column] = normalize_data(df[column])
...@@ -38,7 +50,9 @@ def plot_single(df, column): ...@@ -38,7 +50,9 @@ def plot_single(df, column):
axis.set_title(column) axis.set_title(column)
plt.show() plt.show()
"""
Method to initialise the output stream for streaming the CCA answerser via LSL
"""
def init_stream(): def init_stream():
# Create an LSL stream # Create an LSL stream
stream_name = 'CCA' stream_name = 'CCA'
...@@ -53,7 +67,10 @@ def init_stream(): ...@@ -53,7 +67,10 @@ def init_stream():
# Create the LSL outlet # Create the LSL outlet
outlet = StreamOutlet(info) outlet = StreamOutlet(info)
return info, outlet return info, outlet
"""
Methods to add/remove padding, and formula for padding
Used when filtering the EEG signal, because of distortion at the start and end of EEG signal when using other filters
"""
def add_padding(data, lenght=100): def add_padding(data, lenght=100):
return padding(data, lenght) return padding(data, lenght)
...@@ -63,13 +80,10 @@ def remove_padding(data, length=100): ...@@ -63,13 +80,10 @@ def remove_padding(data, length=100):
def padding(data, pad_length = 100): def padding(data, pad_length = 100):
return np.pad(data, (pad_length, pad_length), mode="reflect") return np.pad(data, (pad_length, pad_length), mode="reflect")
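# --- Illustrative sketch, not part of this commit: how the padding helpers above are meant
# to be combined with a filter. Reflect-padding the fragment before filtering and cropping it
# afterwards keeps the filter's edge transients outside the analysed window; this mirrors the
# add_padding / filter / remove_padding sequence in the main loop further down.
# zero_phase_butter is defined later in this file.
def _demo_padded_filtering(x, pad_length=100):
    padded = add_padding(x, pad_length)            # len(x) + 2 * pad_length samples
    filtered = zero_phase_butter(padded)           # zero-phase bandpass on the padded signal
    return remove_padding(filtered, pad_length)    # crop back to the original length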
def hamming_window(data, duration):
window_size = int(duration)
window = np.hamming(window_size)
data[:window_size] *= window
data[-window_size:] *= window[::-1]
return data
"""
Method that returns a dataframe with all reference frequencies used for comparison against the different EEG signals in the CCA method
N: Number of samples (seconds * sampling frequency)
"""
def get_freqs(N):
start_time = time.time()
# fs = [8.18, 9, 10, 11.25, 12.86, 15]
@@ -90,7 +104,10 @@ def get_freqs(N):
print("--- %s seconds ---" % (time.time() - start_time))
return df
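# --- Illustrative sketch, not part of this commit: the body of get_freqs is not shown in this
# diff, but the column names in `frequencies` ('9_sin_h1', '9_cos_h1', ...) suggest a table of
# sine and cosine reference signals at the first three harmonics of each target frequency.
# A minimal version consistent with those names could look like this (the target list and
# fs = 250 are taken from the surrounding code; the real implementation may differ):
def _demo_reference_signals(N, fs=250, targets=(8.18, 9, 10, 11.25, 12.86, 15)):
    import numpy as np
    import pandas as pd
    t = np.arange(len(N)) / fs                     # time axis for the fragment
    cols = {}
    for f in targets:
        name = str(int(f)) if float(f).is_integer() else str(f)
        for h in (1, 2, 3):                        # fundamental plus two harmonics
            cols[f'{name}_sin_h{h}'] = np.sin(2 * np.pi * f * h * t)
            cols[f'{name}_cos_h{h}'] = np.cos(2 * np.pi * f * h * t)
    return pd.DataFrame(cols)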
"""
Method that performs CCA between the EEG signal and the frequencies
Uses only a single coefficient from CCA
"""
def perform_cca(fragment, n_components): def perform_cca(fragment, n_components):
X = fragment[:][occ_channels] X = fragment[:][occ_channels]
freqs = [] freqs = []
...@@ -103,7 +120,10 @@ def perform_cca(fragment, n_components): ...@@ -103,7 +120,10 @@ def perform_cca(fragment, n_components):
X_c, Y_c = ca.transform(X, Y) X_c, Y_c = ca.transform(X, Y)
freqs.append(np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]) freqs.append(np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1])
return freqs return freqs
"""
Method that performs CCA between the EEG signal and the frequencies
Uses two correlation coefficients from CCA
"""
def perform_cca_2(fragment):
n_components = 2
X = fragment[:][occ_channels]
@@ -118,24 +138,21 @@ def perform_cca_2(fragment):
p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
freqs.append(np.sqrt(p1**2+p2**2))
if False:
plt.scatter(X_c[:, 0], Y_c[:, 0], label='EEG Channels', alpha=0.7)
plt.scatter(X_c[:, 1], Y_c[:, 1], label='Sine curves', alpha=0.7)
plt.xlabel('X Transformed')
plt.ylabel('Y Transformed')
plt.title('CCA Transformed Canonical Variates')
plt.legend()
plt.show()
return freqs
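# --- Illustrative sketch, not part of this commit: perform_cca / perform_cca_2 scoring on
# synthetic data. Eight noisy "channels" share a 10 Hz component, so the 10 Hz reference set
# should get a clearly higher combined score sqrt(p1**2 + p2**2) than an off-target set.
def _demo_cca_scoring(fs=250, seconds=4):
    import numpy as np
    from sklearn.cross_decomposition import CCA
    rng = np.random.default_rng(0)
    t = np.arange(fs * seconds) / fs
    X = np.column_stack([np.sin(2 * np.pi * 10 * t + ph) + 0.5 * rng.standard_normal(len(t))
                         for ph in np.linspace(0, 1, 8)])
    for f in (10, 12):                             # on-target vs off-target reference set
        Y = np.column_stack([g(2 * np.pi * f * h * t)
                             for h in (1, 2, 3) for g in (np.sin, np.cos)])
        ca = CCA(n_components=2)
        X_c, Y_c = ca.fit_transform(X, Y)
        p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
        p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
        print(f, "Hz reference set score:", np.sqrt(p1 ** 2 + p2 ** 2))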
"""
Method to send value in the output LSL stream used in the unity Speller
"""
def return_index(index, info, outlet): def return_index(index, info, outlet):
# Send a single value # Send a single value
value = float(index) value = float(index)
timestamp = time.time() timestamp = time.time()
outlet.push_sample([value], timestamp) outlet.push_sample([value], timestamp)
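# --- Illustrative sketch, not part of this commit: a minimal consumer for the value that
# return_index pushes on the 'CCA' stream created in init_stream above (in practice the
# Unity speller plays this role). Resolving the stream by name is an assumption here; the
# real speller may resolve it by type or another property instead.
def _demo_receive_index():
    from pylsl import StreamInlet, resolve_stream
    inlet_cca = StreamInlet(resolve_stream('name', 'CCA')[0])
    sample, ts = inlet_cca.pull_sample()
    print("Selected frequency index:", int(sample[0]), "at", ts)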
"""
Method to apply a zero-phase Butterworth filter to the data
Uses bandpass [1-15], and order 3
"""
def zero_phase_butter(data): def zero_phase_butter(data):
# Butterworth filter parameters # Butterworth filter parameters
fs = 250 fs = 250
...@@ -152,7 +169,9 @@ def zero_phase_butter(data): ...@@ -152,7 +169,9 @@ def zero_phase_butter(data):
# Zero-phase filtering using filtfilt # Zero-phase filtering using filtfilt
return signal.filtfilt(b_bandpass, a_bandpass, data) return signal.filtfilt(b_bandpass, a_bandpass, data)
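# --- Illustrative sketch, not part of this commit: a quick look at the magnitude response of
# a 3rd-order [1-15] Hz Butterworth bandpass like the one used above. Because filtfilt runs
# the filter forwards and backwards, the effective attenuation is squared and the phase is zero.
def _demo_bandpass_response(fs=250):
    import numpy as np
    from scipy import signal
    b, a = signal.butter(3, [1 / (fs / 2), 15 / (fs / 2)], btype='bandpass')
    w, h = signal.freqz(b, a, worN=2048, fs=fs)
    for f_hz in (0.5, 8.0, 50.0):                  # below, inside and above the passband
        gain = np.abs(h)[np.argmin(np.abs(w - f_hz))]
        print(f_hz, "Hz single-pass gain:", round(float(gain), 3))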
"""
Method to apply a notch filter to EEG data
"""
def notch(data): def notch(data):
fs = 250.0 # Sample frequency (Hz) fs = 250.0 # Sample frequency (Hz)
f0 = 50.0 # Frequency to be removed from signal (Hz) f0 = 50.0 # Frequency to be removed from signal (Hz)
...@@ -166,24 +185,24 @@ info, outlet = init_stream() ...@@ -166,24 +185,24 @@ info, outlet = init_stream()
print("Looking for an LSL stream...") print("Looking for an LSL stream...")
streams_counter = resolve_stream('type', 'DejitteredSpeller') streams_counter = resolve_stream('type', 'DejitteredSpeller')
streams_eeg = resolve_stream('type', 'DEEG') streams_eeg = resolve_stream('type', 'DEEG')
inlet = StreamInlet(streams_counter[0]) inlet = StreamInlet(streams_counter[0]) #LSL Eyetracker data
inlet_2 = StreamInlet(streams_eeg[0]) inlet_2 = StreamInlet(streams_eeg[0])# LSL EEG data
fs = 250 # Sampling frequency fs = 250 # Sampling frequency
delay = 0.01 delay = 0.01 #Occular delay
fragment_duration = 4+delay # Fragment duration in seconds fragment_duration = 4+delay # Fragment duration in seconds
fragment_samples = round(fs * fragment_duration) fragment_samples = round(fs * fragment_duration)
pre_trigger_samples = fs * 1
target_value = 0
pad_length = 100 pad_length = 100 #Padding length (padding in filtering)
while True: while True:
buffer = [] buffer = []
buffer_eeg = [] buffer_eeg = []
triggered = False triggered = False
start_time = None start_time = None #To track time
scan_values = False
count = False
i = 0
while not triggered:
# Gets data from the Eye Tracker LSL stream, and the EEG LSL stream
sample, timestamp = inlet.pull_sample()
sample_eeg, timestamp_eeg = inlet_2.pull_sample()
buffer.append(sample)
@@ -193,8 +212,8 @@ while True:
buffer.pop(0)
buffer_eeg.pop(0)
# If the buffer is filled with data ready to be compared in CCA, and the start of the buffer
# is the start of the eye-tracking data (eye-tracking trigger)
if (len(buffer) == fragment_samples) and buffer[0][0] == 1:
print(len(buffer))
fragment = np.array(buffer[:fragment_samples])
@@ -202,6 +221,7 @@ while True:
triggered = True
print("Fragment: found")
# Merges both streams into a single dataframe
df = pd.concat([pd.DataFrame(np.array(fragment)), pd.DataFrame(np.array(fragment_eeg))], axis=1, join='inner')
df.columns = ['N'] + channels
@@ -209,15 +229,21 @@ while True:
print(df.columns)
print(df[occ_channels])
start_time = time.time()
# Adds padding to the signals
df = df.apply(lambda x: add_padding(x, pad_length))
print(len(df['O1']))
# Adds a notch filter to the occipital channels
df[occ_channels] = df[occ_channels].apply(lambda x: notch(x))
# Adds a Butterworth filter to the occipital channels
df[occ_channels] = df[occ_channels].apply(lambda x: zero_phase_butter(x))
# Removes padding from the signal
df = df.apply(lambda x: remove_padding(x, pad_length))
# for i in occ_channels:
# df[i] = normalize_data(df[i])
print("--- Filter time: %s seconds ---" % (time.time() - start_time)) print("--- Filter time: %s seconds ---" % (time.time() - start_time))
print(df['N'].tolist()) print(df['N'].tolist())
# If any delay added, shift signal accordingly
df['N'] = df['N'].shift(round(delay*fs)) df['N'] = df['N'].shift(round(delay*fs))
df = df.iloc[round(delay*fs):] df = df.iloc[round(delay*fs):]
# Reset the index # Reset the index
...@@ -234,12 +260,14 @@ while True: ...@@ -234,12 +260,14 @@ while True:
X = df[:][occ_channels] X = df[:][occ_channels]
freqs = [] freqs = []
h = 0 h = 0
# CCA on the target frequencies, and the occular channels
for y in range(0, len(frequencies), 6): for y in range(0, len(frequencies), 6):
h = h + 1 h = h + 1
Y = frs[:][frequencies[y:6 * h]] Y = frs[:][frequencies[y:6 * h]]
ca = CCA(n_components=2) ca = CCA(n_components=2)
ca.fit(X, Y) ca.fit(X, Y)
X_c, Y_c = ca.transform(X, Y) X_c, Y_c = ca.transform(X, Y)
# Uses two coefficients pk = sqrt(p1**2+p2*'2)
p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1] p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1] p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
freqs.append(np.sqrt(p1 ** 2 + p2 ** 2)) freqs.append(np.sqrt(p1 ** 2 + p2 ** 2))
...@@ -248,5 +276,5 @@ while True: ...@@ -248,5 +276,5 @@ while True:
print(cca) print(cca)
index = np.argmax(cca) index = np.argmax(cca)
print("Looking at: " + str(frequencies_main[index]) + "Hz") print("Looking at: " + str(frequencies_main[index]) + "Hz")
#print(np.argpartition(cca, -2)[-2:]) #Sends result in LSL stream
return_index(index, info, outlet) return_index(index, info, outlet)