Commit d911db2c, authored 2 years ago by ofplarsen
Added some docs
parent 05f675c9
No related branches, tags, or merge requests found.
Showing 2 changed files
  jitter/BCISpeller/BCISpellerV2.py  +20 −13  (20 additions, 13 deletions)
  jitter/BCISpeller/BCISpellerV3.py  +68 −40  (68 additions, 40 deletions)
with 88 additions and 53 deletions
jitter/BCISpeller/BCISpellerV2.py  +20 −13  (view file @ d911db2c)

...
@@ -123,14 +123,11 @@ while True:
    buffer_eeg = []
    triggered = False
    start_time = None
    scan_values = False
    count = False
    i = 0
    print("Sleep done")
    while not triggered:
        # Gets data from the Eye Tracker LSL stream, and the EEG LSL stream
        sample, timestamp = inlet.pull_sample()
        sample_eeg, timestamp_eeg = inlet_2.pull_sample()
        #print(timestamp)
        buffer.append(sample)
        buffer_eeg.append(sample_eeg)
...
@@ -140,6 +137,8 @@ while True:
            buffer.pop(0)
            buffer_eeg.pop(0)
        # If buffer is filled with data ready to be compared in CCA, and the start of the buffer is the start of
        # the Eye Tracking data (Eye Tracking trigger)
        if (len(buffer) == fragment_samples) and buffer[0][0] == 1:
            print(len(buffer))
            fragment = np.array(buffer[:fragment_samples])
...
@@ -147,26 +146,35 @@ while True:
            triggered = True
            print("Fragment: found")
            # Makes both streams to a single dataframe
            df = pd.concat([pd.DataFrame(np.array(fragment)), pd.DataFrame(np.array(fragment_eeg))], axis=1, join='inner')
            df.columns = ['N'] + channels
            print(df['N'].tolist())
            # N = np.arange(1, len(df['O1']) + 1)
            # If any delay added, shift signal accordingly
            df['N'] = df['N'].shift(round(delay * fs))
            df = df.iloc[round(delay * fs):]
            # Reset the index
            df = df.reset_index(drop=True)
            N = df['N']
            print(df.shape)
            df = pd.concat([df, get_freqs(N)], axis=1, join='inner')
            print(df.shape)
            print([(index, row['O1']) for index, row in df.iterrows() if pd.isna(row['O1'])])
            N = df['N']
            frs = get_freqs(N)
            X = df[:][occ_channels]
            freqs = []
            h = 0
            # CCA on the target frequencies, and the occular channels
            for y in range(0, len(frequencies), 6):
                h = h + 1
                Y = frs[:][frequencies[y:6 * h]]
                ca = CCA(n_components=2)
                ca.fit(X, Y)
                X_c, Y_c = ca.transform(X, Y)
                # Uses two coefficients pk = sqrt(p1**2 + p2**2)
                p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
                p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
                freqs.append(np.sqrt(p1 ** 2 + p2 ** 2))
...
@@ -174,8 +182,7 @@ while True:
# print("CCA single: " + str(perform_cca(df,1)))
print
(
cca
)
index
=
np
.
argmax
(
cca
)
print
(
index
)
print
(
"
Looking at:
"
+
str
(
frequencies_main
[
index
])
+
"
Hz
"
)
# Sends result in LSL stream
return_index
(
index
,
info
,
outlet
)
print
(
"
Sleep
"
)
#time.sleep(fragment_duration)
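A side note on the CCA scoring in the hunk above: the loop fits a 2-component CCA between the occipital channels and a block of six sine/cosine reference columns per target frequency, then combines the first two canonical correlations as pk = sqrt(p1^2 + p2^2). Below is a minimal standalone sketch of that calculation, assuming a 4-second fragment at fs = 250 Hz and a single 10 Hz target with three harmonics; the names eeg, ref and score are illustrative, not taken from the repository.

import numpy as np
from sklearn.cross_decomposition import CCA

fs = 250
t = np.arange(0, 4, 1 / fs)             # 4-second fragment
eeg = np.random.randn(len(t), 8)        # stand-in for the 8 occipital channels

# Reference frame: sin/cos pairs for harmonics 1-3 of a 10 Hz target,
# mirroring the '10_sin_h1', '10_cos_h1', ... naming used in the script
ref = np.column_stack([f(2 * np.pi * 10 * h * t)
                       for h in (1, 2, 3)
                       for f in (np.sin, np.cos)])

ca = CCA(n_components=2)
ca.fit(eeg, ref)
X_c, Y_c = ca.transform(eeg, ref)

# Combine the first two canonical correlations: pk = sqrt(p1**2 + p2**2)
p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0, 1]
p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0, 1]
score = np.sqrt(p1 ** 2 + p2 ** 2)
print(score)

Using two canonical correlations rather than one presumably makes the score less sensitive to a single spurious projection, which would explain why BCISpellerV3.py keeps both perform_cca and perform_cca_2.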
jitter/BCISpeller/BCISpellerV3.py  +68 −40  (view file @ d911db2c)

...
@@ -6,14 +6,22 @@ from numpy.random import rand
from pylsl import StreamInlet, resolve_stream, StreamInfo, StreamOutlet, pylsl, local_clock
from scipy import signal
from sklearn.cross_decomposition import CCA

# EEG channels used
channels = ['Fp1', 'Fz', 'F3', 'F7', 'F9', 'FC5', 'FC1', 'C3', 'T7', 'CP5', 'CP1', 'Pz', 'P3', 'P7', 'P9',
            'O1', 'Oz', 'O2', 'P10', 'P8', 'P4', 'CP2', 'CP6', 'T8', 'C4', 'Cz', 'FC2', 'FC6', 'F10', 'F8',
            'F4', 'Fp2', 'ACC_X', 'ACC_Y', 'ACC_Z']
# Channels where electrodes are removed from EEG
removed_channels = ['Fp1', 'F8', 'F7', 'Fp2', 'F3', 'F4']
# The frequencies used for the SSVEP speller
frequencies_main = [4, 5, 6, 7, 9, 11]
# The channels used for the BCI Speller combined with CCA
occ_channels = ['O1', 'O2', 'Oz', 'P3', 'P4', 'Pz', 'P7', 'P8']
# Names of all frequencies with harmonics being used
frequencies = ['8.18_sin_h1', '8.18_cos_h1', '8.18_sin_h2', '8.18_cos_h2', '8.18_sin_h3', '8.18_cos_h3',
               '9_sin_h1', '9_cos_h1', '9_sin_h2', '9_cos_h2', '9_sin_h3', '9_cos_h3',
               '10_sin_h1', '10_cos_h1', '10_sin_h2', '10_cos_h2', '10_sin_h3', '10_cos_h3',
...
@@ -22,14 +30,18 @@ frequencies = ['8.18_sin_h1', '8.18_cos_h1', '8.18_sin_h2', '8.18_cos_h2', '8.18
               '15_sin_h1', '15_cos_h1', '15_sin_h2', '15_cos_h2', '15_sin_h3', '15_cos_h3']

"""
Method to normalise data, to better fit plotting
"""
def normalize_data(data, lower_bound=-1, upper_bound=1):
    min_value = data.min()
    max_value = data.max()
    normalized_data = lower_bound + (data - min_value) * (upper_bound - lower_bound) / (max_value - min_value)
    return normalized_data

"""
Method to plot a single EEG channel
"""
def plot_single(df, column):
    t = np.arange(0, 10, 1 / fs)
    #df[column] = normalize_data(df[column])
...
@@ -38,7 +50,9 @@ def plot_single(df, column):
    axis.set_title(column)
    plt.show()

"""
Method to initialise the output stream for streaming the CCA answers via LSL
"""
def init_stream():
    # Create an LSL stream
    stream_name = 'CCA'
...
@@ -53,7 +67,10 @@ def init_stream():
    # Create the LSL outlet
    outlet = StreamOutlet(info)
    return info, outlet

"""
Methods to add/remove padding, and formula for padding
Used when filtering the EEG signal, because of distortion at the start and end of EEG signal when using other filters
"""
def add_padding(data, lenght=100):
    return padding(data, lenght)
...
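A small sketch of what the reflect padding described in the docstring above does at the signal edges, and how trimming the same number of samples afterwards (the remove_padding step) restores the original length; the toy array here is illustrative only.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
padded = np.pad(x, (3, 3), mode="reflect")   # mirror 3 samples at each edge
print(padded)                                # [4. 3. 2. 1. 2. 3. 4. 5. 4. 3. 2.]
trimmed = padded[3:-3]                       # drop the mirrored samples again
print(np.array_equal(trimmed, x))            # True

Filtering is applied between the pad and trim steps so that the filter's edge distortion lands on the mirrored samples rather than on the real EEG data.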
@@ -63,13 +80,10 @@ def remove_padding(data, length=100):
def padding(data, pad_length=100):
    return np.pad(data, (pad_length, pad_length), mode="reflect")

def hamming_window(data, duration):
    window_size = int(duration)
    window = np.hamming(window_size)
    data[:window_size] *= window
    data[-window_size:] *= window[::-1]
    return data

"""
Method for returning a dataframe with all frequencies used for comparison in the CCA method with the different EEG signals
N: Number of samples (seconds * frequency)
"""
def get_freqs(N):
    start_time = time.time()
    # fs = [8.18, 9, 10, 11.25, 12.86, 15]
...
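The body of get_freqs is collapsed in this diff, so the following is only a hedged sketch of what such a reference-frame builder typically produces, inferred from the column names above ('8.18_sin_h1', '8.18_cos_h1', ...) and the commented-out frequency list: sine/cosine pairs for each target frequency and harmonic, evaluated at the sample indices N (the df['N'] column that the main loop passes in). The function name build_reference and its exact parameters are assumptions.

import numpy as np
import pandas as pd

fs = 250

def build_reference(N, freqs=(8.18, 9, 10, 11.25, 12.86, 15), harmonics=3):
    cols = {}
    t = np.asarray(N) / fs                  # sample index -> seconds
    for f in freqs:
        for h in range(1, harmonics + 1):
            cols[f'{f}_sin_h{h}'] = np.sin(2 * np.pi * f * h * t)
            cols[f'{f}_cos_h{h}'] = np.cos(2 * np.pi * f * h * t)
    return pd.DataFrame(cols)

ref = build_reference(np.arange(1, 4 * fs + 1))
print(ref.columns[:6].tolist())             # ['8.18_sin_h1', '8.18_cos_h1', ...]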
@@ -90,7 +104,10 @@ def get_freqs(N):
    print("--- %s seconds ---" % (time.time() - start_time))
    return df

"""
Method that performs CCA between the EEG signal and the frequencies
Uses only a single coefficient from CCA
"""
def perform_cca(fragment, n_components):
    X = fragment[:][occ_channels]
    freqs = []
...
@@ -103,7 +120,10 @@ def perform_cca(fragment, n_components):
    X_c, Y_c = ca.transform(X, Y)
    freqs.append(np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1])
    return freqs

"""
Method that performs CCA between the EEG signal and the frequencies
Uses two correlation coefficients from CCA
"""
def perform_cca_2(fragment):
    n_components = 2
    X = fragment[:][occ_channels]
...
@@ -118,24 +138,21 @@ def perform_cca_2(fragment):
        p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
        p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
        freqs.append(np.sqrt(p1 ** 2 + p2 ** 2))
        if False:
            plt.scatter(X_c[:, 0], Y_c[:, 0], label='EEG Channels', alpha=0.7)
            plt.scatter(X_c[:, 1], Y_c[:, 1], label='Sine curves', alpha=0.7)
            plt.xlabel('X Transformed')
            plt.ylabel('Y Transformed')
            plt.title('CCA Transformed Canonical Variates')
            plt.legend()
            plt.show()
    return freqs

"""
Method to send value in the output LSL stream used in the unity Speller
"""
def return_index(index, info, outlet):
    # Send a single value
    value = float(index)
    timestamp = time.time()
    outlet.push_sample([value], timestamp)

"""
Method to apply a zero-phase Butterworth filter to the data
Uses bandpass [1-15], and order 3
"""
def zero_phase_butter(data):
    # Butterworth filter parameters
    fs = 250
...
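The body of zero_phase_butter is mostly collapsed here; based on its docstring (bandpass [1-15], order 3) and the filtfilt return shown in the next hunk, a likely filter design is sketched below. The b_bandpass/a_bandpass names match that return statement; everything else is an assumption.

import numpy as np
from scipy import signal

fs = 250
# Order-3 Butterworth bandpass between 1 Hz and 15 Hz at fs = 250 Hz
b_bandpass, a_bandpass = signal.butter(3, [1, 15], btype="bandpass", fs=fs)

data = np.random.randn(4 * fs)                       # stand-in for one padded EEG channel
filtered = signal.filtfilt(b_bandpass, a_bandpass, data)   # zero-phase application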
@@ -152,7 +169,9 @@ def zero_phase_butter(data):
    # Zero-phase filtering using filtfilt
    return signal.filtfilt(b_bandpass, a_bandpass, data)

"""
Method to apply a notch filter to EEG data
"""
def notch(data):
    fs = 250.0  # Sample frequency (Hz)
    f0 = 50.0  # Frequency to be removed from signal (Hz)
...
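Similarly, the body of notch() is collapsed after fs and f0. A common way to realise a 50 Hz notch at fs = 250 Hz is scipy.signal.iirnotch, sketched below; the use of iirnotch and the quality factor Q are assumptions, not taken from the file.

import numpy as np
from scipy import signal

fs = 250.0
f0 = 50.0          # mains frequency to remove
Q = 30.0           # assumed quality factor
b, a = signal.iirnotch(f0, Q, fs=fs)

x = np.random.randn(1000)          # stand-in for one EEG channel
y = signal.filtfilt(b, a, x)       # zero-phase application, as elsewhere in the script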
@@ -166,24 +185,24 @@ info, outlet = init_stream()
print("Looking for an LSL stream...")
streams_counter = resolve_stream('type', 'DejitteredSpeller')
streams_eeg = resolve_stream('type', 'DEEG')
inlet = StreamInlet(streams_counter[0])
inlet_2 = StreamInlet(streams_eeg[0])
inlet = StreamInlet(streams_counter[0])    # LSL Eyetracker data
inlet_2 = StreamInlet(streams_eeg[0])      # LSL EEG data
fs = 250  # Sampling frequency
delay = 0.01
delay = 0.01  # Occular delay
fragment_duration = 4 + delay  # Fragment duration in seconds
fragment_samples = round(fs * fragment_duration)
pre_trigger_samples = fs * 1
target_value = 0
pad_length = 100
pad_length = 100  # Padding length (padding in filtering)
while True:
    buffer = []
    buffer_eeg = []
    triggered = False
    start_time = None
    scan_values = False
    count = False
    i = 0
    start_time = None  # To track time
    while not triggered:
        # Gets data from the Eye Tracker LSL stream, and the EEG LSL stream
        sample, timestamp = inlet.pull_sample()
        sample_eeg, timestamp_eeg = inlet_2.pull_sample()
        buffer.append(sample)
...
@@ -193,8 +212,8 @@ while True:
            buffer.pop(0)
            buffer_eeg.pop(0)
        # If buffer is filled with data ready to be compared in CCA, and the start of the buffer is the start of
        # the Eye Tracking data (Eye Tracking trigger)
        if (len(buffer) == fragment_samples) and buffer[0][0] == 1:
            print(len(buffer))
            fragment = np.array(buffer[:fragment_samples])
...
@@ -202,6 +221,7 @@ while True:
            triggered = True
            print("Fragment: found")
            # Makes both streams to a single dataframe
            df = pd.concat([pd.DataFrame(np.array(fragment)), pd.DataFrame(np.array(fragment_eeg))], axis=1, join='inner')
            df.columns = ['N'] + channels
...
@@ -209,15 +229,21 @@ while True:
            print(df.columns)
            print(df[occ_channels])
            start_time = time.time()
            # Adds padding to the signals
            df = df.apply(lambda x: add_padding(x, pad_length))
            print(len(df['O1']))
            # Adds Notch filter to the occular channels
            df[occ_channels] = df[occ_channels].apply(lambda x: notch(x))
            # Adds Butterworth filter to the occular channels
            df[occ_channels] = df[occ_channels].apply(lambda x: zero_phase_butter(x))
            # Removes padding from signal
            df = df.apply(lambda x: remove_padding(x, pad_length))
            # for i in occ_channels:
            #     df[i] = normalize_data(df[i])
            print("--- Filter time: %s seconds ---" % (time.time() - start_time))
            print(df['N'].tolist())
            # If any delay added, shift signal accordingly
            df['N'] = df['N'].shift(round(delay * fs))
            df = df.iloc[round(delay * fs):]
            # Reset the index
...
@@ -234,12 +260,14 @@ while True:
            X = df[:][occ_channels]
            freqs = []
            h = 0
            # CCA on the target frequencies, and the occular channels
            for y in range(0, len(frequencies), 6):
                h = h + 1
                Y = frs[:][frequencies[y:6 * h]]
                ca = CCA(n_components=2)
                ca.fit(X, Y)
                X_c, Y_c = ca.transform(X, Y)
                # Uses two coefficients pk = sqrt(p1**2 + p2**2)
                p1 = np.corrcoef(X_c[:, 0], Y_c[:, 0])[0][1]
                p2 = np.corrcoef(X_c[:, 1], Y_c[:, 1])[0][1]
                freqs.append(np.sqrt(p1 ** 2 + p2 ** 2))
...
@@ -248,5 +276,5 @@ while True:
            print(cca)
            index = np.argmax(cca)
            print("Looking at: " + str(frequencies_main[index]) + " Hz")
            # print(np.argpartition(cca, -2)[-2:])
            # Sends result in LSL stream
            return_index(index, info, outlet)
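For context on the outlet side (init_stream / return_index): only stream_name = 'CCA' and the StreamOutlet construction are visible in this diff, so the StreamInfo parameters below (stream type, channel count, rate, format, source_id) are assumptions; the push_sample call mirrors return_index as shown above.

import time
from pylsl import StreamInfo, StreamOutlet

# Assumed single-channel, irregular-rate marker stream named 'CCA'
info = StreamInfo(name='CCA', type='Markers', channel_count=1,
                  nominal_srate=0, channel_format='float32',
                  source_id='cca_speller')
outlet = StreamOutlet(info)

# Push the winning frequency index, as return_index does
outlet.push_sample([float(2)], time.time())   # e.g. index 2 -> 6 Hz in frequencies_main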