more evaluations
This commit is contained in: parent b935701f9b, commit 6513f4e2b4
evaluation/1Layer Activity.py (Normal file, 75 lines added)
@@ -0,0 +1,75 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

path = '/cloud/Public/_data/neuropercolation/1lay/steps=1000100/'
suffix = ''

chi = chr(967)
vareps = chr(949)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]  # every second value: 0.010, 0.020, ..., 0.500

dims = list(range(3,10))  #+[16,49]

mode = 'density'
ma = []
s = []
k = []
mk = []
lastkurt = None
for dim in dims[-1:]:
    dimpath = new_folder(path + f'dim={dim:02}/')
    for epsilon in eps_space:
        with open(dimpath+f"eps={round(epsilon,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
            activation = np.array(json.load(f)[:500])

        qtplot(f"Activity time series for eps={round(epsilon,3):.3f}",
               [list(range(500))],
               [activation],
               x_tag = 'time step',
               y_tag = 'activity density',
               y_range = (-1,1),
               export=True,
               path=dimpath+"evolution/",
               filename=f'eps={round(epsilon,3):.3f}_evolution.png',
               close=True)

mode = 'density'
#%%
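A note on a pattern shared by every script in this commit: the `@vect`-decorated `log2` adopts the entropy convention 0 · log2(0) = 0, returning 0 for input 0 and re-raising for any other domain error. A minimal standalone sketch of that behaviour (not part of the commit itself):

    import math as m
    import numpy as np

    @np.vectorize
    def log2(x):
        try:
            return m.log2(x)
        except ValueError:
            if x == 0:
                return 0.0   # entropy convention: 0*log2(0) = 0
            raise

    print(log2([0.5, 1, 0]))   # -> [-1.  0.  0.]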
evaluation/1Layer PHI.py (Normal file, 91 lines added)
@@ -0,0 +1,91 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot
from phi import MIP,calc_mips,smartMIP

import math as m
import numpy as np
from datetime import datetime

vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

path = '/cloud/Public/_data/neuropercolation/1lay/steps=1000100/'
suffix = ''

chi = chr(967)
vareps = chr(949)
varphi = chr(981)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10))  #+[16,49]

mode = 'density'
ma = []
s = []
k = []
mk = []
lastkurt = None
phinum = 10000
PHIS = []
for dim in dims[:1]:
    dimpath = new_folder(path + f'dim={dim:02}/')
    for epsilon in eps_space:
        try:
            # load cached phi values for this (dim, eps) pair if present
            with open(dimpath+f"eps={round(epsilon,3):.3f}_phi_{phinum}.txt", 'r', encoding='utf-8') as f:
                phis = json.load(f)
            PHIS.append(np.average(phis))
            print(f'Loaded and Done eps={epsilon:.3f} with dim={dim} at {datetime.now()}')
        except FileNotFoundError:
            # otherwise recompute from the saved states and cache the result
            with open(dimpath+f"eps={round(epsilon,3):.3f}_states.txt", 'r', encoding='utf-8') as f:
                states = np.array(json.load(f)[100:phinum+100])

            phis = [smartMIP(dim,state.translate(str.maketrans('','','.-=')),epsilon)[1] for state in states]
            with open(dimpath+f"eps={round(epsilon,3):.3f}_phi_{phinum}.txt", 'w', encoding='utf-8') as f:
                json.dump(phis, f, indent=1)
            PHIS.append(np.average(phis))
            print(f'Generated and Done eps={epsilon:.3f} with dim={dim} at {datetime.now()}')
#%%
qtplot("Average phi for different noise parameters",
       [eps_space],
       [PHIS],
       [''],
       colors='w',
       x_tag = f'noise level {vareps}',
       y_tag = f'effect integration {varphi}',
       export=False,
       path=dimpath+"evolution/",
       filename=f'eps={round(epsilon,3):.3f}_evolution.png',
       close=False)

mode = 'density'
#%%
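The try/except around the phi files above is a load-or-compute cache: read `eps=..._phi_{phinum}.txt` if it exists, otherwise run `smartMIP` over the saved states and write the result back. A generic sketch of that idiom (the `compute` callable is hypothetical, standing in for the `smartMIP` list comprehension):

    import json, os

    def load_or_compute(cachefile, compute):
        # return the cached JSON result, recomputing and saving it on a miss
        if os.path.exists(cachefile):
            with open(cachefile, 'r', encoding='utf-8') as f:
                return json.load(f)
        result = compute()
        with open(cachefile, 'w', encoding='utf-8') as f:
            json.dump(result, f, indent=1)
        return result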
evaluation/1Layer Suscept.py (Normal file, 183 lines added)
@@ -0,0 +1,183 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

path = '/cloud/Public/_data/neuropercolation/1lay/steps=1000100/'
suffix = ''

chi = chr(967)
vareps = chr(949)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10))  #+[16,49]

def __calc_chi(mode='density'):
    ma = []
    s = []
    k = []
    mk = []
    lastkurt = None
    for dim in dims:
        try:
            with open(path+f"magnets/magnetisation_{mode}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                magnet = json.load(f)
            with open(path+f"suscepts/susceptibility_{mode}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                suscept = json.load(f)
            with open(path+f"kurts/kurtosis_{mode}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                kurt = json.load(f)
        except:
            magnet = []
            suscept = []
            kurt = []
            jumped = False
            print('magnets or suscept or kurt file not found')
            if not os.path.exists(path+"magnets/"):
                os.makedirs(path+"magnets/")
            if not os.path.exists(path+"suscepts/"):
                os.makedirs(path+"suscepts/")
            if not os.path.exists(path+"kurts/"):
                os.makedirs(path+"kurts/")
            for epsilon in eps_space:
                try:
                    with open(path+f"dim={dim:02}/eps={round(epsilon,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                        activation = np.array(json.load(f)[100:])

                    if mode=='absolute':
                        dmag = np.sum(activation*dim**2)/len(activation)
                        mag = np.sum(np.abs(activation*dim**2))/len(activation)
                        mag2 = np.sum(activation**2*dim**4)/len(activation)
                        mag4 = np.sum(activation**4*dim**8)/len(activation)
                    elif mode=='density':
                        dmag = np.sum(activation)/len(activation)
                        mag = np.sum(np.abs(activation))/len(activation)
                        mag2 = np.sum(activation**2)/len(activation)
                        mag4 = np.sum(activation**4)/len(activation)
                    else:
                        raise NotImplementedError

                    mag = round(abs(mag),6)
                    fluct = round(mag2-mag**2,6)
                    tail = round(mag4/mag2**2,6)

                    magnet.append(mag)
                    suscept.append(fluct)
                    kurt.append(tail)
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: mag={mag}")
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: fluct={fluct}")
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: tail={tail}")
                    jumped = True
                except:
                    # pad with limit values so the curves stay aligned with eps_space
                    if not jumped:
                        magnet.append(1)
                    elif jumped:
                        magnet.append(0)
                    suscept.append(0)
                    if not jumped:
                        kurt.append(1)
                    elif jumped:
                        kurt.append(3)
                    print(f"Missing dim={dim:02} eps={round(epsilon,3):.3f}")
            with open(path+f"magnets/magnetisation_{mode}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(magnet, f, ensure_ascii=False, indent=1)
            with open(path+f"suscepts/susceptibility_{mode}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(suscept, f, ensure_ascii=False, indent=1)
            with open(path+f"kurts/kurtosis_{mode}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(kurt, f, ensure_ascii=False, indent=1)

        if lastkurt is not None:
            # linearly interpolate the eps where this size's kurtosis curve
            # crosses the previous size's curve
            pos = 0
            while kurt[pos]<lastkurt[pos]:
                pos += 1
            currdiff = kurt[pos]-lastkurt[pos]
            lastdiff = kurt[pos-1]-lastkurt[pos-1]
            crossres = currdiff/(currdiff-lastdiff)
            crosspoint = eps_space[pos] - crossres*(eps_space[pos]-eps_space[pos-1])
            assert eps_space[pos-1]<crosspoint<eps_space[pos]
            mk.append(crosspoint)
        lastkurt = kurt
        ma.append(magnet)
        s.append(suscept)
        k.append(kurt)


    return ma,s,k,mk

mode = 'absolute'
ma,s,k,mk = __calc_chi(mode=mode)
#%%
qtplot(f"Magnetisation {mode} evolution for different automaton sizes",
       [[0]+list(eps_space)]*len(ma),
       [[1]+ls for ls in ma[::-1]],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       x_tag = f'noise parameter {vareps}',
       y_tag = 'magnetisation m',
       export=True,
       path=path+"magnets/",
       filename=f'eps_magnetisation_{mode}_dims=3-9_steps={runsteps}_extra.png',
       close=False)

qtplot("Susceptibility evolution for different automaton sizes",
       [[0]+list(eps_space)]*len(s),
       [[0]+ls for ls in s[::-1]],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       x_tag = f'noise parameter {vareps}',
       y_tag = f'susceptibility {chi}',
       export=True,
       path=path+"suscepts/",
       filename=f'eps_susceptibility_{mode}_dims=3-9_steps={runsteps}_extra.png',
       close=False)

qtplot("Kurtosis evolution for different automaton sizes",
       [[0]+list(eps_space)]*len(k),
       [[1]+ls for ls in k[::-1]],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       x_tag = f'noise parameter {vareps}',
       y_tag = 'kurtosis U',
       export=True,
       path=path+"kurts/",
       filename=f'eps_kurtosis_dims=3-9_steps={runsteps}_extra.png',
       close=False)

qtplot("Kurtosis crossing for different automaton sizes",
       [[dim+0.5 for dim in dims[:-1]]],
       [mk],
       ['epsilon of kurtosis crosspoint'],
       x_tag = 'size',
       y_tag = f'epsilon {vareps}',
       export=True,
       path=path+"kurts/",
       filename=f'kurtosis_cross_dims=3-9_steps={runsteps}_extra.png',
       close=False)
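The crossing block in `__calc_chi` linearly interpolates the noise level at which one size's kurtosis curve first crosses the previous size's: with d(ε) = kurt(ε) − lastkurt(ε), the interpolated zero between the last negative and first non-negative sample is ε* = ε_pos − d_pos/(d_pos − d_{pos−1}) · (ε_pos − ε_{pos−1}). A self-contained sketch of that interpolation:

    def cross_point(xs, d):
        # linear-interpolated zero of d, assuming d starts negative and crosses once
        pos = 0
        while d[pos] < 0:
            pos += 1
        frac = d[pos] / (d[pos] - d[pos - 1])
        return xs[pos] - frac * (xs[pos] - xs[pos - 1])

    print(cross_point([0.1, 0.2, 0.3], [-1.0, -0.5, 1.0]))  # -> 0.2333...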
evaluation/1LayerPHI.py (Normal file, 118 lines added)
@@ -0,0 +1,118 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 04:39:54 2023

@author: astral
"""
import os
import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
from random import sample as choose
from random import random

from plot import qtplot

from neuropercolation import Simulate4Layers

eps_space = list(np.linspace(0.01,0.2,20))
def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

def neighbor(digit0, digit1, length):
    layer = int(length)
    dim = int(np.sqrt(layer))
    digit0, digit1 = np.array([digit0%dim, digit0//dim]), np.array([digit1%dim, digit1//dim])
    #print(digit0,digit1)
    coord_dif = list(map(abs,digit1 - digit0))
    layer_nbor = 0 in coord_dif and len(set([1,dim-1]).intersection(set(coord_dif))) != 0
    #print(coord_dif, set([1,dim-1]).intersection(set(coord_dif)))
    if layer_nbor:
        return True
    else:
        return False

def kcomb(zp,zm):
    # piecewise probability table for the flip kernel, keyed on the zp/zm counts
    if zp>2:
        val=1
    elif zm>2:
        val=0
    elif zp==zm:
        val=0.5
    elif zm==2:
        val=0.5**(3-zp)
    elif zp==2:
        val=1-0.5**(3-zm)
    elif zm==0 and zp==1:
        val=9/16
    elif zp==0 and zm==1:
        val=7/16
    else:
        raise NotImplementedError(zp,zm)
    return val

path = '/cloud/Public/_data/neuropercolation/1lay/steps=100000/'

def phi(dim,statestr,partstr,eps):
    length = dim**2
    eta = 1-eps

    state = np.array([int(q) for q in statestr])
    state = list(state.reshape((dim,dim)))
    state = [list([int(cell) for cell in row]) for row in state]

    part = np.array([int(p) for p in partstr])
    part = list(part.reshape((dim,dim)))
    part = [list([int(cell) for cell in row]) for row in part]

    # total input: own state plus the four von Neumann neighbors (periodic boundary)
    inp = [[q+sum([state[(i+1)%dim][j],
                   state[(i-1)%dim][j],
                   state[i][(j+1)%dim],
                   state[i][(j-1)%dim]
                   ]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    beps = [[int(inp[i][j]>2)*eta+int(inp[i][j]<3)*eps for j,q in enumerate(row)] for i,row in enumerate(state)]

    # neighbor counts restricted to the cell's own block of the partition
    zplus = [[q+sum([state[(i+1)%dim][j]*(part[i][j]==part[(i+1)%dim][j]),
                     state[(i-1)%dim][j]*(part[i][j]==part[(i-1)%dim][j]),
                     state[i][(j+1)%dim]*(part[i][j]==part[i][(j+1)%dim]),
                     state[i][(j-1)%dim]*(part[i][j]==part[i][(j-1)%dim])
                     ]) for j,q in enumerate(row)] for i,row in enumerate(state)]
    zminus = [[sum([(1-state[(i+1)%dim][j])*(part[i][j]==part[(i+1)%dim][j]),
                    (1-state[(i-1)%dim][j])*(part[i][j]==part[(i-1)%dim][j]),
                    (1-state[i][(j+1)%dim])*(part[i][j]==part[i][(j+1)%dim]),
                    (1-state[i][(j-1)%dim])*(part[i][j]==part[i][(j-1)%dim])
                    ]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    kplus = [[kcomb(zplus[i][j],zminus[i][j]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    pi = [[eps*(1-kplus[i][j]) + eta*kplus[i][j] for j,q in enumerate(row)] for i,row in enumerate(state)]

    crossent = [[-beps[i][j]*m.log2(pi[i][j])-(1-beps[i][j])*m.log2(1-pi[i][j]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    # effect information: cross entropy of the partitioned dynamics minus the noise entropy
    return np.sum(crossent) - length*H2(eps)

def MIP(dim,statestr,eps):
    # brute-force minimum information partition over all bipartitions
    lophi = np.inf
    mip = []
    for parti in range(1,2**(dim**2-1)):
        partstr = bin(parti)[2:].zfill(dim**2)
        print(partstr)
        curphi = phi(dim,statestr,partstr,eps)

        if curphi<lophi:
            lophi = curphi
            mip = [partstr]
        elif curphi==lophi:
            mip.append(partstr)

    return mip,lophi
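`MIP` above enumerates every bipartition of the n = dim² cells as an n-bit string, so `phi` is evaluated 2^(n−1) − 1 times: 255 calls for dim = 3, but about 1.2·10^24 for dim = 9, which is presumably why the companion script imports a `smartMIP` variant instead. A sketch of the growth:

    def num_bipartitions(dim):
        # bipartitions scanned by MIP: all n-bit part strings with the first bit fixed
        return 2 ** (dim * dim - 1) - 1

    for dim in (3, 4, 5):
        print(dim, num_bipartitions(dim))
    # 3 255
    # 4 32767
    # 5 16777215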
evaluation/2Layer Activity.py (Normal file, 81 lines added)
@@ -0,0 +1,81 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)


path = '/cloud/Public/_data/neuropercolation/2lay/steps=100100/'
suffix = ''

chi = chr(967)
vareps = chr(949)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10))  #+[16,49]

mode = 'density'
ma = []
s = []
k = []
mk = []
lastkurt = None
for dim in dims[-1:]:
    dimpath = new_folder(path + f'dim={dim:02}/')
    for epsilon in eps_space[:]:
        with open(dimpath+f"eps={round(epsilon,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
            activation = np.array(json.load(f)[:500])
        activation = list(zip(*activation))  # transpose to one series per layer

        qtplot(f"Activity time series for eps={round(epsilon,3):.3f}",
               [list(range(500))]*2,
               activation,
               x_tag = 'time step',
               y_tag = 'activity density',
               y_range = (-1,1),
               export=True,
               path=dimpath+"evolution/",
               filename=f'eps={round(epsilon,3):.3f}_evolution.png',
               close=True)

mode = 'density'
#%%
evaluation/2Layer Suscept.py (Normal file, 155 lines added)
@@ -0,0 +1,155 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

path = '/cloud/Public/_data/neuropercolation/2lay/steps=100100/'
suspath = path + 'suscepts_euk/'
kurpath = path + 'kurts_euk/'
chi = chr(967)
vareps = chr(949)
kappa = chr(954)
rho = chr(961)

vals = [[],[]]

runsteps = 100000

#eps_space = np.linspace(0.005, 0.1, 20)
eps_space = np.linspace(0.005, 0.5, 100)

epses = 20

dims = list(range(3,27,2))  #+[16]

def __calc_chi(mode='density'):
    ma = []
    s = []
    k = []

    suffix = mode

    for dim in dims:
        try:
            with open(suspath+f"radium_{suffix}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                magnet = json.load(f)
            with open(suspath+f"susceptibility_{suffix}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                suscept = json.load(f)
            with open(kurpath+f"kurtosis_{suffix}_dim={dim:02}.txt", 'r', encoding='utf-8') as f:
                kurt = []+json.load(f)
        except:
            magnet = []
            suscept = []
            kurt = []
            jumped = False
            print('suscept or kurt file not found')
            if not os.path.exists(suspath):
                os.makedirs(suspath)
            if not os.path.exists(kurpath):
                os.makedirs(kurpath)
            for epsilon in eps_space:
                try:
                    with open(path+f"dim={dim:02}/eps={round(epsilon,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                        activation = np.array(json.load(f)[100:])

                    # Euclidean norm over the two layer densities per time step
                    if mode=='density':
                        mag = np.sum(np.linalg.norm(activation,axis=1))/len(activation)
                        mag2 = np.sum(np.linalg.norm(activation,axis=1)**2)/len(activation)
                        mag4 = np.sum(np.linalg.norm(activation,axis=1)**4)/len(activation)
                    elif mode=='absolute':
                        mag = np.sum(np.linalg.norm(activation*dim**2,axis=1))/len(activation)
                        mag2 = np.sum(np.linalg.norm(activation*dim**2,axis=1)**2)/len(activation)
                        mag4 = np.sum(np.linalg.norm(activation*dim**2,axis=1)**4)/len(activation)


                    mag = round(mag,6)
                    fluct = round(mag2-mag**2,6)
                    tail = round(mag4/mag2**2,6)

                    magnet.append(mag)
                    suscept.append(fluct)
                    kurt.append(tail)
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: mag={mag}")
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: fluct={fluct}")
                    print(f"Done dim={dim:02} eps={round(epsilon,3):.3f}: tail ={tail}")
                    print("===========================================================")
                    jumped = True
                except:
                    # note: magnet is not padded here, so it can fall out of step
                    # with suscept/kurt when activation files are missing
                    suscept.append(0)
                    if not jumped:
                        kurt.append(1)
                    elif jumped:
                        kurt.append(3)
                    print(f"Missing dim={dim:02} eps={round(epsilon,3):.3f}")
            with open(suspath+f"radium_{suffix}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(magnet, f, ensure_ascii=False, indent=1)
            with open(suspath+f"susceptibility_{suffix}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(suscept, f, ensure_ascii=False, indent=1)
            with open(kurpath+f"kurtosis_{suffix}_dim={dim:02}.txt", 'w', encoding='utf-8') as f:
                json.dump(kurt, f, ensure_ascii=False, indent=1)

        ma.append([1]+magnet[:epses])
        s.append([0]+suscept[:epses])
        k.append([1]+kurt[:epses])

    return ma,s,k

ma,s,k = __calc_chi()
#%%
qtplot("Magnetisation evolution for different automaton sizes",
       [[0]+list(eps_space[:epses])]*len(ma),
       [[dims[len(dims)-1-d]**2]+list(dims[len(dims)-1-d]**2*np.array(ls[1:])) for d,ls in enumerate(ma[::-1])],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       y_tag = f'mean cycle {kappa}',
       export=True,
       path=path+"magnets/",
       filename=f'ep_radius_density_dims=3-9_steps={runsteps}_extra.png',
       close=False)

qtplot("Susceptibility evolution for different automaton sizes",
       [[0]+list(eps_space[:epses])]*len(ma),
       [[0]+list(np.array(sus[1:])*dims[len(dims)-1-i]**2) for i,sus in enumerate(s[::-1])],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       y_tag = f'susceptibility {chi}',
       export=True,
       path=suspath,
       filename=f'eps+0_susceptibility_density_dims=3-15_odd_steps={runsteps}_extra.png',
       close=False)

qtplot("Kurtosis evolution for different automaton sizes",
       [[0]+list(eps_space[:epses])]*len(k),
       k[::-1],
       [f'dim={dim}x{dim}' for dim in dims[::-1]],
       y_tag = 'kurtosis U',
       y_range = (1,2),
       y_log=True,
       export=True,
       path=kurpath,
       filename=f'eps+0_kurtosis_density_dims=3-15_odd_steps={runsteps}_extra.png',
       close=False)
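Both Suscept scripts reduce the activation series to the same three statistics: mean magnetisation ⟨|m|⟩, fluctuation χ = ⟨m²⟩ − ⟨|m|⟩², and the tail ratio U = ⟨m⁴⟩/⟨m²⟩² (a Binder-cumulant-style quantity); this 2-layer version substitutes the Euclidean norm across both layers for |m|. A compact sketch of the reduction for a 1-layer series:

    import numpy as np

    def moment_stats(series):
        m = np.asarray(series, dtype=float)
        mag = np.abs(m).mean()              # mean magnetisation <|m|>
        m2 = (m**2).mean()
        m4 = (m**4).mean()
        return round(mag, 6), round(m2 - mag**2, 6), round(m4 / m2**2, 6)

    mag, fluct, tail = moment_stats(np.random.default_rng(0).uniform(-1, 1, 10000))
    print(mag, fluct, tail)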
evaluation/2Layer fft.py (Normal file, 433 lines added)
@@ -0,0 +1,433 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

from plot import qtplot

import sys
import os
import json

import pyqtgraph as qt
import pyqtgraph.exporters


import math as m
import numpy as np
from scipy.fftpack import fft, fftfreq
from scipy.stats import cauchy

vect = np.vectorize


# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

basepath = '/cloud/Public/_data/neuropercolation/2lay/steps=1000100/'
vals = [[],[]]


TOTP_eps = []
SIGP_eps = []
SN_eps = []
STON_eps = []
FMAX_eps = []
FWHM_eps = []
Q_eps = []
LHM_eps = []
UHM_eps = []
SOS_eps = []
SPECC_eps = []
LORENTZ_eps = []

dims = [9]
runsteps = 1000000
for dim in dims:
    path = basepath+f'dim={dim:02}/'
    eps_space = np.linspace(0.005, 0.5, 100)
    eps_space = eps_space[1::2]
    TOTP = []
    SIGP = []
    SN = []
    STON = []
    FMAX = []
    FWHM = []
    Q = []
    LHM = []
    UHM = []
    SOS = []
    #%%
    for eps in eps_space:
        eps = round(eps,3)
        halfh_parts = []
        sigtonoi_parts = []
        noisefloor_parts = []
        totalpower_parts = []
        sigpower_parts = []
        sn_parts = []
        freqmax_parts = []
        freqfwhm_parts = []
        specc_list = []
        lorentz_list = []

        subfreqmax_parts = []
        subfreqfwhm_parts = []
        totlorentz_list = []
        lhm_list = []
        uhm_list = []

        parts = 1
        partlen = int(runsteps/parts)
        for part in range(0,runsteps,partlen):
            with open(path+f"eps={eps:.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100+part:100+part+partlen]
            phases = []
            for act in activation:
                ph = m.atan2(*act[::-1])
                phase = act[0] + act[1]*1j  #m.e**(ph*1j)
                phases.append(phase)

            spectrum = abs(fft(phases))**2
            freqs = fftfreq(partlen, 1)

            accum = 1  #int(500/parts)

            if accum==1:
                try:
                    # load the cached smoothed spectrum if it exists
                    with open(path+f"eps={eps:.3f}_smooth_spectrum.txt", 'r', encoding='utf-8') as f:
                        specc = np.array(json.load(f))
                    freqqs = np.array([freqs[pos] for pos in range(-int(partlen/2),int(partlen/2))])
                    print(f'Loaded specc for eps={eps:.3f}')
                except FileNotFoundError:
                    freqqs = np.array([freqs[pos] for pos in range(-int(partlen/2),int(partlen/2))])
                    spec = np.array([spectrum[pos] for pos in range(-int(partlen/2),int(partlen/2))])

                    # compactly supported bump kernel, normalised to unit sum
                    kernelres = 2500
                    kernelbase = np.linspace(-1,1,2*kernelres+1)
                    kernel = np.exp(-1/(1-np.abs(kernelbase)**2))
                    kernel = kernel/np.sum(kernel)

                    # pad circularly so the convolution wraps around the spectrum
                    spec_ext = np.hstack((spec[-kernelres:],spec,spec[:kernelres]))

                    specc = np.convolve(spec_ext,kernel,mode='same')[kernelres:-kernelres]
                    with open(path+f"eps={eps:.3f}_smooth_spectrum.txt", 'w', encoding='utf-8') as f:
                        json.dump(list(specc),f,indent=1)
            else:
                freqqs = np.array([freqs[pos]
                                   for pos in range(int((-partlen+accum)/2), int((partlen+accum)/2),accum)])
                specc = np.array([( spectrum[pos-int(accum/2)]/2 + sum(spectrum[pos-int(accum/2)+1:pos+int(accum/2)-1]) + spectrum[pos+int(accum/2)-1] + spectrum[pos+int(accum/2)]/2 )/accum
                                  for pos in range(int((-partlen+accum)/2), int((partlen+accum)/2),accum)])

            halfh = (max(specc)+min(specc))/2

            #sigtonoi = sum([specc[pos] for pos in range(len(specc)) if specc[pos]>=halfh])/sum([specc[pos] for pos in range(len(specc)) if specc[pos]<halfh])

            fars = int(runsteps/accum/100)
            noisefloor = min(np.average(specc[:fars]),np.average(specc[-fars:]))
            speccnorm = specc - noisefloor

            totalpower = sum(specc)
            sigpower = sum(speccnorm)
            sig = max(specc)
            sn = sigpower/(totalpower-sigpower)
            sigtonoi = sig/(totalpower-sig)

            halfh_parts.append(halfh)
            sigtonoi_parts.append(sigtonoi)
            noisefloor_parts.append(noisefloor)
            totalpower_parts.append(totalpower)
            sigpower_parts.append(sig)  # note: stores the spectral peak, not the integrated signal power
            sn_parts.append(sn)
            specc_list.append(specc)


            # cumulative signal power, used to locate the median and quartile frequencies
            cumspecc = [speccnorm[0]]
            for pos in range(1,len(speccnorm)):
                cumspecc.append(cumspecc[pos-1]+speccnorm[pos])

            for pos in range(len(specc)):
                if cumspecc[pos+1]>=sigpower/2:
                    median = pos
                    stepdiff = speccnorm[pos]
                    halfdiff = sum(speccnorm[:pos+1]) - sigpower/2

                    freqstep = freqqs[pos] - freqqs[pos-1]
                    freqmax = freqqs[pos] - freqstep * halfdiff/stepdiff

                    break

            for pos in range(median, len(speccnorm)):
                if cumspecc[pos+1]>=sigpower*3/4:
                    stepdiff = speccnorm[pos]
                    halfdiff = sum(speccnorm[:pos+1]) - sigpower*3/4

                    freqstep = freqqs[pos] - freqqs[pos-1]
                    freqquart3 = freqqs[pos] - freqstep * halfdiff/stepdiff

                    break
            for pos in range(median+1):
                if cumspecc[pos+1]>=sigpower/4:
                    stepdiff = speccnorm[pos]
                    halfdiff = sum(speccnorm[:pos+1]) - sigpower/4

                    freqstep = freqqs[pos] - freqqs[pos-1]
                    freqquart1 = freqqs[pos] - freqstep * halfdiff/stepdiff

                    break

            # interquartile width as a robust width estimate for the Lorentzian fit
            freqfwhm = freqquart3-freqquart1
            gamma = freqfwhm/2

            freqmax_parts.append(freqmax)
            freqfwhm_parts.append(freqfwhm)

            print(gamma**2/median**2)

            lorentz = sigpower/m.pi * accum/partlen * gamma/((freqqs-freqmax)**2+gamma**2) + noisefloor
            lorentz_list.append(lorentz)

            speccpdf = speccnorm/sigpower
            deviation = np.sqrt(sum((specc-lorentz)**2)/len(specc))/sigpower

            maxpos = 0
            for i in range(len(freqqs)):
                if abs(freqqs[i]-freqmax)<abs(freqqs[maxpos]-freqmax):
                    maxpos = i
            pos = maxpos
            try:
                while lorentz[pos]>lorentz[maxpos]/2:
                    pos -= 1
                lhm = freqqs[pos]
            except IndexError:
                lhm = freqqs[0]
            pos = maxpos
            try:
                while lorentz[pos]>lorentz[maxpos]/2:
                    pos += 1
                uhm = freqqs[pos]
            except IndexError:
                uhm = freqqs[-1]

        SPECC = np.average(specc_list,axis=0)
        LORENTZ = np.average(lorentz_list,axis=0)
        DEV = np.sqrt(sum((SPECC-LORENTZ)**2)/len(SPECC))/sum(SPECC)

        TOTP.append(np.average(totalpower_parts))
        SIGP.append(np.average(sigpower_parts))
        SN.append(np.average(sn_parts))
        STON.append(sigtonoi)

        FMAX.append(np.average(freqmax_parts))
        FWHM.append(np.average(freqfwhm_parts))
        LHM.append(lhm)
        UHM.append(uhm)
        Q.append(np.average(freqmax_parts)/np.average(freqfwhm_parts))
        SOS.append(DEV)
        #%%
        # checkpoint the per-dimension statistics after every eps
        os.makedirs(path+'signal/', exist_ok=True)
        with open(path+f"signal/SN_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(SN, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/FMAX_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(FMAX, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/FWHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(FWHM, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/LHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(LHM, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/UHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(UHM, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/TOTP_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(TOTP, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/SIGP_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(SIGP, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/Q_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(Q, f, ensure_ascii=False, indent=4)
        with open(path+f"signal/STON_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
            json.dump(STON, f, ensure_ascii=False, indent=4)

        #%%
        plotpath = path + 'spectra/'
        # qtplot(f'Frequency spectrum for dim={dim} eps={eps:.3f}',
        #        [freqqs]*2,
        #        [LORENTZ,SPECC],
        #        [f'lorentz regression dev={DEV}','spectrum'],
        #        x_tag = 'frequency f',
        #        y_tag = 'spectral power p',
        #        y_log = False,
        #        export=True,
        #        lw=1,
        #        path=plotpath,
        #        filename=f'Lorentz and Cauchy fit to spectrum {parts} parts accum={accum} eps={round(eps,3):.3f}.png',
        #        close=True)


        #%%

        print("Power {}, Signal {}".format(round(totalpower), round(sigpower)))

        print("Done {:.3f}: DEV {}, FMAX {}, FWHM {}".format(eps, round(deviation,3), round(freqmax,5), round(FWHM[-1],5)))

        LORENTZ_eps.append(LORENTZ)
        SPECC_eps.append(SPECC)

    #%%
    # vareps=chr(949)
    # qtplot(f'Frequency spectra to noise level',
    #        [freqqs[50::100]]*len(eps_space),
    #        [SPECC[50::100] for SPECC in SPECC_eps],
    #        [f'{vareps}={eps:.3f}' for eps in eps_space],
    #        x_tag = 'frequency f',
    #        y_tag = 'energy spectral density',
    #        y_log = True,
    #        export=False,
    #        lw=2,
    #        path=plotpath,
    #        filename=f'Lorentz and Cauchy fit to spectrum log {parts} parts accum={accum} eps={round(eps,3):.3f}.png',
    #        close=False)
    #%%
    TOTP_eps.append(TOTP)
    SIGP_eps.append(SIGP)
    SN_eps.append(SN)
    STON_eps.append(STON)
    FMAX_eps.append(FMAX)
    FWHM_eps.append(FWHM)
    Q_eps.append(Q)
    LHM_eps.append(LHM)
    UHM_eps.append(UHM)
    SOS_eps.append(SOS)

#%%
sigpath = basepath + 'signal/'
if not os.path.exists(sigpath):
    os.makedirs(sigpath)

# qtplot(f'Signal to noise for dim={dim} eps={eps:.3f}',
#        [eps_space],
#        [SN],
#        ['signalpower to noisefloor'],
#        x_tag = 'epsilon',
#        y_tag = 'ratio',
#        y_log = False,
#        export=True,
#        path=sigpath,
#        filename=f'SN_plot.png',
#        close=True)

qtplot('Q factor for odd automaton sizes',
       [eps_space]*len(dims),
       Q_eps,
       [f'dim={dim}x{dim}' for dim in dims],
       y_tag = 'purity',
       y_log = False,
       export=True,
       path=sigpath,
       filename=f'Q_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%

# with open(path+f"signal/FMAX_1_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'r', encoding='utf-8') as f:
#     FMAX=json.load(f)
# with open(path+f"signal/FWHM_1_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'r', encoding='utf-8') as f:
#     FWHM=json.load(f)

qtplot('Dominant frequency',
       [[0]+list(eps_space[:-1])]*3,
       [[0]+FMAX[:-1],[0]+LHM[:-1],[0]+UHM[:-1]],
       # [f'dim={dim}x{dim}' for dim in dims],
       ['dominant frequency', 'lesser half maximum', 'upper half maximum'],
       #colors=['g','r','r'],
       y_tag = 'frequency f',
       y_log = True,
       export=True,
       path=sigpath,
       filename=f'FMAX+FWHMto50_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%
qtplot('Dominant frequency',
       [[0]+list(eps_space[:-1])]*len(dims),
       [[0]+FMAX[:-1] for FMAX in FMAX_eps],  #, [[0]+LHM[i][:50] for LHM in LHM_eps], [[0]+UHM[i][:50] for UHM in UHM_eps],
       [f'dim={dim}x{dim}' for dim in dims],
       #['dominant frequency', 'lesser half maximum', 'upper half maximum'],
       #colors=['g','r','r'],
       y_tag = 'frequency f',
       y_log = True,
       export=True,
       path=sigpath,
       filename=f'FMAX+FWHMto50_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%
qtplot('Total and signal energy to noise level',
       [eps_space]*(2*len(dims)),
       list(np.array(SIGP_eps)/runsteps*accum)+list(np.array(TOTP_eps)/runsteps*accum),
       [f'signal energy for dim={dim:02d}' for dim in dims]+[f'total energy for dim={dim:02d}' for dim in dims],
       colors=['b','g','r','c','y','m'],
       y_tag = 'energy',
       y_log = True,
       export=True,
       path=sigpath,
       filename=f'POWER_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%
qtplot('Signal-to-noise ratio to noise level',
       [eps_space]*len(dims),
       STON_eps,
       [f'dim={dim:02d}x{dim:02d}' for dim in dims],
       y_tag = 'ratio S/N',
       y_log = True,
       export=True,
       path=sigpath,
       filename=f'SN_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%
qtplot('Goodness of fit to Lorentz curve',
       [eps_space]*len(dims),
       SOS_eps,
       [f'dim={dim:02d}x{dim:02d}' for dim in dims],
       y_tag = 'mean deviation',
       y_log = False,
       export=True,
       path=sigpath,
       filename=f'DEV_plot_{parts}parts_accum={accum}.png',
       close=False)
#%%
with open(basepath+f"signal/SN_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(SN_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/FMAX_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(FMAX_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/FWHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(FWHM_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/LHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(LHM_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/UHM_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(UHM_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/TOTP_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(TOTP_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/SIGP_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(SIGP_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/Q_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(Q_eps, f, ensure_ascii=False, indent=4)
with open(basepath+f"signal/STON_{parts}_accum={accum}_dim={dim}_runsteps={runsteps}.txt", 'w', encoding='utf-8') as f:
    json.dump(STON_eps, f, ensure_ascii=False, indent=4)


def plot_execute():
    if sys.flags.interactive != 1 or not hasattr(qt.QtCore, 'PYQT_VERSION'):
        qt.QtGui.QApplication.exec_()
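Two numerical ingredients above are worth spelling out. The spectrum is smoothed by circular convolution with a normalised bump kernel exp(−1/(1 − x²)), a compactly supported mollifier (the script lets the endpoint division by zero underflow to exp(−inf) = 0; the sketch below masks the endpoints explicitly). The smoothed, noise-floor-subtracted peak is then compared against a Lorentzian γ/π · 1/((f − f₀)² + γ²) scaled to the signal power. A standalone sketch of the kernel:

    import numpy as np

    def bump_kernel(res):
        # compactly supported mollifier on [-1, 1], normalised to unit sum
        x = np.linspace(-1, 1, 2 * res + 1)
        k = np.zeros_like(x)
        inside = np.abs(x) < 1
        k[inside] = np.exp(-1 / (1 - x[inside] ** 2))
        return k / k.sum()

    k = bump_kernel(2500)
    print(k.sum(), k.max())   # ~1.0 and the peak weight at x = 0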
evaluation/2Layer phiplot.py (Normal file, 98 lines added)
@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)


path = '/cloud/Public/_data/neuropercolation/2lay/steps=100100/'
suffix = ''

chi = chr(967)
vareps = chr(949)
varphi = chr(981)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10,2))  #+[16,49]

mode = 'density'
ma = []
s = []
k = []
mk = []
PHI = []
lastkurt = None
for dim in dims:
    phis = []
    con_gap = 3
    cons = [(n,(2*n+g)%dim) for n in range(dim) for g in range(0,dim-2,con_gap)]
    dimpath = new_folder(path + f'dim={dim:02}/')
    for epsilon in eps_space:
        try:
            with open(dimpath+f"eps={round(epsilon,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = np.array(json.load(f)[100:])
        except FileNotFoundError:
            print(f'Calculating phi for eps={epsilon}')
            with open(dimpath+f"eps={round(epsilon,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = np.array(json.load(f)[100:])

            ei = np.sum(channels,axis=0)*(1-H2(epsilon))
            ei = list(ei)
            with open(dimpath+f"eps={round(epsilon,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(list(ei),f, ensure_ascii=False,indent=1)

        phi = np.average(ei)
        phis.append(phi)
    PHI.append(phis)
#%%
qtplot("Mean effect integration over noise level",
       [[0]+list(eps_space)]*len(dims),
       [[0]+phi for phi in PHI[::-1]],
       [f'dim={dim:02d}x{dim:02d}' for dim in dims[::-1]],
       y_tag = f'effect integration {varphi}',
       export=True,
       path=dimpath+"",
       filename=f'eps={round(epsilon,3):.3f}_evolution.png',
       close=False)

mode = 'density'
#%%
evaluation/2axplot.py (Normal file, 60 lines added)
@@ -0,0 +1,60 @@
"""
Demonstrates a way to put multiple axes around a single plot.

(This will eventually become a built-in feature of PlotItem)
"""

import pyqtgraph as pg

pg.mkQApp()

pw = pg.PlotWidget()
pw.show()
pw.setWindowTitle('pyqtgraph example: MultiplePlotAxes')
p1 = pw.plotItem
p1.setLabels(left='axis 1')

## create a new ViewBox, link the right axis to its coordinate system
p2 = pg.ViewBox()
p1.showAxis('right')
p1.scene().addItem(p2)
p1.getAxis('right').linkToView(p2)
p2.setXLink(p1)
p1.getAxis('right').setLabel('axis2', color='#0000ff')

## create third ViewBox.
## this time we need to create a new axis as well.
p3 = pg.ViewBox()
ax3 = pg.AxisItem('right')
p1.layout.addItem(ax3, 2, 3)
p1.scene().addItem(p3)
ax3.linkToView(p3)
p3.setXLink(p1)
ax3.setZValue(-10000)
ax3.setLabel('axis 3', color='#ff0000')


## Handle view resizing
def updateViews():
    ## view has resized; update auxiliary views to match
    global p1, p2, p3
    p2.setGeometry(p1.vb.sceneBoundingRect())
    p3.setGeometry(p1.vb.sceneBoundingRect())

    ## need to re-update linked axes since this was called
    ## incorrectly while views had different shapes.
    ## (probably this should be handled in ViewBox.resizeEvent)
    p2.linkedViewChanged(p1.vb, p2.XAxis)
    p3.linkedViewChanged(p1.vb, p3.XAxis)

updateViews()
p1.vb.sigResized.connect(updateViews)


p1.plot([1,2,4,8,16,32])
p2.addItem(pg.PlotCurveItem([10,20,40,80,40,20], pen='b'))
p3.addItem(pg.PlotCurveItem([3200,1600,800,400,200,100], pen='r'))

if __name__ == '__main__':
    pg.exec()
evaluation/4Layer Activity.py (Normal file, 77 lines added)
@@ -0,0 +1,77 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot


import math as m
import numpy as np


vect = np.vectorize

# vectorised log2 with the convention log2(0) = 0
@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

path = '/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=100100_causal/dim=09/batch=0/'
suffix = ''

chi = chr(967)
vareps = chr(949)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10))  #+[16,49]

mode = 'density'
ma = []
s = []
k = []
mk = []
lastkurt = None
for dim in dims[-1:]:
    dimpath = new_folder(path + f'dim={dim:02}/')
    for epsilon in eps_space[:]:
        with open(path+f"eps={round(epsilon,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
            phase_diff = np.array(json.load(f)[:500])
        with open(path+f"eps={round(epsilon,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
            ei = np.array(json.load(f)[:500])  # effect integration series (loaded but not plotted here)

        qtplot(f"Phase relation time series for eps={round(epsilon,3):.3f}",
               [list(range(500))],
               [phase_diff],
               x_tag = 'time step',
               y_tag = 'phase difference',
               y_range = (-m.pi,m.pi),
               export=True,
               path=dimpath+"evolution/",
               filename=f'eps={round(epsilon,3):.3f}_evolution.png',
               close=False)

mode = 'density'
#%%
evaluation/4Layer Bootstrap Resultant.py (Normal file, 286 lines added)
@@ -0,0 +1,286 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:59:22 2023

@author: astral
"""

import os
import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
from random import sample as choose
from random import random

from plot import qtplot

from neuropercolation import Simulate4Layers


def resultant(sample):
    # mean unit vector of a sample of phase angles
    phase_x = [m.cos(ind) for ind in sample]
    phase_y = [m.sin(ind) for ind in sample]

    return (np.average(phase_x), np.average(phase_y))

def coherence(sample):
    phase_x = [m.cos(ind) for ind in sample]

    return np.average(phase_x)


def bootstrap_stat(whole, sub, strength=10000, estimator='resultant'):
    k = len(sub)

    if estimator=='resultant':
        whole_est = norm(resultant(whole))
        sub_est = norm(resultant(sub))

    boot_dist = []
    for i in range(strength):
        boot_dist.append(norm(resultant(choose(whole,k))))
        if i%1000==999:
            print(f'Done {i:0{len(str(strength))}d} bootstraps')

    confidence = len([val for val in boot_dist if (val-whole_est)<max(sub_est-whole_est,0)])/len(boot_dist)

    return confidence

def bootstrap_res(whole, sub, strength=10000):
    # null distribution of resultant norms over random size-k subsamples
    k = len(sub)

    resultants = []
    for i in range(strength):
        resultants.append(norm(resultant(choose(whole,k))))
        if i%1000==999:
            print(f'Done {i:0{len(str(strength))}d} bootstraps')
    return resultants

def sample_parts(wholes, subs):
    ks = [len(s) for s in subs]
    sample = [pha for j in range(len(wholes)) for pha in choose(wholes[j],ks[j])]

    return sample

def bootstrap_parts(wholes, subs, strength=10000, estimator='resultant',sided='right'):
    ks = [len(s) for s in subs]

    whole = [w for whole in wholes for w in whole]
    sub = [s for sub in subs for s in sub]

    if estimator=='resultant':
        whole_est = norm(resultant(whole))
        sub_est = norm(resultant(sub))

    boot_dist = []
    for i in range(strength):
        sample = [pha for j in range(len(wholes)) for pha in choose(wholes[j],ks[j])]
        boot_dist.append(norm(resultant(sample)))
        if i%1000==999:
            print(f'Done {i:0{len(str(strength))}d} bootstraps')

    if sided=='right':
        confidence = len([val for val in boot_dist if val<sub_est])/len(boot_dist)
    elif sided=='double':
        confidence = len([val for val in boot_dist if abs(val-whole_est)<abs(sub_est-whole_est)])/len(boot_dist)
    elif sided=='left':
        confidence = len([val for val in boot_dist if val>sub_est])/len(boot_dist)
    else:
        raise NotImplementedError
    return confidence

def bootstrap_res_parts(wholes, subs, strength=10000):
    ks = [len(s) for s in subs]

    whole = [w for whole in wholes for w in whole]
    whole_est = norm(resultant(whole))

    resultants = []
    for i in range(strength):
        sample = [pha for j in range(len(wholes)) for pha in choose(wholes[j],ks[j])]
        resultants.append(norm(resultant(sample)))
        if i%1000==999:
            print(f'Done {i:0{len(str(strength))}d} bootstraps')

    return resultants

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

extremes = None
maxdt = 250

stp = 1000100
batch = 0

eps_space = list(np.linspace(0.01,0.5,50))

print(f'Started at {datetime.now()}')

for dim in [9]:
    for eps in eps_space[:1]:  #list(eps_space[3:8])+list(eps_space[:3])+list(eps_space[8:]):
        eps = round(eps,3)
        path = '/cloud/Public/_data/neuropercolation/4lay/cons=dimtimesdimby3_steps=100100/dim=09_cons=27/batch=0/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except FileNotFoundError:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = diff(phase_abs[0],phase_abs[1])
            phase_diff = [round(pha,6) for pha in phase_diff]

            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)


        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)

        try:
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)
        except FileNotFoundError:
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            ei = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channels]

            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(ei, f, indent=1)

        strength = 100000
        extremes = 10000  #[l//2 for l in lens]
        ext_rat = extremes/(stp-100)

        circparts = 100
        pha_dev = m.pi/circparts
        pha_max = np.max(np.abs(phase_diff))

        phase_in_part = lambda ph, i: abs(ph)<=pha_max/circparts if i==0 else i*pha_max/circparts<abs(ph)<=(i+1)*pha_max/circparts

        # bucket the time steps by |phase difference|, each bucket sorted by ei
        dev_parts = [sorted([i for i,val in enumerate(ei[:-maxdt]) if phase_in_part(phase_diff[i],j)],
                            key = lambda i: ei[i]) for j in range(circparts)]

        ext_fracs = [round(len(dev_parts[i])*ext_rat,6) for i in range(circparts)]
        ext_parts = [int(ext_fracs[i]) for i in range(circparts)]
        ext_probs = [round(ext_fracs[i]%1,6) for i in range(circparts)]

        exts = [ext_parts[i]+(random()<ext_probs[i]) for i in range(circparts)]

        top_parts = [dev_parts[i][-exts[i]:] if exts[i]>0 else [] for i in range(circparts)]

        pha_parts = [[phase_diff[i] for i in part] for part in dev_parts]
        syn_parts = [[phase_diff[i] for i in part] for part in top_parts]
        ran_sam = sample_parts(dev_parts, top_parts)
        top_sam = sample_parts(top_parts, top_parts)

        dev = []
        for part in dev_parts:
            dev.extend(part)
        top = []
        for part in top_parts:
            top.extend(part)

        assert sorted(top)==sorted(top_sam)

        ran_res = []
        top_res = []
        tot_res = []

        bot_ei = []
        top_ei = []

        maxdt = 250
        for dt in range(maxdt):
            tot_pha = phase_diff[dt:dt-maxdt]
            ran_pha = [(phase_diff[i+dt]) for i in ran_sam]
            top_pha = [(phase_diff[i+dt]) for i in top_sam]

            tot_res.append( norm(resultant(tot_pha)) )
            ran_res.append( norm(resultant(ran_pha)) )
            top_res.append( norm(resultant(top_pha)) )


        sampling = 'samedist_varmaxpha'
        plotpath = new_folder(path+f'{sampling}_causal_roll{pha_dev:.3f}/')  #'bootstrap={strength}/'
        savepath = new_folder(plotpath+f'extremes={extremes}_bootstrength={strength}/')

        newpath = new_folder(path+f'extremes={extremes}_bootstrength={strength}/')
        resultant_stat = bootstrap_res(phase_diff,top,strength=strength)

        confs = []
        confdt = 250
        for dt in range(confdt+1):
            pha_evs = [[phase_diff[i+dt] for i in part] for part in dev_parts]
            syn_evs = [[phase_diff[i+dt] for i in part] for part in top_parts]
            phas = [phase_diff[i+dt] for i in top]
            res_pha = norm(resultant(phas))

            conf = len([val for val in resultant_stat if val<res_pha])/len(resultant_stat)
            confs.append((dt, conf))
            print(f'Confidence for dim={dim} dt={dt}: {conf}')
        with open(newpath+f"eps={round(eps,3):.3f}_dt={confdt}_simpbootstrap.txt", 'w', encoding='utf-8') as f:
            json.dump(list(confs), f, indent=1)

        qtplot(f'Diachronic resultant for dim={dim} eps={eps:.3f} with 4 layers',
               [np.array(range(maxdt))]*3,
               [tot_res, ran_res, top_res],
               ['Average Resultant', f'random sample of {extremes}',
                f'top sample of {extremes} ei'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=newpath,
               filename=f'Random Diachronic Resultant Norm eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
               close=True)

        # qtplot(f'Diachronic resultant phase for dim={dim} eps={eps:.3f} with 4 layers',
        #        [np.array(range(maxdt))]*3,
        #        [top_ph[0], dis_ph[0], tot_ph],
        #        ['top {extremes} ei', 'dis {extremes} ei',
        #         'Average'],
        #        x_tag = 'dt',
        #        y_tag = 'phase',
        #        export=True,
        #        path=plotpath,
        #        filename=f'All Diachronic Resultant Phase eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        # qtplot(f'Diachronic ei for dim={dim} with 4 layers',
        #        [np.array(range(maxdt))]*4,
        #        [bot_ei, top_ei, dev_ei, tot_ei],
        #        ['EI ev of bottom {extremes} ei', 'EI ev of top {extremes} ei',
        #         'EI ev of phase filtered ei', 'Average EI'],
        #        x_tag = 'dt',
        #        y_tag = 'average ei',
        #        export=True,
        #        path=path+'plots/',
        #        filename=f'Diachronic EI balanced for eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')

# qtplot(f'Resultant and EI evolution for dim={dim} with 4 layers',
#        [[0]+eps_space]*2,
#        [max(av_ei)*diff_res, av_ei],
#        ['Resultant', 'avEI'],
#        export=True,
#        path=path,
#        filename=f'Resultant and EI for dim={dim}.png',
#        close=True)
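The statistical question behind `bootstrap_res` above: is the resultant length (phase concentration) of the top-EI subsample higher than that of a random subsample of equal size? The null distribution is built from random size-k subsamples of the whole series, and the reported confidence is the fraction of the null lying below the observed resultant, i.e. one minus an empirical right-tail p-value. A condensed sketch:

    import math as m
    import numpy as np
    from random import sample as choose

    def resultant_norm(phases):
        # length of the mean unit vector of a sample of angles
        return np.hypot(np.mean(np.cos(phases)), np.mean(np.sin(phases)))

    def bootstrap_confidence(whole, sub, strength=1000):
        null = [resultant_norm(choose(whole, len(sub))) for _ in range(strength)]
        return sum(v < resultant_norm(sub) for v in null) / strength

    rng = np.random.default_rng(1)
    whole = list(rng.uniform(-m.pi, m.pi, 2000))
    sub = sorted(whole, key=abs)[:100]        # the 100 phases closest to zero
    print(bootstrap_confidence(whole, sub))   # ~1.0: far more concentrated than chance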
evaluation/4Layer Causal 4way resultant.py (Normal file, 369 lines added)
@@ -0,0 +1,369 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Aug 21 14:59:22 2023
|
||||
|
||||
@author: astral
|
||||
"""
|
||||
|
||||
import json
|
||||
import math as m
|
||||
import numpy as np
|
||||
from numpy.linalg import norm
|
||||
from datetime import datetime
|
||||
from random import sample as choose
|
||||
|
||||
from plot import qtplot
|
||||
|
||||
from neuropercolation import Simulate4Layers
|
||||
|
||||
eps_space = list(np.linspace(0.01,0.2,20))
|
||||
|
||||
def resultant(sample):
|
||||
phase_x = [m.cos(ind) for ind in sample]
|
||||
phase_y = [m.sin(ind) for ind in sample]
|
||||
|
||||
return (np.average(phase_x), np.average(phase_y))
|
||||
|
||||
phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
|
||||
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
|
||||
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)
|
||||
|
||||
extremes = None
maxdt = 300

stp = 10100
batch = 0

for dim in [9]:
    for eps in eps_space[1:41:2]:
        path=f'/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps={stp}/dim={dim:02}/batch={batch}/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = diff(phase_abs[0],phase_abs[1])
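            # Each entry of osc[i] is a 2-component activation of oscillator i;
            # np.arctan2(*act[::-1]) reads it as a point (x, y) and returns its
            # angle, so phase_abs holds one phase trajectory per oscillator and
            # phase_diff their wrapped difference at every time step.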
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)

        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)

        try:
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            ei = [np.sum(cha)*(1-H2(eps)) for cha in channels]

            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(ei, f, indent=1)

        pha_center = av_diff
        pha_dev = m.pi/32

        from_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else from_sync(i-1) if i>0 else None
        to_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else to_sync(i+1) if i+1<len(phase_diff) else None
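        # from_sync(i) walks backwards until the trajectory was last clearly in
        # sync (|diff| < 0.08*pi -> True) or clearly orthogonal
        # (0.42*pi < |diff| < 0.58*pi -> False); to_sync(i) does the same going
        # forwards. Both are recursive lambdas, so a very long stretch between
        # the two regimes can in principle hit Python's default recursion limit
        # (sys.setrecursionlimit would need to be raised in that case).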
        dev_ind = sorted([i for i,val in enumerate(ei[:-maxdt]) if (pha_center-pha_dev)<=abs(phase_diff[i])<=(pha_center+pha_dev)], key = lambda i: ei[i])
        dev_00 = [i for i in dev_ind if from_sync(i) and to_sync(i) ]
        dev_01 = [i for i in dev_ind if from_sync(i) and to_sync(i) is False]
        dev_10 = [i for i in dev_ind if from_sync(i) is False and to_sync(i) ]
        dev_11 = [i for i in dev_ind if from_sync(i) is False and to_sync(i) is False]

        lens = [len(dev_00),len(dev_01),len(dev_10),len(dev_11)]

        #if not extremes:
        extremes = [100]*4 #[l//2 for l in lens]

        print(lens)
        #print(all_res, av_diff)

        # bot_00 = dev_00[:extremes[0]]
        # bot_01 = dev_01[:extremes[1]]
        # bot_10 = dev_10[:extremes[2]]
        # bot_11 = dev_11[:extremes[3]]

        top_00 = dev_00[-extremes[0]:]
        top_01 = dev_01[-extremes[1]:]
        top_10 = dev_10[-extremes[2]:]
        top_11 = dev_11[-extremes[3]:]

        with open(path+f"eps={round(eps,3):.3f}_states.txt", 'r', encoding='utf-8') as f:
            states = json.load(f)[100:]
        with open(path+f"eps={round(eps,3):.3f}_coupling.txt", 'r', encoding='utf-8') as f:
            coupling = json.load(f)
            coupling = [tuple(edge) for edge in coupling]
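        # dev_ind is sorted ascending by EI, so each top_xx above holds the
        # `extremes` highest-EI deviation states of its class; the two digits
        # encode (from_sync, to_sync) with 0 = sync and 1 = orthogonal,
        # matching the sts/sto/ots/oto plot names below.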
        for top,name in [(top_00,'top_00'),(top_01,'top_01'),(top_10,'top_10'),(top_11,'top_11')]:
            for i in top:
                causal_init = states[i].translate(str.maketrans('', '', '.-='))
                path_c = path+f'causal_maxdt={maxdt}/{name}/{i:0{len(str(stp))}}/'

                try:
                    with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                        phasediff = json.load(f)
                except:
                    sim=Simulate4Layers(dim,
                                        eps,
                                        coupling=coupling,
                                        init=causal_init,
                                        noeffect=0,
                                        steps=maxdt,
                                        draw=None,
                                        save='all',
                                        path=path_c,
                                        )

                    with open(path_c+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                        activation = json.load(f)

                    osc = list(zip(*activation))
                    phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
                    phasediff = diff(phase_abs[0],phase_abs[1])

                    with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                        json.dump(list(phasediff), f, indent=1)
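        # The same top states are now re-simulated with noeffect=-1 instead of
        # noeffect=0. Judging from the causal_/original_ directory names, the
        # noeffect flag appears to control whether the recorded causal influence
        # is suppressed (0) or left untouched (-1); the exact semantics live in
        # neuropercolation.Simulate4Layers.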
        for top,name in [(top_00,'top_00'),(top_01,'top_01'),(top_10,'top_10'),(top_11,'top_11')]:
            for i in top:
                causal_init = states[i].translate(str.maketrans('', '', '.-='))
                path_c = path+f'original_maxdt={maxdt}/{name}/{i:0{len(str(stp))}}/'

                try:
                    with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                        phasediff = json.load(f)
                except:
                    sim=Simulate4Layers(dim,
                                        eps,
                                        coupling=coupling,
                                        init=causal_init,
                                        noeffect=-1,
                                        steps=maxdt,
                                        draw=None,
                                        save='all',
                                        path=path_c,
                                        )

                    with open(path_c+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                        activation = json.load(f)

                    osc = list(zip(*activation))
                    phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
                    phasediff = diff(phase_abs[0],phase_abs[1])

                    with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                        json.dump(list(phasediff), f, indent=1)
        # bot_res = []
        top_res = []
        dis_res = []
        tot_res = []

        # bot_ph = []
        top_ph = []
        dis_ph = []
        tot_ph = []

        # bot_ei = []
        top_ei = []
        # dev_ei = []
        tot_ei = []
        for dt in range(maxdt):
            # bot_pha = [[abs(phase_diff[i+dt]) for i in bot_00],
            #            [abs(phase_diff[i+dt]) for i in bot_01],
            #            [abs(phase_diff[i+dt]) for i in bot_10],
            #            [abs(phase_diff[i+dt]) for i in bot_11]]

            top_pha = [[abs(phase_diff[i+dt]) for i in top_00],
                       [abs(phase_diff[i+dt]) for i in top_01],
                       [abs(phase_diff[i+dt]) for i in top_10],
                       [abs(phase_diff[i+dt]) for i in top_11]]

            dis_00 = []
            dis_01 = []
            dis_10 = []
            dis_11 = []
            for i in top_00:
                path_c = path+f'causal_maxdt={maxdt}/top_00/{i:0{len(str(stp))}}/'
                with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                    dis_00.append(abs(json.load(f)[dt]))
            for i in top_01:
                path_c = path+f'causal_maxdt={maxdt}/top_01/{i:0{len(str(stp))}}/'
                with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                    dis_01.append(abs(json.load(f)[dt]))
            for i in top_10:
                path_c = path+f'causal_maxdt={maxdt}/top_10/{i:0{len(str(stp))}}/'
                with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                    dis_10.append(abs(json.load(f)[dt]))
            for i in top_11:
                path_c = path+f'causal_maxdt={maxdt}/top_11/{i:0{len(str(stp))}}/'
                with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                    dis_11.append(abs(json.load(f)[dt]))

            dis_pha = [dis_00, dis_01, dis_10, dis_11]
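            # dis_* holds the lag-dt phase differences of the noeffect=0
            # re-simulations, top_* those of the original trajectories from the
            # same start states; comparing their resultants below is presumably
            # the causal contrast the causal_/original_ directories refer to.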
            tot_pha = np.abs(phase_diff[dt:dt-maxdt])

            # bot_res.append( [norm(resultant(bot_pha[i])) for i in range(4)] )
            top_res.append( [norm(resultant(top_pha[i])) for i in range(4)] )
            dis_res.append( [norm(resultant(dis_pha[i])) for i in range(4)] )
            tot_res.append( norm(resultant(tot_pha)) )

            # bot_ph.append( [phase(*resultant(bot_pha[i])) for i in range(4)] )
            top_ph.append( [phase(*resultant(top_pha[i])) for i in range(4)] )
            dis_ph.append( [phase(*resultant(dis_pha[i])) for i in range(4)] )
            tot_ph.append( phase(*resultant(tot_pha)) )

            # bot_ei.append( [np.average([ei[i+dt] for i in bot]) for bot in [bot_00,bot_01,bot_10,bot_11]] )
            top_ei.append( [np.average([ei[i+dt] for i in top]) for top in [top_00,top_01,top_10,top_11]] )
            # dev_ei.append( [np.average([ei[i+dt] for i in dev]) for dev in [dev_00,dev_01,dev_10,dev_11]] )
            tot_ei.append( np.average(ei[dt:dt-maxdt]) )

            if dt%10==0:
                print(f'Done dt={dt}')
        # bot_res = list(zip(*bot_res))
        top_res = list(zip(*top_res))
        dis_res = list(zip(*dis_res))

        # bot_ph = list(zip(*bot_ph))
        top_ph = list(zip(*top_ph))
        dis_ph = list(zip(*dis_ph))

        # bot_ei = list(zip(*bot_ei))
        top_ei = list(zip(*top_ei))
        # dev_ei = list(zip(*dev_ei))

        plotpath = path+'4waycausal/'
        qtplot(f'Diachronic resultant sync to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_res[0], dis_res[0], tot_res],
               [f'sync to sync top {extremes} ei',
                f'sync to sync dis {extremes} ei', 'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} sts dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant sync to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_res[1], dis_res[1], tot_res],
               [f'sync to orth top {extremes} ei', f'sync to orth dis {extremes} ei',
                'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} sto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant orth to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_res[2], dis_res[2], tot_res],
               [f'orth to sync top {extremes} ei', f'orth to sync dis {extremes} ei',
                'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} ots dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant orth to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_res[3], dis_res[3], tot_res],
               [f'orth to orth top {extremes} ei', f'orth to orth dis {extremes} ei',
                'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} oto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)

        qtplot(f'Diachronic resultant phase sync to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_ph[0], dis_ph[0], tot_ph],
               [f'sync to sync top {extremes} ei', f'sync to sync dis {extremes} ei',
                'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} sts dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase sync to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_ph[1], dis_ph[1], tot_ph],
               [f'sync to orth top {extremes} ei', f'sync to orth dis {extremes} ei',
                'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} sto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase orth to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_ph[2], dis_ph[2], tot_ph],
               [f'orth to sync top {extremes} ei', f'orth to sync dis {extremes} ei',
                'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} ots dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase orth to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*3,
               [top_ph[3], dis_ph[3], tot_ph],
               [f'orth to orth top {extremes} ei', f'orth to orth dis {extremes} ei',
                'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} oto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        # qtplot(f'Diachronic ei for dim={dim} with 4 layers',
        #        [np.array(range(maxdt))]*4,
        #        [bot_ei, top_ei, dev_ei, tot_ei],
        #        ['EI ev of bottom {extremes} ei', 'EI ev of top {extremes} ei',
        #         'EI ev of phase filtered ei', 'Average EI'],
        #        x_tag = 'dt',
        #        y_tag = 'average ei',
        #        export=True,
        #        path=path+'plots/',
        #        filename=f'Diachronic EI balanced for eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')

# qtplot(f'Resultant and EI evolution for dim={dim} with 4 layers',
#        [[0]+eps_space]*2,
#        [max(av_ei)*diff_res, av_ei],
#        ['Resultant', 'avEI'],
#        export=True,
#        path=path,
#        filename=f'Resultant and EI for dim={dim}.png',
#        close=True)
191
evaluation/4Layer Causal Significance.py
Normal file
@@ -0,0 +1,191 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 9 22:23:30 2023

@author: astral
"""

import os
import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
from random import sample as choose

from plot import qtplot

from neuropercolation import Simulate4Layers

eps_space = list(np.linspace(0.01,0.2,20))

def resultant(sample):
    phase_x = [m.cos(ind) for ind in sample]
    phase_y = [m.sin(ind) for ind in sample]

    return (np.average(phase_x), np.average(phase_y))

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

extremes = None
maxdt = 200

stp = 1000100
batch = 0
print(f'Started at {datetime.now()}')

for dim in [9]:
    for eps in eps_space[4:]:
        eps = round(eps,3)
        path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = diff(phase_abs[0],phase_abs[1])
            phase_diff = [round(pha,6) for pha in phase_diff]

            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)

        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)

        try:
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            ei = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channels]

            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(ei, f, indent=1)

        extremes = 10000 #[l//2 for l in lens]

        ei_ind = [i for i,val in enumerate(ei[:-maxdt]) if val>0]

        print(f'{len(ei_ind)} states with positive EI')

        samples = choose(ei_ind, extremes)
        sampling = 'allpos_ei'
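        # `choose` is random.sample, so this draws `extremes` indices uniformly
        # from all states with positive EI rather than taking EI extremes as
        # the other evaluation scripts do.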
        with open(path+f"eps={round(eps,3):.3f}_states.txt", 'r', encoding='utf-8') as f:
            states = json.load(f)[100:]
        with open(path+f"eps={round(eps,3):.3f}_coupling.txt", 'r', encoding='utf-8') as f:
            coupling = json.load(f)
            coupling = [tuple(edge) for edge in coupling]

        phase_pairs = []
        ei_pairs = []
        for num,i in enumerate(samples):
            causal_init = states[i].translate(str.maketrans('', '', '.-='))

            sim = Simulate4Layers( dim,
                                   eps,
                                   coupling=coupling,
                                   init=causal_init,
                                   noeffect=0,
                                   steps=1,
                                   draw=None,
                                   )

            activation = sim._activations()
            channel = sim._channels()

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phasediff_c = np.round(diff(phase_abs[0],phase_abs[1]),6)
            ei_c = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channel]
            max_ei_c = max([np.sum(cha) for cha in channel])

            sim = Simulate4Layers( dim,
                                   eps,
                                   coupling=coupling,
                                   init=causal_init,
                                   noeffect=-1,
                                   steps=1,
                                   draw=None,
                                   )

            activation = sim._activations()
            channel = sim._channels()

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phasediff_i = np.round(diff(phase_abs[0],phase_abs[1]),6)
            ei_i = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channel]
            max_ei_i = max([np.sum(cha) for cha in channel])

            phase_pairs.append((phasediff_i[-1], phasediff_c[-1]))
            ei_pairs.append((ei_i[-1], ei_c[-1]))

            savepath = path + sampling + '/'
            new_folder(savepath)

            if num%100==99:
                print(f'Done {num:0{len(str(extremes))}d}')
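        # Each pair stores (value from the noeffect=-1 run, value from the
        # noeffect=0 run) for the same start state, so the two distributions
        # below can be compared for a significance test.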
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'w', encoding='utf-8') as f:
            json.dump(phase_pairs, f, indent=1)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'w', encoding='utf-8') as f:
            json.dump(ei_pairs, f, indent=1)

        phases_i, phases_c = zip(*phase_pairs)
        ei_i, ei_c = zip(*ei_pairs)

        phase_space = np.linspace(0,m.pi,100+1)
        ei_space = np.linspace(0,np.max([ei_i,ei_c]),100+1)

        phase_dist_i = [len([ph for ph in phases_i if low<=ph<high])/extremes for low,high in zip(phase_space[:-1],phase_space[1:])]
        phase_dist_c = [len([ph for ph in phases_c if low<=ph<high])/extremes for low,high in zip(phase_space[:-1],phase_space[1:])]

        max_ei = max(max_ei_i, max_ei_c)

        ei_dist_i = [len([e for e in ei_i if round(e/(1-H2(eps)))==val])/extremes for val in range(max_ei)]
        ei_dist_c = [len([e for e in ei_c if round(e/(1-H2(eps)))==val])/extremes for val in range(max_ei)]
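        # round(e/(1-H2(eps))) recovers the integer channel count behind each
        # EI value, so ei_dist_i/_c are histograms over whole channels. Note
        # that phase_space only covers [0, pi], so negative phase differences
        # fall outside these phase bins.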
        qtplot(f'Phase distribution for dim={dim} eps={eps:.3f} with 4 layers',
               [phase_space[:-1]]*2,
               [phase_dist_i, phase_dist_c],
               ['Phase dist with ei',
                'Phase dist without ei'],
               x_tag = 'phase',
               y_tag = 'density',
               export=True,
               path=savepath,
               filename=f'Phase dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
               close=True)

        qtplot(f'EI distribution for dim={dim} eps={eps:.3f} with 4 layers',
               [range(max_ei)]*2,
               [ei_dist_i, ei_dist_c],
               ['EI dist with ei',
                'EI dist without ei'],
               x_tag = 'ei',
               y_tag = 'density',
               export=True,
               path=savepath,
               filename=f'EI dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
               close=True)

        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')
405
evaluation/4Layer Causal resultant.py
Normal file
@@ -0,0 +1,405 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:59:22 2023

@author: astral
"""

import os
import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
#from random import sample as choose

from plot import qtplot

from neuropercolation import Simulate4Layers

eps_space = list(np.linspace(0.01,0.2,20))

def resultant(sample):
    phase_x = [m.cos(ind) for ind in sample]
    phase_y = [m.sin(ind) for ind in sample]

    return (np.average(phase_x), np.average(phase_y))

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

extremes = None
maxdt = 200

stp = 1000100
batch = 0

print(f'Started at {datetime.now()}')
for dim in [9]:
    for eps in eps_space[14:]:
        eps = round(eps,3)
        path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = diff(phase_abs[0],phase_abs[1])
            phase_diff = [round(pha,6) for pha in phase_diff]

            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)

        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)

        try:
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            ei = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channels]

            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(ei, f, indent=1)

        extremes = 10000 #[l//2 for l in lens]
        ext_rat = extremes/(stp-100)

        circparts = 32
        pha_dev = m.pi/circparts
        pha_max = np.max(np.abs(phase_diff))
        phase_in_part = lambda ph, i: abs(ph)<=pha_max/circparts if i==0 else i*pha_max/circparts<abs(ph)<=(i+1)*pha_max/circparts

        dev_parts = [sorted([i for i,val in enumerate(ei[:-maxdt]) if phase_in_part(phase_diff[i],j)],
                            key = lambda i: ei[i]) for j in range(circparts)]

        ext_parts = [int(np.ceil(len(dev_parts[i])*ext_rat)) for i in range(circparts)]

        top_parts = [dev_parts[i][-ext_parts[i]:] if ext_parts[i]>0 else [] for i in range(circparts)]
        bot_parts = [dev_parts[i][:ext_parts[i]] if ext_parts[i]>0 else [] for i in range(circparts)]

        top = []
        for part in top_parts:
            top.extend(part)
        bot = []
        for part in bot_parts:
            bot.extend(part)

        print(len(top), len(bot), extremes, 'equal?')
        sampling = 'samedist_varmaxpha'
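        # Stratified sampling: the phase range up to pha_max is split into
        # `circparts` bins of equal width, and from every bin the top (bottom)
        # `ext_rat` fraction by EI is taken, so top and bot follow the same
        # phase distribution as the full run while selecting EI extremes.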
        # pha_center = av_diff
        # pha_dev = m.pi/8

        # from_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else from_sync(i-1) if i>0 else None
        # to_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else to_sync(i+1) if i+1<len(phase_diff) else None

        # infra_phase = lambda ph: (pha_center-pha_dev)<=abs(ph)<=(pha_center )
        # supra_phase = lambda ph: (pha_center )< abs(ph)<=(pha_center+pha_dev)

        # dev_inf = sorted([i for i,val in enumerate(ei[:-maxdt]) if infra_phase(phase_diff[i])], key = lambda i: ei[i])
        # dev_sup = sorted([i for i,val in enumerate(ei[:-maxdt]) if supra_phase(phase_diff[i])], key = lambda i: ei[i])

        # ext_inf = round(extremes*len(dev_inf)/(len(dev_inf)+len(dev_sup)))
        # ext_sup = round(extremes*len(dev_sup)/(len(dev_inf)+len(dev_sup)))

        # top_inf = dev_inf[-ext_inf:]
        # top_sup = dev_sup[-ext_sup:]

        # top = top_inf + top_sup

        # print(len(top), extremes, 'equal?')
        # sampling = 'biequal'

        with open(path+f"eps={round(eps,3):.3f}_states.txt", 'r', encoding='utf-8') as f:
            states = json.load(f)[100:]
        with open(path+f"eps={round(eps,3):.3f}_coupling.txt", 'r', encoding='utf-8') as f:
            coupling = json.load(f)
            coupling = [tuple(edge) for edge in coupling]
        for i in top:
            causal_maxdt=0
            for file in os.listdir(path):
                f = os.path.join(path, file)
                if not os.path.isfile(f):
                    c_maxdt = file.replace('causal_maxdt=','')
                    if c_maxdt != file:
                        causal_maxdt = int(c_maxdt)

            if causal_maxdt>=maxdt:
                path_c = path+f'causal_maxdt={causal_maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'
            else:
                path_c = path+f'causal_maxdt={maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'

            causal_init = states[i].translate(str.maketrans('', '', '.-='))

            try:
                with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                    phasediff = json.load(f)
            except:
                sim=Simulate4Layers(dim,
                                    eps,
                                    coupling=coupling,
                                    init=causal_init,
                                    noeffect=0,
                                    steps=maxdt,
                                    draw=None,
                                    save='simple',
                                    path=path_c,
                                    )

                with open(path_c+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                    activation = json.load(f)

                osc = list(zip(*activation))
                phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
                phasediff = diff(phase_abs[0],phase_abs[1])

                with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                    json.dump(list(phasediff), f, indent=1)
        for i in []:#top:
            original_maxdt=0
            for file in os.listdir(path):
                f = os.path.join(path, file)
                if not os.path.isfile(f):
                    o_maxdt = file.replace('original_maxdt=','')
                    if o_maxdt != file:
                        original_maxdt = int(o_maxdt)

            if original_maxdt>=maxdt:
                path_c = path+f'original_maxdt={original_maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'
            else:
                path_c = path+f'original_maxdt={maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'

            causal_init = states[i].translate(str.maketrans('', '', '.-='))

            try:
                with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                    phasediff = json.load(f)
            except:
                sim=Simulate4Layers(dim,
                                    eps,
                                    coupling=coupling,
                                    init=causal_init,
                                    noeffect=-1,
                                    steps=maxdt,
                                    draw=None,
                                    save='simple',
                                    path=path_c,
                                    )

                with open(path_c+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                    activation = json.load(f)

                osc = list(zip(*activation))
                phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
                phasediff = diff(phase_abs[0],phase_abs[1])
                phasediff = [round(pha,6) for pha in phasediff]

                with open(path_c+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                    json.dump(list(phasediff), f, indent=1)
        bot_res = []
        top_res = []
        dis_res = []
        tot_res = []

        bot_ph = []
        top_ph = []
        dis_ph = []
        tot_ph = []

        bot_ei = []
        top_ei = []
        dis_ei = []
        tot_ei = []

        present_dt=0
        for file in os.listdir(path):
            f = os.path.join(path, file)
            if not os.path.isfile(f):
                p_maxdt = file.replace(f'{sampling}_causal_roll{pha_dev:.3f}_maxdt=','')
                if p_maxdt != file:
                    present_dt = int(p_maxdt)

        if present_dt>0:
            try:
                datapath = path + f"{sampling}_causal_roll{pha_dev:.3f}_maxdt={present_dt}/data/"
                with open(datapath+f"eps={round(eps,3):.3f}_bot_dia_res.txt", 'r', encoding='utf-8') as f:
                    bot_res = json.load(f)
                with open(datapath+f"eps={round(eps,3):.3f}_top_dia_res.txt", 'r', encoding='utf-8') as f:
                    top_res = json.load(f)
                with open(datapath+f"eps={round(eps,3):.3f}_dis_dia_res.txt", 'r', encoding='utf-8') as f:
                    dis_res = json.load(f)
                with open(datapath+f"eps={round(eps,3):.3f}_bot_dia_ei.txt", 'r', encoding='utf-8') as f:
                    bot_ei = json.load(f)
                with open(datapath+f"eps={round(eps,3):.3f}_top_dia_ei.txt", 'r', encoding='utf-8') as f:
                    top_ei = json.load(f)
                with open(datapath+f"eps={round(eps,3):.3f}_dis_dia_ei.txt", 'r', encoding='utf-8') as f:
                    dis_ei = json.load(f)
            except:
                present_dt=0
                bot_res = []
                top_res = []
                dis_res = []
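        # If a previous run already produced diachronic data for this sampling,
        # resume from its last dt instead of starting over.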
        for dt in range(present_dt,maxdt):
            top_pha = [(phase_diff[i+dt]) for i in top]
            bot_pha = [(phase_diff[i+dt]) for i in bot]

            dis = []
            for i in top:
                causal_maxdt=0
                for file in os.listdir(path):
                    f = os.path.join(path, file)
                    if not os.path.isfile(f):
                        c_maxdt = file.replace('causal_maxdt=','')
                        if c_maxdt != file:
                            causal_maxdt = int(c_maxdt)

                if causal_maxdt>=maxdt:
                    path_c = path+f'causal_maxdt={causal_maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'
                else:
                    path_c = path+f'causal_maxdt={maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'

                with open(path_c+f'eps={round(eps,3):.3f}_phase_diff.txt', 'r', encoding='utf-8') as f:
                    dis.append((json.load(f)[dt]))

            dei = []
            for i in top:
                causal_maxdt=0
                for file in os.listdir(path):
                    f = os.path.join(path, file)
                    if not os.path.isfile(f):
                        c_maxdt = file.replace('causal_maxdt=','')
                        if c_maxdt != file:
                            causal_maxdt = int(c_maxdt)

                if causal_maxdt>=maxdt:
                    path_c = path+f'causal_maxdt={causal_maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'
                else:
                    path_c = path+f'causal_maxdt={maxdt}/eps={round(eps,3):.3f}/{i:0{len(str(stp))}}/'

                with open(path_c+f'eps={round(eps,3):.3f}_channels.txt', 'r', encoding='utf-8') as f:
                    dei.append(np.sum(json.load(f)[dt])*(1-H2(eps)))

            dis_pha = dis
            tot_pha = (phase_diff[dt:dt-maxdt])

            bot_res.append( [norm(resultant(bot_pha))] )
            top_res.append( [norm(resultant(top_pha))] )
            dis_res.append( [norm(resultant(dis_pha))] )
            tot_res.append( norm(resultant(tot_pha)) )

            # bot_ph.append( [phase(*resultant(bot_pha[i])) for i in range(1)] )
            # top_ph.append( [phase(*resultant(top_pha[i])) for i in range(1)] )
            # dis_ph.append( [phase(*resultant(dis_pha[i])) for i in range(1)] )
            # tot_ph.append( phase(*resultant(tot_pha)) )

            bot_ei.append( [np.average([ei[i+dt] for i in bot])] )
            top_ei.append( [np.average([ei[i+dt] for i in top])] )
            dis_ei.append( [np.average(dei)] )
            tot_ei.append( np.average(ei[dt:dt-maxdt]) )

            if dt%10==9:
                print(f'Done dt={dt:{len(str(maxdt))}d}')
        plotpath = path+f'{sampling}_causal_roll{pha_dev:.3f}_maxdt={maxdt}/'
        new_folder(plotpath+'data/')

        with open(plotpath+f"data/eps={round(eps,3):.3f}_bot_dia_res.txt", 'w', encoding='utf-8') as f:
            json.dump(list(bot_res), f, indent=1)
        with open(plotpath+f"data/eps={round(eps,3):.3f}_top_dia_res.txt", 'w', encoding='utf-8') as f:
            json.dump(list(top_res), f, indent=1)
        with open(plotpath+f"data/eps={round(eps,3):.3f}_dis_dia_res.txt", 'w', encoding='utf-8') as f:
            json.dump(list(dis_res), f, indent=1)
        with open(plotpath+f"data/eps={round(eps,3):.3f}_bot_dia_ei.txt", 'w', encoding='utf-8') as f:
            json.dump(list(bot_ei), f, indent=1)
        with open(plotpath+f"data/eps={round(eps,3):.3f}_top_dia_ei.txt", 'w', encoding='utf-8') as f:
            json.dump(list(top_ei), f, indent=1)
        with open(plotpath+f"data/eps={round(eps,3):.3f}_dis_dia_ei.txt", 'w', encoding='utf-8') as f:
            json.dump(list(dis_ei), f, indent=1)

        bot_res = list(zip(*bot_res))
        top_res = list(zip(*top_res))
        dis_res = list(zip(*dis_res))

        # bot_ph = list(zip(*bot_ph))
        # top_ph = list(zip(*top_ph))
        # dis_ph = list(zip(*dis_ph))

        bot_ei = list(zip(*bot_ei))
        top_ei = list(zip(*top_ei))
        dis_ei = list(zip(*dis_ei))
        qtplot(f'Diachronic resultant for dim={dim} eps={eps:.3f} with 4 layers',
               [np.array(range(maxdt))]*4,
               [tot_res, bot_res[0], dis_res[0], top_res[0]],
               ['Average Resultant', f'bottom {extremes} ei',
                f'dis {extremes} ei', f'top {extremes} ei'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'All Diachronic Resultant Norm eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
               close=True)
        # qtplot(f'Diachronic resultant phase for dim={dim} eps={eps:.3f} with 4 layers',
        #        [np.array(range(maxdt))]*3,
        #        [top_ph[0], dis_ph[0], tot_ph],
        #        ['top {extremes} ei', 'dis {extremes} ei',
        #         'Average'],
        #        x_tag = 'dt',
        #        y_tag = 'phase',
        #        export=True,
        #        path=plotpath,
        #        filename=f'All Diachronic Resultant Phase eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        # qtplot(f'Diachronic ei for dim={dim} with 4 layers',
        #        [np.array(range(maxdt))]*4,
        #        [bot_ei, top_ei, dev_ei, tot_ei],
        #        ['EI ev of bottom {extremes} ei', 'EI ev of top {extremes} ei',
        #         'EI ev of phase filtered ei', 'Average EI'],
        #        x_tag = 'dt',
        #        y_tag = 'average ei',
        #        export=True,
        #        path=path+'plots/',
        #        filename=f'Diachronic EI balanced for eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')

# qtplot(f'Resultant and EI evolution for dim={dim} with 4 layers',
#        [[0]+eps_space]*2,
#        [max(av_ei)*diff_res, av_ei],
#        ['Resultant', 'avEI'],
#        export=True,
#        path=path,
#        filename=f'Resultant and EI for dim={dim}.png',
#        close=True)
@@ -14,6 +14,7 @@ from datetime import datetime
 from plot import qtplot

 eps_space = list(np.linspace(0.005,0.5,100))
+eps_space= eps_space[1::2]

 def resultant(sample):
     phase_x = [m.cos(ind) for ind in sample]
@@ -24,37 +25,68 @@ def resultant(sample):
def H2(x):
    return -x*m.log2(x)-(1-x)*m.log2(1-x)

for dim in [7]:
    diff_res = [0]
dims=list(range(3,10))

R=[]
EI=[]
for dim in dims:
    cons = [(n,(2*n+m)%dim) for n in range(dim) for m in range(0,dim-2,3)]
    path=f'/cloud/Public/_data/neuropercolation/4lay/cons=dimtimesdimby3_steps=100100/dim={dim:02d}_cons={len(cons)}/batch=0/'

    diff_res = [1]
    av_ei = [0]
    for eps in eps_space:
        path=f'/cloud/Public/_data/neuropercolation/4lay/steps=100000/dim={dim:02}/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)[100:]
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)[100:]
        except:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            osc = list(zip(*activation))
            phase = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = (phase[1]-phase[0]+m.pi)%(2*m.pi)-m.pi
            ei = [np.sum(cha)*(1-H2(eps)) for cha in channels]

        res = np.linalg.norm(resultant(phase_diff))
        diff_res.append(res)

        with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
            channels = json.load(f)[100:]

        ei = [np.sum(cha)*(1-H2(eps)) for cha in channels]
        av_ei.append(np.average(ei))

        with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
            json.dump(list(phase_diff), f, indent=1)

        with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
            json.dump(ei, f, indent=1)
        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')
    R.append(diff_res)
    EI.append(av_ei)

    qtplot(f'Resultant and EI evolution for dim={dim} with 4 layers',
           [[0]+eps_space]*2,
           [diff_res, av_ei],
           ['Resultant', 'avEI'],
           export=True,
           path=path,
           filename=f'Resultant and EI for dim={dim}.png',
           close=True)
#%%
savepath=f'/cloud/Public/_data/neuropercolation/4lay/cons=dimtimesdimby3_steps=100100/plots/'

qtplot(f'Total resultant of phase-difference',
       [[0]+eps_space]*len(dims),
       R[::-1],
       [f'{dim}x{dim}' for dim in dims[::-1]],
       y_tag = 'concentration',
       export=False,
       path=savepath,
       filename=f'Resultant_ev.png',
       close=False)

qtplot(f'Average Effect Integrated Information',
       [[0]+eps_space]*len(dims),
       EI[::-1],
       [f'{dim}x{dim}' for dim in dims[::-1]],
       y_tag='integrated information',
       export=False,
       path=savepath,
       filename=f'EI_ev.png',
       close=False)
277
evaluation/4Layer Resultant 4way Phase Evolution.py
Normal file
@@ -0,0 +1,277 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 14:59:22 2023

@author: astral
"""

import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
from random import sample as choose

from plot import qtplot

eps_space = list(np.linspace(0.01,0.2,20))

def resultant(sample):
    phase_x = [m.cos(ind) for ind in sample]
    phase_y = [m.sin(ind) for ind in sample]

    return (np.average(phase_x), np.average(phase_y))

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

extremes = None
maxdt = 300

for dim in [9]:
    for eps in eps_space:
        path=f'/cloud/Public/_data/neuropercolation/4lay/steps=100100/dim=09/'

        try:
            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = diff(phase_abs[0],phase_abs[1])

            with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)
        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)

        try:
            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = json.load(f)
        except:
            with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = json.load(f)[100:]

            ei = [np.sum(cha)*(1-H2(eps)) for cha in channels]

            with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(ei, f, indent=1)

        pha_center = av_diff
        pha_dev = m.pi/32

        from_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else from_sync(i-1) if i>0 else None
        to_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else to_sync(i+1) if i+1<len(phase_diff) else None

        dev_ind = sorted([i for i,val in enumerate(ei[:-maxdt]) if (pha_center-pha_dev)<=abs(phase_diff[i])<=(pha_center+pha_dev)], key = lambda i: ei[i])
        dev_00 = [i for i in dev_ind if from_sync(i) and to_sync(i) ]
        dev_01 = [i for i in dev_ind if from_sync(i) and to_sync(i) is False]
        dev_10 = [i for i in dev_ind if from_sync(i) is False and to_sync(i) ]
        dev_11 = [i for i in dev_ind if from_sync(i) is False and to_sync(i) is False]

        lens = [len(dev_00),len(dev_01),len(dev_10),len(dev_11)]

        #if not extremes:
        extremes = [1000]*4 #[l//2 for l in lens]

        print(lens)
        #print(all_res, av_diff)

        # bot_00 = dev_00[:extremes[0]]
        # bot_01 = dev_01[:extremes[1]]
        # bot_10 = dev_10[:extremes[2]]
        # bot_11 = dev_11[:extremes[3]]

        top_00 = dev_00[-extremes[0]:]
        top_01 = dev_01[-extremes[1]:]
        top_10 = dev_10[-extremes[2]:]
        top_11 = dev_11[-extremes[3]:]
        # bot_res = []
        top_res = []
        # dev_res = []
        tot_res = []

        # bot_ph = []
        top_ph = []
        # dev_ph = []
        tot_ph = []

        # bot_ei = []
        top_ei = []
        # dev_ei = []
        tot_ei = []
        for dt in range(maxdt):
            # bot_pha = [[abs(phase_diff[i+dt]) for i in bot_00],
            #            [abs(phase_diff[i+dt]) for i in bot_01],
            #            [abs(phase_diff[i+dt]) for i in bot_10],
            #            [abs(phase_diff[i+dt]) for i in bot_11]]

            top_pha = [[abs(phase_diff[i+dt]) for i in top_00],
                       [abs(phase_diff[i+dt]) for i in top_01],
                       [abs(phase_diff[i+dt]) for i in top_10],
                       [abs(phase_diff[i+dt]) for i in top_11]]

            # dev_pha = [[abs(phase_diff[i+dt]) for i in dev_00],
            #            [abs(phase_diff[i+dt]) for i in dev_01],
            #            [abs(phase_diff[i+dt]) for i in dev_10],
            #            [abs(phase_diff[i+dt]) for i in dev_11]]

            tot_pha = np.abs(phase_diff[dt:dt-maxdt])

            # bot_res.append( [norm(resultant(bot_pha[i])) for i in range(4)] )
            top_res.append( [norm(resultant(top_pha[i])) for i in range(4)] )
            # dev_res.append( [norm(resultant(dev_pha[i])) for i in range(4)] )
            tot_res.append( norm(resultant(tot_pha)) )

            # bot_ph.append( [phase(*resultant(bot_pha[i])) for i in range(4)] )
            top_ph.append( [phase(*resultant(top_pha[i])) for i in range(4)] )
            # dev_ph.append( [phase(*resultant(dev_pha[i])) for i in range(4)] )
            tot_ph.append( phase(*resultant(tot_pha)) )

            # bot_ei.append( [np.average([ei[i+dt] for i in bot]) for bot in [bot_00,bot_01,bot_10,bot_11]] )
            top_ei.append( [np.average([ei[i+dt] for i in top]) for top in [top_00,top_01,top_10,top_11]] )
            # dev_ei.append( [np.average([ei[i+dt] for i in dev]) for dev in [dev_00,dev_01,dev_10,dev_11]] )
            tot_ei.append( np.average(ei[dt:dt-maxdt]) )

            if dt%10==0:
                print(f'Done dt={dt}')

        # bot_res = list(zip(*bot_res))
        top_res = list(zip(*top_res))
        # dev_res = list(zip(*dev_res))

        # bot_ph = list(zip(*bot_ph))
        top_ph = list(zip(*top_ph))
        # dev_ph = list(zip(*dev_ph))

        # bot_ei = list(zip(*bot_ei))
        top_ei = list(zip(*top_ei))
        # dev_ei = list(zip(*dev_ei))

        plotpath = path+'4waymedian/'
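        # Only the top_* and tot_* series are computed above (the bot_*/dev_*
        # variants are commented out), so each plot below charts a top series
        # against the run-wide average.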
        qtplot(f'Diachronic resultant sync to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               # [bot_res[0], top_res[0], dev_res[0], tot_res],
               [top_res[0], tot_res],
               [f'sync to sync top {extremes} ei', 'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} sts dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant sync to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_res[1], tot_res],
               [f'sync to orth top {extremes} ei', 'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} sto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant orth to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_res[2], tot_res],
               [f'orth to sync top {extremes} ei', 'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} ots dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant orth to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_res[3], tot_res],
               [f'orth to orth top {extremes} ei', 'Average Resultant'],
               x_tag = 'dt',
               y_tag = 'concentration',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Norm eps={round(eps,3):.3f} oto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)

        qtplot(f'Diachronic resultant phase sync to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_ph[0], tot_ph],
               [f'sync to sync top {extremes} ei', 'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} sts dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase sync to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_ph[1], tot_ph],
               [f'sync to orth top {extremes} ei', 'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} sto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase orth to sync for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_ph[2], tot_ph],
               [f'orth to sync top {extremes} ei', 'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} ots dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        qtplot(f'Diachronic resultant phase orth to orth for dim={dim} with 4 layers',
               [np.array(range(maxdt))]*2,
               [top_ph[3], tot_ph],
               [f'orth to orth top {extremes} ei', 'Average'],
               x_tag = 'dt',
               y_tag = 'phase',
               export=True,
               path=plotpath,
               filename=f'Diachronic Resultant Phase eps={round(eps,3):.3f} oto dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
               close=True)
        # qtplot(f'Diachronic ei for dim={dim} with 4 layers',
        #        [np.array(range(maxdt))]*4,
        #        [bot_ei, top_ei, dev_ei, tot_ei],
        #        ['EI ev of bottom {extremes} ei', 'EI ev of top {extremes} ei',
        #         'EI ev of phase filtered ei', 'Average EI'],
        #        x_tag = 'dt',
        #        y_tag = 'average ei',
        #        export=True,
        #        path=path+'plots/',
        #        filename=f'Diachronic EI balanced for eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
        #        close=True)

        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')

# qtplot(f'Resultant and EI evolution for dim={dim} with 4 layers',
#        [[0]+eps_space]*2,
#        [max(av_ei)*diff_res, av_ei],
#        ['Resultant', 'avEI'],
#        export=True,
#        path=path,
#        filename=f'Resultant and EI for dim={dim}.png',
#        close=True)
@@ -32,7 +32,7 @@ maxdt = 300

 for dim in [7]:
     for eps in eps_space:
-        path=f'/cloud/Public/_data/neuropercolation/4lay/cons=7-knight_steps=1000100/dim=07/batch=0/'
+        path=f'/cloud/Public/_data/neuropercolation/4lay/cons=9-3dist_steps=1000100/dim=09/batch=1/'

         try:
             with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
@@ -67,11 +67,14 @@
         pha_center = av_diff
         pha_dev = m.pi/32

-        from_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else from_sync(i-1)
+        from_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else from_sync(i-1) if i>0 else None
+        to_sync = lambda i: True if abs(phase_diff[i])<0.08*m.pi else False if 0.42*m.pi<abs(phase_diff[i])<0.58*m.pi else to_sync(i+1) if i+1<len(phase_diff) else None

         dev_ind = sorted([i for i,val in enumerate(ei[:-maxdt]) if (pha_center-pha_dev)<=abs(phase_diff[i])<=(pha_center+pha_dev)], key = lambda i: ei[i])
-        dev_in_ind = [i for i in dev_ind if not from_sync(i)]
-        dev_out_ind = [i for i in dev_ind if from_sync(i)]
+        dev_00_ind = [i for i in dev_ind if from_sync(i) and to_sync(i) ]
+        dev_01_ind = [i for i in dev_ind if from_sync(i) and to_sync(i) is False]
+        dev_10_ind = [i for i in dev_ind if from_sync(i) is False and to_sync(i) ]
+        dev_11_ind = [i for i in dev_ind if from_sync(i) is False and to_sync(i) is False]

         devm_in_ind = [i for i in dev_in_ind if phase_diff[i]<0]
         devp_in_ind = [i for i in dev_in_ind if phase_diff[i]>0]
@@ -108,19 +111,22 @@
         dev_ei = []
         tot_ei = []
         for dt in range(maxdt):
-            bot_pha = [abs(phase_diff[i+dt]) for i in bot_ind]
-            top_pha = [abs(phase_diff[i+dt]) for i in top_ind]
-            dev_pha = [abs(phase_diff[i+dt]) for i in dev_ind]
+            bot_pha = [[abs(phase_diff[i+dt]) for i in bot_in_ind],
+                       [abs(phase_diff[i+dt]) for i in bot_out_ind]]
+            top_pha = [[abs(phase_diff[i+dt]) for i in top_in_ind],
+                       [abs(phase_diff[i+dt]) for i in top_out_ind]]
+            dev_pha = [[abs(phase_diff[i+dt]) for i in dev_in_ind],
+                       [abs(phase_diff[i+dt]) for i in dev_out_ind]]
             tot_pha = np.abs(phase_diff[dt:dt-maxdt])

-            bot_res.append( norm(resultant(bot_pha)) )
-            top_res.append( norm(resultant(top_pha)) )
-            dev_res.append( norm(resultant(dev_pha)) )
+            bot_res.append( [norm(resultant(bot_pha[i])) for i in [0,1]] )
+            top_res.append( [norm(resultant(top_pha[i])) for i in [0,1]] )
+            dev_res.append( [norm(resultant(dev_pha[i])) for i in [0,1]] )
             tot_res.append( norm(resultant(tot_pha)) )

-            bot_ph.append( phase(*resultant(bot_pha)) )
-            top_ph.append( phase(*resultant(top_pha)) )
-            dev_ph.append( phase(*resultant(dev_pha)) )
+            bot_ph.append( [phase(*resultant(bot_pha[i])) for i in [0,1]] )
+            top_ph.append( [phase(*resultant(top_pha[i])) for i in [0,1]] )
+            dev_ph.append( [phase(*resultant(dev_pha[i])) for i in [0,1]] )
             tot_ph.append( phase(*resultant(tot_pha)) )

             bot_ei.append( np.average([ei[i+dt] for i in bot_ind]) )
@@ -131,28 +137,58 @@
             if dt%100==99:
                 print(f'Done dt={dt}')

+        bot_res = list(zip(*bot_res))
+        top_res = list(zip(*top_res))
+        dev_res = list(zip(*dev_res))
+
+        bot_ph = list(zip(*bot_ph))
+        top_ph = list(zip(*top_ph))
+        dev_ph = list(zip(*dev_ph))
+
         qtplot(f'Diachronic resultant for dim={dim} with 4 layers',
                [np.array(range(maxdt))]*4,
-               [bot_res, top_res, dev_res, tot_res],
-               ['Resultant ev of bottom {extremes} ei', 'Resultant ev of top {extremes} ei',
-                'Resultant ev of phase filtered ei', 'Average Resultant'],
+               [bot_res[0], top_res[0], dev_res[0], tot_res],
+               ['in bottom {extremes} ei', 'in top {extremes} ei',
+                'in all filtered {len(dev_in_ind)} ei', 'Average Resultant'],
                x_tag = 'dt',
                y_tag = 'concentration',
                export=True,
-               path=path+'plots/',
-               filename=f'Diachronic Resultant Norm balanced eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
+               path=path+'inout/',
+               filename=f'Diachronic Resultant Norm in eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
                close=True)
+        qtplot(f'Diachronic resultant for dim={dim} with 4 layers',
+               [np.array(range(maxdt))]*4,
+               [bot_res[1], top_res[1], dev_res[1], tot_res],
+               ['out bottom {extremes} ei', 'out top {extremes} ei',
+                'out all filtered {len(dev_out_ind)} ei', 'Average Resultant'],
+               x_tag = 'dt',
+               y_tag = 'concentration',
+               export=True,
+               path=path+'inout/',
+               filename=f'Diachronic Resultant Norm out eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
+               close=True)

         qtplot(f'Diachronic resultant phase for dim={dim} with 4 layers',
                [np.array(range(maxdt))]*4,
-               [bot_ph, top_ph, dev_ph, tot_ph],
-               ['bottom {extremes} ei', 'top {extremes} ei',
-                'all filtered ei', 'Average'],
+               [bot_ph[0], top_ph[0], dev_ph[0], tot_ph],
+               ['in bottom {extremes} ei', 'in top {extremes} ei',
+                'in all filtered {len(dev_in_ind)} ei', 'Average'],
                x_tag = 'dt',
-               y_tag = 'concentration',
+               y_tag = 'phase',
                export=True,
-               path=path+'plots/',
-               filename=f'Diachronic Resultant Phase balanced eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
+               path=path+'inout/',
+               filename=f'Diachronic Resultant Phase in eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
                close=True)
+        qtplot(f'Diachronic resultant phase for dim={dim} with 4 layers',
+               [np.array(range(maxdt))]*4,
+               [bot_ph[1], top_ph[1], dev_ph[1], tot_ph],
+               ['out bottom {extremes} ei', 'out top {extremes} ei',
+                'out all filtered {len(dev_out_ind)} ei', 'Average'],
+               x_tag = 'dt',
+               y_tag = 'phase',
+               export=True,
+               path=path+'inout/',
+               filename=f'Diachronic Resultant Phase out eps={round(eps,3):.3f} dim={dim} extremes={extremes} roll{pha_dev:.3f}.png',
+               close=True)

         # qtplot(f'Diachronic ei for dim={dim} with 4 layers',
106
evaluation/4Layer Resultant.py
Normal file
@@ -0,0 +1,106 @@
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Aug 21 14:59:22 2023
|
||||
|
||||
@author: astral
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import math as m
|
||||
import numpy as np
|
||||
from numpy.linalg import norm
|
||||
from datetime import datetime
|
||||
from random import sample as choose
|
||||
|
||||
from plot import qtplot
|
||||
|
||||
eps_space = list(np.linspace(0.005,0.5,100))
|
||||
|
||||
def resultant(sample):
|
||||
phase_x = [m.cos(ind) for ind in sample]
|
||||
phase_y = [m.sin(ind) for ind in sample]
|
||||
|
||||
return (np.average(phase_x), np.average(phase_y))
|
||||
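# resultant() returns the mean unit vector of a phase sample: its norm is the
# Kuramoto order parameter (1 = perfect locking, 0 = uniform spread) and its
# angle is the dominant phase. E.g. resultant([0, m.pi]) is approximately (0, 0).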

def H2(x):
    return -x*m.log2(x)-(1-x)*m.log2(1-x)

def new_folder(p):
    if not os.path.exists(p):
        os.makedirs(p)
    return p

extremes = None
maxdt = 300

path = '/cloud/Public/_data/neuropercolation/4lay/steps=100000_diares_manydim/'

dims = list(range(7,9))

resus = []
phalocks = []
for dim in dims:
    resu = []
    phalock = []
    for eps in eps_space:
        dimpath = f'/cloud/Public/_data/neuropercolation/4lay/steps=100000_diares_manydim/dim={dim:02d}/'

        try:
            with open(dimpath+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
                phase_diff = json.load(f)
        except FileNotFoundError:
            with open(dimpath+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
                activation = json.load(f)[100:]

            osc = list(zip(*activation))
            phase = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
            phase_diff = (phase[1]-phase[0]+m.pi)%(2*m.pi)-m.pi
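            # The modular arithmetic wraps the raw layer-phase difference into
            # [-pi, pi), so anti-phase states map to the interval ends rather
            # than to multiples of 2*pi.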

            with open(dimpath+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
                json.dump(list(phase_diff), f, indent=1)

        all_res = norm(resultant(phase_diff))
        av_diff = np.arccos(all_res)
        all_pha = np.arctan2(*resultant(phase_diff)[::-1])

        resu.append(all_res)
        phalock.append(all_pha)

    plotpath = new_folder(dimpath + 'resplot/')
    with open(plotpath+"resultants.txt", 'w', encoding='utf-8') as f:
        json.dump(list(resu), f, indent=1)
    with open(plotpath+"mainphase.txt", 'w', encoding='utf-8') as f:
        json.dump(list(phalock), f, indent=1)

    resus.append(resu)
    phalocks.append(phalock)

plotspath = new_folder(path + 'resplot/')

#%%
qtplot('Resultant evolution with 4 layers',
       [eps_space]*len(dims),
       resus,
       [f'Resultant for size {dim}x{dim}' for dim in dims],
       x_tag = 'epsilon',
       y_tag = 'resultant',
       lw=3,
       export=True,
       path=plotspath,
       filename='resultant.png',
       close=False)

qtplot('Dominant phase evolution with 4 layers',
       [eps_space]*len(dims),
       phalocks,
       [f'dominant phase for size {dim}x{dim}' for dim in dims],
       x_tag = 'epsilon',
       y_tag = 'dominant phase',
       lw=3,
       export=True,
       path=plotspath,
       filename='mainphase.png',
       close=True)
25
evaluation/4Layer Significance.py
Normal file
@ -0,0 +1,25 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 10 07:07:28 2023

@author: astral
"""

import sys as _sys
from sign import significance as _sign
import numpy as _np

import gc as _gc

_dim = 9
_epsilons = _np.linspace(0.01,0.20,20)

_exeps = int(_sys.argv[1])
for _eps in _epsilons[_exeps-1:_exeps]:
    _sign(_dim,_eps)

    for name in dir():
        if not name.startswith('_'):
            del globals()[name]
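    # Only names without a leading underscore are deleted: everything this
    # script itself needs is deliberately underscore-prefixed, so the cleanup
    # pass (plus gc.collect below) can reclaim memory between runs.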

    _gc.collect()
98
evaluation/4Layer phiplot.py
Normal file
@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 30 14:25:12 2022

@author: timof
"""

import sys
import os
import json

from plot import qtplot

import math as m
import numpy as np

vect = np.vectorize

@vect
def log2(x):
    try:
        return m.log2(x)
    except ValueError:
        if x==0:
            return float(0)
        else:
            raise

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

path = '/cloud/Public/_data/neuropercolation/4lay/cons=dimtimesdimby3_steps=100100/'
suffix = ''

chi = chr(967)
vareps = chr(949)
varphi = chr(981)

vals = [[],[]]

runsteps = 1000100

eps_space = np.linspace(0.005, 0.5, 100)
eps_space = eps_space[1::2]

dims = list(range(3,10))#+[16,49]

mode='density'
ma=[]
s=[]
k=[]
mk=[]
PHI=[]
lastkurt=None
for dim in dims:
    phis=[]
    con_gap = 3
    cons = [(n,(2*n+m)%dim) for n in range(dim) for m in range(0,dim-2,con_gap)]
    dimpath = new_folder(path + f'dim={dim:02}_cons={len(cons)}/batch=0/')
    for epsilon in eps_space:
        try:
            with open(dimpath+f"eps={round(epsilon,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
                ei = np.array(json.load(f)[100:])
        except FileNotFoundError:
            print('Calcing phi')
            with open(dimpath+f"eps={round(epsilon,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
                channels = np.array(json.load(f)[100:])

            ei = channels*(1-H2(epsilon))
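            # Each open channel carries at most 1 bit; noise eps reduces the
            # per-channel capacity to (1 - H2(eps)) bits, so ei is the channel
            # count scaled by that capacity.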

            # the ei cache must be opened for writing here, and numpy arrays
            # are not JSON-serializable, hence list(ei)
            with open(dimpath+f"eps={round(epsilon,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
                json.dump(list(ei), f, indent=1)

        phi=np.average(ei)
        phis.append(phi)
    PHI.append(phis)
#%%
qtplot("Mean effect integration over noise level",
       [eps_space]*len(dims),
       PHI[::-1],
       [f'dim={dim:02d}x{dim:02d}' for dim in dims[::-1]],
       y_tag = f'effect integration {varphi}',
       export=True,
       path=dimpath+"",
       filename=f'eps={round(epsilon,3):.3f}_evolution.png',
       close=False)

mode = 'density'
#%%
173
evaluation/4laysign.py
Normal file
@ -0,0 +1,173 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 10 07:07:28 2023

@author: astral
"""

import sys as _sys
from sign import sampling as _samp
from sign import plotting as _plot
from sign import full_stats as _stats
import numpy as _np
import json
from plot import qtplot

import gc as _gc

_dim = 9
_epsilons = _np.linspace(0.005,0.20,40)
_noeffects = list(range(1,41,5))
try:
    _exeps = int(_sys.argv[1])
    _epses = _epsilons[_exeps-1:_exeps]
    _nf = int(_sys.argv[2])
    _nfs = _noeffects[_nf-1:_nf]
except IndexError:
    _epses = _epsilons
    _nfs = list(range(1,41,5))
_samplemethod = 'allpos_ei'
_ext = 5000
_dt = 40

path = '/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'
plotpath = path + _samplemethod + f'_samples={_ext}/'
savepath = path + _samplemethod + f'_samples={_ext}/dt={_dt}/'

meanlist = []
stdlist = []
integrallist = []
cohendlist = []
norm_p_list = []
ttest_p_list = []
sign_p_list = []

resultant_i = []
resultant_c = []
for _eps in _epses[11:21]:
    res_evol_i = []
    res_evol_c = []
    for nf in _nfs:
        #_samp(_dim,_eps,_samplemethod,_ext,_dt,noeff=nf)
        resultant_i = []
        resultant_c = []
        for t in range(1,_dt):
            res_i, res_c = _stats(_dim,_eps,_samplemethod,_ext,t,noeff=nf,ret='phases')
            resultant_i.append(res_i)
            resultant_c.append(res_c)

        res_evol_i.append(resultant_i)
        res_evol_c.append(resultant_c)
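        # res_i and res_c are the resultant norms that full_stats (ret='phases')
        # computes for the two simulation conditions at lag t, so each pass over
        # t yields one resultant-vs-dt curve per disconnect duration nf.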
        # mean,std,integral,cohend,norm_p,ttest_p,sign_p = _stats(_dim,_eps,_samplemethod,_ext,1)

        # meanlist.append(mean)
        # stdlist.append(std)
        # integrallist.append(integral)
        # cohendlist.append(-cohend)
        # norm_p_list.append(norm_p)
        # ttest_p_list.append(ttest_p)
        # sign_p_list.append(sign_p)

    # with open(savepath+f"phasediff_means.txt", 'w', encoding='utf-8') as f:
    #     json.dump(meanlist, f, indent=1)
    # with open(savepath+f"phasediff_stds.txt", 'w', encoding='utf-8') as f:
    #     json.dump(stdlist, f, indent=1)
    # with open(savepath+f"phasediff_integrals.txt", 'w', encoding='utf-8') as f:
    #     json.dump(integrallist, f, indent=1)
    # with open(savepath+f"phasediff_cohends.txt", 'w', encoding='utf-8') as f:
    #     json.dump(cohendlist, f, indent=1)
    # with open(savepath+f"phasediff_norm_ps.txt", 'w', encoding='utf-8') as f:
    #     json.dump(norm_p_list, f, indent=1)
    # with open(savepath+f"phasediff_ttest_ps.txt", 'w', encoding='utf-8') as f:
    #     json.dump(ttest_p_list, f, indent=1)
    # with open(savepath+f"phasediff_sign_ps.txt", 'w', encoding='utf-8') as f:
    #     json.dump(sign_p_list, f, indent=1)

    # stdlowlist = [meanlist[eps] - stdlist[eps] for eps in range(len(meanlist))]
    # stdhighlist = [meanlist[eps] + stdlist[eps] for eps in range(len(meanlist))]

    # norm_conf = [1-p for p in norm_p_list]
    # ttest_conf = [1-p for p in ttest_p_list]
    # sign_conf = [1-p for p in sign_p_list]
    res_tot_i = _np.average(res_evol_i,axis=0)
    res_evol_c.append(res_tot_i)
    qtplot(f'Resultant reduction disconnect eps={_eps} dim={_dim} with 4 layers',
           [list(range(_dt-1))]*(len(_nfs)+1),
           res_evol_c,
           [f'{nf} disconnected steps' for nf in _nfs]+['connected steps'],
           x_tag = 'dt',
           y_tag = 'resultant',
           export=True,
           path=plotpath,
           filename=f'Resultant reduction disconnect eps={_eps} dim={_dim} extremes={_ext}.png',
           close=True)

    # qtplot(f'Mean causal phase reduction for dt={_dt} dim={_dim} with 4 layers',
    #        [_epsilons]*3,
    #        [stdlowlist, stdhighlist, meanlist],
    #        ['Low standard deviation',
    #         'High standard deviation',
    #         'Mean'],
    #        colors=['r','r','g'],
    #        x_tag = f'noise level {chr(949)}',
    #        y_tag = 'abs phase reduction',
    #        export=True,
    #        path=savepath,
    #        filename=f'Mean phase reduction dim={_dim} extremes={_ext}.png',
    #        close=True)
    # qtplot(f'Phase reduction probability for dt={_dt} dim={_dim} with 4 layers',
    #        [_epsilons],
    #        [integrallist],
    #        ['probability of phase reduction'],
    #        x_tag = f'noise level {chr(949)}',
    #        y_tag = 'abs phase reduction',
    #        export=True,
    #        path=savepath,
    #        filename=f'Probability of phase reduction dim={_dim} extremes={_ext}.png',
    #        close=True)
    # qtplot(f'Effect size phase reduction for dt={_dt} dim={_dim} with 4 layers',
    #        [_epsilons],
    #        [cohendlist],
    #        ["Absolute Cohen's d (negative)"],
    #        x_tag = f'noise level {chr(949)}',
    #        y_tag = 'effect size',
    #        x_range = (0,0.2),
    #        # y_range = (0.05,1),
    #        y_log = True,
    #        export=True,
    #        path=savepath,
    #        filename=f'Effect size phase reduction dim={_dim} extremes={_ext}.png',
    #        close=False)
    # qtplot(f'Test p-values for dt={_dt} dim={_dim} with 4 layers',
    #        [_epsilons]*5,
    #        [[0.001 for eps in _epsilons],[0.0001 for eps in _epsilons],
    #         norm_p_list,ttest_p_list,sign_p_list],
    #        ['0.001 limit','0.0001 limit',
    #         'p-value for normality', 'p-value for mean difference', 'p-value for median'],
    #        x_tag = f'noise level {chr(949)}',
    #        y_tag = 'p-value',
    #        y_range = (0,0.01),
    #        lw=2,
    #        export=True,
    #        path=savepath,
    #        filename=f'P-values phase reduction dim={_dim} extremes={_ext}.png',
    #        close=True)
    # qtplot(f'Test confidences for dt={_dt} dim={_dim} with 4 layers',
    #        [_epsilons]*5,
    #        [[0.999 for eps in _epsilons],[0.9999 for eps in _epsilons],
    #         norm_conf,ttest_conf,sign_conf],
    #        ['0.999 limit', '0.9999 limit',
    #         'confidence for normality', 'confidence for mean difference', 'confidence for median'],
    #        x_tag = f'noise level {chr(949)}',
    #        y_tag = 'confidence',
    #        y_range = (0.99,1),
    #        lw=2,
    #        export=True,
    #        path=savepath,
    #        filename=f'Confidences phase reduction dim={_dim} extremes={_ext}.png',
    #        close=True)
173
evaluation/phi.py
Normal file
@ -0,0 +1,173 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 04:39:54 2023

@author: astral
"""
import os
import json
import math as m
import numpy as np
from numpy.linalg import norm
from datetime import datetime
from random import sample as choose
from random import random
from numba import jit, njit, prange

from plot import qtplot

from neuropercolation import Simulate4Layers

eps_space = list(np.linspace(0.01,0.2,20))
def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

@njit
def neighbor(digit0, digit1, length):
    layer = int(length)
    dim = int(np.sqrt(layer))
    digit0, digit1 = np.array([digit0%dim, digit0//dim]), np.array([digit1%dim, digit1//dim])
    #print(digit0,digit1)
    coord_dif = list(map(abs,digit1 - digit0))
    layer_nbor = 0 in coord_dif and len(set([1,dim-1]).intersection(set(coord_dif))) != 0
    #print(coord_dif, set([1,dim-1]).intersection(set(coord_dif)))
    if layer_nbor:
        return True
    else:
        return False

@njit
def kcomb(zp,zm):
    if zp>2:
        val=1
    elif zm>2:
        val=0
    elif zp==zm:
        val=0.5
    elif zm==2:
        val=0.5**(3-zp)
    elif zp==2:
        val=1-0.5**(3-zm)
    elif zm==0 and zp==1:
        val=9/16
    elif zp==0 and zm==1:
        val=7/16
    else:
        raise NotImplementedError(zp,zm)
    return val
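# kcomb(zp, zm) looks up the conditional probability that a cell's majority
# rule is satisfied given zp within-partition inputs known to be 1 and zm known
# to be 0, with the cross-partition inputs marginalized out; the hard-coded
# 9/16 and 7/16 cover the remaining asymmetric cases.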

path = new_folder('/cloud/Public/_data/neuropercolation/1lay/mips/')

def phi(dim,statestr,partstr,eps):
    length = dim**2
    eta = 1-eps
    # statestr=statestr.translate(str.maketrans('','','.-='))

    state = np.array([int(q) for q in statestr])
    state = list(state.reshape((dim,dim)))
    state = [list([int(cell) for cell in row]) for row in state]

    part = np.array([int(p) for p in partstr])
    part = list(part.reshape((dim,dim)))
    part = [list([int(cell) for cell in row]) for row in part]

    inp = [[q+sum([state[(i+1)%dim][j],
                   state[(i-1)%dim][j],
                   state[i][(j+1)%dim],
                   state[i][(j-1)%dim]
                   ]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    beps = [[int(inp[i][j]>2)*eta+int(inp[i][j]<3)*eps for j,q in enumerate(row)] for i,row in enumerate(state)]

    zplus = [[q+sum([state[(i+1)%dim][j]*(part[i][j]==part[(i+1)%dim][j]),
                     state[(i-1)%dim][j]*(part[i][j]==part[(i-1)%dim][j]),
                     state[i][(j+1)%dim]*(part[i][j]==part[i][(j+1)%dim]),
                     state[i][(j-1)%dim]*(part[i][j]==part[i][(j-1)%dim])
                     ]) for j,q in enumerate(row)] for i,row in enumerate(state)]
    zminus = [[(1-q)+sum([(1-state[(i+1)%dim][j])*(part[i][j]==part[(i+1)%dim][j]),
                          (1-state[(i-1)%dim][j])*(part[i][j]==part[(i-1)%dim][j]),
                          (1-state[i][(j+1)%dim])*(part[i][j]==part[i][(j+1)%dim]),
                          (1-state[i][(j-1)%dim])*(part[i][j]==part[i][(j-1)%dim])
                          ]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    kplus = [[kcomb(zplus[i][j],zminus[i][j]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    pi = [[eps*(1-kplus[i][j]) + eta*kplus[i][j] for j,q in enumerate(row)] for i,row in enumerate(state)]

    crossent = [[-beps[i][j]*m.log2(pi[i][j])-(1-beps[i][j])*m.log2(1-pi[i][j]) for j,q in enumerate(row)] for i,row in enumerate(state)]

    return np.sum(crossent) - length*H2(eps)
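# phi(): for a given state and bipartition, beps is each cell's true next-state
# distribution under the full dynamics, while pi is its prediction when inputs
# from the other part are marginalized out (via kcomb). Since each cell's true
# distribution has entropy exactly H2(eps), the summed cross-entropy minus the
# baseline length*H2(eps) equals the summed KL divergence between the two, i.e.
# the effective information of that partition in bits.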

def MIP(dim,statestr,eps):
    lophi=np.inf
    mip = []
    # statestr=statestr.translate(str.maketrans('','','.-='))
    for parti in range(1,2**(dim**2-1)):
        partstr = bin(parti)[2:].zfill(dim**2)
        curphi = phi(dim,statestr,partstr,eps)

        if curphi<lophi:
            lophi=curphi
            mip = [partstr]
        elif curphi==lophi:
            mip.append(partstr)

    print(f'Done with {dim},{eps} = {mip},{lophi}')

    return mip,lophi
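# MIP() brute-forces the minimum information partition: every bipartition of
# the dim*dim cells corresponds to one bit pattern, and iterating parti up to
# 2**(dim**2 - 1) enumerates each unordered bipartition exactly once, because
# the complementary bit pattern names the same cut.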

def calc_mips(dim,eps):
    mip = []
    statestr='0'*dim**2
    # statestr=statestr.translate(str.maketrans('','','.-='))
    for parti in range(1,2**(dim**2-1)):
        partstr = bin(parti)[2:].zfill(dim**2)
        curphi = phi(dim,statestr,partstr,eps)
        mip.append(round(curphi,6))

    mipath = new_folder(path+f'dim={dim:02d}/')
    with open(mipath+f"eps={round(eps,3):.3f}_mips.txt", 'w', encoding='utf-8') as f:
        json.dump(mip, f, indent=1)

def smartMIP(dim,statestr,eps):
    lophi=np.inf
    mip = []

    for parti in range(0,dim**2):
        partstr = bin(2**parti)[2:].zfill(dim**2)
        curphi = phi(dim,statestr,partstr,eps)

        if curphi<lophi:
            lophi=curphi
            mip = [partstr]
        elif curphi==lophi:
            mip.append(partstr)

    mipath = new_folder(path+f'dim={dim:02d}/')
    with open(mipath+f"eps={round(eps,3):.3f}_mips.txt", 'r', encoding='utf-8') as f:
        mips=json.load(f)

    # Pruning heuristic: a partition is re-evaluated only when its phi for the
    # all-zero reference state (precomputed by calc_mips) undercuts the most
    # recently computed phi; note the comparison is against curphi, not lophi.
    for parti in range(1,2**(dim**2-1)):
        partstr = bin(parti)[2:].zfill(dim**2)
        if mips[parti-1]<curphi:
            curphi = phi(dim,statestr,partstr,eps)

            if curphi<lophi:
                lophi=curphi
                mip = [partstr]
            elif curphi==lophi:
                mip.append(partstr)

    return mip,lophi

@ -17,6 +17,11 @@ from PyQt5.QtGui import QFont
import math as m
import numpy as np

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

def __get_color(factor, gamma=0.8):
    frequency=380+factor*(750-380)

@ -75,7 +80,10 @@ def plot_execute():
    if sys.flags.interactive != 1 or not hasattr(qt.QtCore, 'PYQT_VERSION'):
        qt.QtGui.QApplication.exec_()

def qtplot(titletext, spaces, vals, names=None,
           x_tag=f'noise {chr(949)}', y_tag=None, x_log=False, y_log=False, x_range=None, y_range=None,
           colors=None,
           export=False, path=None, filename=None, lw=3, close=False):
    linewidth = lw

    if not close:
@ -83,14 +91,22 @@ def qtplot(titletext, spaces, vals, names, x_tag=f'noise level {chr(949)}', y_ta

    ph = qt.plot()
    ph.showGrid(x=True,y=True)
    if x_range is not None:
        ph.setXRange(*x_range)
    else:
        ph.setXRange(min([min(sp) for sp in spaces]), max([max(sp) for sp in spaces]))
    if y_range is not None:
        ph.setYRange(*y_range)
    else:
        ph.setYRange(min([min(v) for v in vals]), max([max(v) for v in vals]))
    ph.setLogMode(x=x_log, y=y_log)

    #ph.setTitle(title='Susceptibility density evolution for different automaton sizes', offset=(1000,500))#.format(dim))
    ph.setTitle(title=titletext, offset=(1000,500))#.format(dim))
    ph.setLabel('left', y_tag)
    ph.setLabel('bottom', x_tag)
    #ph.setXRange(0, 0.15)

    if names is not None:
        ph.addLegend(offset=(400, 30))

@ -105,22 +121,30 @@ def qtplot(titletext, spaces, vals, names, x_tag=f'noise level {chr(949)}', y_ta
            (100,100,0), (0,100,100), (100,0,100),
            (100,200,0), (0,100,200), (200,0,100),
            (200,100,0), (0,200,100), (100,0,200)]
    else:
        colors = colors[::-1]

    for i in range(len(vals)-1,-1,-1):
        try:
            s = ph.plot(spaces[i],
                        vals[i],
                        name=names[i] if names is not None else None,
                        pen=qt.mkPen(colors[i],
                                     width=linewidth))
        except:
            print('Failed plotting '+names[i])
            raise

    #title_item = qt.TextItem(text=titletext, anchor=(0.5, 7), color='grey')
    #ph.addItem(title_item)
    #title_item.setPos(ph.getViewBox().viewRect().center())
    #font = QFont()
    #font.setPointSize(14)  # Adjust the font size as needed
    #title_item.setFont(font)

    if export:
        new_folder(path)

        exporter = qt.exporters.ImageExporter(ph.plotItem)
29
evaluation/plots/bootstrap_plot.py
Normal file
@ -0,0 +1,29 @@
from plot import qtplot
import json
import math as ma
import numpy as np

path='/cloud/Public/_data/neuropercolation/4lay/cons=dimtimesdimby3_steps=100100/dim=09_cons=27/batch=0/'
eps_sp=np.linspace(0.01,0.2,20)
eps_ep=eps_sp

extremes=10000
strength=100000

savepath=path+f'extremes={extremes}_bootstrength={strength}/'

dt=250
boots=[]
for eps in eps_ep:
    eps=round(eps,3)
    with open(savepath+f'eps={eps:.3f}_dt={dt}_simpbootstrap.txt','r') as f:
        confs=list(zip(*json.load(f)))[1]
    ps=[1-conf for conf in confs]
    boots.append(ps)
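# The bootstrap files hold per-timestep pairs whose second component is a
# confidence level; converting each confidence c to p = 1 - c lets the curve
# be read against the usual significance thresholds.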

qtplot(f"Bootstrapping p-value for noise level {eps}",
       [range(dt+1)],
       boots[-1:],
       [''],
       y_tag='p-value',
       y_log=False)
169
evaluation/plots/plot.py
Normal file
@ -0,0 +1,169 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 16:12:53 2023

@author: astral
"""

import sys
import os

import pyqtgraph as qt
import pyqtgraph.exporters
from PyQt5.QtGui import QFont

import math as m
import numpy as np

def new_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

def __get_color(factor, gamma=0.8):
    frequency=380+factor*(750-380)

    lightfrequency = 0.4*(3*np.log10(frequency/2)-2)/4

    wavelength = 300/lightfrequency

    '''steps of 10Hz: 22 means 220Hz'''

    '''This converts a given wavelength of light to an
    approximate RGB color value. The wavelength must be given
    in values between 0 and 1 for 0=380nm through 1=750nm
    (789 THz through 400 THz).

    Based on code by Dan Bruton
    http://www.physics.sfasu.edu/astro/color/spectra.html
    '''

    wavelength = float(wavelength)
    if wavelength >= 380 and wavelength <= 440:
        attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
        R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
        G = 0.0
        B = (1.0 * attenuation) ** gamma
    elif wavelength >= 440 and wavelength <= 490:
        R = 0.0
        G = ((wavelength - 440) / (490 - 440)) ** gamma
        B = 1.0
    elif wavelength >= 490 and wavelength <= 510:
        R = 0.0
        G = 1.0
        B = (-(wavelength - 510) / (510 - 490)) ** gamma
    elif wavelength >= 510 and wavelength <= 580:
        R = ((wavelength - 510) / (580 - 510)) ** gamma
        G = 1.0
        B = 0.0
    elif wavelength >= 580 and wavelength <= 645:
        R = 1.0
        G = (-(wavelength - 645) / (645 - 580)) ** gamma
        B = 0.0
    elif wavelength >= 645 and wavelength <= 750:
        attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
        R = (1.0 * attenuation) ** gamma
        G = 0.0
        B = 0.0
    else:
        R = 0.0
        G = 0.0
        B = 0.0
    R *= 255
    G *= 255
    B *= 255
    return (int(R), int(G), int(B))
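# __get_color maps a factor in [0,1] onto the visible spectrum and then onto
# RGB via Dan Bruton's piecewise approximation; the intermediate frequency/
# wavelength rescaling compresses the factor range before the lookup.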

def plot_execute():
    if sys.flags.interactive != 1 or not hasattr(qt.QtCore, 'PYQT_VERSION'):
        qt.QtGui.QApplication.exec_()

def qtplot(titletext, spaces, vals, names=None,
           x_tag=f'noise level {chr(949)}', y_tag=None, x_log=False, y_log=False, x_range=None, y_range=None,
           colors=None,
           export=False, path=None, filename=None, lw=3, close=False):
    linewidth = lw

    if not close:
        app = qt.mkQApp()

    ph = qt.plot()
    ph.showGrid(x=True,y=True)
    if x_range is not None:
        ph.setXRange(*x_range)
    else:
        ph.setXRange(min([min(sp) for sp in spaces]), max([max(sp) for sp in spaces]))
    if y_range is not None:
        ph.setYRange(*y_range)
    else:
        ph.setYRange(min([min(v) for v in vals]), max([max(v) for v in vals]))
    ph.setLogMode(x=x_log, y=y_log)

    ph.setTitle(title=titletext, offset=(1000,500))#.format(dim))
    ph.setLabel('left', y_tag)
    ph.setLabel('bottom', x_tag)
    #ph.setXRange(0, 0.15)
    ph.setFlag(ph.ItemIgnoresTransformations, False)  # the relevant line

    if names is not None:
        ph.addLegend(offset=(400, 30),size=20)

    #s = ph.plot(np.linspace(0.01,0.32,32), eps_max_freq0, title=sp_Title, pen='w')
    #s = ph.plot(np.linspace(0.01,0.32,32), eps_max_freq1, title=sp_Title, pen='w')

    if colors=='rgb':
        colors=[__get_color(fac/(len(vals)-1)) for fac in range(len(vals))]
    elif colors is None:
        colors=['r', 'g', 'b', 'y', 'c', 'm', 'w',
                (100,100,0), (0,100,100), (100,0,100),
                (100,200,0), (0,100,200), (200,0,100),
                (200,100,0), (0,200,100), (100,0,200)]
    else:
        colors = colors[::-1]

    for i in range(len(vals)-1,-1,-1):
        try:
            s = ph.plot(spaces[i],
                        vals[i],
                        name=names[i] if names is not None else None,
                        pen=qt.mkPen(colors[i],
                                     width=linewidth))
        except:
            print('Failed plotting series '+(names[i] if names is not None else str(i)))
            raise

    #title_item = qt.TextItem(text=titletext, anchor=(0.5, 7), color='grey')
    #ph.addItem(title_item)
    #title_item.setPos(ph.getViewBox().viewRect().center())
    #font = QFont()
    #font.setPointSize(14)  # Adjust the font size as needed
    #title_item.setFont(font)

    if export:
        new_folder(path)

        exporter = qt.exporters.ImageExporter(ph.plotItem)

        # set export parameters if needed
        exporter.parameters()['width'] = 1200  # (note this also affects height parameter)

        # save to file
        exporter.export(path+filename)

        print(f'Saving to {path+filename}')

    def handleAppExit():
        # Add any cleanup tasks here
        print("closing")

    if close:
        ph.close()
    else:
        app.aboutToQuit.connect(handleAppExit)
        app.exec_()
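# Minimal usage sketch (hypothetical data, relying on the defaults above):
# qtplot('demo plot',
#        [list(range(10))]*2,
#        [[i for i in range(10)], [i*i for i in range(10)]],
#        ['linear', 'quadratic'],
#        y_tag='value')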
374
evaluation/sign.py
Normal file
@ -0,0 +1,374 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 9 22:23:30 2023

@author: astral
"""


def sampling(dim,eps,samplemethod,extremes,dtmax,noeff=1):
    print(f'Starting causal simulation with dim={dim}, eps={eps:.3f}, {samplemethod}, {extremes} extremes for dt={dtmax} and noeff={noeff:03d}')

    import os
    import json
    import math as m
    import numpy as np
    from numpy.linalg import norm
    from datetime import datetime
    from random import sample as choose

    from plot import qtplot

    from neuropercolation import Simulate4Layers

    eps_space = list(np.linspace(0.01,0.2,20))

    def resultant(sample):
        phase_x = [m.cos(ind) for ind in sample]
        phase_y = [m.sin(ind) for ind in sample]

        return (np.average(phase_x), np.average(phase_y))

    def new_folder(path):
        if not os.path.exists(path):
            os.makedirs(path)

    phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
    diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
    H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

    #print(f'Started at {datetime.now()} with eps={eps:.3f}')

    eps = round(eps,3)
    path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

    try:
        with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'r', encoding='utf-8') as f:
            phase_diff = json.load(f)
    except FileNotFoundError:
        with open(path+f"eps={round(eps,3):.3f}_activation.txt", 'r', encoding='utf-8') as f:
            activation = json.load(f)[100:]

        osc = list(zip(*activation))
        phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
        phase_diff = diff(phase_abs[0],phase_abs[1])
        phase_diff = [round(pha,6) for pha in phase_diff]

        with open(path+f"eps={round(eps,3):.3f}_phase_diff.txt", 'w', encoding='utf-8') as f:
            json.dump(list(phase_diff), f, indent=1)

    all_res = norm(resultant(phase_diff))
    av_diff = np.arccos(all_res)

    try:
        with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'r', encoding='utf-8') as f:
            ei = json.load(f)
    except FileNotFoundError:
        with open(path+f"eps={round(eps,3):.3f}_channels.txt", 'r', encoding='utf-8') as f:
            channels = json.load(f)[100:]

        ei = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channels]

        with open(path+f"eps={round(eps,3):.3f}_ei.txt", 'w', encoding='utf-8') as f:
            json.dump(ei, f, indent=1)

    ei_ind = [i for i,val in enumerate(ei) if val>0]

    print(f'{len(ei_ind)} states with positive EI')

    samples = choose(ei_ind, extremes)

    with open(path+f"eps={round(eps,3):.3f}_states.txt", 'r', encoding='utf-8') as f:
        states = json.load(f)[100:]
    with open(path+f"eps={round(eps,3):.3f}_coupling.txt", 'r', encoding='utf-8') as f:
        coupling = json.load(f)
        coupling = [tuple(edge) for edge in coupling]

    phase_pairs = [[] for dt in range(dtmax+1)]
    ei_pairs = [[] for dt in range(dtmax+1)]
    for num,i in enumerate(samples):
        causal_init = states[i].translate(str.maketrans('', '', '.-='))

        sim = Simulate4Layers( dim,
                               eps,
                               coupling=coupling,
                               init=causal_init,
                               noeffect=noeff-1,
                               steps=dtmax,
                               draw=None,
                               )

        activation = sim._activations()
        channel = sim._channels()

        osc = list(zip(*activation))
        phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
        phasediff_c = diff(phase_abs[0],phase_abs[1])
        ei_c = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channel]
        max_ei_c = max([np.sum(cha) for cha in channel])

        sim = Simulate4Layers( dim,
                               eps,
                               coupling=coupling,
                               init=causal_init,
                               noeffect=-1,
                               steps=dtmax,
                               draw=None,
                               )

        activation = sim._activations()
        channel = sim._channels()

        osc = list(zip(*activation))
        phase_abs = np.array([[np.arctan2(*act[::-1]) for act in osc[i]] for i in range(2)])
        phasediff_i = diff(phase_abs[0],phase_abs[1])
        ei_i = [round(np.sum(cha)*(1-H2(eps)),6) for cha in channel]
        max_ei_i = max([np.sum(cha) for cha in channel])

        for dt in range(1,dtmax+1):
            phase_pairs[dt].append((i, phasediff_i[dt], phasediff_c[dt]))
            ei_pairs[dt].append((i, ei_i[dt], ei_c[dt]))

        if num%1000==999:
            print(f'Done {num:0{len(str(extremes))}d}')

    for dt in range(1,dtmax+1):
        savepath = path + samplemethod + f'_samples={extremes}/dt={dt}/noeff={noeff:03d}/'
        new_folder(savepath)
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'w', encoding='utf-8') as f:
            json.dump(phase_pairs[dt], f, indent=1)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'w', encoding='utf-8') as f:
            json.dump(ei_pairs[dt], f, indent=1)
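# sampling() thus re-simulates each sampled high-EI state twice from the same
# initial condition, with the two runs differing only in their 'noeffect'
# setting, and stores the (state index, run-i, run-c) phase-difference and EI
# tuples for every lag dt so the downstream statistics can compare conditions.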

def plotting(dim,eps,samplemethod,extremes,dt):
    import os
    import json
    import math as m
    import numpy as np
    from numpy.linalg import norm
    from datetime import datetime
    from random import sample as choose

    from plot import qtplot

    from neuropercolation import Simulate4Layers

    eps_space = list(np.linspace(0.01,0.2,20))

    def resultant(sample):
        phase_x = [m.cos(ind) for ind in sample]
        phase_y = [m.sin(ind) for ind in sample]

        return (np.average(phase_x), np.average(phase_y))

    def new_folder(path):
        if not os.path.exists(path):
            os.makedirs(path)

    phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
    diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
    H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

    print(f'Started at {datetime.now()} with eps={eps:.3f}')

    eps = round(eps,3)
    path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

    savepath = path + samplemethod + f'_samples={extremes}/dt={dt}/'

    try:
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)
    except FileNotFoundError:
        sampling(dim,eps,samplemethod,extremes,dt)
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)

    t, phases_i, phases_c = zip(*phase_pairs)
    t, ei_i, ei_c = zip(*ei_pairs)

    extremes = len(t)

    phases_cdiff = [abs(phases_i[i])-abs(phases_c[i]) for i in range(len(t))]
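    # phases_cdiff is the per-sample difference of absolute phase offsets
    # between the two conditions; negative values indicate the second run
    # ended closer to inter-layer synchrony than the first.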

    phase_space = np.linspace(-m.pi,m.pi,101)
    absph_space = np.linspace(0,m.pi,50+1)
    cdiff_space = np.linspace(min(phases_cdiff),max(phases_cdiff),51)

    phase_dist_i = [len([ph for ph in phases_i if (phase_space[j]+phase_space[j-1])/2<=ph<(phase_space[j]+phase_space[j+1])/2])/len(t)
                    for j in range(100)]
    phase_dist_c = [len([ph for ph in phases_c if (phase_space[j]+phase_space[j-1])/2<=ph<(phase_space[j]+phase_space[j+1])/2])/len(t)
                    for j in range(100)]

    absph_dist_i = [len([ph for ph in phases_i if low<=abs(ph)<high])/len(t) for low,high in zip(absph_space[:-1],absph_space[1:])]
    absph_dist_c = [len([ph for ph in phases_c if low<=abs(ph)<high])/len(t) for low,high in zip(absph_space[:-1],absph_space[1:])]

    cdiff_dist = [len([diff for diff in phases_cdiff if low<=diff<high])/len(t) for low,high in zip(cdiff_space[:-1],cdiff_space[1:])]

    max_cha_i = max([round(e/(1-H2(eps))) for e in ei_i])
    max_cha_c = max([round(e/(1-H2(eps))) for e in ei_c])

    max_cha = max(max_cha_i, max_cha_c)
    ei_space = np.linspace(0,np.max([ei_i,ei_c]),max_cha+1)

    ei_dist_i = [len([e for e in ei_i if round(e/(1-H2(eps)))==val])/len(t) for val in range(max_cha)]
    ei_dist_c = [len([e for e in ei_c if round(e/(1-H2(eps)))==val])/len(t) for val in range(max_cha)]

    qtplot(f'Phase distribution for dt={dt} dim={dim} eps={eps:.3f} with 4 layers',
           [absph_space[:-1]]*2,
           [absph_dist_i, absph_dist_c],
           ['Phase dist with ei',
            'Phase dist without ei'],
           x_tag = 'phase',
           y_tag = 'density',
           export=True,
           path=savepath,
           filename=f'Phase dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
           close=True)

    qtplot(f'Phase distribution for dt={dt} dim={dim} eps={eps:.3f} with 4 layers',
           [phase_space[:-1]]*2,
           [phase_dist_i, phase_dist_c],
           ['Phase dist with ei',
            'Phase dist without ei'],
           x_tag = 'phase',
           y_tag = 'density',
           export=True,
           path=savepath,
           filename=f'Phase original dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
           close=True)

    qtplot(f'Phase causal diff distribution for dt={dt} dim={dim} eps={eps:.3f} with 4 layers',
           [cdiff_space[:-1]],
           [cdiff_dist],
           ['Phase causal difference dist with ei'],
           x_tag = 'sync raise',
           y_tag = 'density',
           export=True,
           path=savepath,
           filename=f'Phase causal diff dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
           close=True)

    qtplot(f'EI distribution for dt={dt} dim={dim} eps={eps:.3f} with 4 layers',
           [ei_space[:-1]]*2,
           [ei_dist_i, ei_dist_c],
           ['EI dist with ei',
            'EI dist without ei'],
           x_tag = 'ei',
           y_tag = 'density',
           export=True,
           path=savepath,
           filename=f'EI dist eps={round(eps,3):.3f} dim={dim} extremes={extremes}.png',
           close=True)

    ttest(dim,eps,samplemethod,extremes,dt)

    print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')

def ttest(dim,eps,samplemethod,extremes,dt):
    from scipy.stats import ttest_rel, ttest_1samp, normaltest
    import numpy as np
    import json

    eps = round(eps,3)
    path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

    savepath = path + samplemethod + f'_samples={extremes}/dt={dt}/'

    try:
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)
    except FileNotFoundError:
        sampling(dim,eps,samplemethod,extremes,dt)
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)

    t, phases_i, phases_c = zip(*phase_pairs)
    t, ei_i, ei_c = zip(*ei_pairs)

    phases_cdiff = [abs(phases_i[i])-abs(phases_c[i]) for i in range(len(t))]

    stat = phases_cdiff
    stat_av = np.average(stat)

    print('===============')
    print(f'For eps={eps} and dt={dt}: Mean={stat_av}')
    print('normaltest: pval='+str(normaltest(stat).pvalue))
    print('ttest: pval='+str(ttest_1samp(stat,0).pvalue))
    print('===============')

def full_stats(dim,eps,samplemethod,extremes,dt,ret='stats',noeff=1):
    from scipy.stats import ttest_rel, ttest_1samp, normaltest, wilcoxon
    import numpy as np
    import json
    import math as m
    from random import random
    from numpy.linalg import norm

    def resultant(sample):
        phase_x = [m.cos(ind) for ind in sample]
        phase_y = [m.sin(ind) for ind in sample]

        return (np.average(phase_x), np.average(phase_y))

    phase = np.vectorize(lambda x,y: (m.atan2(y,x)+m.pi)%(2*m.pi)-m.pi)
    diff = np.vectorize(lambda x,y: (y-x+m.pi)%(2*m.pi)-m.pi)
    H2 = lambda x: -x*m.log2(x)-(1-x)*m.log2(1-x)

    eps = round(eps,3)
    path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100/dim=09/batch=0/'

    savepath = path + samplemethod + f'_samples={extremes}/dt={dt}/noeff={noeff:03d}/'

    try:
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)
    except FileNotFoundError:
        sampling(dim,eps,samplemethod,extremes,dt,noeff=noeff)
        with open(savepath+f"eps={round(eps,3):.3f}_phase_pairs.txt", 'r', encoding='utf-8') as f:
            phase_pairs = json.load(f)
        with open(savepath+f"eps={round(eps,3):.3f}_ei_pairs.txt", 'r', encoding='utf-8') as f:
            ei_pairs = json.load(f)

    t, phases_i, phases_c = zip(*phase_pairs)
    t, ei_i, ei_c = zip(*ei_pairs)

    phases_cdiff = [abs(phases_i[i])-abs(phases_c[i]) if abs(abs(phases_i[i])-abs(phases_c[i]))!=m.pi else m.pi*(-1)**(random()<0.5)
                    for i in range(len(t))]

    print('phases with pi diff: '+str(len([i for i in range(len(t)) if abs(abs(phases_i[i])-abs(phases_c[i]))==m.pi])))
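    # A difference of exactly +/-pi is sign-ambiguous (both orderings are the
    # same angular distance apart), so its sign is broken at random to avoid
    # biasing the one-sided tests below.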

    stat = phases_cdiff
    mean = np.average(stat)
    std = np.std(stat)
    integral = sum([1 for val in stat if val<0])/len(stat)
    # Cohen's d divides by the pooled standard deviation, hence the sqrt
    cohend = mean / m.sqrt(np.var(np.abs(phases_i),ddof=1)/2+np.var(np.abs(phases_c),ddof=1)/2)
    norm_p = normaltest(stat).pvalue
    ttest_p = ttest_1samp(stat,0,alternative='less').pvalue
    sign_p = wilcoxon(stat,alternative='less').pvalue
    print('===============')
    print(f'For eps={eps} and dt={dt}: Mean={mean}')
    print('normaltest: pval='+str(norm_p))
    print('ttest: pval='+str(ttest_p))
    print('sign: pval='+str(sign_p))
    print('===============')

    if ret=='stats':
        return mean,std,integral,cohend,norm_p,ttest_p,sign_p
    elif ret=='phases':
        return norm(resultant(phases_i)), norm(resultant(phases_c))
28
evaluation/simpleplot.py
Normal file
@ -0,0 +1,28 @@
from plot import qtplot
import json
import math as ma
import numpy as np

path='/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100_diares/dim=09/batch=0/samedist_varmaxpha_causal_roll0.031/'
eps_sp=np.linspace(0.01,0.2,20)
eps_ep=eps_sp[3:4]

extremes=200
strength=200

savepath=path+f'extremes={extremes}_bootstrength={strength}/'

boots=[]
for eps in eps_ep:
    with open(savepath+f'eps={eps:.3f}_dt=60_bootstrap.txt','r') as f:
        confs=list(zip(*json.load(f)))[1]
    ps=[1-conf for conf in confs]
    boots.append(ps)

# label and x-axis lists are indexed over eps_ep, the slice actually loaded,
# so each curve is tagged with its true epsilon
qtplot("Bootstrapping confidence",
       [range(61)]*len(eps_ep),
       boots,
       [f'{round(eps,3)}' for eps in eps_ep],
       y_tag='p-value',
       y_log=False)
353
evaluation/updated_bib_file.bib
Normal file
@ -0,0 +1,353 @@
@book{barrat2008,
  title={Dynamical Processes on Complex Networks},
  author={Barrat, A. and Barth{\'e}lemy, M. and Vespignani, A.},
  isbn={9781107377424},
  url={https://books.google.pt/books?id=fUAhAwAAQBAJ},
  year={2008},
  publisher={Cambridge University Press}
}

@article{Varela2001,
  title={The brainweb: Phase synchronization and large-scale integration},
  author={Francisco J. Varela and Jean-Philippe Lachaux and Eugenio Rodriguez and Jacques Martinerie},
  journal={Nature Reviews Neuroscience},
  year={2001},
  volume={2},
  pages={229-239}
}

@article{Carmichael2002,
  title={Synchronous Neuronal Activity Is a Signal for Axonal Sprouting after Cortical Lesions in the Adult},
  author={S. Thomas Carmichael and M. -F. Chesselet},
  journal={The Journal of Neuroscience},
  year={2002},
  volume={22},
  pages={6062 - 6070}
}

@ARTICLE{Otsuka2000,
  author = {{Otsuka}, Kenju and {Kawai}, Ryoji and {Hwong}, Siao-Lung and {Ko}, Jing-Yuan and {Chern}, Jyh-Long},
  title = "{Synchronization of Mutually Coupled Self-Mixing Modulated Lasers}",
  journal = {Phys. Rev. Lett.},
  year = {2000},
  month = {apr},
  volume = {84},
  number = {14},
  pages = {3049-3052},
  doi = {10.1103/PhysRevLett.84.3049},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2000PhRvL..84.3049O},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@ARTICLE{Hansel1992,
  author = {{Hansel}, D. and {Sompolinsky}, H.},
  title = "{Synchronization and computation in a chaotic neural network}",
  journal = {Phys. Rev. Lett.},
  keywords = {87.10.+e, 05.45.+b, General theory and mathematical aspects},
  year = 1992,
  month = feb,
  volume = {68},
  number = {5},
  pages = {718-721},
  doi = {10.1103/PhysRevLett.68.718},
  adsurl = {https://ui.adsabs.harvard.edu/abs/1992PhRvL..68..718H},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@ARTICLE{Glass2001,
  author = {{Glass}, Leon},
  title = "{Synchronization and rhythmic processes in physiology}",
  journal = {Nature},
  year = 2001,
  month = mar,
  volume = {410},
  number = {6825},
  pages = {277-284},
  doi = {10.1038/35065745},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2001Natur.410..277G},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{BIEBERICH2002145,
  title = {Recurrent fractal neural networks: a strategy for the exchange of local and global information processing in the brain},
  journal = {Biosystems},
  volume = {66},
  number = {3},
  pages = {145-164},
  year = {2002},
  issn = {0303-2647},
  doi = {https://doi.org/10.1016/S0303-2647(02)00040-0},
  url = {https://www.sciencedirect.com/science/article/pii/S0303264702000400},
  author = {Erhard Bieberich},
  keywords = {Networks, Fractal, Brain, Mind, Consciousness, Neuron},
  abstract = {The regulation of biological networks relies significantly on convergent feedback signaling loops that render a global output locally accessible. Ideally, the recurrent connectivity within these systems is self-organized by a time-dependent phase-locking mechanism. This study analyzes recurrent fractal neural networks (RFNNs), which utilize a self-similar or fractal branching structure of dendrites and downstream networks for phase-locking of reciprocal feedback loops: output from outer branch nodes of the network tree enters inner branch nodes of the dendritic tree in single neurons. This structural organization enables RFNNs to amplify re-entrant input by over-the-threshold signal summation from feedback loops with equivalent signal traveling times. The columnar organization of pyramidal neurons in the neocortical layers V and III is discussed as the structural substrate for this network architecture. RFNNs self-organize spike trains and render the entire neural network output accessible to the dendritic tree of each neuron within this network. As the result of a contraction mapping operation, the local dendritic input pattern contains a downscaled version of the network output coding structure. RFNNs perform robust, fractal data compression, thus coping with a limited number of feedback loops for signal transport in convergent neural networks. This property is discussed as a significant step toward the solution of a fundamental problem in neuroscience: how is neuronal computation in separate neurons and remote brain areas unified as an instance of experience in consciousness? RFNNs are promising candidates for engaging neural networks into a coherent activity and provide a strategy for the exchange of global and local information processing in the human brain, thereby ensuring the completeness of a transformation from neuronal computation into conscious experience.}
}

@incollection{Chalmers2000,
  title = {What is a Neural Correlate of Consciousness?},
  booktitle = {Neural Correlates of Consciousness},
  author = {David J. Chalmers},
  publisher = {MIT Press},
  year = {2000},
  editor = {Thomas Metzinger}
}

@inproceedings{Crick1990,
  title={Towards a neurobiological theory of consciousness},
  author={Crick, Francis and Koch, Christof},
  booktitle={Seminars in the Neurosciences (Vol.2)},
  year={1990},
  organization={Saunders Scientific Publications}
}

@article{Hidaka2018,
  doi = {10.1371/journal.pone.0201126},
  author = {Hidaka, Shohei AND Oizumi, Masafumi},
  journal = {PLOS ONE},
  publisher = {Public Library of Science},
  title = {Fast and exact search for the partition with minimal information loss},
  year = {2018},
  month = {09},
  volume = {13},
  url = {https://doi.org/10.1371/journal.pone.0201126},
  pages = {1-14},
  number = {9}
}

@Article{Mediano2019,
  AUTHOR = {Mediano, Pedro A.M. and Seth, Anil K. and Barrett, Adam B.},
  TITLE = {Measuring Integrated Information: Comparison of Candidate Measures in Theory and Simulation},
  JOURNAL = {Entropy},
  VOLUME = {21},
  YEAR = {2019},
  NUMBER = {1},
  ARTICLE-NUMBER = {17},
  URL = {https://www.mdpi.com/1099-4300/21/1/17},
  ISSN = {1099-4300}
}

@incollection{ENGEL2016,
  title = {Chapter 3 - Neuronal Oscillations, Coherence, and Consciousness},
  editor = {Steven Laureys and Olivia Gosseries and Giulio Tononi},
  booktitle = {The Neurology of Consciousness (Second Edition)},
  publisher = {Academic Press},
  edition = {Second Edition},
  address = {San Diego},
  pages = {49-60},
  year = {2016},
  author = {Andreas K. Engel and Pascal Fries}
}

@article{FRIES2015,
  title = {Rhythms for Cognition: Communication through Coherence},
  journal = {Neuron},
  volume = {88},
  number = {1},
  pages = {220-235},
  year = {2015},
  author = {Pascal Fries}
}

@article{OIZUMI2014,
  author = {Oizumi, Masafumi AND Albantakis, Larissa AND Tononi, Giulio},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {From the Phenomenology to the Mechanisms of Consciousness: Integrated Information Theory 3.0},
  year = {2014},
  month = {05},
  volume = {10},
  pages = {1-25},
  number = {5}
}

@article{DOERIG2019,
  title = {The unfolding argument: Why IIT and other causal structure theories cannot explain consciousness},
  author = {Doerig, Adrien and Schurger, Aaron and Hess, Kathryn and Herzog, Michael H.},
  journal = {Consciousness and Cognition},
  volume = {72},
  year = {2019}
}

@book{BARRAT2008,
  title={Dynamical Processes on Complex Networks},
  author={Barrat, A. and Barth{\'e}lemy, M. and Vespignani, A.},
  year={2008},
  publisher={Cambridge University Press}
}

@article{Tononi2004,
  title={An information integration theory of consciousness},
  author={Giulio Tononi},
  journal={BMC Neuroscience},
  year={2004},
  volume={5},
  pages={42 - 42}
}

@article{Rezaei2020,
  author = {Rezaei, Hedyeh AND Aertsen, Ad AND Kumar, Arvind AND Valizadeh, Alireza},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {Facilitating the propagation of spiking activity in feedforward networks by including feedback},
  year = {2020},
  month = {08},
  volume = {16},
  pages = {1-27},
  number = {8}
}

@ARTICLE{LLINAS2014,
  AUTHOR={Llinás, Rodolfo R.},
  TITLE={Intrinsic electrical properties of mammalian neurons and CNS function: a historical perspective},
  JOURNAL={Frontiers in Cellular Neuroscience},
  YEAR={2014}
}

@article{Mayner2018,
  author = {Mayner, William G. P. AND Marshall, William AND Albantakis, Larissa AND Findlay, Graham AND Marchman, Robert AND Tononi, Giulio},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {PyPhi: A toolbox for integrated information theory},
  year = {2018},
  month = {07},
  volume = {14},
  pages = {1-21},
  number = {7}
}

@article{Hahn2014CommunicationTR,
  title={Communication through Resonance in Spiking Neuronal Networks},
  author={Gerald Hahn and Alejandro F. Bujan and Yves Fr{\'e}gnac and Ad Aertsen and Arvind Kumar},
  journal={PLoS Computational Biology},
  year={2014},
  volume={10}
}

@article{Fries2005AMF,
  title={A mechanism for cognitive dynamics: neuronal communication through neuronal coherence},
  author={Pascal Fries},
  journal={Trends in Cognitive Sciences},
  year={2005},
  volume={9},
  pages={474-480},
  url={}
}

@book{neuroscience6th,
  title = {Neuroscience},
  edition = {Sixth},
  editor = {Purves, Dale and Augustine, George J. and Fitzpatrick, David and Hall, William C. and LaMantia, Anthony-Samuel and Mooney, Richard D. and Platt, Michael L. and White, Leonard E.},
  year = {2017},
  publisher = {Sinauer Associates},
  address = {Sunderland, MA},
  isbn = {9781605353807},
  pages = {960},
  month = {October 12},
}

@article{Bennett2020AnAA,
  title={An Attempt at a Unified Theory of the Neocortical Microcircuit in Sensory Cortex},
  author={Max Bennett},
  journal={Frontiers in Neural Circuits},
  year={2020},
  volume={14},
  url={}
}

@article{vanKerkoerle2014AlphaAG,
  title={Alpha and gamma oscillations characterize feedback and feedforward processing in monkey visual cortex},
  author={Timo van Kerkoerle and Matthew W. Self and Bruno Dagnino and Marie-Alice Gariel-Mathis and Jasper Poort and Chris van der Togt and Pieter R. Roelfsema},
  journal={Proceedings of the National Academy of Sciences},
  year={2014},
  volume={111},
  pages={14332 - 14341},
  url={}
}

@article{Westerberg2022,
  article_type = {journal},
  title = {Laminar microcircuitry of visual cortex producing attention-associated electric fields},
  author = {Westerberg, Jacob A and Schall, Michelle S and Maier, Alexander and Woodman, Geoffrey F and Schall, Jeffrey D},
  editor = {Ray, Supratim and Baker, Chris I and Luck, Steven J and Nandy, Anirvan S},
  volume = 11,
  year = 2022,
  month = {jan},
  pub_date = {2022-01-28},
  pages = {e72139},
  citation = {eLife 2022;11:e72139},
  doi = {10.7554/eLife.72139},
  url = {https://doi.org/10.7554/eLife.72139},
  keywords = {CSD, ECoG, EEG, LFP, N2pc, V4},
  journal = {eLife},
  issn = {2050-084X},
  publisher = {eLife Sciences Publications, Ltd},
}

@inproceedings{Presigny2021MultiscaleMO,
  title={Multiscale modeling of brain network organization},
  author={Charley Presigny and Fabrizio De Vico Fallani},
  year={2021},
  url={}
}

@article{Mejas2016FeedforwardAF,
  title={Feedforward and feedback frequency-dependent interactions in a large-scale laminar network of the primate cortex},
  author={Jorge F. Mej{\'i}as and John D. Murray and Henry Kennedy and Xiao-Jing Wang},
  journal={Science Advances},
  year={2016},
  volume={2},
  url={}
}

@article{Rezaei2019FacilitatingTP,
  title={Facilitating the propagation of spiking activity in feedforward networks by including feedback},
  author={Hedyeh Rezaei and Ad Aertsen and Alireza Valizadeh and Arvind Kumar},
  journal={PLoS Computational Biology},
  year={2019},
  volume={16},
  url={}
}

@article{Hahn2014CommunicationTR,
|
||||
title={Communication through Resonance in Spiking Neuronal Networks},
|
||||
author={Gerald Hahn and Alejandro F. Bujan and Yves Fr{\'e}gnac and Ad Aertsen and Arvind Kumar},
|
||||
journal={PLoS Computational Biology},
|
||||
year={2014},
|
||||
volume={10},
|
||||
url={}
|
||||
}
|
||||
|
||||
@article{Jensen2014HumanBO,
|
||||
title={Human Brain Oscillations: From Physiological Mechanisms to Analysis and Cognition},
|
||||
author={Ole Jensen and Eelke Spaak and Johanna M. Zumer},
|
||||
journal={Magnetoencephalography},
|
||||
year={2014},
|
||||
url={}
|
||||
}
|
||||
|
||||
@article{Lowet2015InputDependentFM,
|
||||
title={Input-Dependent Frequency Modulation of Cortical Gamma Oscillations Shapes Spatial Synchronization and Enables Phase Coding},
|
||||
author={Eric Lowet and M. Roberts and Avgis Hadjipapas and Alina Peter and Jan van der Eerden and Peter de Weerd},
|
||||
journal={PLoS Computational Biology},
|
||||
year={2015},
|
||||
volume={11},
|
||||
url={}
|
||||
}
|
353
evaluation/updated_sync.bib
Normal file
@ -0,0 +1,353 @@
@book{barrat2008,
  title = {Dynamical Processes on Complex Networks},
  author = {Barrat, A. and Barth{\'e}lemy, M. and Vespignani, A.},
  isbn = {9781107377424},
  url = {https://books.google.pt/books?id=fUAhAwAAQBAJ},
  year = {2008},
  publisher = {Cambridge University Press}
}

@article{Varela2001,
  title = {The brainweb: Phase synchronization and large-scale integration},
  author = {Francisco J. Varela and Jean-Philippe Lachaux and Eugenio Rodriguez and Jacques Martinerie},
  journal = {Nature Reviews Neuroscience},
  year = {2001},
  volume = {2},
  pages = {229-239}
}

@article{Carmichael2002,
  title = {Synchronous Neuronal Activity Is a Signal for Axonal Sprouting after Cortical Lesions in the Adult},
  author = {S. Thomas Carmichael and M.-F. Chesselet},
  journal = {The Journal of Neuroscience},
  year = {2002},
  volume = {22},
  pages = {6062-6070}
}

@article{Otsuka2000,
  author = {{Otsuka}, Kenju and {Kawai}, Ryoji and {Hwong}, Siao-Lung and {Ko}, Jing-Yuan and {Chern}, Jyh-Long},
  title = {Synchronization of Mutually Coupled Self-Mixing Modulated Lasers},
  journal = {Phys. Rev. Lett.},
  year = {2000},
  month = {apr},
  volume = {84},
  number = {14},
  pages = {3049-3052},
  doi = {10.1103/PhysRevLett.84.3049},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2000PhRvL..84.3049O},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{Hansel1992,
  author = {{Hansel}, D. and {Sompolinsky}, H.},
  title = {Synchronization and computation in a chaotic neural network},
  journal = {Phys. Rev. Lett.},
  keywords = {87.10.+e, 05.45.+b, General theory and mathematical aspects},
  year = {1992},
  month = {feb},
  volume = {68},
  number = {5},
  pages = {718-721},
  doi = {10.1103/PhysRevLett.68.718},
  adsurl = {https://ui.adsabs.harvard.edu/abs/1992PhRvL..68..718H},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{Glass2001,
  author = {{Glass}, Leon},
  title = {Synchronization and rhythmic processes in physiology},
  journal = {Nature},
  year = {2001},
  month = {mar},
  volume = {410},
  number = {6825},
  pages = {277-284},
  doi = {10.1038/35065745},
  adsurl = {https://ui.adsabs.harvard.edu/abs/2001Natur.410..277G},
  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}

@article{BIEBERICH2002145,
  title = {Recurrent fractal neural networks: a strategy for the exchange of local and global information processing in the brain},
  journal = {Biosystems},
  volume = {66},
  number = {3},
  pages = {145-164},
  year = {2002},
  issn = {0303-2647},
  doi = {10.1016/S0303-2647(02)00040-0},
  url = {https://www.sciencedirect.com/science/article/pii/S0303264702000400},
  author = {Erhard Bieberich},
  keywords = {Networks, Fractal, Brain, Mind, Consciousness, Neuron},
  abstract = {The regulation of biological networks relies significantly on convergent feedback signaling loops that render a global output locally accessible. Ideally, the recurrent connectivity within these systems is self-organized by a time-dependent phase-locking mechanism. This study analyzes recurrent fractal neural networks (RFNNs), which utilize a self-similar or fractal branching structure of dendrites and downstream networks for phase-locking of reciprocal feedback loops: output from outer branch nodes of the network tree enters inner branch nodes of the dendritic tree in single neurons. This structural organization enables RFNNs to amplify re-entrant input by over-the-threshold signal summation from feedback loops with equivalent signal traveling times. The columnar organization of pyramidal neurons in the neocortical layers V and III is discussed as the structural substrate for this network architecture. RFNNs self-organize spike trains and render the entire neural network output accessible to the dendritic tree of each neuron within this network. As the result of a contraction mapping operation, the local dendritic input pattern contains a downscaled version of the network output coding structure. RFNNs perform robust, fractal data compression, thus coping with a limited number of feedback loops for signal transport in convergent neural networks. This property is discussed as a significant step toward the solution of a fundamental problem in neuroscience: how is neuronal computation in separate neurons and remote brain areas unified as an instance of experience in consciousness? RFNNs are promising candidates for engaging neural networks into a coherent activity and provide a strategy for the exchange of global and local information processing in the human brain, thereby ensuring the completeness of a transformation from neuronal computation into conscious experience.}
}

@incollection{Chalmers2000,
  title = {What is a Neural Correlate of Consciousness?},
  booktitle = {Neural Correlates of Consciousness},
  author = {David J. Chalmers},
  publisher = {MIT Press},
  year = {2000},
  editor = {Thomas Metzinger}
}

@inproceedings{Crick1990,
  title = {Towards a neurobiological theory of consciousness},
  author = {Crick, Francis and Koch, Christof},
  booktitle = {Seminars in the Neurosciences (Vol. 2)},
  year = {1990},
  organization = {Saunders Scientific Publications}
}

@article{Hidaka2018,
  doi = {10.1371/journal.pone.0201126},
  author = {Hidaka, Shohei and Oizumi, Masafumi},
  journal = {PLOS ONE},
  publisher = {Public Library of Science},
  title = {Fast and exact search for the partition with minimal information loss},
  year = {2018},
  month = {09},
  volume = {13},
  url = {https://doi.org/10.1371/journal.pone.0201126},
  pages = {1-14},
  number = {9}
}

@article{Mediano2019,
  author = {Mediano, Pedro A.M. and Seth, Anil K. and Barrett, Adam B.},
  title = {Measuring Integrated Information: Comparison of Candidate Measures in Theory and Simulation},
  journal = {Entropy},
  volume = {21},
  year = {2019},
  number = {1},
  article-number = {17},
  url = {https://www.mdpi.com/1099-4300/21/1/17},
  issn = {1099-4300}
}

@incollection{ENGEL2016,
  title = {Chapter 3 - Neuronal Oscillations, Coherence, and Consciousness},
  author = {Andreas K. Engel and Pascal Fries},
  editor = {Steven Laureys and Olivia Gosseries and Giulio Tononi},
  booktitle = {The Neurology of Consciousness (Second Edition)},
  publisher = {Academic Press},
  edition = {Second},
  address = {San Diego},
  pages = {49-60},
  year = {2016}
}

@article{FRIES2015,
  title = {Rhythms for Cognition: Communication through Coherence},
  author = {Pascal Fries},
  journal = {Neuron},
  volume = {88},
  number = {1},
  pages = {220-235},
  year = {2015}
}

@article{OIZUMI2014,
  author = {Oizumi, Masafumi and Albantakis, Larissa and Tononi, Giulio},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {From the Phenomenology to the Mechanisms of Consciousness: Integrated Information Theory 3.0},
  year = {2014},
  month = {05},
  volume = {10},
  pages = {1-25},
  number = {5}
}

@article{DOERIG2019,
  title = {The unfolding argument: Why IIT and other causal structure theories cannot explain consciousness},
  author = {Doerig, Adrien and Schurger, Aaron and Hess, Kathryn and Herzog, Michael H.},
  journal = {Consciousness and Cognition},
  volume = {72},
  year = {2019}
}

@book{BARRAT2008,
  title = {Dynamical Processes on Complex Networks},
  author = {Barrat, A. and Barth{\'e}lemy, M. and Vespignani, A.},
  year = {2008},
  publisher = {Cambridge University Press}
}

@article{Tononi2004,
  title = {An information integration theory of consciousness},
  author = {Giulio Tononi},
  journal = {BMC Neuroscience},
  year = {2004},
  volume = {5},
  pages = {42}
}

@article{Rezaei2020,
  author = {Rezaei, Hedyeh and Aertsen, Ad and Kumar, Arvind and Valizadeh, Alireza},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {Facilitating the propagation of spiking activity in feedforward networks by including feedback},
  year = {2020},
  month = {08},
  volume = {16},
  pages = {1-27},
  number = {8}
}

@article{LLINAS2014,
  author = {Llinás, Rodolfo R.},
  title = {Intrinsic electrical properties of mammalian neurons and CNS function: a historical perspective},
  journal = {Frontiers in Cellular Neuroscience},
  year = {2014}
}

@article{Mayner2018,
  author = {Mayner, William G. P. and Marshall, William and Albantakis, Larissa and Findlay, Graham and Marchman, Robert and Tononi, Giulio},
  journal = {PLOS Computational Biology},
  publisher = {Public Library of Science},
  title = {PyPhi: A toolbox for integrated information theory},
  year = {2018},
  month = {07},
  volume = {14},
  pages = {1-21},
  number = {7}
}

@article{Hahn2014CommunicationTR,
  title = {Communication through Resonance in Spiking Neuronal Networks},
  author = {Gerald Hahn and Alejandro F. Bujan and Yves Fr{\'e}gnac and Ad Aertsen and Arvind Kumar},
  journal = {PLoS Computational Biology},
  year = {2014},
  volume = {10}
}

@article{Fries2005AMF,
  title = {A mechanism for cognitive dynamics: neuronal communication through neuronal coherence},
  author = {Pascal Fries},
  journal = {Trends in Cognitive Sciences},
  year = {2005},
  volume = {9},
  pages = {474-480}
}

@book{neuroscience6th,
  title = {Neuroscience},
  edition = {Sixth},
  editor = {Purves, Dale and Augustine, George J. and Fitzpatrick, David and Hall, William C. and LaMantia, Anthony-Samuel and Mooney, Richard D. and Platt, Michael L. and White, Leonard E.},
  year = {2017},
  publisher = {Sinauer Associates},
  address = {Sunderland, MA},
  isbn = {9781605353807},
  pages = {960},
  month = {October 12}
}

@article{Bennett2020AnAA,
  title = {An Attempt at a Unified Theory of the Neocortical Microcircuit in Sensory Cortex},
  author = {Max Bennett},
  journal = {Frontiers in Neural Circuits},
  year = {2020},
  volume = {14}
}

@article{vanKerkoerle2014AlphaAG,
  title = {Alpha and gamma oscillations characterize feedback and feedforward processing in monkey visual cortex},
  author = {Timo van Kerkoerle and Matthew W. Self and Bruno Dagnino and Marie-Alice Gariel-Mathis and Jasper Poort and Chris van der Togt and Pieter R. Roelfsema},
  journal = {Proceedings of the National Academy of Sciences},
  year = {2014},
  volume = {111},
  pages = {14332-14341}
}

@article{Westerberg2022,
  article_type = {journal},
  title = {Laminar microcircuitry of visual cortex producing attention-associated electric fields},
  author = {Westerberg, Jacob A and Schall, Michelle S and Maier, Alexander and Woodman, Geoffrey F and Schall, Jeffrey D},
  editor = {Ray, Supratim and Baker, Chris I and Luck, Steven J and Nandy, Anirvan S},
  volume = {11},
  year = {2022},
  month = {jan},
  pub_date = {2022-01-28},
  pages = {e72139},
  citation = {eLife 2022;11:e72139},
  doi = {10.7554/eLife.72139},
  url = {https://doi.org/10.7554/eLife.72139},
  keywords = {CSD, ECoG, EEG, LFP, N2pc, V4},
  journal = {eLife},
  issn = {2050-084X},
  publisher = {eLife Sciences Publications, Ltd}
}

@inproceedings{Presigny2021MultiscaleMO,
  title = {Multiscale modeling of brain network organization},
  author = {Charley Presigny and Fabrizio De Vico Fallani},
  year = {2021}
}

@article{Mejas2016FeedforwardAF,
  title = {Feedforward and feedback frequency-dependent interactions in a large-scale laminar network of the primate cortex},
  author = {Jorge F. Mej{\'i}as and John D. Murray and Henry Kennedy and Xiao-Jing Wang},
  journal = {Science Advances},
  year = {2016},
  volume = {2}
}

@article{Rezaei2019FacilitatingTP,
  title = {Facilitating the propagation of spiking activity in feedforward networks by including feedback},
  author = {Hedyeh Rezaei and Ad Aertsen and Alireza Valizadeh and Arvind Kumar},
  journal = {PLoS Computational Biology},
  year = {2019},
  volume = {16}
}

@article{Jensen2014HumanBO,
  title = {Human Brain Oscillations: From Physiological Mechanisms to Analysis and Cognition},
  author = {Ole Jensen and Eelke Spaak and Johanna M. Zumer},
  journal = {Magnetoencephalography},
  year = {2014}
}

@article{Lowet2015InputDependentFM,
  title = {Input-Dependent Frequency Modulation of Cortical Gamma Oscillations Shapes Spatial Synchronization and Enables Phase Coding},
  author = {Eric Lowet and M. Roberts and Avgis Hadjipapas and Alina Peter and Jan van der Eerden and Peter de Weerd},
  journal = {PLoS Computational Biology},
  year = {2015},
  volume = {11}
}
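A note on the bibliography above: the file carries overlapping references under distinct keys (barrat2008 alongside BARRAT2008, Rezaei2019FacilitatingTP alongside Rezaei2020), which BibTeX will accept while silently splitting citations. A minimal standalone sanity check is sketched below; it is not part of the repository, and the regex simply assumes entries open with '@type{key,' as they do throughout this file. Comparing keys case-insensitively also catches near-collisions such as barrat2008 vs. BARRAT2008.

import re
from collections import Counter

def duplicate_bib_keys(path):
    # Collect every entry key; entries are assumed to start with '@type{key,'.
    with open(path, 'r', encoding='utf-8') as f:
        text = f.read()
    keys = re.findall(r'@\w+\s*\{\s*([^,\s]+)\s*,', text)
    # Lowercase before counting so BARRAT2008 and barrat2008 collide.
    counts = Counter(key.lower() for key in keys)
    return sorted(key for key, n in counts.items() if n > 1)

print(duplicate_bib_keys('evaluation/updated_sync.bib'))   # e.g. ['barrat2008']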
@ -10,17 +10,19 @@ import numpy as np
 from neuropercolation import Simulate1Layer
 
 #eps_space = np.linspace(0.005,0.5,100)
-eps_space = np.linspace(0.135,0.15,4)
+eps_space = np.linspace(0.01,0.5,50)
+stp = 1000100
 
-for dim in [49,100]:
+for dim in range(11,21,2):
     for eps in eps_space:
         eps = round(eps,3)
-        Simulate2Layers(dim,
+        Simulate1Layer(dim,
                        eps,
-                       res=2,
-                       path=f'/cloud/Public/_data/neuropercolation/1lay/steps=100000/dim={dim:02}/',
-                       #draw=None
-                       ).run(evolutions_per_second=30,
-                             last_evolution_step=100000,
-                             save_states=False)
+                       steps=stp,
+                       init = 0,
+                       draw=None,
+                       res=6,
+                       save='all',
+                       path=f'/cloud/Public/_data/neuropercolation/1lay/steps={stp}/dim={dim:02}/',
+                       )
         print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')
@ -8,29 +8,19 @@ Created on Fri Aug 18 19:05:04 2023
 
 from datetime import datetime
 import numpy as np
-from neuropercolation import Simulate2Layers, Simulate4Layers
+from neuropercolation import Simulate2Layers
 
 eps_space = np.linspace(0.005,0.5,100)
 #eps_space = np.linspace(0.135,0.15,4)
 
-for dim in [8]:
-    for eps in eps_space:
-        eps = round(eps,3)
-        sim = Simulate4Layers(dim,
-                              eps,
-                              steps=100100,
-                              draw=None,
-                              save='all',
-                              path=f'/cloud/Public/_data/neuropercolation/4lay/steps=100100/dim={dim:02}/',
-                              )
-        print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')
-
-for eps in eps_space:
+dims=list(range(21,27,2))
+for dim in [101]:
+    for eps in [0.15]:
         eps = round(eps,3)
         sim = Simulate2Layers(dim,
                               eps,
                               steps=100100,
-                              draw=None,
+                              draw='pygame',
                               save='all',
                               path=f'/cloud/Public/_data/neuropercolation/2lay/steps=100100/dim={dim:02}/',
                               )
@ -6,29 +6,53 @@ Created on Fri Aug 18 19:05:04 2023
 @author: astral
 """
 
+import os
 from datetime import datetime
 import numpy as np
 from neuropercolation import Simulate4Layers
 
+def new_folder(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+
 eps_space = np.linspace(0.005,0.5,100)
 #eps_space = np.linspace(0.135,0.15,4)
 
 stp = 1000100
+dims = range(9,10)
+batches = 1
 
-for batch in range(5):
-    for dim in [8]:
-        for eps in eps_space[1:41:2]:
+path = f'/cloud/Public/_data/neuropercolation/4lay/cons=27-knight_steps=1000100_diares/dim=09/batch=0/'
+
+for dim in dims:
+    con_gap = 3
+    cons = [(n,(2*n+m)%dim) for n in range(dim) for m in range(0,dim-2,con_gap)]
+    dimpath = path + f'dim={dim:02}_cons={len(cons)}/'
+
+    max_bat=0
+    # for file in os.listdir(dimpath):
+    #     f = os.path.join(dimpath, file)
+    #     if not os.path.isfile(f):
+    #         bat = file.replace('batch=','')
+    #         if bat != file and int(bat)+1 > max_bat:
+    #             max_bat = int(bat)+1
+
+    for batch in range(max_bat, max_bat+batches):
+        savepath = dimpath + f'batch={batch}'
+        new_folder(savepath)
+        for eps in [0.3]:#list(eps_space[61::2]):
             eps = round(eps,3)
-            cons = [(n,(n+m)%dim) for n in range(dim) for m in [0,int(dim/2)]]
             initstate = [[0,0],[0,0]]
             sim = Simulate4Layers(dim,
                                   eps,
                                   coupling=cons,
                                   init=initstate,
                                   steps=stp,
+                                  noeffect=-1,
                                   #fps=20,
                                   draw=None,
-                                  res=2,
-                                  save='simple',
-                                  path=f'/cloud/Public/_data/neuropercolation/4lay/cons={len(cons)}-2diag_steps={stp}/dim={dim:02}/batch={batch}/',
+                                  res=6,
+                                  save='all',
+                                  path=path,
                                   )
             print(f'Done eps={eps:.3f} with dim={dim} at {datetime.now()}')
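The coupling comprehension in the hunk above is dense enough to deserve a quick check. The pairs it produces are the (x, y) grid positions of the link cells; they are later tested with coord[:2] in self.coupling in the NeuropercolationCoupled diff below. A standalone sketch, using the dim=9 and con_gap=3 values from the script; the count of 27 matches the 'cons=27-knight' directory name.

dim, con_gap = 9, 3
# Each row n contributes one link cell per offset m; the 2*n stride gives
# the knight-like diagonal pattern the directory name refers to.
cons = [(n, (2*n + m) % dim) for n in range(dim) for m in range(0, dim - 2, con_gap)]
print(len(cons))    # 27
print(cons[:6])     # [(0, 0), (0, 3), (0, 6), (1, 2), (1, 5), (1, 8)]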
@ -92,7 +92,7 @@ class Neurolattice(CellularAutomatonCreator, abc.ABC):
         self.epsilon = eps
 
     def init_cell_state(self, coord):  # pylint: disable=no-self-use
-        return DEAD
+        return [self.init]
 
     def get_cells(self):
         return self._current_state
@ -195,7 +195,8 @@ class Neuropercolation(CellularAutomatonCreator, abc.ABC):
         for coord, old, new in zip(this_state.keys(), this_state.values(), next_state.values()):
             coord_c = tuple([*coord[:2],int(1-coord[2])])
             old_c = this_state[coord_c]
-            new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], coord[2], coord_c[2]) #inverse the inhibitory layer's action
+            new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], coord[2], coord_c[2])
+            #inverse the inhibitory layer's action
 
             evolve_cell(old, new, new_state)
@ -233,16 +234,25 @@ class Neuropercolation(CellularAutomatonCreator, abc.ABC):
 
 class NeuropercolationCoupled(CellularAutomatonCreator, abc.ABC):
 
-    def __init__(self, dim, eps, coupling=[], init=[[0,0],[0,0]], *args, **kwargs):
+    def __init__(self, dim, eps, coupling=[], init=[[0,0],[0,0]], noeffect=-1, *args, **kwargs):
         super().__init__(dimension=[dim, dim, 2, 2],
                          init=init,
                          neighborhood=VonNeumannNeighborhood(EdgeRule.FIRST_AND_LAST_CELL_OF_DIMENSION_ARE_NEIGHBORS))
         self._evolution_step = 0
         self.epsilon = eps
         self.coupling = coupling
+        self.noeffect = noeffect
 
     def init_cell_state(self, coord):  # pylint: disable=no-self-use
-        return [self.init[coord[3]][coord[2]]]
+        if type(self.init) is list:
+            return [self.init[coord[3]][coord[2]]]
+        elif type(self.init) is str:
+            self.init = self.init.translate(str.maketrans('', '', '.-='))
+            dim = self._dimension[0]
+            assert len(self.init)==4*dim**2
+            coordval = [1, dim, dim**2, 2*dim**2]
+            digit = sum([coord[i]*coordval[i] for i in range(4)])
+            return [int(self.init[digit])]
 
     def get_cells(self):
         return self._current_state
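The new string branch of init_cell_state flattens a full four-layer configuration into one character per cell. Below is a minimal sketch of the same indexing arithmetic, assuming coord is ordered (x, y, layer, network) as the [dim, dim, 2, 2] dimension and the list form init[coord[3]][coord[2]] suggest; the separators '.', '-' and '=' are stripped before indexing, so human-readable state strings stay valid.

dim = 3
coordval = [1, dim, dim**2, 2*dim**2]   # flat-index weights for (x, y, layer, network)
init = '0' * (4 * dim**2)               # all-dead configuration, one char per cell
coord = (2, 1, 0, 1)                    # x=2, y=1, first layer, second network
digit = sum(coord[i] * coordval[i] for i in range(4))
print(digit, init[digit])               # 23 '0'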
@ -277,11 +287,14 @@ class NeuropercolationCoupled(CellularAutomatonCreator, abc.ABC):
             if coord[:2] in self.coupling:
                 coord_c = tuple([*coord[:2],coord[2],int(1-coord[3])])
                 old_c = this_state[coord_c]
-                new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], 0)
+                new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], 0) \
+                            if self.noeffect<self._evolution_step \
+                            else evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], 'disabled')
             else:
                 coord_c = tuple([*coord[:2],int(1-coord[2]),coord[3]])
                 old_c = this_state[coord_c]
-                new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], coord_c[2]) #inverse the inhibitory layer's action
+                new_state = evolution_rule(old.state.copy(), old_c.state.copy(), [n.state[0] for n in old.neighbors], coord_c[2])
+                #inverse the inhibitory layer's action
 
             evolve_cell(old, new, new_state)
@ -295,20 +308,25 @@ class NeuropercolationCoupled(CellularAutomatonCreator, abc.ABC):
         new_cell_state = last_cell_state
         if other_layer==0:
             alive_neighbours = sum(neighbors_last_states)+link_last_state[0] # adjust for excitatory link cells
-        else:
+        elif other_layer==1:
             alive_neighbours = sum(neighbors_last_states)+(1-link_last_state[0]) # adjust for inhibitory link cells
+        else:
+            alive_neighbours = sum(neighbors_last_states)+0.5
 
         CASE = (random.random()>=self.epsilon)
-        if alive_neighbours > 2:
+        if alive_neighbours > 2.5:
             if CASE:
                 new_cell_state = ALIVE
             else:
                 new_cell_state = DEAD
-        else:
+        elif alive_neighbours < 2.5:
             if CASE:
                 new_cell_state = DEAD
             else:
                 new_cell_state = ALIVE
+        else:
+            new_cell_state = [int(random.random()>=0.5)]
 
         return new_cell_state
 
     def count_channels(self):
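The rule above is a noisy majority vote: with probability 1-epsilon a cell follows the majority of its four von Neumann neighbours plus the link-cell contribution, and with probability epsilon it does the opposite. With the new 2.5 threshold an exact tie can only occur in the 'disabled' branch (the +0.5 offset used during the noeffect window), in which case the cell is re-randomised. A self-contained re-implementation for illustration only; ALIVE and DEAD are assumed to be the one-element lists [1] and [0] used as cell states elsewhere in the module.

import random

ALIVE, DEAD = [1], [0]

def noisy_majority(last_state, link_state, neighbor_states, other_layer, epsilon):
    # Link-cell contribution: excitatory copies the paired network's cell,
    # inhibitory inverts it, and 'disabled' adds a neutral 0.5 so that a
    # 2-2 neighbourhood becomes an exact tie.
    if other_layer == 0:
        alive = sum(neighbor_states) + link_state[0]
    elif other_layer == 1:
        alive = sum(neighbor_states) + (1 - link_state[0])
    else:
        alive = sum(neighbor_states) + 0.5

    follow = random.random() >= epsilon        # with prob. 1-eps follow the majority
    if alive > 2.5:
        return ALIVE if follow else DEAD
    elif alive < 2.5:
        return DEAD if follow else ALIVE
    return [int(random.random() >= 0.5)]       # tie: fair coin

print(noisy_majority([0], [1], [1, 1, 0, 0], 0, 0.05))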
@ -148,7 +148,7 @@ class Simulate1Layer:
             self._append_activation()
 
     def _append_all(self):
-        automaton_state = [[0 for n in range(self.__dimension)] for m in range(self.__dimension)]
+        automaton_state = [[0 for n in range(self.__dimension)] for l in range(self.__dimension)]
         activation = 0
         for coord, cell in self._cellular_automaton._current_state.items():
             x,y = coord
@ -321,7 +321,7 @@ class Simulate2Layers:
             self.__channel_list.append(self._cellular_automaton.count_channels())
 
     def _append_all(self):
-        automaton_state = [[[0 for n in range(self.__dimension)] for m in range(self.__dimension)] for l in range(2)]
+        automaton_state = [[[0 for n in range(self.__dimension)] for l in range(self.__dimension)] for k in range(2)]
         activation = [0]*2
         for coord, cell in self._cellular_automaton._current_state.items():
             x,y,l = coord
@ -424,6 +424,7 @@ class Simulate4Layers:
                  eps,
                  coupling=[],
                  init=[[0,0],[0,0]],
+                 noeffect=-1,
                  steps=100,
                  draw='pygame',
                  res=4,
@ -442,11 +443,12 @@
         :param state_to_color_cb: A callback to define the draw color of CA states (default: red for states != 0)
         """
         super().__init__(*args, **kwargs)
-        self._cellular_automaton = NeuropercolationCoupled(dim,eps,coupling,init)
+        self._cellular_automaton = NeuropercolationCoupled(dim,eps,coupling,init,noeffect)
         self.__active = True
         self.__cell_size = [res,res]
         self.__dimension = dim
         self.__epsilon = eps
+        self.__coupling = coupling
         self.__gridside = dim*res
         self.__samegap = 10
         self.__othergap = 20
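With the parameter threaded through, a run that keeps the inter-network link cells disabled until evolution step 100 would look roughly like this. A sketch only: the output path is a placeholder and the coupling pairs are illustrative; the keyword names and their defaults are the ones visible in the diff above.

from neuropercolation import Simulate4Layers

sim = Simulate4Layers(9,                          # lattice dimension
                      0.3,                        # noise level epsilon
                      coupling=[(0, 0), (0, 3)],  # link-cell grid positions (illustrative)
                      init=[[0, 0], [0, 0]],
                      noeffect=100,               # coupling stays inert through step 100
                      steps=1000,
                      draw=None,
                      res=2,
                      save='simple',
                      path='/tmp/neuropercolation-demo/',  # placeholder output directory
                      )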
@ -499,18 +501,22 @@
         except:
             print('Failed to quit pygame')
 
+    def _activations(self):
+        return self.__activation_list
+
+    def _channels(self):
+        return self.__channel_list
+
 
     def _track(self):
         if self.__save == 'all':
             self._append_all()
-        elif self.__save == 'simple':
+        else:
             self._append_activation()
         self.__channel_list.append(self._cellular_automaton.count_channels())
 
     def _append_all(self):
-        automaton_state = [[[[0 for n in range(self.__dimension)] for m in range(self.__dimension)] for l in range(2)] for k in range(2)]
+        automaton_state = [[[[0 for n in range(self.__dimension)] for l in range(self.__dimension)] for k in range(2)] for j in range(2)]
         activation = [[0,0],[0,0]]
         for coord, cell in self._cellular_automaton._current_state.items():
             x,y,l,o = coord
@ -546,6 +552,8 @@
         if not os.path.exists(self.__path):
             os.makedirs(self.__path)
 
+        with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_coupling.txt", 'w', encoding='utf-8') as f:
+            json.dump(self.__coupling, f, indent=1)
         with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_states.txt", 'w', encoding='utf-8') as f:
            json.dump(self.__state_list, f, indent=1)
         with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_activation.txt", 'w', encoding='utf-8') as f:
@ -557,6 +565,8 @@
         if not os.path.exists(self.__path):
             os.makedirs(self.__path)
 
+        with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_coupling.txt", 'w', encoding='utf-8') as f:
+            json.dump(self.__coupling, f, indent=1)
         with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_activation.txt", 'w', encoding='utf-8') as f:
             json.dump(self.__activation_list, f, indent=1)
         with open(self.__path+f"eps={round(self.__epsilon,3):.3f}_channels.txt", 'w', encoding='utf-8') as f:
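One detail worth noting about the coupling files written above: json serialises the coupling's (x, y) tuples as lists, so anything that reloads these files and tests membership against cell coordinates (as coord[:2] in self.coupling does) has to convert back. A short sketch, with placeholder path and epsilon:

import json

path = '/tmp/neuropercolation-demo/'   # placeholder run directory
eps = 0.300
with open(path + f"eps={eps:.3f}_coupling.txt", 'r', encoding='utf-8') as f:
    coupling = [tuple(pair) for pair in json.load(f)]   # lists back to (x, y) tuples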
|
Loading…
Reference in New Issue
Block a user