Implemented in PyTorch
Packages
import numpy as np
import random
import matplotlib.pyplot as plt
from matplotlib import animation
from celluloid import Camera
import torch
from torch import nn, optim
from torchviz import make_dot, make_dot_from_trace
from torchdiffeq import odeint_adjoint as odeint
from decimal import *
from mpmath import *
from sympy import *
import scipy.integrate as integrate
import scipy.special as sc
from scipy.misc import derivative  # note: scipy.misc.derivative was removed in SciPy 1.12; requires an older SciPy
from pynverse import inversefunc
import pandas as pd
from pandas import Series, DataFrame
device = 'cuda' if torch.cuda.is_available() else 'cpu'
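The cells below reference several global hyperparameters (q, lam, m2, scale, ir_cutoff, uv_cutoff, delta_eta, layer, num_training_data) that this section never defines; they are presumably set elsewhere in the notebook. The values below are illustrative placeholders only (assumptions, not the original settings), included so the section runs standalone:
# Placeholder hyperparameters (assumed values; the original notebook defines these elsewhere)
q = 0.9                  # black hole charge: q = 0 is Schwarzschild, q**2 = 3 is extremal RN
lam = 1.0                # quartic coupling in V(phi) = lam*phi**4/4
m2 = -2.0                # scalar mass squared
scale = 1.0              # rescaling of the holographic coordinate eta
ir_cutoff = 0.1          # near-horizon cutoff; also the threshold in the final layer t(F)
uv_cutoff = 1.0          # near-boundary cutoff where the initial data (phi, pi) is given
layer = 10               # network depth = number of eta lattice sites
delta_eta = -(uv_cutoff - ir_cutoff)/(layer - 1)  # step from the UV toward the horizon (negative)
num_training_data = 100  # target count of positive (and of negative) training points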
Final Layer
def t(F):
    # Final-layer step function: returns 1 when |F| exceeds the cutoff, 0 otherwise
    signr = np.heaviside(F - ir_cutoff, 0)
    signl = np.heaviside(-F - ir_cutoff, 0)
    return signr + signl
Fp = np.arange(-0.6, 0.6, 0.001)  # grid of F values for plotting
plt.plot(Fp, t(Fp), lw=5, label='$t(F)$')
plt.title('Function of Final Layer')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel('F')
plt.ylabel('$t(F)$')
#plt.tight_layout()
#plt.savefig("Tanh.png")
plt.show()
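For instance, with the placeholder ir_cutoff = 0.1 assumed above, t(F) fires only when |F| exceeds the cutoff:
print(t(np.array([-0.5, -0.05, 0.0, 0.05, 0.5])))  # -> [1. 0. 0. 0. 1.]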
Setup
We consider a scalar field $\phi$ that depends only on the holographic direction $\eta$, in an asymptotically AdS black hole background
$$ds^2=\frac{1}{y^2}\left(-h(y)\,dt^2+\frac{dy^2}{h(y)}+dx_1^2+dx_2^2\right),$$
where the emblackening function $h(y)$ has the following properties: $h(0)=1$ at the AdS boundary and $h(1)=0$ at the horizon (the AdS radius and horizon position are set to one). Specifically,
$$h(y)=1-(1+q^2)\,y^3+q^2\,y^4$$
in the RN case. Note that, in the extremal case, $q^2=3$.
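As a quick consistency check of these statements (a minimal sketch; the symbols yy, qq are local so they do not clobber the global q), one can verify with sympy that $h(1)=0$ and that extremality, $h'(1)=0$, forces $q^2=3$:
import sympy as sp
yy, qq = sp.symbols('y q', positive=True)
h_sym = 1 - (1 + qq**2)*yy**3 + qq**2*yy**4        # same h as in the code below
print(sp.simplify(h_sym.subs(yy, 1)))              # 0: the horizon sits at y = 1
print(sp.simplify(sp.diff(h_sym, yy).subs(yy, 1))) # q**2 - 3: vanishes iff q**2 = 3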
Reproduced Metric and EoM
The EoM for $\phi(y)$ is
$$y^{4}\,\partial_y\!\left(\frac{h(y)}{y^{2}}\,\partial_y\phi\right)=m^2\phi+\frac{\delta V(\phi)}{\delta\phi}.$$
Now, we want a coordinate in which the horizon sits at $\eta=0$. Consider the following coordinate transformation:
$$\eta(y)=\int_y^1\frac{dz}{z\sqrt{h(z)}}.$$
Then the EoM becomes
$$\partial_\eta^2\phi+H(\eta)\,\partial_\eta\phi-m^2\phi-\frac{\delta V(\phi)}{\delta\phi}=0,$$
where
$$H(\eta)=\frac{6\,h(y)-y\,\partial_y h(y)}{2\sqrt{h(y)}}\bigg|_{y=y(\eta)}.$$
Specifically, for the Schwarzschild case ($q=0$), $H(\eta)=3\coth(3\eta)$.
def h(y):
    # Emblackening function of the RN-AdS background: h(y) = 1 - (1+q^2) y^3 + q^2 y^4
    return 1 - y**3 - (q**2)*y**3 + (q**2)*y**4

def eta_coord(y):
    # eta(y) = int_y^1 dz/(z sqrt(h(z))): tortoise-like coordinate with eta = 0 at the horizon
    r = []
    for i in range(0, len(y)):
        r = np.append(r, integrate.quad(lambda z: 1/(z*(h(z)**(1/2))), y[i], 1)[0])
    return r
def y_coord(eta):
    # Invert eta(y) by bisection; eta_coord is monotonically decreasing in y
    r = []
    accep_error = 10**(-3)
    for i in range(0, len(eta)):
        erroreta = 100
        y_lower = 0
        y_upper = 1
        while abs(erroreta) > accep_error:
            yy = (y_lower + y_upper)/2
            test_eta = eta_coord(np.array([yy]))[0]
            if test_eta > eta[i]:
                y_lower = yy   # eta too large -> yy too small
            else:
                y_upper = yy   # eta too small -> yy too large
            erroreta = eta[i] - test_eta
        r = np.append(r, yy)
    return r
def H_r(eta):
    # Reproduced metric function H(eta) = (6 h(y) - y h'(y)) / (2 sqrt(h(y))) at y = y(eta)
    eta = eta/scale
    y = y_coord(eta)
    return (6*h(y) - y*derivative(h, y, dx=1e-6))/(2*(h(y)**(1/2)))
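A quick way to check the bisection inversion is the roundtrip y_coord(eta_coord(y)) ≈ y (a minimal sketch using the functions just defined; the test points are arbitrary):
y_test = np.array([0.2, 0.5, 0.9])
print(y_test)
print(y_coord(eta_coord(y_test)))  # should match y_test to roughly the accepted eta error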
def v(phi):
    # Quartic potential V(phi) = lam phi^4 / 4; delta_v(phi) -> derivative(v, phi)
    return (lam*phi**4)/4

def ff(eta, phi, pi):
    # EoM right-hand side f(eta, phi, pi) = dV/dphi + m^2 phi - H(eta) pi, in rescaled units
    return (derivative(v, phi, dx=1e-6) - H_r(eta*scale)*pi*scale + phi*m2)*scale**2
eta_base = []
for i in range(0, int(layer)):
    # Lattice of eta values from the IR cutoff up to the UV cutoff
    eta_base.append(ir_cutoff + i*abs(delta_eta))
tanh = nn.Tanh()
print(len(eta_base), eta_base[int(layer)-1], layer)
xx = np.arange(0.1, 1.05/scale, 0.05)
plt.plot(xx, 3*np.cosh(3*xx/scale)/np.sinh(3*xx/scale), lw=2, label=r'$3\coth(3\eta)$')
plt.plot(xx, H_r(xx), lw=2, label=r'$H_R(\eta)$')
plt.title(r'Reproduced Metric $H(\eta)$')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.xlabel(r'$\eta$')
plt.ylabel(r'$H_R(\eta)$')
#plt.tight_layout()
#plt.savefig("Hr_rnq09_n3.png")
plt.show()
Activation Function
Runge-Kutta Fourth-Order
From the EoM, let's say
$$\pi\equiv\partial_\eta\phi,\qquad f(\eta,\phi,\pi)\equiv\frac{\delta V(\phi)}{\delta\phi}+m^2\phi-H(\eta)\,\pi,$$
so that $\partial_\eta\pi=f(\eta,\phi,\pi)$. The activation function at each layer is the fourth-order Runge-Kutta-Nyström step
$$\phi(\eta+\Delta\eta)=\phi(\eta)+\Delta\eta\left(\pi+\tfrac{1}{3}(k_1+k_2+k_3)\right),\qquad
\pi(\eta+\Delta\eta)=\pi(\eta)+\tfrac{1}{3}\left(k_1+2k_2+2k_3+k_4\right),$$
where the $k_i$ are defined by
$$k_1=\tfrac{\Delta\eta}{2}\,f(\eta,\phi,\pi),\quad
k_2=\tfrac{\Delta\eta}{2}\,f\!\left(\eta+\tfrac{\Delta\eta}{2},\,\phi+k,\,\pi+k_1\right),\quad
k_3=\tfrac{\Delta\eta}{2}\,f\!\left(\eta+\tfrac{\Delta\eta}{2},\,\phi+k,\,\pi+k_2\right),\quad
k_4=\tfrac{\Delta\eta}{2}\,f\!\left(\eta+\Delta\eta,\,\phi+\ell,\,\pi+2k_3\right),$$
with $k=\tfrac{\Delta\eta}{2}\left(\pi+\tfrac{k_1}{2}\right)$ and $\ell=\Delta\eta\,(\pi+k_3)$.
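As a self-contained sanity check of this scheme (a minimal sketch, not part of the original notebook; the helper name rkn4_step is ours), applying the identical step to the test equation $\phi''=-\phi$ with $\phi(0)=1$, $\pi(0)=0$ reproduces $\cos\eta$ to fourth-order accuracy:
import numpy as np

def rkn4_step(f, eta, phi, pi, d):
    # One Runge-Kutta-Nystroem step for phi'' = f(eta, phi, pi), with pi = dphi/deta
    k1 = d*f(eta, phi, pi)/2
    k = d*(k1/2 + pi)/2
    k2 = d*f(eta + d/2, phi + k, pi + k1)/2
    k3 = d*f(eta + d/2, phi + k, pi + k2)/2
    ell = d*(pi + k3)
    k4 = d*f(eta + d, phi + ell, pi + 2*k3)/2
    return phi + d*(pi + (k1 + k2 + k3)/3), pi + (k1 + 2*k2 + 2*k3 + k4)/3

f_test = lambda eta, phi, pi: -phi   # harmonic oscillator: exact solution cos(eta)
phi, pi, eta, d = 1.0, 0.0, 0.0, 0.01
for _ in range(100):
    phi, pi = rkn4_step(f_test, eta, phi, pi, d)
    eta += d
print(phi, np.cos(eta))  # agree to better than 1e-8 (global error O(d^4))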
phi_list_pos_exp = []
pi_list_pos_exp = []
phi_list_neg_exp = []
pi_list_neg_exp = []
phi_exp_data = []
pi_exp_data = []
train_data_y = []
np.random.seed(1)
while len(phi_list_pos_exp) < num_training_data or len(phi_list_neg_exp) < num_training_data:
    # Draw random boundary data (phi, pi) at the UV cutoff and propagate it toward the horizon
    eta = uv_cutoff
    phi_ini = np.random.uniform(low=0, high=1.7, size=(num_training_data*50))
    pi_ini = np.random.uniform(low=-0.2, high=0.7, size=(num_training_data*50))*scale
    phi = phi_ini
    pi = pi_ini
    for i in range(0, int(layer-1)):
        # One Runge-Kutta-Nystroem step of the EoM per layer
        k1 = delta_eta*ff(np.array([eta]), phi, pi)/2
        k = delta_eta*(k1/2 + pi)/2
        k2 = delta_eta*ff(np.array([eta + delta_eta/2]), phi + k, pi + k1)/2
        k3 = delta_eta*ff(np.array([eta + delta_eta/2]), phi + k, pi + k2)/2
        ell = delta_eta*(pi + k3)
        k4 = delta_eta*ff(np.array([eta + delta_eta]), phi + ell, pi + 2*k3)/2  # fourth stage uses ell, not k
        phi_new = phi + delta_eta*(pi + (k1 + k2 + k3)/3)
        pi_new = pi + (k1 + 2*k2 + 2*k3 + k4)/3
        eta += delta_eta
        phi = phi_new
        pi = pi_new
    for j in range(0, len(phi)):
        final_layer = t(pi[j])  # cf. horizon BC 2*pi/eta - m2*phi - delta_v(phi)
        if final_layer > 0.5:
            # t(pi) = 1: boundary condition violated -> negative (False) data
            if len(phi_list_neg_exp) < num_training_data:
                phi_list_neg_exp.append(phi_ini[j])
                pi_list_neg_exp.append(pi_ini[j])
                phi_exp_data.append(phi_ini[j])
                pi_exp_data.append(pi_ini[j])
                train_data_y.append(final_layer)
        else:
            # t(pi) = 0: boundary condition satisfied -> positive (True) data
            if len(phi_list_pos_exp) < num_training_data:
                phi_list_pos_exp.append(phi_ini[j])
                pi_list_pos_exp.append(pi_ini[j])
                phi_exp_data.append(phi_ini[j])
                pi_exp_data.append(pi_ini[j])
                train_data_y.append(final_layer)
#print(len(train_data_y))
Generating Data by the Real Metric
Set the training labels by the output of the final layer:
True (positive data): $t(\pi(\eta_{\rm ir}))=0$, i.e. the boundary condition at the horizon is satisfied. False (negative data): $t(\pi(\eta_{\rm ir}))=1$.
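For the PyTorch training stage that follows this section, the collected lists would typically be packed into tensors on `device`. A minimal sketch under that assumption (the tensor layout here is illustrative, not the original code):
train_x = torch.tensor(np.stack([phi_exp_data, pi_exp_data], axis=1),
                       dtype=torch.float32, device=device)       # inputs (phi, pi) at the UV cutoff
train_y = torch.tensor(train_data_y, dtype=torch.float32,
                       device=device).unsqueeze(1)               # labels t(pi) in {0, 1}
print(train_x.shape, train_y.shape)  # (2*num_training_data, 2), (2*num_training_data, 1)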