Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
e798c18
trial_stage
Apr 7, 2021
4052df8
fixed some compiling errors, added python bindings for ML_approx_force
Marcel-Rodekamp Apr 7, 2021
67037ce
Merge pull request #2 from evanberkowitz/devel
chelseajohn Apr 7, 2021
c9a0a35
Merge branch 'devel' into Ml_approx_force
chelseajohn Apr 7, 2021
4b5b4a1
Merge pull request #5 from chelseajohn/Ml_approx_force
chelseajohn Apr 7, 2021
1ac1492
removed copy mistakes
Marcel-Rodekamp Apr 9, 2021
3eff8d7
Merge pull request #6 from chelseajohn/Ml_approx_force
chelseajohn Apr 9, 2021
1e74c74
Fixed (?) action bindings; went from C++14 to C++17
Marcel-Rodekamp Apr 22, 2021
8817997
Merge pull request #3 from chelseajohn/devel
chelseajohn Apr 22, 2021
826005d
CMake for NNgHMC
Apr 28, 2021
83b3d38
Adding CMake for NNgHMC
Apr 28, 2021
fba8609
Merge branch 'devel' of https://github.com/chelseajohn/isle into ML_a…
Apr 28, 2021
b409393
Merge pull request #7 from chelseajohn/devel
chelseajohn Apr 28, 2021
a51d76b
Update CMakeLists.txt
chelseajohn Apr 28, 2021
5b7c196
Added C++17 (again? something in the merge must have gone wrong)
Marcel-Rodekamp Apr 30, 2021
5866b30
Commented in the bind stuff (Thats why isle_cpp wasn't found)
Marcel-Rodekamp Apr 30, 2021
42124d8
resolved merge conflicts
Marcel-Rodekamp Apr 30, 2021
c2e1ac2
Merge pull request #8 from chelseajohn/devel
chelseajohn Apr 30, 2021
a4cc532
working NN
May 10, 2021
e3558d7
fixed acceptanceProb
May 18, 2021
1f8bddd
Merge pull request #9 from chelseajohn/devel
chelseajohn May 18, 2021
f610b55
merged eval
May 21, 2021
3525441
Merge pull request #10 from chelseajohn/devel
chelseajohn May 21, 2021
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file modified .github/workflows/linux_build.yml
100644 → 100755
Empty file.
Empty file modified .github/workflows/macos_build.yml
100644 → 100755
Empty file.
Empty file modified .gitignore
100644 → 100755
Empty file.
Empty file modified .gitmodules
100644 → 100755
Empty file.
Empty file modified .travis.yml
100644 → 100755
Empty file.
5 changes: 4 additions & 1 deletion CMakeLists.txt
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ include(cmake/StandardProjectSettings.cmake)

# Link this 'library' to set the c++ standard / compile-time options requested
add_library(project_options INTERFACE)
target_compile_features(project_options INTERFACE cxx_std_14)
target_compile_features(project_options INTERFACE cxx_std_17)
set_target_properties(project_options
PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON)

Expand All @@ -36,6 +36,9 @@ option(ENABLE_TESTING "Enable Test Builds" ON)
# look for 3rd party packages
include(cmake/pybind11.cmake)

#libtorch
include(cmake/NNgHMC.cmake)

find_package(blaze 3.6 REQUIRED)
add_library(blaze_options INTERFACE)
target_include_directories(blaze_options SYSTEM INTERFACE ${blaze_INCLUDE_DIRS})
Expand Down
Empty file modified LICENSE
100644 → 100755
Empty file.
Empty file modified README.md
100644 → 100755
Empty file.
Empty file modified benchmarks/.gitignore
100644 → 100755
Empty file.
Empty file modified benchmarks/logdet.py
100644 → 100755
Empty file.
Empty file modified benchmarks/show.py
100644 → 100755
Empty file.
Empty file modified cmake/Cache.cmake
100644 → 100755
Empty file.
Empty file modified cmake/CompilerWarnings.cmake
100644 → 100755
Empty file.
Empty file modified cmake/Modules/Findblaze.cmake
100644 → 100755
Empty file.
9 changes: 9 additions & 0 deletions cmake/NNgHMC.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# NNgHMC.cmake -- optional libtorch (PyTorch C++) support for the NN-guided HMC.
#
# Defines the cache option USE_NN (default ON).  When enabled, locates libtorch
# via find_package(Torch) and attaches its libraries to the project-wide
# `project_options` INTERFACE target so every consumer inherits them.

# NOTE: this module is include()d from the top-level CMakeLists.txt, which has
# its own cmake_minimum_required.  3.5 is the oldest version still accepted by
# current CMake releases (3.0 triggers a compatibility/deprecation error).
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)

option(USE_NN "Enable gradient calculation with NN" ON)

# Hint for find_package(Torch).  A user-settable cache path instead of the
# previous unconditional hard-coded cluster path, which leaked a
# machine-specific location into every build configuration.
set(TORCH_INSTALL_PREFIX "/p/project/cjjsc37/john/libtorch"
    CACHE PATH "Root directory of a libtorch installation")

if (USE_NN)
    message(STATUS "Using NNgHMC")
    # Only extend the search path when the feature is actually requested.
    list(APPEND CMAKE_PREFIX_PATH "${TORCH_INSTALL_PREFIX}")
    find_package(Torch REQUIRED)
    # INTERFACE: propagate libtorch to everything linking project_options.
    target_link_libraries(project_options INTERFACE "${TORCH_LIBRARIES}")
endif ()
Empty file modified cmake/StandardProjectSettings.cmake
100644 → 100755
Empty file.
Empty file modified cmake/pybind11.cmake
100644 → 100755
Empty file.
Empty file modified docs/.gitignore
100644 → 100755
Empty file.
Empty file modified docs/Makefile
100644 → 100755
Empty file.
Empty file modified docs/algorithm/.gitignore
100644 → 100755
Empty file.
Empty file modified docs/algorithm/Makefile
100644 → 100755
Empty file.
Empty file modified docs/algorithm/hubbardFermiAction.tex
100644 → 100755
Empty file.
Empty file modified docs/algorithm/references.bib
100644 → 100755
Empty file.
Empty file modified docs/algorithm/settings.tex
100644 → 100755
Empty file.
Empty file modified docs/doxyfile.conf
100644 → 100755
Empty file.
96 changes: 96 additions & 0 deletions docs/examples/NNgPytorchModel.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from itertools import chain
from tqdm import tqdm
import numpy as np


# loading data for training
# Presumably produced by docs/examples/create_training_data.py -- note that
# script saves 'tinputs_...'/'ttargets_...' while these names have no 't'
# prefix; TODO confirm the file naming.
xx = np.load('trainingData_NN/inputs_4sites_U3.0B3.0Nt32.npy')
yy = np.load('trainingData_NN/targets_4sites_U3.0B3.0Nt32.npy')


# Network dimensions are derived from the data: one sample per row.
input_dim = xx.shape[1]
# single hidden layer, twice as wide as the input
hidden_dim = [2*input_dim]
# one output component per input component
output_dim = input_dim


class NNg(nn.Module):
    """Feed-forward network: Linear -> ReLU -> Linear.

    Maps an input vector of size `input_dim` to an output vector of size
    `output_dim` through a single hidden layer of size `hidden_dim[0]`.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        """
        Args:
            input_dim: int, size of each input sample.
            hidden_dim: list of int; only hidden_dim[0] is used here
                (a single hidden layer).
            output_dim: int, size of each output sample (it is passed
                directly to nn.Linear, so it is not a list).
        """

        # calling constructor of parent class
        super().__init__()

        # defining the inputs to the first hidden layer:
        # small random weights, zero bias
        self.hid1 = nn.Linear(input_dim, hidden_dim[0])
        nn.init.normal_(self.hid1.weight, mean=0.0, std=0.01)
        nn.init.zeros_(self.hid1.bias)
        self.act1 = nn.ReLU()

        # defining the inputs to the output layer
        # NOTE(review): only the weight is re-initialised (Xavier); the bias
        # keeps PyTorch's default init -- confirm this asymmetry is intended.
        self.hid2 = nn.Linear(hidden_dim[0], output_dim)
        nn.init.xavier_uniform_(self.hid2.weight)

    def forward(self, X):
        """Forward pass: X of shape (batch, input_dim) -> (batch, output_dim)."""

        # input and act for layer 1
        X = self.hid1(X)
        X = self.act1(X)

        # input and act for layer 2 (no final activation: raw linear output)
        X = self.hid2(X)
        return X


# ----- training -----
epochs = 80
batchsize = 150
#converting to tensors
# Only the real part of the (complex-valued) data on disk is used.
inputs, targets = torch.from_numpy(np.real(xx)).double(), torch.from_numpy(np.real(yy)).double()
train_ds = TensorDataset(inputs, targets)
# split the data into batches
train_dl = DataLoader(train_ds, batchsize, shuffle=False)
# NOTE(review): test_dl wraps the same dataset as train_dl, so this is not an
# independent test set -- confirm whether a train/test split was intended.
test_dl = DataLoader(train_ds, batchsize, shuffle=False)
model_torch = NNg(input_dim, hidden_dim, output_dim).double()
optimizer = torch.optim.Adam(model_torch.parameters(), weight_decay=1e-5)
criterion = nn.L1Loss()

# iterate through all the epoch
for epoch in tqdm(range(epochs)):
    # go through all the batches generated by dataloader
    for i, (inputs, targets) in enumerate(train_dl):
        # clear the gradients
        optimizer.zero_grad()
        # compute the model output
        yhat = model_torch(inputs)
        # calculate loss
        # NOTE(review): the model runs in double precision but the targets are
        # cast to float32 (torch.FloatTensor) here, so L1Loss mixes dtypes --
        # confirm the cast is intentional.
        loss = criterion(yhat, targets.type(torch.FloatTensor))
        # credit assignment
        loss.backward()
        # update model weights
        optimizer.step()
    # Print the progress (loss of the last batch of the epoch)
    if (epoch+1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch +
                                                   1, epochs, loss.item()))

# load a sample data
#example_input, example_target = next(iter(train_dl))
#run the tracing
#traced_script_module = torch.jit.trace(model_torch, example_input)
# save the converted model
#traced_script_module.save("NNg_model.pt")

#scripting the model
# TorchScript the trained model so the C++ (libtorch) side can load it.
script_module = torch.jit.script(model_torch)
script_module.save("NNgModel_del.pt")
152 changes: 152 additions & 0 deletions docs/examples/bootStrapCorrelators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,152 @@
import h5py as h5
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import isle
import isle.drivers

# Read the raw single-particle correlators and the action value of each
# stored configuration from the measurement file given on the command line.
with h5.File(sys.argv[1],"r") as h5f:
    rawP = h5f["correlation_functions/single_particle/destruction_creation/"][()]
    weights = h5f["weights/actVal"][()]

# Lattice and run parameters stored alongside the measurements.
metadata = isle.h5io.readMetadata(sys.argv[1])
lattice = metadata[0]
parameters = metadata[1]
BETA = parameters.beta          # inverse temperature
U = parameters.U                # on-site coupling
NBS = 100                       # number of bootstrap samples
BSLENGTH=rawP.shape[0]          # bootstrap sample length (re-set per bin below)
NCORR = rawP.shape[1]           # number of correlators
NT=rawP.shape[3]                # number of time slices
DELTA = BETA/NT                 # Euclidean time step

print(f'U={U} beta={BETA} Nt={NT}')
print(f'# of corelators = {NCORR}')
# Ok, this next part is related to simulations that have a sign problem, which yours do not have at the moment.
# But you can still evaluate this part anyways, as it won't affect your results. I add it here anyways in
# case we decide in the near future to run on systems with a sign problem.
# Reweighting phase exp(-i Im S) for every configuration.
theta = -np.imag(weights)
allWeights = np.exp(1j*theta)

# now the number of weights equals the total number of configurations
# but the number of measurements is not necessarily the same!
# I need to take this into account

measFreq = int(weights.shape[0]/rawP.shape[0])
print("# measurements were done every {}th configuration (that was stored)".format(measFreq))

# Keep only the weights of configurations that were actually measured on.
weights = np.array([allWeights[i] for i in range(0,allWeights.shape[0],measFreq)])
# now multiple correlators by their weights
for itr in range(rawP.shape[0]):
    rawP[itr] *= weights[itr]

# All data are stored in these arrays
corrRe = [[] for i in range(NCORR)]   # real part of each correlator
errRe = [[] for i in range(NCORR)]    # bootstrap error of the real part
corrIm = [[] for i in range(NCORR)]   # imaginary part
errIm = [[] for i in range(NCORR)]    # bootstrap error of the imaginary part
tau = [ t*DELTA for t in range(NT)]   # Euclidean time axis for plotting


#bining the data
NBIN = 100
# NOTE(review): NBS is re-assigned here to the same value it already has above
# -- redundant, but harmless.
NBS = 100

# For every correlator: bin consecutive measurements (to reduce
# autocorrelation), form the reweighted mean, and bootstrap the error.
for nc in range(NCORR):
    weightsBinned = []
    rawPBinned = []
    # Average NBIN consecutive measurements into one bin.
    for i in range(int(rawP.shape[0]/NBIN)):
        weightsBinned.append(np.mean(np.array([weights[i*NBIN+j] for j in range(NBIN)])))
        temp = np.array([0+0j for _ in range(NT)])
        for j in range(NBIN):
            # only the diagonal element (nc, nc) is analysed
            temp += rawP[i*NBIN+j,nc,nc]
        temp /= NBIN
        rawPBinned.append(temp)

    weightsBinned = np.array(weightsBinned)
    rawPBinned = np.array(rawPBinned)

    # Reweighted ensemble average of this correlator.
    avgCorrP = np.mean(rawPBinned,axis=0)/np.mean(weightsBinned)

    # Draw NBS bootstrap samples (with replacement) over the bins.
    BSLENGTH = weightsBinned.shape[0]
    bootstrapIndices = np.random.randint(0, BSLENGTH, [NBS, BSLENGTH])


    # now get error on correlators:
    # std-dev over bootstrap samples of the reweighted mean, per time slice.
    RerrPxx = np.std(np.real(np.array([np.mean(np.array([rawPBinned[cfg] for cfg in bootstrapIndices[sample]])/np.mean(np.array([weightsBinned[cfg] for cfg in bootstrapIndices[sample]])), axis=0) for sample in range(NBS) ] )), axis=0)

    IerrPxx = np.std(np.imag(np.array([np.mean(np.array([rawPBinned[cfg] for cfg in bootstrapIndices[sample]])/np.mean(np.array([weightsBinned[cfg] for cfg in bootstrapIndices[sample]])), axis=0) for sample in range(NBS) ] )), axis=0)

    # Collect mean and error per time slice for plotting.
    for t in range(NT):
        corrRe[nc].append(avgCorrP[t].real)
        errRe[nc].append(RerrPxx[t])
        corrIm[nc].append(avgCorrP[t].imag)
        errIm[nc].append(IerrPxx[t])
        #print(t*DELTA,avgCorrP[t].real,RerrPxx[t],avgCorrP[t].imag,IerrPxx[t])


# Exact reference curves, read as whitespace-separated columns
# (tau, plus-branch, minus-branch) from two files.
# NOTE(review): the file names encode U3 B4 while BETA is taken from the run's
# metadata above -- confirm these reference files match the analysed run.
dataG = open('ExactFiles/U3B4G.dat','r').readlines()
dataM = open('ExactFiles/U3B4M.dat','r').readlines()
exact_t_gamma = []
exact_gamma_plus = []
exact_gamma_minus = []
exact_t_m = []
exact_m_plus = []
exact_m_minus = []
for dat in dataG:
    ss = dat.split()
    exact_t_gamma.append(float(ss[0]))
    exact_gamma_plus.append(float(ss[1]))
    exact_gamma_minus.append(float(ss[2]))
for dat in dataM:
    ss = dat.split()
    exact_t_m.append(float(ss[0]))
    exact_m_plus.append(float(ss[1]))
    exact_m_minus.append(float(ss[2]))

#Exact co-relators data
# exactData = open("ExactFiles/U4B6.dat").readlines()
# exT = []
# exBonding = []
# exAntiBonding = []
# exAA = []
# exAB = []
# exBA = []
# exBB = []
# for i in range(len(exactData)):
# split = exactData[i].split()
# exT.append(float(split[0])) # tau
# exAntiBonding.append(float(split[1])) # anti-bonding
# exBonding.append(float(split[2])) # bonding
# exAA.append(float(split[3])) # cAA
# exAB.append(float(split[4])) # cAB
# exBA.append(float(split[5])) # cBA
# exBB.append(float(split[6])) # cBB

# Now I just plot our the real parts of the correlators (the imaginary part should be MUCH smaller)
fig, ax = plt.subplots(1,1,figsize=(10,7.5))
ax.set_yscale('log')
for nc in range(NCORR):
    #print("error",nc,"=",errRe[nc])
    ax.errorbar(tau,corrRe[nc],yerr=errRe[nc],marker='o',label=f'corr{nc}')

# ax.plot(exT,exBonding,'k--',label='exact')
# ax.plot(exT,exAntiBonding,'k--')
# Overlay the exact reference curves (black dashed: gamma, red dashed: m).
ax.plot(exact_t_gamma,exact_gamma_plus,'k--')
ax.plot(exact_t_gamma,exact_gamma_minus,'k--')
ax.plot(exact_t_m,exact_m_plus,'r--')
ax.plot(exact_t_m,exact_m_minus,'r--')

ax.grid()
ax.set_xlabel(r'$\tau$')
ax.set_ylabel(r'$C(\tau)$')
ax.set_title(rf' Four sites $U={U}$ $\beta={BETA}$, $N_t={NT}$')
ax.legend()
plt.savefig('results/Exact4sitesNmd3_logU{}B{}NT{}.pdf'.format(U,BETA,NT))






Empty file modified docs/examples/changeEvolver.py
100644 → 100755
Empty file.
105 changes: 105 additions & 0 deletions docs/examples/create_training_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
import numpy as np
import h5py as h5
import sys
import matplotlib.pyplot as plt
from tqdm import tqdm
# Import base functionality of Isle.
import isle
# Import drivers (not included in above).
import isle.drivers


# The HMC output file to read configurations from is given on the command line.
if len(sys.argv) > 1:
    #HMC data file
    HMC_data_file = str(sys.argv[1])
else:
    print("Please enter the h5 file")
    sys.exit()

# NOTE(review): this handle stays open for the whole script and is never
# closed; a later `with h5.File(...)` block opens the same file again.
hf = h5.File(HMC_data_file, 'r')

# Four-site Hubbard lattice with Nt time slices.
LATTICE = "four_sites"
lat = isle.LATTICES[LATTICE]
Nt = 32 # number of time steps
lat.nt(Nt)

# Physical and algorithmic parameters; these are hard-coded and must match the
# run that produced HMC_data_file -- TODO read from the file's metadata instead.
PARAMS = isle.util.parameters(
    beta=3.0, # inverse temperature
    U=3.0, # on-site coupling
    mu=0, # chemical potential
    sigmaKappa=-1, # prefactor of kappa for holes / spin down
    # (+1 only allowed for bipartite lattices)

    # Those three control which implementation of the action gets used.
    # The values given here are the defaults.
    # See documentation in docs/algorithm.
    hopping=isle.action.HFAHopping.EXP,
    basis=isle.action.HFABasis.PARTICLE_HOLE,
    algorithm=isle.action.HFAAlgorithm.DIRECT_SINGLE
)

def makeAction(lat, params):
    """Construct the full Hubbard action (gauge part + fermion part).

    Args:
        lat: isle lattice object.
        params: parameter collection as built by isle.util.parameters above.

    Returns:
        The summed isle action object, usable for force evaluation.
    """
    # Import everything this function needs so it is self-contained.
    import isle
    import isle.action

    return isle.action.HubbardGaugeAction(params.tilde("U", lat)) \
        + isle.action.makeHubbardFermiAction(lat,
                                             params.beta,
                                             params.tilde("mu", lat),
                                             params.sigmaKappa,
                                             params.hopping,
                                             params.basis,
                                             params.algorithm)
#calculates the action
action = makeAction(lat,PARAMS)


#loading the trajectory indexes from actual HMC run
traj_index = [int(index) for index in np.array((hf['configuration']))]
# sanity check: report any (unexpected) negative trajectory index
check_index = [print("negative index") for i in traj_index if i < 0]

############intialization##################
# Configurations (phi) and forces from the actual HMC run; complex dtype.
training_actualHMC = np.zeros((len(traj_index),lat.lattSize()))+ 0j
gradient_actualHMC = np.zeros((len(traj_index),lat.lattSize()))+ 0j
# action_HMC = np.zeros(len(traj_index)) + 0.j

# training data from Gaussian distribution
num_samples = 10000
training_gaus = np.zeros((num_samples,lat.lattSize())) + 0j
gradient_gaus = np.zeros((num_samples,lat.lattSize())) + 0j

#stores data from Actual HMC and Gaussian distributions
xx = np.zeros((len(traj_index)+ num_samples,lat.lattSize())) + 0j
yy = np.zeros((len(traj_index)+ num_samples,lat.lattSize())) + 0j


#loading actual HMC phi's and calculating the force
with h5.File(HMC_data_file, "r") as h5f:
    for i,index in tqdm(enumerate(traj_index)):
        # get the configuration group with the given index.
        # Read through the handle opened by this `with` block; the original
        # read via the globally opened `hf`, leaving `h5f` entirely unused.
        cfgGrp = h5f["configuration"][str(index)]
        # phi is the field configuration; the stored action value is discarded.
        training_actualHMC[i,:] , _ = cfgGrp["phi"][()], cfgGrp["actVal"][()]
        # target: minus the force, i.e. the gradient of the action at phi.
        gradient_actualHMC[i,:] = -action.force(isle.Vector(training_actualHMC[i,:]+0j))



#creating gaussian samples
# Draw phi from a Gaussian whose width sqrt(U-tilde) is suggested by the gauge
# action, and compute the exact force for each sample.
for i in tqdm(range(num_samples)):
    training_gaus[i,:] = np.random.normal(0,PARAMS.tilde("U", lat)**(1/2),lat.lattSize())
    gradient_gaus[i,:] = -action.force(isle.Vector(training_gaus[i,:]+0j))

# joining actual HMC and gaussian data: Gaussian samples first, HMC after
xx[:num_samples,:] = training_gaus[:num_samples,:]
xx[num_samples:,:] = training_actualHMC[:,:]
yy[:num_samples,:] = gradient_gaus[:num_samples,:]
yy[num_samples:,:] = gradient_actualHMC[:,:]

#saving the training_data
# NOTE(review): these names carry a 't' prefix (tinputs/ttargets) while the
# training script loads 'inputs_...'/'targets_...' -- confirm the naming.
np.save(f'trainingData_NN/tinputs_4sites_U{PARAMS.U}B{PARAMS.beta}Nt{Nt}',xx)
np.save(f'trainingData_NN/ttargets_4sites_U{PARAMS.U}B{PARAMS.beta}Nt{Nt}',yy)

#plotting histogram of training_data (real part of all inputs)
plt.hist(np.real(xx).flatten(),density=True,bins=30,label='training_gaus + HMC')
plt.legend()
plt.savefig(f'trainingData_NN/ttrainingData_4sites_U{PARAMS.U}B{PARAMS.beta}Nt{Nt}.pdf')
Loading