diff --git a/papers/MeerTRAP/average_grids.py b/papers/MeerTRAP/average_grids.py new file mode 100644 index 00000000..3ffbc9d4 --- /dev/null +++ b/papers/MeerTRAP/average_grids.py @@ -0,0 +1,237 @@ +""" +Iterates over the Hoffmann et al parameter set to estimate how many +get ruled out by the z=2 scenario. +""" + + +import emcee +import argparse +import importlib.resources as resources +import os +import json +from zdm import survey +import numpy as np +from zdm import parameters +from astropy.cosmology import Planck18 +from zdm import figures +from zdm import misc_functions as mf +from zdm import grid as zdm_grid +import copy +from zdm import pcosmic +from matplotlib import pyplot as plt +import corner + + +# run with python average_grids.py --survey=MeerTRAPcoherent -b 500 -d "../../zdm/scripts/MCMC/" -o "z2limit/" -i "H0_prior10" +# these are now defaults + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--infile',default="H0_prior10",type=str,help="Input HDF5 file containing MCMC results") + parser.add_argument('-s', '--survey', default='MeerTRAPcoherent') + parser.add_argument('-d', '--directory', default="../../zdm/scripts/MCMC/", type=str, help="Directory containing the HDF5 file. 
Defaults to zdm/mcmc/") + parser.add_argument('-o', '--opdir', default="z2limit/", type=str, help="Output directory for files") + parser.add_argument('-n', default=100, type=int, help="Number of parameter sets to calculate") + parser.add_argument('-b', '--burnin', default=500, type=int, help="Burnin to discard from infile") + args = parser.parse_args() + + if args.directory == None: + args.directory = resources.files('zdm').joinpath('mcmc') + + return args + +def main(): + # Parse command line arguments + args = parse_args() + # Read MCMC output file + samples, params, config,labels = get_samples(args) + + if args.opdir != "": + opdir = args.opdir+"/" + if not os.path.exists(opdir): + os.mkdir(opdir) + + else: + opdir = "./" + dmmax = 7000.0 + #ndm = 1400 + #nz = 500 + dmmax = 4000 + zmax = 4 + ndm = 400 + nz = 200 + + load = False + + # Set up state + state = parameters.State() + state.set_astropy_cosmo(Planck18) + if config is not None: + state.update_params(config) + + # We explicitly load this to allow the "average grids" routine + # to more speedily calculate many grids. 
This only works because we + # are not changing H0 or F + grid_vals = mf.get_zdm_grid( + state, new=True, plot=False, method='analytic', + nz=nz, ndm=ndm, dmmax=dmmax, zmax = zmax, + datdir=resources.files('zdm').joinpath('GridData')) + + # Load survey + # If the survey is not specified, then the default is to use CRAFT_ICS_1300 + s = survey.load_survey(args.survey, state, grid_vals[2]) + + # Calculate and average grids + print("Calculating average grids...") + if load: + good_samples = np.load(opdir+"good_samples.npy") + bad_samples = np.load(opdir+"bad_samples.npy") + else: + good_samples,bad_samples = average_grids(samples, params, s, state, grid_vals,opdir) + + do_cornerplot(good_samples,labels,opdir+"passing_parameters.png") + if len(bad_samples) == 0: + print("Nothing is bad!") + else: + print("Bad has length ",len(bad_samples)) + +def do_cornerplot(samples,labels,savefile): + """ + Args: + samples (np.array): + Array of samples of dimensions nwalkers x nparameters. + Used to create the cornerplot + labels (list of strings): + Text labels for each variable, in order of appearance in samples + savefile (string): filename for saved cornerplot + + """ + fig = plt.figure(figsize=(12,12)) + + titles = ['' for i in range(samples.shape[1])] # no titles for the plots + truths = None + corner.corner(samples,labels=labels, show_titles=True, titles=titles, + fig=fig,title_kwargs={"fontsize": 15},label_kwargs={"fontsize": 15}, + quantiles=[0.16,0.5,0.84], truths=truths); + + plt.savefig(savefile) + +def get_samples(args): + # Read in the MCMC results without the burnin + infile = os.path.join(args.directory, args.infile) + reader = emcee.backends.HDFBackend(infile + '.h5') + sample = reader.get_chain(discard=args.burnin, flat=True) + + # Thin the results + step = len(sample) // args.n + sample = sample[::step,:] + + # Get the corresponding parameters + # If there is a corresponding .out file, it will contain all the necessary information that was used during that run, + 
# otherwise parameters must be specified manually + if os.path.exists(infile + '.out'): + with open(infile + '.out', 'r') as f: + # Get configuration + line = f.readline() + + while not line.startswith('Config') and line: + line = f.readline() + if not line: + raise ValueError("No 'Config' line found in the .out file.") + config = json.loads(line[9:].replace("'", '"')) + + # Read down to parameter lines + while ('priors' not in line) and line: + line = f.readline() + + # Read parameters + params = [] + while ('priors' in line) and line: + vals = line.split() + params.append(vals[0]) + line = f.readline() + + # If there is no .out file, then the parameters must be specified manually + else: + params = ["sfr_n", "alpha", "lmean", "lsigma", "lEmax", "lEmin", "gamma", "H0"] + labels = [r"$n_{\rm sfr}$", r"$\alpha$", r"$\log_{10}\mu_h$", r"$\log_{10}\sigma_h$", + r"$\log_{10} E_{\rm max}$",r"$\log_{10}E_{\rm min}$", r"$\gamma$", r"$H_0$"] + config = None + + return sample, params, config, labels + +def average_grids(samples, params, s, state, grid_vals, opdir, zFRB = 2.148, pmin=0.01,log_halo=False,verbose=False): + + zDMgrid = grid_vals[0] + zvals = grid_vals[1] + dmvals = grid_vals[2] + + av_rates = np.zeros([len(zvals), len(dmvals)]) + rates = [] + + good_samples = [] + bad_samples = [] + + # Load a grid for each parameter set + for i in range(samples.shape[0]): + if verbose: + print("Sampling parameter set ",i,". 
Params: ") + vals = samples[i,:] + + + dict = {} + for j in range(len(vals)): + dict[params[j]] = vals[j] + if verbose: + print(" ",params[j],": ",vals[j]) + + state.update_params(dict) + if 'DMhalo' in params: + if log_halo: + DMhalo = 10**dict['DMhalo'] + else: + DMhalo = dict['DMhalo'] + s.init_DMEG(DMhalo) + s.get_efficiency_from_wlist(s.DMlist,s.wlist,s.wplist,model=s.meta['WBIAS']) + + mask = pcosmic.get_dm_mask( + dmvals, (state.host.lmean, state.host.lsigma), zvals, plot=True + ) + grid = zdm_grid.Grid( + s, copy.deepcopy(state), zDMgrid, zvals, dmvals, mask, wdist=True + ) + + # we take all grid values where z_grid < z_FRB, but we use the highest one + # as "including" the FRB, so that we round conservatively + iz = np.where(grid.zvals < zFRB)[0] + izmin = iz[-1] + + # get redshift distribution + zdist = np.sum(grid.rates,axis=1) + zdist = np.cumsum(zdist) + zdist /= zdist[-1] + + # probability of being larger + pz = 1.-zdist[izmin] + print("Found a cumulative pz! Prob of being larger is ",pz) + if pz < pmin: + # this parameter set rules out the detection, and is incompatible + bad_samples.append(samples[i]) + else: + # it's all OK!!! 
It has enough probability at high z + good_samples.append(samples[i]) + + #av_rates += grid.rates + #rates.append(grid.rates) + + # we now have a list of good and bad samples + + good = np.array(good_samples) + bad = np.array(bad_samples) + np.save(opdir+"good_samples.npy",good) + np.save(opdir+"bad_samples.npy",bad) + #av_rates = av_rates / samples.shape[1] + return good,bad + +main() diff --git a/papers/MeerTRAP/plot_z_comparison.py b/papers/MeerTRAP/plot_z_comparison.py index 9e40f790..171d4231 100644 --- a/papers/MeerTRAP/plot_z_comparison.py +++ b/papers/MeerTRAP/plot_z_comparison.py @@ -1,5 +1,5 @@ """ -This script creates a redshifty comparison figure of MeerTRAP, +This script creates a redshift comparison figure of MeerTRAP, ASKAP/CRACO (estimates), DSA, and CHIME diff --git a/papers/SKA_science/make_zdists.py b/papers/SKA_science/make_zdists.py index d6d6d373..8d7096fa 100644 --- a/papers/SKA_science/make_zdists.py +++ b/papers/SKA_science/make_zdists.py @@ -4,6 +4,9 @@ It first loads in the simulation info from the script "sim_SKA_configs.py", and generates plots for the best cases. +It does this when iterating over various parameter estimates from +Hoffmann et al. 
+ """ import os import emcee @@ -15,12 +18,12 @@ from zdm import io from zdm import misc_functions as mf from zdm import grid as zdm_grid -from zdm import survey + import numpy as np import copy from matplotlib import pyplot as plt -from pkg_resources import resource_filename +import importlib.resources as resources def main(): """ @@ -35,11 +38,11 @@ def main(): zDMgrid, zvals, dmvals = mf.get_zdm_grid( state, new=True, plot=False, method='analytic', - datdir=resource_filename('zdm', 'GridData')) + datdir=resources.files('zdm').joinpath('GridData')) ####### sample MCMC parameter sets ###### - infile = resource_filename('zdm', 'scripts/MCMC')+"/H0_prior10.h5" + infile = str(resources.files('zdm').joinpath('scripts/MCMC'))+"/H0_prior10.h5" nsets=100 sample, params, pconfig = get_samples(infile,nsets) @@ -118,7 +121,6 @@ def generate_sensitivity_plot(infile,state,zDMgrid, zvals, dmvals, label, freq, ibest = np.argmax(oldNs) survey_dict = {"THRESH": thresh_Jyms[ibest], "TOBS": TOBS[ibest], "FBAR": freq, "BW": bw} - Nsamples = samples.shape[0] Nz = zvals.size Ndm = dmvals.size @@ -132,8 +134,6 @@ def generate_sensitivity_plot(infile,state,zDMgrid, zvals, dmvals, label, freq, opfile8 = opdir+label+"_sys_pz.npy" opfile9 = opdir+label+"_sys_pdm.npy" - - np.save("sysplotdir/zvals.npy",zvals) np.save("sysplotdir/dmvals.npy",dmvals) diff --git a/papers/SKA_science/plot_zdm_dists.py b/papers/SKA_science/plot_zdm_dists.py index 968eceb2..a2f9e356 100644 --- a/papers/SKA_science/plot_zdm_dists.py +++ b/papers/SKA_science/plot_zdm_dists.py @@ -76,7 +76,6 @@ def make_plots(label,datadir="sys_outputs/",plotdir="sysplotdir/"): pzs = np.load(datadir+label+"_sys_pz.npy") pdms = np.load(datadir+label+"_sys_pdm.npy") - plow,pmid,phigh = make_pz_plots(zvals,pzs,plotdir+label) make_pdm_plots(dmvals,pdms,plotdir+label) diff --git a/papers/SKA_science/sim_SKA_configs.py b/papers/SKA_science/sim_SKA_configs.py index 06e668b6..a822510f 100644 --- a/papers/SKA_science/sim_SKA_configs.py +++ 
b/papers/SKA_science/sim_SKA_configs.py @@ -1,7 +1,7 @@ """ This script creates a zDM plot for SKA_Mid -It also estimates the raction of SKA bursts that will have +It also estimates the fraction of SKA bursts that will have unseen hosts by a VLT-like optical obeservation """ import os @@ -19,7 +19,7 @@ import numpy as np import copy from matplotlib import pyplot as plt -from pkg_resources import resource_filename +import importlib.resources as resources def main(): """ @@ -34,7 +34,7 @@ def main(): zDMgrid, zvals, dmvals = mf.get_zdm_grid( state, new=True, plot=False, method='analytic', - datdir=resource_filename('zdm', 'GridData')) + datdir=resources.files('zdm').joinpath('GridData')) ####### Loop over input files ######### @@ -44,7 +44,6 @@ def main(): freqs = [865,1400,190] bws = [300,300,120] - for i,tel in enumerate(["Band1", "Band2", "Low"]): # sets frequency and bandwidth for each instrument freq = freqs[i] @@ -123,7 +122,7 @@ def generate_sensitivity_plot(infile,state,zDMgrid, zvals, dmvals, label, freq, ########## speedups ############ #set survey path - sdir = os.path.join(resource_filename('zdm', 'data'), 'Surveys') + sdir = resources.files('zdm').joinpath('data/Surveys') # we use SKA mid, but actually we will over-ride may attributes here survey_name='SKA_mid' diff --git a/papers/SKA_science/sim_ska_repeaters.py b/papers/SKA_science/sim_ska_repeaters.py new file mode 100644 index 00000000..ec16a3e1 --- /dev/null +++ b/papers/SKA_science/sim_ska_repeaters.py @@ -0,0 +1,190 @@ +""" +This script creates plots of p(z) and p(dm) for different SKA configs + +""" +import numpy as np +from matplotlib import pyplot as plt +import matplotlib +import importlib.resources as resources +from astropy.cosmology import Planck18 +from zdm import cosmology as cos +from zdm import parameters +from zdm import survey +from zdm import pcosmic +from zdm import loading +from zdm import io +from zdm import misc_functions as mf +from zdm import grid as zdm_grid +from zdm import 
figures +from zdm import states +import copy + +defaultsize=14 +ds=4 +font = {'family' : 'Helvetica', + 'weight' : 'normal', + 'size' : defaultsize} +matplotlib.rc('font', **font) + +def main(): + """ + Plots outputs of simulations + + """ + + ####### Loop over input files ######### + # these set the frequencies in MHz and bandwidths in MHz + names = ["SKA_mid_band1_AA4","SKA_mid_band2_AA4"] #,"SKA_mid_band5a_AA4","SKA_mid_band5b_AA4"] + # don't have Aeff/Tsys for these ones. The above are based on how far out we beamform + Tobs = np.array([100.]) #[1.,10.,100.,1000.]) # hr per pointing + Tobs /= 24. # converts to day + + outdir = "FRBAstrophysics/" + + # ensures we keep the same state + case = "b" + state = states.load_state("HoffmannEmin25",scat="updated",rep=case) + + ss,gs = loading.surveys_and_grids(init_state=state,survey_names=["CRAFT_class_I_and_II"],repeaters=False) + s=ss[0] + g=gs[0] + + # we expect np.sum(g.rates)*s.TOBS * C = s.NORM_FRB + #norm = s.NORM_FRB/(s.TOBS*np.sum(g.rates)) + #logC = np.log10(norm) + #print("logC is ",logC) + + for i,name in enumerate(names): + # sets frequency and bandwidth for each instrument + zvals,plow,pmid,phigh = make_plots(name,outdir,state,Tobs,tag=name+"_case_"+case) + + +def make_plots(survey_name,outdir,state,Tobss,tag=""): + """ + + Args: + label (string): string label identifying the band and config + of the SKA data to load, and tag to apply to the + output files + + """ + state = parameters.State() + survey_dict={} + survey_dict["Telescope"]={} + survey_dict["TOBS"] = 365 # This is one year + survey_dict["NORM_FRB"] = 0 + survey_dict["NORM_REPS"] = 0 # fake + survey_dict["NORM_SINGLES"] = 0 #fake + sdir = resources.files('zdm').joinpath('data/Surveys/SKA/') + + + for i,Tobs in enumerate(Tobss): + + survey_dict["TFIELD"] = Tobs #OK, this is time per field + ss,gs = loading.surveys_and_grids(init_state=state,survey_names=[survey_name],repeaters=True, + survey_dict=survey_dict,sdir=sdir) + s=ss[0] + g=gs[0] + 
figures.plot_repeaters_zdist(g,prefix=tag) + exit() + + pzs = np.load(datadir+label+"_sys_pz.npy") + pdms = np.load(datadir+label+"_sys_pdm.npy") + + plow,pmid,phigh = make_pz_plots(zvals,pzs,plotdir+label) + make_pdm_plots(dmvals,pdms,plotdir+label) + + return zvals,plow,pmid,phigh + +def make_pz_plots(zvals,pzs,label): + """ + Make plots of p(z) for each systematic simulation + """ + + Nparams,NZ = pzs.shape + + # this scales from the "per z bin" to "per z", + # i.e. to make the units N per year per dz + scale = 1./(zvals[1]-zvals[0]) + + mean = np.sum(pzs,axis=0)/Nparams + + # total estimates + Ntots = np.sum(pzs,axis=1) + Nordered = np.sort(Ntots) + Nbar = np.sum(Ntots)/Nparams + sigma1 = Nordered[15] + sigma2 = Nordered[83] + print("Range for Ntotal is ",sigma1-Nbar,Nbar,sigma2-Nbar) + + # constructs intervals - does this on a per-z basis + # first sorts over the axis of different simulations + zordered = np.sort(pzs,axis=0) + pzlow = zordered[15,:] + pzhigh = zordered[83,:] + + # make un-normalised plots + plt.figure() + plt.xlabel("z") + plt.ylabel("N(z) per year") + plt.xlim(0,5) + + themax = np.max(pzs) + plt.ylim(0,themax*scale) + for i in np.arange(Nparams): + plt.plot(zvals,pzs[i,:]*scale,color="grey",linestyle="-") + + plt.plot(zvals,mean*scale,color="black",linestyle="-",linewidth=2,label="Simulation mean") + + plt.legend() + plt.tight_layout() + plt.savefig(label+"_pz.png") + plt.close() + + # prints some summary statistics in z-space + # calculates mean z + zbar = zvals * mean / np.sum(mean) + last=0. + tot = np.sum(mean) + for z in np.arange(5)+1: + OK = np.where(zvals < z) + Nthis = np.sum(mean[OK]) + N = Nthis -last + print("FRBs from ",z-1," to ",z,": ",N/tot," %") + last = Nthis + + return pzlow*scale,mean*scale,pzhigh*scale + +def make_pdm_plots(dmvals,pdms,label): + """ + Make plots of p(DM) for each systematic simulation + """ + + Nparams,NDM = pdms.shape + + # this scales from the "per z bin" to "per z", + # i.e. 
to make the units N per year per dz + scale = 1./(dmvals[1]-dmvals[0]) + + mean = np.sum(pdms,axis=0)/Nparams + + # make un-normalised plots + plt.figure() + plt.xlabel("DM [pc cm$^{-3}$]") + plt.ylabel("N(DM) per year") + plt.xlim(0,5000) + + themax = np.max(pdms) + plt.ylim(0,themax*scale) + + for i in np.arange(Nparams): + plt.plot(dmvals,pdms[i,:]*scale,color="grey",linestyle="-") + + plt.plot(dmvals,mean*scale,color="black",linestyle="-",linewidth=2,label="Simulation mean") + + plt.legend() + plt.tight_layout() + plt.savefig(label+"_pdm.png") + plt.close() + +main() diff --git a/papers/Scattering/visualise_mcmc.py b/papers/Scattering/visualise_mcmc.py index 65728cd5..e960bc26 100644 --- a/papers/Scattering/visualise_mcmc.py +++ b/papers/Scattering/visualise_mcmc.py @@ -105,7 +105,10 @@ def main(filenames,labels,prefix): for i in range(sample.shape[2]): final_sample[i].append(sample[burnin[j]:,:,i].flatten()) final_sample = np.array([np.hstack(final_sample[i]) for i in range(len(final_sample))]).T - + + print(final_sample.shape) + exit() + # - Changes prior to discard samples outside the specified prior range # - Implements the burnin using either the predefined burnin or a constant specified # e.g.: diff --git a/zdm/c_code.py b/zdm/c_code.py index 33aad05f..bf80d62b 100644 --- a/zdm/c_code.py +++ b/zdm/c_code.py @@ -1,11 +1,11 @@ """ Codes related to C """ import os import ctypes -from pkg_resources import resource_filename +import importlib.resources as resources from scipy import LowLevelCallable lib_path = os.path.join( - resource_filename('zdm', 'src'), 'zdmlib.so') + resources.files('zdm').joinpath('src'), 'zdmlib.so') if not os.path.isfile(lib_path): raise ImportError("You need to create zdmlib.so!!") @@ -15,4 +15,4 @@ lib.lognormal_dlog_c.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double)) -func_ll = LowLevelCallable(lib.lognormal_dlog_c) \ No newline at end of file +func_ll = LowLevelCallable(lib.lognormal_dlog_c) diff --git 
a/zdm/data/Surveys/CRAFT_ICS_1300.ecsv b/zdm/data/Surveys/CRAFT_ICS_1300.ecsv index eb715d29..c484862e 100644 --- a/zdm/data/Surveys/CRAFT_ICS_1300.ecsv +++ b/zdm/data/Surveys/CRAFT_ICS_1300.ecsv @@ -19,27 +19,28 @@ # - {name: DEC, datatype: string} # - {name: RA, datatype: string} # - {name: Z, datatype: float64} +# - {name: B, datatype: float64} # meta: !!omap # - {survey_data: '{"observing": {"NORM_FRB": 19, "TOBS": 165.506756761, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1}, # "telescope": {"BEAM": "ASKAP_1300", "DIAM": 12.0, "NBEAMS": 36, "NBINS": 5, "WDATA": 1}}'} # schema: astropy-2.0 -TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z -20180924B 336.0 362.4 40.5 1297.5 1.0 -74.40520121494983 277.20651893408893 21.1 9.0 4.4 0.864 0.91 2 0.59 -40:54:00.1 21:44:25.255 0.3214 -20181112A 336.0 589.0 40.2 1297.5 1.0 -63.30983709852826 290.87390392674445 19.3 9.0 4.4 0.864 0.1 0.8 0.023 -52:58:15.39 21:49:23.630 0.4755 -20190102C 336.0 364.5 57.3 1271.5 1.0 -37.519392943553534 300.95052401722796 14.0 9.0 4.4 0.864 0.076 1.25 0.027 -79:28:32.2845 21:29:39.70836 0.291 -20190608B 336.0 339.5 37.2 1271.5 1.0 -67.23724646562972 88.21721604792883 16.1 9.0 4.4 1.728 4.95 10.8 3.8 -07:53:53.6 22:16:04.7472 0.1178 -20190611B 336.0 322.2 57.6 1271.5 1.0 -37.59976893568559 300.9594501909617 9.3 9.0 4.4 1.728 0.076 1.59 0.03 -79:23:51.284 21:22:58.372 0.378 -20190711A 336.0 594.6 56.6 1271.5 1.0 -36.63629532677801 301.03976293370494 23.8 9.0 4.4 1.728 8.6 11.0 0.0076 -80:21:28.18 21:57:40.012 0.522 -20190714A 336.0 504.7 38.5 1271.5 1.0 -75.88144720209479 120.55805492153455 10.7 9.0 4.4 1.728 0.86 3.0 0.422 -13:01:14.36 12:15:55.081 0.2365 -20191228A 336.0 297.5 32.9 1271.5 1.0 -80.77822140033614 230.79855541687724 22.9 9.0 4.4 1.728 7.8 13.6 5.85 -29:35:37.85 22:57:43.269 0.243 -20210117A 336.0 730.0 34.4 1271.5 1.0 -75.7801432700954 164.65014968696988 27.1 9.0 4.4 1.182 1.24 3.58 0.25 -16:11:25.2 22:39:36.0 0.214 -20210214A 336.0 398.3 31.9 
1271.5 1.0 -65.65372930742399 91.72782990931984 11.6 9.0 4.4 1.182 3.5 3.5 -1 -05:49:56 00:27:43 -1.0 -20210407E 336.0 1785.3 154.0 1271.5 1.0 -35.320866474063905 114.6146256941771 19.1 9.0 4.4 1.182 0.743 1.62 0.09 27:03:30.24 05:14:36.202 -1.0 -20210912A 336.0 1234.5 30.9 1271.5 1.0 -80.16655338584705 235.42165843951094 31.7 9.0 4.4 1.182 0.095 1.61 0.048 -30:29:33.1 23:24:40.3 -1.0 -20211127I 336.0 234.83 42.5 1271.5 1.0 -81.68554744714709 125.94306109583985 37.9 9.0 4.4 1.182 0.229 0.48 0.025 -18:49:28.4 13:19:09.5 0.046946 -20220531A 336.0 727.0 70.0 1271.5 1.0 -56.509199987039224 296.83938685003784 9.7 9.0 4.4 1.182 11.0 11.0 -1 -60:17:48.2 19:38:50.2 -1.0 -20220610A 336.0 1458.1 31.0 1271.5 1.0 -78.89122591551634 250.56280818953536 29.8 9.0 4.4 1.182 1.07 2.0 0.521 -33:30:49.87 23:24:17.559 1.016 -20220918A 336.0 656.8 40.7 1271.5 1.0 -45.81623556809721 308.4134807482381 26.4 9.0 4.4 1.182 11.43 13.9 7.66 -70:48:40.5 01:10:22.1 0.45 -20230526A 336.0 316.4 50.0 1271.5 1.0 -62.994316139408156 318.1591960546148 22.1 9.0 4.4 1.182 2 2.7 1.16 -52:46:07.7 01:29:27.5 0.157 -20230718A 336.0 477.0 396.0 1271.5 1.0 -75.66933767869608 316.30925962515585 10.9 9.0 4.4 1.182 0.55 0.695 0.117 -41:00:12.8 08:30:27.1 -1.0 -20230731A 336.0 701.1 547.0 1271.5 1.0 -60.14372530213189 304.262204790738 16.6 9.0 4.4 1.182 0.65 2.66 0.45 -56:58:19.1 11:38:40.1 -1.0 +TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z B +20180924B 336.0 362.4 40.5 1297.5 1.0 -74.40520121494983 277.20651893408893 21.1 9.0 4.4 0.864 0.91 2 0.59 -40:54:00.1 21:44:25.255 0.3214 0.902 +20181112A 336.0 589.0 40.2 1297.5 1.0 -63.30983709852826 290.87390392674445 19.3 9.0 4.4 0.864 0.1 0.8 0.023 -52:58:15.39 21:49:23.630 0.4755 0.833 +20190102C 336.0 364.5 57.3 1271.5 1.0 -37.519392943553534 300.95052401722796 14.0 9.0 4.4 0.864 0.076 1.25 0.027 -79:28:32.2845 21:29:39.70836 0.291 0.910 +20190608B 336.0 339.5 37.2 1271.5 1.0 -67.23724646562972 88.21721604792883 16.1 9.0 4.4 1.728 
4.95 10.8 3.8 -07:53:53.6 22:16:04.7472 0.1178 0.789 +20190611B 336.0 322.2 57.6 1271.5 1.0 -37.59976893568559 300.9594501909617 9.3 9.0 4.4 1.728 0.076 1.59 0.03 -79:23:51.284 21:22:58.372 0.378 0.658 +20190711A 336.0 594.6 56.6 1271.5 1.0 -36.63629532677801 301.03976293370494 23.8 9.0 4.4 1.728 8.6 11.0 0.0076 -80:21:28.18 21:57:40.012 0.522 0.989 +20190714A 336.0 504.7 38.5 1271.5 1.0 -75.88144720209479 120.55805492153455 10.7 9.0 4.4 1.728 0.86 3.0 0.422 -13:01:14.36 12:15:55.081 0.2365 0.684 +20191228A 336.0 297.5 32.9 1271.5 1.0 -80.77822140033614 230.79855541687724 22.9 9.0 4.4 1.728 7.8 13.6 5.85 -29:35:37.85 22:57:43.269 0.243 0.652 +20210117A 336.0 730.0 34.4 1271.5 1.0 -75.7801432700954 164.65014968696988 27.1 9.0 4.4 1.182 1.24 3.58 0.25 -16:11:25.2 22:39:36.0 0.214 0.857 +20210214A 336.0 398.3 31.9 1271.5 1.0 -65.65372930742399 91.72782990931984 11.6 9.0 4.4 1.182 3.5 3.5 -1 -05:49:56 00:27:43 -1.0 0.702 +20210407E 336.0 1785.3 154.0 1271.5 1.0 -35.320866474063905 114.6146256941771 19.1 9.0 4.4 1.182 0.743 1.62 0.09 27:03:30.24 05:14:36.202 -1.0 0.781 +20210912A 336.0 1234.5 30.9 1271.5 1.0 -80.16655338584705 235.42165843951094 31.7 9.0 4.4 1.182 0.095 1.61 0.048 -30:29:33.1 23:24:40.3 -1.0 0.655 +20211127I 336.0 234.83 42.5 1271.5 1.0 -81.68554744714709 125.94306109583985 37.9 9.0 4.4 1.182 0.229 0.48 0.025 -18:49:28.4 13:19:09.5 0.046946 0.954 +20220531A 336.0 727.0 70.0 1271.5 1.0 -56.509199987039224 296.83938685003784 9.7 9.0 4.4 1.182 11.0 11.0 -1 -60:17:48.2 19:38:50.2 -1.0 0.324 +20220610A 336.0 1458.1 31.0 1271.5 1.0 -78.89122591551634 250.56280818953536 29.8 9.0 4.4 1.182 1.07 2.0 0.521 -33:30:49.87 23:24:17.559 1.016 0.990 +20220918A 336.0 656.8 40.7 1271.5 1.0 -45.81623556809721 308.4134807482381 26.4 9.0 4.4 1.182 11.43 13.9 7.66 -70:48:40.5 01:10:22.1 0.45 0.685 +20230526A 336.0 316.4 50.0 1271.5 1.0 -62.994316139408156 318.1591960546148 22.1 9.0 4.4 1.182 2 2.7 1.16 -52:46:07.7 01:29:27.5 0.157 0.767 +20230718A 336.0 477.0 396.0 1271.5 
1.0 -75.66933767869608 316.30925962515585 10.9 9.0 4.4 1.182 0.55 0.695 0.117 -41:00:12.8 08:30:27.1 -1.0 0.834 +20230731A 336.0 701.1 547.0 1271.5 1.0 -60.14372530213189 304.262204790738 16.6 9.0 4.4 1.182 0.65 2.66 0.45 -56:58:19.1 11:38:40.1 -1.0 0.625 diff --git a/zdm/data/Surveys/CRAFT_ICS_1632.ecsv b/zdm/data/Surveys/CRAFT_ICS_1632.ecsv index 90429980..b8a60e45 100644 --- a/zdm/data/Surveys/CRAFT_ICS_1632.ecsv +++ b/zdm/data/Surveys/CRAFT_ICS_1632.ecsv @@ -19,11 +19,14 @@ # - {name: DEC, datatype: string} # - {name: RA, datatype: string} # - {name: Z, datatype: float64} +# - {name: B, datatype: float64} # meta: !!omap # - {survey_data: '{"observing": {"NORM_FRB": 3, "TOBS": 50.866971336, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1}, # "telescope": {"BEAM": "ASKAP_1632", "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5}}'} # schema: astropy-2.0 -TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z -20211212A 336.0 206.0 27.1 1631.5 1.0 -61.10615576177682 118.0697870677333 12.8 9.0 4.4 1.182 2.1 5.628 1.8 01:40:36.8 10:30:40.7 0.0715 -20220105A 336.0 583.0 22.0 1631.5 1.0 -40.39508677874226 124.21912132806385 9.8 9.0 4.4 1.182 0.95 2.25 0.43 22:27:57.8 13:55:13.01 0.2785 -20221106A 336.0 343.8 34.8 1631.5 1.0 -81.71820282415577 41.73623647983608 19.5 9.0 4.4 1.182 5.33 6.895 0.182 -25:34:10.5166 03:46:49.0982 0.204 +TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z B +DUMMY 336.0 206.0 27.1 1631.5 1.0 -61.10615576177682 118.0697870677333 12.8 9.0 4.4 1.182 2.1 5.628 1.8 01:40:36.8 10:30:40.7 -1 0.169 +DUMMY 336.0 206.0 27.1 1631.5 1.0 -61.10615576177682 118.0697870677333 12.8 9.0 4.4 1.182 2.1 5.628 1.8 01:40:36.8 10:30:40.7 -1 0.169 +20211212A 336.0 206.0 27.1 1631.5 1.0 -61.10615576177682 118.0697870677333 12.8 9.0 4.4 1.182 2.1 5.628 1.8 01:40:36.8 10:30:40.7 0.0715 0.169 +20220105A 336.0 583.0 22.0 1631.5 1.0 -40.39508677874226 124.21912132806385 9.8 9.0 4.4 1.182 0.95 2.25 0.43 22:27:57.8 13:55:13.01 0.2785 
0.557 +20221106A 336.0 343.8 34.8 1631.5 1.0 -81.71820282415577 41.73623647983608 19.5 9.0 4.4 1.182 5.33 6.895 0.182 -25:34:10.5166 03:46:49.0982 0.204 0.678 diff --git a/zdm/data/Surveys/CRAFT_ICS_892.ecsv b/zdm/data/Surveys/CRAFT_ICS_892.ecsv index a785ab84..9b7875aa 100644 --- a/zdm/data/Surveys/CRAFT_ICS_892.ecsv +++ b/zdm/data/Surveys/CRAFT_ICS_892.ecsv @@ -19,30 +19,31 @@ # - {name: DEC, datatype: string} # - {name: RA, datatype: string} # - {name: Z, datatype: float64} +# - {name: B, datatype: float64} # meta: !!omap # - {survey_data: '{"observing": {"NORM_FRB": 15, "TOBS": 317.293429793, "MAX_IDT": 4096, "MAX_IW": 12, "MAXWMETH": 1}, # "telescope": {"BEAM": "ASKAP_892", "DIAM": 12.0, "NBEAMS": 1, "NBINS": 5}}'} # schema: astropy-2.0 -TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z -20191001A 336.0 506.92 44.2 919.5 1.0 -61.65860153281646 292.33820148500735 62.0 9.0 4.4 1.728 5.3 13.468 4.52 -54:44:54 21:33:24 0.23 -20200430A 336.0 380.1 27.0 864.5 1.0 -50.423867191684614 126.69623499918922 16.0 9.0 4.4 1.728 11 22.68 6.5 12:22:34.007 15:18:49.581 0.161 -20200627 336.0 294.0 40.0 920.5 1.0 -75.58714298569731 274.1944222920371 11.0 9.0 4.4 1.728 2.9 2.9 -1 -39:29:05.0 21:46:47.0 -1.0 -20200906A 336.0 577.8 35.9 864.5 1.0 -74.33639680729853 87.47153687885996 19.2 9.0 4.4 1.728 0.057 0.128 0.0315 -14:04:59.9136 03:33:58.9364 0.36879 -20210320C 336.0 384.8 42.2 864.5 1.0 -78.99989352050102 126.81646106581941 15.3 9.0 4.4 1.728 0.381 0.884 0.193 -16:09:05.1 13:37:50.08605 0.28 -20210807 336.0 251.9 121.2 920.5 1.0 -62.769666370514145 138.57666701461682 47.1 9.0 4.4 1.182 10.0 10.0 -1 -00:45:44.5 19:56:53.144 0.12969 -20210809 336.0 651.5 190.1 920.5 1.0 -61.10280652957891 133.77599069157813 16.8 9.0 4.4 1.182 14.2 14.2 -1 01:19:43.5 18:04:37.7 -1.0 -20211203C 336.0 636.2 63.4 920.5 1.0 -85.70736537183255 294.09236840807733 14.2 9.0 4.4 1.182 12.4 25.449 1.66 -31:22:04.0 13:37:52.8 0.34386 -20220501C 336.0 449.5 30.6 863.5 1.0 
-79.3478690413585 245.52087739106628 16.1 9.0 4.4 1.182 6.1 6.9 0.35 -32:27:41.4 23:29:46.8 0.381 -20220725A 336.0 290.4 30.7 920.5 1.0 -77.2090157833712 260.2992248635945 12.7 9.0 4.4 1.182 3.43 8.016 2.29 -36:07:51.2 23:33:32.1 0.1926 -20230521A 336.0 640.2 41.8 831.5 1.0 -63.802137907452966 143.6440869115044 15.2 9.0 4.4 1.182 11.0 11.0 -1 -02:23:09.6 21:51:00.3 -1.0 -20230708A 336.0 411.5 50.2 920.5 1.0 -61.245461788949015 294.23527074045234 31.5 9.0 4.4 1.182 1.14 23.578 0.24 -55:22:59.4 20:12:56.9 0.105 -20230902A 336.0 440.1 34.3 831.5 1.0 -68.28916777429704 320.20535326767765 11.8 9.0 4.4 1.182 0.229 0.678 0.123 -47:33:45.5 03:29:28.1 0.3619 -20231006 336.0 509.7 67.5 863.5 1.0 -52.222367446784375 298.13333823887274 15.2 9.0 4.4 1.182 6.5 6.5 -1 -64:38:56.2 19:44:00.8 -1.0 -20231226A 336.0 329.9 38.0 863.5 1.0 -56.65392198775274 118.39332419712602 17.8 9.0 4.4 1.182 5.3 9.72 0.1 06:07:45.9 10:21:07.6 0.1569 -20240201A 336.0 374.5 38 920.5 1.0 67.29517415215354 347.08291493850965 13.9 9.0 4.4 1.182 3.05 3.901 0.78 10:01:49.1 13:54:49 0.042729 -20240208A 336.0 260.2 98 863.5 1.0 -52.0180286372879 115.89194578232592 12.1 9.0 4.4 1.182 1.7 10 1.35 10:36:46.5 -00:33:50 -1 -20240210A 336.0 283.73 31 863.5 1.0 -36.19982407564526 55.957809969522174 11.6 9.0 4.4 1.182 0.305 1.539 0.1 00:39:55.0 -27:39:35 0.023686 -20240304A 336.0 652.6 30 832.5 1.0 22.379345521533207 214.02134339859478 12.3 9.0 4.4 1.182 8.57 19 2.51 09:05:19.3 -16:13:42 -1.0 -20240310A 336.0 601.8 36 902.5 1.0 -31.606263057321083 192.9022403619661 19.1 9.0 4.4 1.182 4.19 13.493 2.23 01:10:57.7 -44:24:05 0.127 -20240318A 336.0 256.4 37 902.5 1.0 69.83146076143834 337.11332396083634 13.2 9.0 4.4 1.182 0.286 0.837 0.163 10:01:50.6 37:36:49 -1.0 +TNS BW DM DMG FBAR FRES Gb Gl SNR SNRTHRESH THRESH TRES WIDTH XW95 TAU DEC RA Z B +20191001A 336.0 506.92 44.2 919.5 1.0 -61.65860153281646 292.33820148500735 62.0 9.0 4.4 1.728 5.3 13.468 4.52 -54:44:54 21:33:24 0.23 0.728 +20200430A 336.0 380.1 27.0 864.5 
1.0 -50.423867191684614 126.69623499918922 16.0 9.0 4.4 1.728 11 22.68 6.5 12:22:34.007 15:18:49.581 0.161 0.828 +20200627 336.0 294.0 40.0 920.5 1.0 -75.58714298569731 274.1944222920371 11.0 9.0 4.4 1.728 2.9 2.9 -1 -39:29:05.0 21:46:47.0 -1.0 0.896 +20200906A 336.0 577.8 35.9 864.5 1.0 -74.33639680729853 87.47153687885996 19.2 9.0 4.4 1.728 0.057 0.128 0.0315 -14:04:59.9136 03:33:58.9364 0.36879 0.762 +20210320C 336.0 384.8 42.2 864.5 1.0 -78.99989352050102 126.81646106581941 15.3 9.0 4.4 1.728 0.381 0.884 0.193 -16:09:05.1 13:37:50.08605 0.28 0.334 +20210807 336.0 251.9 121.2 920.5 1.0 -62.769666370514145 138.57666701461682 47.1 9.0 4.4 1.182 10.0 10.0 -1 -00:45:44.5 19:56:53.144 0.12969 0.709 +20210809 336.0 651.5 190.1 920.5 1.0 -61.10280652957891 133.77599069157813 16.8 9.0 4.4 1.182 14.2 14.2 -1 01:19:43.5 18:04:37.7 -1.0 0.966 +20211203C 336.0 636.2 63.4 920.5 1.0 -85.70736537183255 294.09236840807733 14.2 9.0 4.4 1.182 12.4 25.449 1.66 -31:22:04.0 13:37:52.8 0.34386 0.958 +20220501C 336.0 449.5 30.6 863.5 1.0 -79.3478690413585 245.52087739106628 16.1 9.0 4.4 1.182 6.1 6.9 0.35 -32:27:41.4 23:29:46.8 0.381 0.800 +20220725A 336.0 290.4 30.7 920.5 1.0 -77.2090157833712 260.2992248635945 12.7 9.0 4.4 1.182 3.43 8.016 2.29 -36:07:51.2 23:33:32.1 0.1926 0.216 +20230521A 336.0 640.2 41.8 831.5 1.0 -63.802137907452966 143.6440869115044 15.2 9.0 4.4 1.182 11.0 11.0 -1 -02:23:09.6 21:51:00.3 -1.0 0.883 +20230708A 336.0 411.5 50.2 920.5 1.0 -61.245461788949015 294.23527074045234 31.5 9.0 4.4 1.182 1.14 23.578 0.24 -55:22:59.4 20:12:56.9 0.105 0.664 +20230902A 336.0 440.1 34.3 831.5 1.0 -68.28916777429704 320.20535326767765 11.8 9.0 4.4 1.182 0.229 0.678 0.123 -47:33:45.5 03:29:28.1 0.3619 0.736 +20231006 336.0 509.7 67.5 863.5 1.0 -52.222367446784375 298.13333823887274 15.2 9.0 4.4 1.182 6.5 6.5 -1 -64:38:56.2 19:44:00.8 -1.0 0.788 +20231226A 336.0 329.9 38.0 863.5 1.0 -56.65392198775274 118.39332419712602 17.8 9.0 4.4 1.182 5.3 9.72 0.1 06:07:45.9 10:21:07.6 0.1569 
def plot_repeaters_zdist(g, prefix="", zmax=2):
    """
    Plots the redshift distribution of sources for a repeat grid.

    Produces and saves a figure (prefix + "repeater_pz.png") showing, as a
    function of z: the total rate, single bursts, repeaters, bursts from
    repeaters, and unique sources (singles + repeaters).

    Args:
        g: a single repeat grid object (must provide rates, exact_singles,
            exact_reps, exact_rep_bursts, Rc, zvals, survey and state
            attributes — TODO confirm against repeat_grid)
        prefix (str): prefix for the output filename
        zmax (float): upper limit of the plotted z range
    """
    plt.figure()

    # total rate, weighted by observation time and the log-constant lC
    pztot = np.sum(g.rates, axis=1) * g.survey.TOBS * 10**g.state.FRBdemo.lC
    # per-field repeater quantities, scaled by the repeater constant Rc
    # and the number of survey fields
    pzsingles = np.sum(g.exact_singles, axis=1) * g.Rc * g.survey.Nfields
    pzreps = np.sum(g.exact_reps, axis=1) * g.Rc * g.survey.Nfields
    pzbursts = np.sum(g.exact_rep_bursts, axis=1) * g.Rc * g.survey.Nfields
    # a "unique source" is either a single burst or a repeating source
    pzsources = pzsingles + pzreps

    plt.plot(g.zvals, pztot, label="Total", linestyle="-", linewidth=2)
    plt.plot(g.zvals, pzsingles, label="Single bursts", linestyle="--")
    plt.plot(g.zvals, pzreps, label="Repeaters", linestyle="-")
    plt.plot(g.zvals, pzbursts, label="Bursts from repeaters", linestyle="-.")
    plt.plot(g.zvals, pzsources, label="Unique sources", linestyle=":")

    plt.xlabel('z')
    plt.ylabel('p(z) [a.u.]')
    plt.xlim(0, zmax)
    plt.ylim(0, None)
    plt.legend()
    plt.tight_layout()
    plt.savefig(prefix + "repeater_pz.png")
    plt.close()
self.fractions = np.sum( diff --git a/zdm/iteration.py b/zdm/iteration.py index 45b2e286..39465dda 100644 --- a/zdm/iteration.py +++ b/zdm/iteration.py @@ -13,107 +13,8 @@ import scipy.stats as st from zdm import repeat_grid as zdm_repeat_grid -# internal counter -NCF=0 -def get_likelihood(pset,grid,survey,norm=True,psnr=True): - """ Returns log-likelihood for parameter set - norm:normalizatiom - psnr: probability of snr (S/R) - """ - #changed this so that calc_likelihood doList=True, helps in debugging while checking likelihoods for different param values - if isinstance(grid,list): - if not isinstance(survey,list): - raise ValueError("Grid is a list, survey is not...") - ng=len(grid) - else: - ng=1 - ns=1 - if ng==1: - update_grid(grid,pset,survey) - if survey.nD==1: - llsum,lllist,expected=calc_likelihoods_1D(grid,survey,norm=norm,psnr=True,dolist=1) - elif survey.nD==2: - llsum,lllist,expected=calc_likelihoods_2D(grid,survey,norm=norm,psnr=True,dolist=1) - elif survey.nD==3: - # mixture of 1 and 2D samples. NEVER calculate Pn twice! - llsum1,lllist1,expected1=calc_likelihoods_1D(grid,survey,norm=norm,psnr=True,dolist=1) - llsum2,lllist2,expected2=calc_likelihoods_2D(grid,survey,norm=norm,psnr=True,dolist=1,Pn=False) - llsum = llsum1+llsum2 - # adds log-likelihoods for psnrs, pzdm, pn - # however, one of these Pn *must* be zero by setting Pn=False - lllist = [lllist1[0]+lllist2[0], lllist1[1]+lllist2[1], lllist1[2]+lllist2[2]] #messy! - expected = expected1 #expected number of FRBs ignores how many are localsied - else: - raise ValueError("Unknown code ",survey.nD," for dimensions of survey") - return llsum,lllist,expected - #negative loglikelihood is NOT returned, positive is. 
- else: - loglik=0 - for i,g in enumerate(grid): - s=survey[i] - update_grid(g,pset,s) - if s.nD==1: - llsum,lllist,expected=calc_likelihoods_1D(g,s,norm=norm,psnr=True,dolist=1) - elif s.nD==2: - llsum,lllist,expected=calc_likelihoods_2D(g,s,norm=norm,psnr=True,dolist=1) - elif s.nD==3: - # mixture of 1 and 2D samples. NEVER calculate Pn twice! - llsum1,lllist1,expected1=calc_likelihoods_1D(g,s,norm=norm,psnr=True,dolist=1) - llsum2,lllist2,expected2=calc_likelihoods_2D(g,s,norm=norm,psnr=True,dolist=1,Pn=False) - llsum = llsum1+llsum2 - # adds log-likelihoods for psnrs, pzdm, pn - # however, one of these Pn *must* be zero by setting Pn=False - lllist = [lllist1[0]+lllist2[0], lllist1[1]+lllist2[1], lllist1[2]+lllist2[2]] - expected = expected1 #expected number of FRBs ignores how many are localsied - else: - raise ValueError("Unknown code ",s.nD," for dimensions of survey") - loglik += llsum - return loglik,lllist,expected - #negative loglikelihood is NOT returned, positive is. - - -def maximise_likelihood(grid,survey): - # specifies which set of parameters to pass to the dmx function - - if isinstance(grid,list): - if not isinstance(survey,list): - raise ValueError("Grid is a list, survey is not...") - ng=len(grid) - ns=len(survey) - if ng != ns: - raise ValueError("Number of grids and surveys not equal.") - pset=set_defaults(grid[0]) # just chooses the first one - else: - ng=1 - ns=1 - pset=set_defaults(grid) - - # fixed alpha=1.6 (Fnu ~ nu**-alpha), Emin 10^30 erg, sfr_n > 0 - eq_cons = {'type': 'eq', - 'fun': lambda x: np.array([x[2]-1.6,x[0]-30]), - 'jac': lambda x: np.array([[0,0,1.0,0,0,0,0,0],[1,0,0,0,0,0,0,0]]) - } - - # holds sfr_n >0 - # also holds Emax > 1e40 - ineq_cons = {'type': 'ineq', - 'fun': lambda x: np.array([x[4],x[1]-40]), - 'jac': lambda x: np.array([[0,0,0,0,1,0,0,0],[0,1,0,0,0,0,0,0]]) - } - - bounds=((None,None),(39,44),(0,5),(-3,-0.1),(0,3),(0,3),(0,2),(None,None)) - - # these 'arguments' get sent to the likelihood function - 
def get_log_likelihood(grid, s, norm=True, psnr=True, Pn=False, pNreps=True, ptauw=False, pwb=False):
    """
    Returns the total log10 likelihood for the grid given the survey.

    Dispatches to calc_likelihoods_1D / calc_likelihoods_2D according to the
    survey dimensionality codes:
        1: DM-only (1D) samples
        2: localised z,DM (2D) samples
        3: a mixture of 1D and 2D samples — P(N) (and repeater counts) must
           only ever be counted once, so they are disabled on the second call.

    Repeat grids (repeat_grid.repeat_Grid) are handled separately: the total
    is the sum of the repeater term (using s.nDr) and the single-burst term
    (using s.nDs). Plain grids use s.nD.

    Args:
        grid: zdm grid object, or a repeat_grid.repeat_Grid
        s: survey object
        norm (bool): normalise the z,DM distribution
        psnr (bool): include the probability of the observed SNRs
        Pn (bool): include the probability of the observed number of FRBs
        pNreps (bool): include the probability of repeater burst counts
        ptauw (bool): include p(tau, intrinsic width | total width)
        pwb (bool): include explicit width/beam probabilities and p(snr | b,w)

    Returns:
        llsum (float): total log10 likelihood
    """
    if isinstance(grid, zdm_repeat_grid.repeat_Grid):
        # Repeaters
        if s.nDr == 1:
            llsum = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                        dolist=0, grid_type=1, Pn=Pn, pNreps=pNreps,
                                        ptauw=ptauw, pwb=pwb)
        elif s.nDr == 2:
            llsum = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                        dolist=0, grid_type=1, Pn=Pn, pNreps=pNreps,
                                        ptauw=ptauw, pwb=pwb)
        elif s.nDr == 3:
            # mixture of 1D and 2D samples: never calculate Pn/pNreps twice
            # BUGFIX: was assigned to "llsum11", leaving llsum1 undefined below
            llsum1 = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=1, Pn=Pn, pNreps=pNreps,
                                         ptauw=ptauw, pwb=pwb)
            llsum2 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=1, Pn=False, pNreps=False,
                                         ptauw=ptauw, pwb=pwb)
            llsum = llsum1 + llsum2
        else:
            print("Implementation is only completed for nD 1-3.")
            exit()

        # Singles
        if s.nDs == 1:
            llsum1 = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=2, Pn=Pn, ptauw=ptauw,
                                         pwb=pwb)
            llsum += llsum1
        elif s.nDs == 2:
            llsum1 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=2, Pn=Pn, ptauw=ptauw,
                                         pwb=pwb)
            llsum += llsum1
        elif s.nDs == 3:
            # mixture: Pn only counted once (disabled for the 2D call)
            llsum1 = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=2, Pn=Pn, ptauw=ptauw,
                                         pwb=pwb)
            llsum2 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, grid_type=2, Pn=False, ptauw=ptauw,
                                         pwb=pwb)
            llsum = llsum + llsum1 + llsum2
        else:
            print("Implementation is only completed for nD 1-3.")
            exit()
    else:
        if s.nD == 1:
            llsum = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                        dolist=0, Pn=Pn, ptauw=ptauw, pwb=pwb)
        elif s.nD == 2:
            llsum = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                        dolist=0, Pn=Pn, ptauw=ptauw, pwb=pwb)
        elif s.nD == 3:
            # mixture of 1D and 2D samples: never calculate Pn twice
            llsum1 = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, Pn=Pn, ptauw=ptauw,
                                         pwb=pwb)
            llsum2 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr,
                                         dolist=0, Pn=False, ptauw=ptauw,
                                         pwb=pwb)
            llsum = llsum1 + llsum2
        else:
            print("Implementation is only completed for nD 1-3.")
            exit()

    return llsum
psnr=psnr, - dolist=1, Pn=Pn, ptauw=ptauw) - llsum2, lllist2, expected2 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr, - dolist=1, Pn=False, ptauw=ptauw) + llsum1 = calc_likelihoods_1D(grid, s, norm=norm, psnr=psnr, + dolist=0, Pn=Pn, ptauw=ptauw, + pwb=pwb) + llsum2 = calc_likelihoods_2D(grid, s, norm=norm, psnr=psnr, + dolist=0, Pn=False, ptauw=ptauw, + pwb=pwb) llsum = llsum1 + llsum2 else: print("Implementation is only completed for nD 1-3.") @@ -191,7 +94,7 @@ def get_log_likelihood(grid, s, norm=True, psnr=True, Pn=False, pNreps=True, pta return llsum def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, - Pn=False,pNreps=True,ptauw=False,dolist=0,grid_type=0): + Pn=False,pNreps=True,ptauw=False,pwb=False,dolist=0,grid_type=0): """ Calculates 1D likelihoods using only observedDM values Here, Zfrbs is a dummy variable allowing it to be treated like a 2D function for purposes of calling. @@ -218,11 +121,41 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, True: calculate probability of intrinsic width and scattering *given* total width False: do not calculate this - dolist - 2: llsum,lllist [Pzdm,Pn,Ps],expected,longlist - longlist holds the LL for each FRB - 5: llsum,lllist,expected,[0.,0.,0.,0.] 
- + pwb: + True: calculate probability of specific width and beam values, and psnr | bw + False: do not calculate this; simply sum psnr over all possible p(b,w) + + dolist: + 0: returns total log10 likelihood llsum only [float] + 1: also returns a dict of individual statistical contributions to log-likelihood + 2: also returns the above for every FRB individually + Structure of llsum and longlist: + ["pzDM"]["pDM"] + ["pN"] + ["Nexpected"] (only for + ["ptauw"] + ["pbar"] + ["piw"] + ["ptau"] + ["w_indices"] + ["pbw"] + ["pb"] + ["pw"] + ["pbgw"] + ["pwgb"] + ["pbw"] + ["psnr_gbw"] + ["psnrbw"] + lllist: + longlist [list of lists]: + Pn: float + zDM: PzDM + zDM extras: P(z) P(DM), P(DM|z), P(z|DM) + ptauw: p(tau|wtot), p(wtot), p(snr|wtot) + psnr: p_snr (over all beams/widths) + pwb: p(snr|b,w,z,dm), p(snr,b,w|z,DM), p(b|zDM), p(w|zDM), p(b|w,zDM), p(w|b,zDM), p(wb|zDM) + else: returns nothing (actually quite useful behaviour!) + grid_type: 0: normal zdm grid 1: assumes the grid passed is a repeat_grid.zdm_repeat_grid object and calculates likelihood for repeaters @@ -261,23 +194,19 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, zvals=grid.zvals # start by collapsing over z - # TODO: this is slow - should collapse only used columns pdm=np.sum(rates,axis=0) if np.sum(pdm) == 0: if dolist==0: return -np.inf elif dolist==1: - return -np.inf, None, None + return -np.inf, None elif dolist==2: - return -np.inf, None, None, None - elif dolist==5: #for compatibility with 2D likelihood calculation - return -np.inf, None, None,[0.,0.,0.,0.] 
+ return -np.inf, None, None if norm: global_norm=np.sum(pdm) log_global_norm=np.log10(global_norm) - #pdm /= global_norm else: log_global_norm=0 @@ -298,13 +227,45 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, for i in range(len(idms1)): pvals[i]=np.sum(pdm[iweights[i]]*dm_weights[i]) - # holds individual FRB data - if dolist == 2: - longlist=np.log10(pvals)-log_global_norm - # sums over all FRBs for total likelihood llsum=np.sum(np.log10(pvals))-log_global_norm*DMobs.size - lllist=[llsum] + + # initialise dicts to return detailed log-likelihood information + longlist={} + longlist["pzDM"]={} + longlist["pzDM"]["pdm"]=np.log10(pvals)-log_global_norm + + lllist={} + lllist["pzDM"]={} # pz,DM + lllist["pzDM"]["pdm"]=llsum + + ######################################################## + # calculates a p(z) distribution for each FRB, allowing other + # distributions to be weighted by p(z). Shape is + + # ensures a normalised p(z) distribution for each FRB (shape: nz,nDM) + noztau_in_noz=[] + + if grid.state.MW.sigmaDMG == 0.0 and grid.state.MW.sigmaHalo == 0.0: + # here, each FRB only has two DM weightings (linear interolation) + zidms1,zidms2,zdkdms1,zdkdms2 = grid.get_dm_coeffs(DMobs) + tomult = rates[:,zidms1]*zdkdms1 + rates[:,zidms2]*zdkdms2 + # normalise to a p(z) distribution for each FRB + tomult /= np.sum(tomult,axis=0) + + else: + dm_weights, iweights = calc_DMG_weights(DMobs, survey.DMhalos[nozlist], + survey.DMGs[nozlist], dmvals, grid.state.MW.sigmaDMG, + grid.state.MW.sigmaHalo, grid.state.MW.logu) + # here, each FRB has many DM weightings + tomult = np.zeros([grid.zvals.size,len(iweights)]) + # construct a p(z) distribution. 
+ for iFRB,indices in enumerate(iweights): + # we construct a p(z) vector for each FRB + indices = indices[0] + tomult[:,iFRB] = np.sum(rates[:,indices] * dm_weights[iFRB],axis=1) + # normalise to a p(z) distribution for each FRB + tomult /= np.sum(tomult,axis=0) ########### Calculation of p((Tau,w)) ############## if ptauw: @@ -321,33 +282,15 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, Tauobs = survey.TAUs[noztaulist] Iwobs = survey.IWIDTHs[noztaulist] ztDMobs=survey.DMEGs[noztaulist] + + # gets indices of noztaulist within nozlist + tz_tomult = tomult[:,:inoztaulist] # This could all be precalculated within the survey. iws1,iws2,dkws1,dkws2 = survey.get_w_coeffs(Wobs) # total width in survey width bins itaus1,itaus2,dktaus1,dktaus2 = survey.get_internal_coeffs(Tauobs) # scattering time tau iis1,iis2,dkis1,dkis2 = survey.get_internal_coeffs(Iwobs) # intrinsic width - # ensures a normalised p(z) distribution for each FRB (shape: nz,nDM) - if grid.state.MW.sigmaDMG == 0.0 and grid.state.MW.sigmaHalo == 0.0: - # here, each FRB only has two DM weightings (linear interolation) - ztidms1,ztidms2,ztdkdms1,ztdkdms2 = grid.get_dm_coeffs(ztDMobs) - tomult = rates[:,ztidms1]*ztdkdms1 + rates[:,ztidms2]*ztdkdms2 - # normalise to a p(z) distribution for each FRB - tomult = (tomult.T/np.sum(tomult,axis=0)).T - else: - dm_weights, iweights = calc_DMG_weights(DMobs, survey.DMhalos[noztaulist], - survey.DMGs[noztaulist], dmvals, grid.state.MW.sigmaDMG, - grid.state.MW.sigmaHalo, grid.state.MW.logu) - # here, each FRB has many DM weightings - tomult = np.zeros([grid.zvals.size,len(iweights)]) - # construct a p(z) distribution. 
- for iFRB,indices in enumerate(iweights): - # we construct a p(z) vector for each FRB - indices = indices[0] - tomult[:,iFRB] = np.sum(rates[:,indices] * dm_weights[iFRB],axis=1) - # normalise to a p(z) distribution for each FRB - tomult /= np.sum(tomult,axis=0) - # vectors below are [nz,NFRB] in length piws = survey.pws[:,iis1,iws1]*dkis1*dkws1 \ + survey.pws[:,iis1,iws2]*dkis1*dkws2 \ @@ -359,22 +302,9 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, + survey.ptaus[:,itaus2,iws1]*dktaus1*dkws1 \ + survey.ptaus[:,itaus2,iws2]*dktaus1*dkws2 - if False: - plt.figure() - plt.xlabel("z") - plt.ylabel("ptau") - plt.yscale("log") - NFRB = len(Tauobs) - for i in np.arange(NFRB): - plt.plot(zvals,ptaus[:,i],label=str(Tauobs[i])[0:4]+", "+str(Iwobs[i])[0:4]+", "+str(Wobs[i])[0:4]) - plt.text(1,0.05,str(10**survey.slogmean)[0:5]+" "+str(survey.slogsigma)[0:5]) - plt.legend() - plt.show() - exit() - # we now multiply by the z-dependencies - ptaus *= tomult - piws *= tomult + ptaus *= zt_tomult + piws *= zt_tomult # sum down the redshift axis to get sum p(tau,w|z)*p(z) ptaus = np.sum(ptaus,axis=0) @@ -384,7 +314,6 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, bad2 = np.where(ptaus==0) piws[bad1] = 1e-10 ptaus[bad2] = 1e-10 - pbars = 0.5*ptaus + 0.5*piws # take the mean of these two llptw = np.sum(np.log10(ptaus)) @@ -397,10 +326,21 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, # hence, we add half of eavh value here #llsum += 0.5*llpiw #llsum += 0.5*llptw - llsum += np.sum(np.log10(pbars)) + llpbar = np.sum(np.log10(pbars)) + llsum += llpbar - lllist.append(llptw) - lllist.append(llpiw) + lllist["ptauw"]={} + # appending total of each to log0-likelihood list + lllist["ptauw"]["piw"]=llpiw + lllist["ptauw"]["ptw"]=llptw + lllist["ptauw"]["pbar"]=llpbar + + # appending individual FRB data to long long list + longlist["ptauw"]={} + longlist["ptauw"]["pbar"]=np.log10(pbars) + 
longlist["ptauw"]["piw"]=np.log10(piws) + longlist["ptauw"]["ptau"]=np.log10(ptaus) + longlist["ptauw"]["w_indices"]=inoztaulist ############# Assesses total number of FRBs, P(N) ######### # TODO: make the grid tell you the correct nromalisation @@ -424,15 +364,15 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, if Pn==0: Nll=-1e10 - if dolist==0: - return Nll else: Nll=np.log10(Pn) - lllist.append(Nll) + + lllist["pN"]=Nll + lllist["Nexpected"]=expected llsum += Nll else: - lllist.append(0) - expected=0 + lllist["Nexpected"]=-1 + lllist["pN"]=0 # this is updated version, and probably should overwrite the previous calculations if psnr: @@ -467,6 +407,7 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, rs[j,i] = np.sum(grid.rates[j,iweights[i]] * dm_weights[i]) / np.sum(dm_weights[i]) # this has shape nz,nFRB - FRBs could come from any z-value + nb = survey.beam_b.size nw,nz,nfrb = Eths.shape zpsnr=np.zeros([nz,nfrb]) # numpy flattens this to the order of [z0frb0,z0f1,z0f2,...,z1f0,...] 
@@ -480,23 +421,14 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, # this variable keeps the normalisation of sums over p(b,w) as a function of z pbw_norm = 0 - if doplot: - # this will produce a plot of the p(SNR) given a particular width bin - # for a range of beam values as a function of z for the zeroeth FRB - plt.figure() - ax1 = plt.gca() - plt.xlabel("$z$") - plt.ylabel("p(snr | b,w,z)") - - # this will produce a plot of p(z) for all beam values at an - # arbitrary w-bin - plt.figure() - ax2 = plt.gca() - plt.xlabel("$z$") - plt.ylabel("p(b,w|z)") - if ptauw: + if ptauw and not pwb: # hold array representing p(w) dpbws = np.zeros([nw,nz,nfrb]) + + if pwb: + psnrbws = np.zeros([nb,nw,nz*nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + psnr_gbws = np.zeros([nb,nw,nz*nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + pbws = np.zeros([nb,nw,nz*nfrb]) # holds p(bw given z,dm) for each b,w, bin for i,b in enumerate(survey.beam_b): #iterate over the grid of weights @@ -521,48 +453,156 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, pbw_norm += dpbw zpsnr += differential*survey.beam_o[i]*usew - if ptauw: + if ptauw and not pwb: # record probability of this w summed over all beams for each FRB dpbws[j,:,:] += dpbw - if doplot and j==5: - #arbitrrily plots for FRB iFRB - iFRB=0 - # chooses an arbitrary width to plot at for j=5 - ax1.plot(grid.zvals,(differential/cumulative)[:,iFRB],label="b_"+str(b)[0:4]+"_w_"+str(j)) - ax2.plot(grid.zvals,dpbw[:,iFRB],label="b_"+str(b)[0:4]+"_w_"+str(j)) - - if doplot: - plt.sca(ax1) - plt.yscale('log') - plt.tight_layout() - plt.legend(fontsize=6) - plt.savefig("FRB1_psnr_given_bw.png") - plt.close() - - plt.sca(ax2) - plt.tight_layout() - plt.legend(fontsize=6) - plt.savefig("FRB1_pbw_given_z.png") - plt.close() - + if pwb: + # psnr given beam, width, z,dm + cumulative = cumulative.flatten() # first index is [0,0], next is [0,1] + differential = differential.flatten() + + OK = 
np.where(cumulative > 0)[0] + + if zwidths: + usew = usew[OK] + + psnr_gbws[i,j,OK] = differential[OK]/cumulative[OK] + + # psnr given beam, width, z,dm + psnrbws[i,j,OK] = differential[OK]*survey.beam_o[i]*usew + + # total probability of that p(w,b) + pbws[i,j,OK] = survey.beam_o[i]*usew*cumulative[OK] + + # calculate p(w) - if ptauw: + if ptauw and not pwb: # we would like to calculate \int p(w|z) p(z) dz # we begin by calculating p(w|z), below, by normalising for each z # normalise over all w values for each z # Q: should we calculate p(w|b,z) then multiply by p(b,w)? dpbws /= np.sum(dpbws,axis=0) temp = dpbws[iws1,:,inoztaulist] - # tomult is p(z) + # tomult is p(z) distribution temp *= tomult.T - pws = np.sum(temp,axis=1) + pws = np.sum(temp,axis=1) # sum in the linear domain - it's summing over p(z) bad = np.where(pws == 0.)[0] pws[bad] = 1.e-10 # prevents nans, but penalty is a bit arbitrary. llpws = np.sum(np.log10(pws)) llsum += llpws - lllist.append(llpws) + + # adds these to list of likelihood outputs + lllist["ptauw"]["pws"]=llpws + longlist["ptauw"]["pws"]=np.log10(pws) + # calculates all metrics: (psnr|b,w,z,DM), p(b,w | z,DM), p(w|z,DM), p(b|z,dM), p(w|b,z,DM), p(b|w,z,DM) + if pwb: + # each of these is probability calculated at each particular value of z + # to properly interpret, we should first calculate the probability within each z + # and at the last, multiply by p(z) + + # we have previously go "tomult": which is calculated *only* for ptauw + # this should be done for all cases, not just ptauw + + + psnrbws = psnrbws.reshape([nb,nw,nz,nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + psnr_gbws = psnr_gbws.reshape([nb,nw,nz,nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + pbws = pbws.reshape([nb,nw,nz,nfrb]) # holds p(bw given z,dm) for each b,w, bin + + pw_norm = np.sum(pbws,axis=0) # sums along b axis, giving p(w) + pb_norm = np.sum(pbws,axis=1) # sums along w axis, giving p(b) + pwb_norm = np.sum(pw_norm,axis=0) # sums along w axis 
after b axis, giving pbw norm for all FRBs + + psnrbw = np.zeros([nz,nfrb]) + psnr_gbw = np.zeros([nz,nfrb]) + pbw = np.zeros([nz,nfrb]) + pw = np.zeros([nz,nfrb]) + pb = np.zeros([nz,nfrb]) + + for i,b in enumerate(survey.beam_b): + for j,w in enumerate(grid.eff_weights): + # multiplies by the width and beam weights for that FRB. These are pre-calculated in the survey + # each component below is a vector over nfrb + psnrbw += psnrbws[i,j,:,:]*survey.frb_nozbweights[:,i]*survey.frb_nozwweights[:,j] # multiply last axis + psnr_gbw += psnr_gbws[i,j,:,:]*survey.frb_nozbweights[:,i]*survey.frb_nozwweights[:,j] # multiply last axis + pbw += pbws[i,j,:,:]*survey.frb_nozbweights[:,i]*survey.frb_nozwweights[:,j] # multiply last axis for all z + + # normalises pbw by normalised sum over all b,w. This gives dual p(b,w) for each FRB + # pwb_norm is 2D. pbw is 2D. Should work! + pbw = pbw / pwb_norm + psnrbw = psnrbw/pwb_norm + + # psnr_gbws needs no normalisation, provided weights in each dimension sum to unity. But we check here just to be sure + # the division is along the last (FRB) axis + psnr_gbw = psnr_gbw / (np.sum(survey.frb_nozbweights,axis=1) * np.sum(survey.frb_nozwweights,axis=1)) + psnrbw = psnrbw / (np.sum(survey.frb_nozbweights,axis=1) * np.sum(survey.frb_nozwweights,axis=1)) + + + # calculates p(w) values + # then normalises probability over all pbw + for j,w in enumerate(grid.eff_weights): + pw[:,:] += pw_norm[j,:,:]*survey.frb_nozwweights[:,j] + pw = pw/pwb_norm + + + # calculates p(b) values. + # then normalised probability over all pbw + for i,b in enumerate(survey.beam_b): + pb[:,:] += pb_norm[i,:,:]*survey.frb_nozbweights[:,i] + pb = pb/pwb_norm + + # calculates p(b|w,z,dM), using p(b|w) p(w) = p(b,w) + pb_gw = pbw / pw + + # calculates p(w|b,z,DM), using p(w|b) p(b) = p(b,w) + pw_gb = pbw / pb + + # this is where we sum along the z-axis! 
(after multiplying by p(z) of course) + + pb = np.sum(pb*tomult,axis=0) + pw = np.sum(pw*tomult,axis=0) + pb_gw = np.sum(pb_gw*tomult,axis=0) + pw_gb = np.sum(pw_gb*tomult,axis=0) + pbw = np.sum(pbw*tomult,axis=0) + psnr_gbw = np.sum(psnr_gbw*tomult,axis=0) + psnrbw = np.sum(psnrbw*tomult,axis=0) + + # we should have now calculated psnr|b,w, + #print("psnr_gbw,pb_gw,pw_gb,pw,pb,pbw,psnrbw") + #for i in np.arange(nfrb): + # print(psnr_gbw[i],pb_gw[i],pw_gb[i],pw[i],pb[i],pbw[i],psnrbw[i]) + + # adds p(width, beam) to the list + bad = np.where(pbw == 0.) + pbw[bad] = 1.e-10 + llpbw = np.sum(np.log10(pbw)) + llsum += llpbw + + # adds psnr values to the list + bad = np.where(psnr_gbw == 0.) + psnr_gbw[bad] = 1.e-10 + llpsnr_gbw = np.sum(np.log10(psnr_gbw)) + llsum += llpsnr_gbw + + longlist["pbw"]={} + longlist["pbw"]["pb"]=np.log10(pb) + longlist["pbw"]["pw"]=np.log10(pw) + longlist["pbw"]["pbgw"]=np.log10(pb_gw) + longlist["pbw"]["pwgb"]=np.log10(pw_gb) + longlist["pbw"]["pbw"]=np.log10(pbw) + longlist["pbw"]["psnr_gbw"]=np.log10(psnr_gbw) + longlist["pbw"]["psnrbw"]=np.log10(psnrbw) + + lllist["pbw"]={} + lllist["pbw"]["pb"]=np.sum(np.log10(pb)) + lllist["pbw"]["pw"]=np.sum(np.log10(pw)) + lllist["pbw"]["pbgw"]=np.sum(np.log10(pb_gw)) + lllist["pbw"]["pwgb"]=np.sum(np.log10(pw_gb)) + lllist["pbw"]["pbw"]=np.sum(np.log10(pbw)) + lllist["pbw"]["psnr_gbw"]=np.sum(np.log10(psnr_gbw)) + lllist["pbw"]["psnrbw"]=np.sum(np.log10(psnrbw)) + # normalise by the beam and FRB width values #This ensures that regions with zero probability don't produce nans due to 0/0 @@ -575,13 +615,15 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, rnorms = np.sum(rs,axis=0) zpsnr *= rs psnr = np.sum(zpsnr,axis=0) / rnorms + + # normalises for total probability of DM occurring in the first place. # We need to do this. This effectively cancels however the Emin-Emax factor. 
# sums down the z-axis # keeps individual FRB values - if dolist==2: - longlist += np.log10(psnr) + longlist["psnr"] = np.log10(psnr) + # checks to ensure all frbs have a chance of being detected bad=np.array(np.where(psnr == 0.)) @@ -590,60 +632,13 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, else: snrll = np.sum(np.log10(psnr)) - lllist.append(snrll) - llsum += snrll + # add to likelihood list + lllist["psnr"] = snrll - if doplot: - - fig4=plt.figure() - plt.xlabel('z') - plt.ylabel('p(DM,z)p(z)') - #plt.xlim(0,1) - plt.yscale('log') - - fig2=plt.figure() - plt.xlabel('z') - plt.ylabel('p(SNR | DM,z)') - #plt.xlim(0,1) - plt.yscale('log') - - tm4=np.max(zpsnr) - tm2=np.max(rs) - for j in np.arange(survey.Ss.size): - linestyle='-' - if j>=survey.Ss.size/4: - linestyle=':' - if j>=survey.Ss.size/2: - linestyle='--' - if j>=3*survey.Ss.size/4: - linestyle='-.' - - plt.figure(fig4.number) - plt.plot(zvals,rs[:,j],label=str(int(DMobs[j])),linestyle=linestyle,linewidth=2) - - plt.figure(fig2.number) - plt.plot(zvals,zpsnr[:,j],label=str(int(DMobs[j])),linestyle=linestyle) - - fig4.legend(ncol=2,loc='upper right',fontsize=8) - fig4.tight_layout() - plt.figure(fig4.number) - plt.ylim(tm2/1e5,tm2) - fig4.savefig('TEMP_p_z.pdf') - plt.close(fig4.number) - - fig2.legend(ncol=2,loc='upper right',fontsize=8) - fig2.tight_layout() - plt.ylim(tm4/1e5,tm4) - fig2.savefig('TEMP_p_zpsnr.pdf') - plt.close(fig2.number) - - print("Total snr probabilities") - for i,p in enumerate(psnr): - print(i,survey.Ss[i],p) + if not pwb: + # only do this if we are not already calculating psnr given p(w,b) + llsum += snrll - else: - lllist.append(0) - if grid_type==1 and pNreps: repll = 0 if len(survey.replist) != 0: @@ -653,33 +648,22 @@ def calc_likelihoods_1D(grid,survey,doplot=False,norm=True,psnr=True, repll += -1e10 else: repll += np.log10(float(pReps)) - lllist.append(repll) + lllist["pReps"]=repll + longlist["pReps"] = np.log10(np.array(allpReps)) llsum += repll 
- else: - lllist.append(0) - - if doplot: - plt.figure() - plt.plot(dmvals,pdm,color='blue') - plt.plot(DMobs,pvals,'ro') - plt.xlabel('DM') - plt.ylabel('p(DM)') - plt.tight_layout() - plt.savefig('Plots/1d_dm_fit.pdf') - plt.close() + + # determines which list of things to return if dolist==0: return llsum elif dolist==1: - return llsum,lllist,expected + return llsum,lllist elif dolist==2: - return llsum,lllist,expected,longlist - elif dolist==5: #for compatibility with 2D likelihood calculation - return llsum,lllist,expected,[0.,0.,0.,0.] + return llsum,lllist,longlist def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=False, - Pn=False,pNreps=True,ptauw=False,dolist=0,verbose=False,grid_type=0): + Pn=False,pNreps=True,ptauw=False,pwb=False,dolist=0,verbose=False,grid_type=0): """ Calculates 2D likelihoods using observed DM,z values grid: the grid object calculated from survey @@ -704,18 +688,39 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal True: calculate probability of intrinsic width and scattering *given* total width False: do not calculate this + pwb: + True: calculate probability of specific width and beam values, and psnr | bw + False: do not calculate this; simply sum psnr over all possible p(b,w) + dolist: 0: returns total log10 likelihood llsum only [float] - 1: returns llsum, log10([Pzdm,Pn,Ps]), - 2: as above, plus a 'long list' giving log10(likelihood) - for each FRB individually - 3: return (llsum, -np.log10(norm)*Zobs.size, - np.sum(np.log10(pvals)), np.sum(np.log10(psnr))) - 4: return (llsum, -np.log10(norm)*Zobs.size, - np.sum(np.log10(pvals)), - pvals.copy(), psnr.copy()) - 5: returns llsum, log10([Pzdm,Pn,Ps]), , - np.log10([p(z|DM), p(DM), p(DM|z), p(z)]) + 1: also returns a dict of individual statistical contributions to log-likelihood + 2: also returns the above for every FRB individually + Structure of llsum and longlist: + ["pzDM"]["pDM"] + ["pN"] + ["Nexpected"] (only for + 
["ptauw"] + ["pbar"] + ["piw"] + ["ptau"] + ["w_indices"] + ["pbw"] + ["pb"] + ["pw"] + ["pbgw"] + ["pwgb"] + ["pbw"] + ["psnr_gbw"] + ["psnrbw"] + lllist: + longlist [list of lists]: + Pn: float + zDM: PzDM + zDM extras: P(z) P(DM), P(DM|z), P(z|DM) + ptauw: p(tau|wtot), p(wtot), p(snr|wtot) + psnr: p_snr (over all beams/widths) + pwb: p(snr|b,w,z,dm), p(snr,b,w|z,DM), p(b|zDM), p(w|zDM), p(b|w,zDM), p(w|b,zDM), p(wb|zDM) else: returns nothing (actually quite useful behaviour!) norm: @@ -739,7 +744,7 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal print("WARNING: cannot calculate ptauw for this survey, please initialised backproject") # Determine which array to perform operations on and initialise - if grid_type == 1: + if grid_type == 1: rates = grid.exact_reps if survey.zreps is not None: DMobs=survey.DMEGs[survey.zreps] @@ -798,28 +803,36 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal pvals[i] = np.sum(rates[izs1[i],iweights[i]] * dm_weights[i] * dkzs1[i] + rates[izs2[i],iweights[i]] * dm_weights[i] * dkzs2[i]) - bad= pvals <= 0. + bad = pvals <= 0. flg_bad = False if np.any(bad): # This avoids a divide by 0 but we are in a NAN regime pvals[bad]=1e-50 # hopefully small but not infinitely so flg_bad = True + # initialise dicts to return detailed log-likelihood information + longlist={} + lllist={} + # holds individual FRB data - if dolist==2: - longlist=np.log10(pvals)-np.log10(norm) + # records p(z,DM) for each FRB. 
Analagous to lllist, but for each FRB + longlist["pzDM"]={} + longlist["pzDM"]["pzDM"]=np.log10(pvals/norm) + # llsum is the total log-likelihood for the entire set llsum=np.sum(np.log10(pvals)) if flg_bad: llsum = -1e10 - # llsum -= np.log10(norm)*Zobs.size # once per event - lllist=[llsum] # pz,DM + + # creates a list of all z,DM results + lllist["pzDM"]={} # pz,DM + lllist["pzDM"]["pzDM"]=llsum #### calculates zdm components p(DM),p(z|DM),p(z),p(DM|z) # does this by using previous results for p(z,DM) and # calculating p(DM) and p(z) - if dolist==5: + if dolist > 0: # calculates p(dm) pdmvals = np.sum(rates[:,idms1],axis=0)*dkdms1 pdmvals += np.sum(rates[:,idms2],axis=0)*dkdms2 @@ -840,12 +853,25 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal if bad.size > 0: array[bad]=1e-20 # hopefully small but not infinitely so + llnorm = np.log10(norm) + # logspace and normalisation llpzgdm = np.sum(np.log10(pzgdmvals)) llpdmgz = np.sum(np.log10(pdmgzvals)) - llpdm = np.sum(np.log10(pdmvals)) - np.log10(norm)*Zobs.size - llpz = np.sum(np.log10(pzvals)) - np.log10(norm)*Zobs.size - dolist5_return = [llpzgdm,llpdm,llpdmgz,llpz] + llpdm = np.sum(np.log10(pdmvals)) - llnorm*Zobs.size + llpz = np.sum(np.log10(pzvals)) - llnorm*Zobs.size + + # adds survey totals to log-likelihood list + lllist["pzDM"]["pzgdm"] = llpzgdm + lllist["pzDM"]["pdmgz"] = llpdmgz + lllist["pzDM"]["pdm"] = llpdm + lllist["pzDM"]["pz"] = llpz + + # adds individual FRB data to long list + longlist["pzDM"]["pzgdm"] = np.log10(pzgdmvals) + longlist["pzDM"]["pdmgz"] = np.log10(pdmgzvals) + longlist["pzDM"]["pdm"] = np.log10(pdmvals) - llnorm + longlist["pzDM"]["pz"] = np.log10(pzvals) - llnorm ############### Calculate p(N) ###############3 @@ -868,32 +894,23 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal Pn=Poisson_p(observed,expected) if Pn==0: Pll=-1e10 - # otherwise 1e-10 might be better than the actual total ll! 
- #if dolist==0: - # return Pll else: Pll=np.log10(Pn) - lllist.append(Pll) + lllist["pN"]=Pll + lllist["Nexpected"]=expected if verbose: print(f'Pll term = {Pll}') llsum += Pll else: - expected=0 - lllist.append(0) - - # plots figures as appropriate - if doplot: - plt.figure() - #plt.plot(dmvals,pdm,color='blue') - plt.plot(DMobs,pvals,'ro') - plt.xlabel('DM') - plt.ylabel('p(DM)') - plt.tight_layout() - plt.savefig('1d_dm_fit.pdf') - plt.close() + # dummy values + lllist["Nexpected"]=-1 + lllist["pN"]=0 ################ Calculates p(tau,w| total width) ############### if ptauw: + if not survey.backproject: + raise ValueError("Cannot estimate p(tau,w) without survey.backproject being True!") + # checks which have OK tau values - in general, this is a subset # ALSO: note that this only checks p(tau,iw | w)! It does NOT # evaluate p(w)!!! Which is a pretty key thing... @@ -942,36 +959,26 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal ptaus[bad2] = 1e-10 pbars = 0.5*piws + 0.5*ptaus - - # TESTING - I have left this in here, because it creates - # a useful plot showing the relative probabilities - # and how well they match up. Ideally, should be 1-1 - - TestProbabilities=False - if TestProbabilities: - for i,pb in enumerate(pbars): - print(i,Tauobs[i],Iwobs[i],pb,piws[i],ptaus[i]) - plt.scatter(piws/Iwobs,ptaus/Tauobs) - plt.xscale("log") - plt.yscale("log") - plt.show() - exit() - - llpbar = np.sum(np.log10(pbars)) llpiw = np.sum(np.log10(piws)) llptw = np.sum(np.log10(ptaus)) # while we calculate llpiw, we don't add it to the sum # this is because w and tau are not independent! 
- # p(iw|tau,w) = \delta(iw-(w**2 - tau**2)**0.5) - # However, numerical differences will affect this - # Hence, we ad half of each value - #llsum += 0.5*llpiw - #llsum += 0.5*llptw # this was summing in logspace + llsum += llpbar # now summing in linear space - lllist.append(llpiw) - lllist.append(llptw) + lllist["ptauw"]={} + # appending total of each to log0-likelihood list + lllist["ptauw"]["piw"]=llpiw + lllist["ptauw"]["ptw"]=llptw + lllist["ptauw"]["pbar"]=llpbar + + # appending individual FRB data to long long list + longlist["ptauw"]={} + longlist["ptauw"]["pbar"]=np.log10(pbars) + longlist["ptauw"]["piw"]=np.log10(piws) + longlist["ptauw"]["ptau"]=np.log10(ptaus) + longlist["ptauw"]["w_indices"]=iztaulist ############ Calculates p(s | z,DM) ############# @@ -1027,6 +1034,9 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal # We integrate p(snr|b,w) p(b,w) db dw. # Eths.shape[i] is the number of FRBs: length of izs1 # this has shape nz,nFRB - FRBs could come from any z-value + # Note: given that this includes p(b,w), we can use this loop + # to simultaneously calculate p(b,w) + nb = survey.beam_b.size nw,nfrb = Eths.shape psnr=np.zeros([nfrb]) @@ -1039,18 +1049,25 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal # initialised to hold w-b normalisations pbw_norm = 0. - if ptauw: - # hold array representing p(w) - dpbws = np.zeros([nw,nfrb]) + if ptauw and not pwb: + # hold array representing p(w) and p(b) + dpbws = np.zeros([nw,nfrb]) # holds pw over the width only, i.e. 
summing over the beam + + if pwb: + psnrbws = np.zeros([nb,nw,nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + psnr_gbws = np.zeros([nb,nw,nfrb]) # holds psnr_gbw * p(b,w,) for each b,w bin + pbws = np.zeros([nb,nw,nfrb]) # holds p(bw given z,dm) for each b,w, bin for i,b in enumerate(survey.beam_b): bEths=Eths/b # array of shape NFRB, 1/b bEobs=bEths*survey.Ss[zlist] for j,w in enumerate(grid.eff_weights): + # probability of observing an FRB at this z,DM with given b,w at this particular snr dsnr temp=grid.array_diff_lf(bEobs[j,:],Emin,Emax,gamma) # * FtoE #one dim in beamshape, one dim in FRB differential = temp.T*bEths[j,:] #multiplies by beam factors and weight + # probability of observing an FRB at this z,DM with given b,w at *any* snr temp2=grid.array_cum_lf(bEths[j,:],Emin,Emax,gamma) # * FtoE #one dim in beamshape, one dim in FRB cumulative = temp2.T #*bEths[j,:] #multiplies by beam factors and weight @@ -1068,18 +1085,40 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal # p(b,w|DM,z) = survey.beam_o[i]*usew * cumulative / sum(survey.beam_o[i]*usew * cumulative) # hence, the "cumulative" part cancels + # this value normalises the pbw_gdmz value dpbw = survey.beam_o[i]*usew*cumulative - if ptauw: + if ptauw and not pwb: # record probability of this w summed over all beams for each FRB - dpbws[j,:] += dpbw + dpbws[i,j,:] += dpbw pbw_norm += dpbw + # this is the psnr_gbw * pbw_gdmz contribution for this particular b,w. The "cumulative" value cancels psnr += differential*survey.beam_o[i]*usew + + ###### Breaks p(snr,b,w) into three components, and saves them ##### + # this allows comoutations of psnr given b and w values, collapsing these over the dimensions of b and w + + if pwb: + # psnr given beam, width, z,dm + OK = np.where(cumulative > 0)[0] + if zwidths: + usew = usew[OK] + + psnr_gbws[i,j,OK] = differential[OK]/cumulative[OK] + + # psnr given beam, width, z,dm. if differential is OK, cool! 
+ psnrbws[i,j,OK] = differential[OK]*survey.beam_o[i]*usew + + # total probability of that p(w,b) + pbws[i,j,OK] = survey.beam_o[i]*usew*cumulative[OK] + # calculate p(w) - if ptauw: + # Note that iws1 and iws2 is only defined for ztaulist + # this leaves info on the table for FRBs with no tau but known total width + if ptauw and not pwb: # normalise over all w values dpbws /= np.sum(dpbws,axis=0) # calculate pws @@ -1089,26 +1128,91 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal llpws = np.sum(np.log10(pws)) llsum += llpws - lllist.append(llpws) - OK = np.where(pbw_norm > 0.)[0] - psnr[OK] /= pbw_norm[OK] + # adds these to list of likelihood outputs + lllist["ptauw"]["pws"]=llpws + longlist["ptauw"]["pws"]=np.log10(pws) - if doplot: - plt.figure() - plt.xlabel("$s ( \\equiv {\\rm SNR}/{\\rm SNR_{\\rm th}})$") - plt.ylabel("p(s)") - plt.scatter(survey.Ss[zlist],psnr,c=Zobs) - cbar = plt.colorbar() - cbar.set_label('z') - plt.tight_layout() - plt.savefig("2D_ps.png") - plt.close() + # calculates all metrics: (psnr|b,w,z,DM), p(b,w | z,DM), p(w|z,DM), p(b|z,dM), p(w|b,z,DM), p(b|w,z,DM) + if pwb: + pw_norm = np.sum(pbws,axis=0) # sums along b axis, giving p(w) + pb_norm = np.sum(pbws,axis=1) # sums along w axis, giving p(b) + pwb_norm = np.sum(pw_norm,axis=0) # sums along w axis after b axis, giving pbw norm for all FRBs - - # keeps individual FRB values - if dolist==2: - longlist += np.log10(psnr) + psnrbw = np.zeros([nfrb]) + psnr_gbw = np.zeros([nfrb]) + pbw = np.zeros([nfrb]) + pw = np.zeros([nfrb]) + pb = np.zeros([nfrb]) + + for i,b in enumerate(survey.beam_b): + for j,w in enumerate(grid.eff_weights): + # multiplies by the width and beam weights for that FRB. 
These are pre-calculated in the survey + # each component below is a vector over nfrb + + psnrbw += psnrbws[i,j,:]*survey.frb_zbweights[:,i]*survey.frb_zwweights[:,j] + psnr_gbw += psnr_gbws[i,j,:] *survey.frb_zbweights[:,i]*survey.frb_zwweights[:,j] + pbw += pbws[i,j,:]*survey.frb_zbweights[:,i]*survey.frb_zwweights[:,j] + + + # normalises pbw by normalised sum over all b,w. This gives dual p(b,w) for each FRB + pbw = pbw / pwb_norm + psnrbw = psnrbw / pwb_norm + + # psnr_gbws needs no normalisation, provided weights in each dimension sum to unity. But we check here just to be sure + psnr_gbw = psnr_gbw / (np.sum(survey.frb_zbweights,axis=1) * np.sum(survey.frb_zwweights,axis=1)) + psnrbw = psnrbw / (np.sum(survey.frb_zbweights,axis=1) * np.sum(survey.frb_zwweights,axis=1)) + + # calculates p(w) values + # then normalises probability over all pbw + for j,w in enumerate(grid.eff_weights): + pw[:] += pw_norm[j,:]*survey.frb_zwweights[:,j] + pw = pw/pwb_norm + + # calculates p(b) values. + # then normalised probability over all pbw + for i,b in enumerate(survey.beam_b): + pb[:] += pb_norm[i,:]*survey.frb_zbweights[:,i] + pb = pb/pwb_norm + + # calculates p(b|w,z,dM), using p(b|w) p(w) = p(b,w) + pb_gw = pbw / pw + + # calculates p(w|b,z,DM), using p(w|b) p(b) = p(b,w) + pw_gb = pbw / pb + + # adds p(widht, beam) to the list + bad = np.where(pbw == 0.) + pbw[bad] = 1.e-10 + llpbw = np.sum(np.log10(pbw)) + llsum += llpbw + + # adds psnr values to the list + bad = np.where(psnr_gbw == 0.) 
+ psnr_gbw[bad] = 1.e-10 + llpsnr_gbw = np.sum(np.log10(psnr_gbw)) + llsum += llpsnr_gbw + + longlist["pbw"]={} + longlist["pbw"]["pb"]=np.log10(pb) + longlist["pbw"]["pw"]=np.log10(pw) + longlist["pbw"]["pbgw"]=np.log10(pb_gw) + longlist["pbw"]["pwgb"]=np.log10(pw_gb) + longlist["pbw"]["pbw"]=np.log10(pbw) + longlist["pbw"]["psnr_gbw"]=np.log10(psnr_gbw) + longlist["pbw"]["psnrbw"]=np.log10(psnrbw) + + lllist["pbw"]={} + lllist["pbw"]["pb"]=np.sum(np.log10(pb)) + lllist["pbw"]["pw"]=np.sum(np.log10(pw)) + lllist["pbw"]["pbgw"]=np.sum(np.log10(pb_gw)) + lllist["pbw"]["pwgb"]=np.sum(np.log10(pw_gb)) + lllist["pbw"]["pbw"]=np.sum(np.log10(pbw)) + lllist["pbw"]["psnr_gbw"]=np.sum(np.log10(psnr_gbw)) + lllist["pbw"]["psnrbw"]=np.sum(np.log10(psnrbw)) + + OK = np.where(pbw_norm > 0.)[0] + psnr[OK] /= pbw_norm[OK] # checks to ensure all frbs have a chance of being detected bad=np.array(np.where(psnr == 0.)) @@ -1117,25 +1221,29 @@ def calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal else: snrll = np.sum(np.log10(psnr)) - lllist.append(snrll) - llsum += snrll - if printit: - for i,snr in enumerate(survey.Ss): - print(i,snr,psnr[i]) - else: - lllist.append(0) - + + # keeps individual FRB values + longlist["psnr"] = np.log10(psnr) + + # add to likelihood list + lllist["psnr"] = snrll + + if not pwb: + # only do this if we are not already calculating psnr given p(w,b) + llsum += snrll + if grid_type==1 and pNreps: repll = 0 + allpReps=[] if len(survey.replist) != 0: for irep in survey.replist: pReps = grid.calc_exact_repeater_probability(Nreps=survey.frbs["NREP"][irep],DM=survey.DMs[irep],z=survey.Zs[irep]) + allpReps.append(float(pReps)) repll += np.log10(float(pReps)) - lllist.append(repll) + lllist["pReps"]=repll llsum += repll - else: - lllist.append(0) - + longlist["pReps"] = np.log10(np.array(allpReps)) + if verbose: print(f"rates={np.sum(rates):0.5f}," \ f"nterm={-np.log10(norm)*Zobs.size:0.2f}," \ @@ -1143,21 +1251,13 @@ def 
calc_likelihoods_2D(grid,survey,doplot=False,norm=True,psnr=True,printit=Fal f"wzterm={np.sum(np.log10(psnr)):0.2f}," \ f"comb={np.sum(np.log10(psnr*pvals)):0.2f}") + # determines which list of things to return if dolist==0: return llsum elif dolist==1: - return llsum,lllist,expected + return llsum,lllist elif dolist==2: - return llsum,lllist,expected,longlist - elif dolist==3: - return (llsum, -np.log10(norm)*Zobs.size, - np.sum(np.log10(pvals)), np.sum(np.log10(psnr))) - elif dolist==4: - return (llsum, -np.log10(norm)*Zobs.size, - np.sum(np.log10(pvals)), - pvals.copy(), psnr.copy()) - elif dolist==5: - return llsum,lllist,expected,dolist5_return + return llsum,lllist,longlist def calc_DMG_weights(DMEGs, DMhalos, DM_ISMs, dmvals, sigma_ISM=0.5, sigma_halo_abs=15.0, log=False): """ diff --git a/zdm/loading.py b/zdm/loading.py index 8f704e85..c641046c 100644 --- a/zdm/loading.py +++ b/zdm/loading.py @@ -4,10 +4,9 @@ # but in the current implementation it is not removed. import numpy as np import os -from pkg_resources import resource_filename from astropy.cosmology import Planck18 - +import importlib.resources as resources from zdm import survey from zdm import parameters from zdm import cosmology as cos @@ -114,7 +113,7 @@ def load_CHIME(Nbin:int=6, make_plots:bool=False, opdir='CHIME/',\ #state = st.set_state(pset) # loads survey data - sdir = resource_filename('zdm','data/Surveys/CHIME/') + sdir = resources.files('zdm').joinpath('data/Surveys/CHIME/') if Verbose: print("Loading CHIME surveys from ",sdir) @@ -235,7 +234,7 @@ def surveys_and_grids(init_state=None, alpha_method=1, zDMgrid, zvals,dmvals = misc_functions.get_zdm_grid( state, new=True, plot=False, method='analytic', nz=nz, ndm=ndm, zmax=zmax, dmmax=dmmax, - datdir=resource_filename('zdm', 'GridData')) + datdir=resources.files('zdm').joinpath('GridData')) ############## Initialise surveys ############## if survey_names is None: diff --git a/zdm/parameters.py b/zdm/parameters.py index 
0d5414f9..582757cc 100644 --- a/zdm/parameters.py +++ b/zdm/parameters.py @@ -115,7 +115,7 @@ class FRBDemoParams(data_class.myDataClass): }, ) lC: float = field( - default=4.19, + default=3.3249, metadata={"help": "log10 constant in number per Gpc^-3 yr^-1 at z=0"}, ) diff --git a/zdm/scripts/MCMC/MCMC_wrap.py b/zdm/scripts/MCMC/MCMC_wrap.py index 47b4b140..e93dc288 100644 --- a/zdm/scripts/MCMC/MCMC_wrap.py +++ b/zdm/scripts/MCMC/MCMC_wrap.py @@ -16,7 +16,7 @@ import numpy as np from astropy.cosmology import Planck18 -from pkg_resources import resource_filename +import importlib.resources as resources from zdm import survey from zdm import cosmology as cos from zdm import loading @@ -95,7 +95,7 @@ def main(): zDMgrid, zvals, dmvals = mf.get_zdm_grid( state, new=True, plot=False, method='analytic', - datdir=resource_filename('zdm', 'GridData'), + datdir=resources.files('zdm').joinpath('GridData'), nz=args.Nz,ndm=args.Ndm,zmax=args.zmax,dmmax=args.dmmax) # pass this to starting iteration diff --git a/zdm/scripts/MCMC/average_grids.py b/zdm/scripts/MCMC/average_grids.py index 085de24d..dcbee2b7 100644 --- a/zdm/scripts/MCMC/average_grids.py +++ b/zdm/scripts/MCMC/average_grids.py @@ -12,7 +12,7 @@ import emcee import argparse -from pkg_resources import resource_filename +import importlib.resources as resources import os import json from zdm import survey @@ -37,7 +37,7 @@ def parse_args(): args = parser.parse_args() if args.directory == None: - args.directory = resource_filename('zdm', 'mcmc') + args.directory = resources.files('zdm').joinpath('mcmc') return args @@ -69,7 +69,7 @@ def main(): grid_vals = mf.get_zdm_grid( state, new=True, plot=False, method='analytic', nz=nz, ndm=ndm, dmmax=dmmax, - datdir=resource_filename('zdm', 'GridData')) + datdir=resources.files('zdm').joinpath('GridData')) # Load survey # If the survey is not specified, then the default is to use CRAFT_ICS_1300 diff --git a/zdm/scripts/likelihood_components.py 
b/zdm/scripts/likelihood_components.py new file mode 100644 index 00000000..fef75ad2 --- /dev/null +++ b/zdm/scripts/likelihood_components.py @@ -0,0 +1,163 @@ +""" +This script uses CRAFT surveys to show how all the components +of the likelihood calculation are unpacked. + +It uses calls to calc_likelihoods[1/2]D with +""" +import os + +from astropy.cosmology import Planck18 +from zdm import cosmology as cos +from zdm import figures +from zdm import parameters +from zdm import survey +from zdm import pcosmic +from zdm import iteration as it +from zdm import loading +from zdm import io +from zdm import optical as opt +from zdm import states + +import numpy as np +from zdm import survey +from matplotlib import pyplot as plt +import importlib.resources as resources + +def main(): + + # in case you wish to switch to another output directory + name="ASKAP" + opdir=name+"/" + + # load states from Hoffman et al 2025 + state = states.load_state("HoffmannEmin25",scat="updated",rep=None) + + # add estimation from ptauw + state.scat.Sbackproject=True + state.scat.Sbackproject=True + + if not os.path.exists(opdir): + os.mkdir(opdir) + + # Initialise surveys and grids + sdir = resources.files('zdm').joinpath('data/Surveys') + names=['CRAFT_ICS_892']#,'CRAFT_ICS_1300','CRAFT_ICS_1632'] + + # set this to true to calculate p(twau,w). However, this takes quite some time to evaluate - it's slow! + # That's because of internal arrays that must be built up by the survey + ptauw=False + # setting ptauw to True takes much longer! 
+ if ptauw: + # ensure we have z-dependent scattering covered + survey_dict = {} + survey_dict["WMETHOD"] = 3 + # make this smaller to reduce time + ndm=100 + nz=100 + else: + survey_dict=None + ndm=1400 + nz=500 + + ss,gs = loading.surveys_and_grids(survey_names=names,repeaters=False, + init_state=state,sdir=sdir,survey_dict = survey_dict, + ndm=ndm, nz=nz) + + g=gs[0] + s=ss[0] + + ############# dolist = 0 ########## + print("####### DOLIST = 0: Total likelihoods ##########") + # simple illustration of "dolist=0" - returns only the total log likelihoods + llsum2 = it.calc_likelihoods_2D(g, s, norm=True, psnr=True, dolist=0, Pn=True, ptauw=ptauw, pwb=True) + llsum1 = it.calc_likelihoods_1D(g, s, norm=True, psnr=True, dolist=0, Pn=False, ptauw=ptauw, pwb=True) + + lltotal = llsum2+llsum1 + print("1D and 2D likelihoods ",llsum1,llsum2," sum to ",lltotal,"\n\n\n\n") + + ############# dolist = 1 ########## + # calculate 2D likelihoods from this survey + # illustrates three different values for dolist + #llsum = it.calc_likelihoods_2D(g, s, norm=True, psnr=True, dolist=0, Pn=True, ptauw=ptauw, pwb=True) + + + print("\n######## DOLIST = 1: component likelihoods #########") + + print("\n#### checking 2D likelihoods ####") + + # generates list of likelihoods for all components + llsum,lllist = it.calc_likelihoods_2D(g, s, norm=True, psnr=True, dolist=1, Pn=True, ptauw=ptauw, pwb=True) + + # performs some checks + print("Check p(DM,z), ",lllist["pzDM"]["pzDM"]," = p(z|DM)p(DM) ",lllist["pzDM"]["pdmgz"]+lllist["pzDM"]["pz"]) + print("Check p(DM,z), ",lllist["pzDM"]["pzDM"]," = p(DM|z)p(z) ",lllist["pzDM"]["pzgdm"]+lllist["pzDM"]["pdm"]) + + # checks regarding psnr, beam, and width + print("Check p(SNR,b,w) = p(SNR|bw)*p(bw)",lllist["pbw"]["psnrbw"],lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pbw"]) + print("Check p(b,w) = p(b|w)*p(w) = p(w|b)p(b)",lllist["pbw"]["pbw"],lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"], + lllist["pbw"]["pbgw"]+lllist["pbw"]["pw"]) + + if ptauw: + 
print("Check the llsum = p(tau|w) p(snr|b,w,z,DM) p(b|w,z,DM) p(w|z,DM) p(z|DM) p(DM) p(N)") + print(llsum,lllist["pzDM"]["pzgdm"]+lllist["pzDM"]["pdm"]+lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"] + +lllist["ptauw"]["pbar"]) + else: + print("Check the llsum = p(snr|b,w,z,DM) p(b|w,z,DM) p(w|z,DM) p(z|DM) p(DM) p(N))") + print(llsum,lllist["pzDM"]["pzgdm"]+lllist["pzDM"]["pdm"]+lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"] + +lllist["pN"]) + + print("\n\n#### checking 1D likelihoods ####") + # generates list of likelihoods for all components + llsum,lllist = it.calc_likelihoods_1D(g, s, norm=True, psnr=True, dolist=1, Pn=True, ptauw=ptauw, pwb=True) + + # performs some checks + # checks regarding psnr, beam, and width + print("Check p(SNR,b,w) = p(SNR|bw)*p(bw)",lllist["pbw"]["psnrbw"],lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pbw"]) + print("Check p(b,w) = p(b|w)*p(w) = p(w|b)p(b)",lllist["pbw"]["pbw"],lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"], + lllist["pbw"]["pbgw"]+lllist["pbw"]["pw"]) + + if ptauw: + print("Check the llsum = p(tau|w) p(snr|b,w,DM) p(b|w,DM) p(w|DM) p(DM) p(N)") + print(llsum,lllist["pzDM"]["pdm"]+lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"] + +lllist["ptauw"]+lllist["pbar"]) + else: + print("Check the llsum = p(snr|b,w,DM) p(b|w,DM) p(w|DM) p(DM) p(N))") + print(llsum,lllist["pzDM"]["pdm"]+lllist["pbw"]["psnr_gbw"]+lllist["pbw"]["pwgb"]+lllist["pbw"]["pb"] + +lllist["pN"]) + + + + + ############# dolist = 1 ########## + # calculate 2D likelihoods from this survey + # illustrates three different values for dolist + #llsum = it.calc_likelihoods_2D(g, s, norm=True, psnr=True, dolist=0, Pn=True, ptauw=ptauw, pwb=True) + + + print("\n######## DOLIST = 2: individual FRB likelihoods #########") + + print("\n#### checking 2D likelihoods ####") + + # generates list of likelihoods for all components + llsum,lllist,longlist = it.calc_likelihoods_2D(g, s, norm=True, psnr=True, dolist=2, 
Pn=True, ptauw=ptauw, pwb=True) + + print("\nCheck p(DM,z) = p(DM|z)p(z) ") + for i,pzdm in enumerate(longlist["pzDM"]["pzDM"]): + print(i,pzdm,longlist["pzDM"]["pdmgz"][i]+longlist["pzDM"]["pz"][i]) + + + print("\n\nCheck p(DM,z) = p(z|DM)p(DM) ") + for i,pzdm in enumerate(longlist["pzDM"]["pzDM"]): + print(i,pzdm,longlist["pzDM"]["pzgdm"][i]+longlist["pzDM"]["pdm"][i]) + + + + # checks regarding psnr, beam, and width + print("\n\nCheck p(SNR,b,w) = p(SNR|bw)*p(bw)") + print("These will be slightly out, because all three are calculated by weighting over b,w,") + print(" and \sum_bw psnrbw *w(b,w) != (\sum_bw psnrgbw*w(b,w))*(\sum_bw p(bw) w(b,w,)") + for i,psnrbw in enumerate(longlist["pbw"]["psnrbw"]): + print(longlist["pbw"]["psnrbw"][i],longlist["pbw"]["psnr_gbw"][i]+longlist["pbw"]["pbw"][i]) + + +main() diff --git a/zdm/states.py b/zdm/states.py new file mode 100644 index 00000000..7e8876c2 --- /dev/null +++ b/zdm/states.py @@ -0,0 +1,378 @@ +""" +This file keeps a record of known states from specific papers. + +References below are: +JamesSFR22: https://ui.adsabs.harvard.edu/abs/2022MNRAS.510L..18J/abstract +JamesH022: https://ui.adsabs.harvard.edu/abs/2022MNRAS.516.4862J/abstract +BaptisaF24: https://ui.adsabs.harvard.edu/abs/2024ApJ...965...57B/abstract +HoffmannEmin25: https://ui.adsabs.harvard.edu/abs/2025PASA...42...17H/abstract +HoffmannHalo25: Unpublished (yet!) + +Other relevant references are: +https://ui.adsabs.harvard.edu/abs/2025arXiv251005654J/abstract (scattering) +https://ui.adsabs.harvard.edu/abs/2023PASA...40...57J/abstract (repeaters) +https://ui.adsabs.harvard.edu/abs/2022MNRAS.509.4775J/abstract (zDM methods paper) + +""" + +from zdm import parameters +import numpy as np +from astropy.cosmology import Planck18 + +def load_state(case="HoffmannHalo25",scat=None,rep=None): + """ + Routine to set state variables according to the methods used + in specific previous works. 
+ + Args: + case [string]: which paper to use + scat [string]: which scattering method to use (overwrites default in case) + rep [string]: which repetition parameter set to use + + Returns: + state: zdm state object + """ + + #### primary fit params #### + cases=["JamesSFR22","JamesH022","BaptisaF24",\ + "HoffmannEmin25","HoffmannHalo25"] + + if not case in cases: + raise ValueError("Case ",case," Undefined, please choose from ",cases) + + # parameter dict to hold values to be passed to state + vparams={} + vparams = set_fit_params(vparams,case) + + ###### scattering ###### + scats=["orig","CHIME","updated"] + if scat is not None: + # just use the user-defined scattering version. Check if this exists! + if not scat in scats: + raise ValueError("Case ",scat," undefined, please choose from ",scats) + elif case == "JamesSFR22": + scat="orig" + vparams = set_orig_scat(vparams) + elif case == "JamesH022" or case == "BaptisaF24" \ + or case == "HoffmannEmin25" or case == "HoffmannHalo25": + scat="CHIME" + vparams = set_chime_scat(vparams) + else: + scat="updated" + vparams = set_updated_scat(vparams) + + + #### repetition #### + # four cases for CHIME repeaters paper + reps = ["a","b","c","d"] + if rep is not None: + if not rep in reps: + raise ValueError("repeaters ",rep," undefined, please choose from",reps) + else: + print(rep) + vparams = set_reps(vparams,rep) + + # initialise state with cosmological parameters + state = parameters.State() + state.set_astropy_cosmo(Planck18) # we always do this + + # update with relevant values + state.update_param_dict(vparams) + + return state + +def set_reps(vparams,rep): + """ + Sets repetition parameters from JamesRepeaters24. + + There are four cases to choose from - see that paper. 
+ """ + if not "rep" in vparams: + vparams["rep"] = {} + + vparams["rep"]["RE0"] = 1.e39 # normalisation energy in ergs + + if rep == "a": + vparams["rep"]["lRmin"] = -1.73 + vparams["rep"]["lRmin"] = -0.25 + vparams["rep"]["Rgamma"] = -1.9 + elif rep== "b": + vparams["rep"]["lRmin"] = -1.23 + vparams["rep"]["lRmin"] = -0.25 + vparams["rep"]["Rgamma"] = -3 + elif rep== "c": + vparams["rep"]["lRmin"] = -1.38 + vparams["rep"]["lRmin"] = 3 + vparams["rep"]["Rgamma"] = -3 + elif rep== "d": + vparams["rep"]["lRmin"] = -4.54 + vparams["rep"]["lRmin"] = 3 + vparams["rep"]["Rgamma"] = -2.1 + return vparams + +def set_fit_params(vparams,case): + """ + sets best-fit standard fit parameters, + as returned from the MCMC + + + """ + + + # adds any missing categories + for param in ['FRBdemo','MW','cosmo','IGM','energy','rep','IGM','host']: + if not param in vparams: + vparams[param] = {} + + + if case == "JamesSFR22": # + vparams['energy']['lEmin'] = 30 + vparams['energy']['lEmax'] = 41.7 + vparams['energy']['alpha'] = 1.55 + vparams['energy']['gamma'] = -1.09 + vparams['energy']['luminosity_function'] = 0 + + vparams['FRBdemo']['alpha_method'] = 0 + vparams['FRBdemo']['source_evolution'] = 0 + vparams['FRBdemo']['sfr_n'] = 1.67 + #vparams['FRBdemo']['lC'] = 3.15 # incorrect, check + + vparams['host']['lmean'] = 2.11 + vparams['host']['lsigma'] = 0.53 + + vparams['MW']['DMhalo']=50 + vparams['MW']['halo_method']=0 + vparams['MW']['sigmaHalo']=0 + vparams['MW']['sigmaDMG']=0 + + vparams['IGM']['logF'] = np.log10(0.32) + + vparams['cosmo']['H0'] = 70. 
+ + + elif case == "JamesH022": + vparams['energy']['lEmin'] = 30 + vparams['energy']['lEmax'] = 41.26 + vparams['energy']['alpha'] = 0.99 + vparams['energy']['gamma'] = -0.95 + vparams['energy']['luminosity_function'] = 2 + + vparams['FRBdemo']['alpha_method'] = 1 + vparams['FRBdemo']['source_evolution'] = 0 + vparams['FRBdemo']['sfr_n'] = 1.13 + #vparams['FRBdemo']['lC'] = 3.15 # incorrect, check + + vparams['host']['lmean'] = 2.27 + vparams['host']['lsigma'] = 0.55 + + vparams['MW']['DMhalo']=50 + vparams['MW']['halo_method']=0 + vparams['MW']['sigmaHalo']=0 + vparams['MW']['sigmaDMG']=0 + + vparams['IGM']['logF'] = np.log10(0.32) + + vparams['cosmo']['H0'] = 73.0 + + elif case == "BaptisaF24": + vparams['energy']['lEmin'] = 30 + vparams['energy']['lEmax'] = 41.40 + vparams['energy']['alpha'] = 0.65 + vparams['energy']['gamma'] = -1.01 + vparams['energy']['luminosity_function'] = 2 + + vparams['FRBdemo']['alpha_method'] = 1 + vparams['FRBdemo']['source_evolution'] = 0 + vparams['FRBdemo']['sfr_n'] = 0.73 + #vparams['FRBdemo']['lC'] = 3.15 # incorrect, check + + vparams['host']['lmean'] = 2.18 + vparams['host']['lsigma'] = 0.48 + + vparams['MW']['DMhalo']=50 + vparams['MW']['halo_method']=0 + vparams['MW']['sigmaHalo']=0 + vparams['MW']['sigmaDMG']=0 + + vparams['IGM']['logF'] = -0.49 + + vparams['cosmo']['H0'] = 67.66 + + elif case == "HoffmannEmin25": + # this case is for H0 fixed to a small range, + # and including P(N) + vparams['energy']['lEmin'] = 39.47 + vparams['energy']['lEmax'] = 41.37 + vparams['energy']['alpha'] = -0.11 + vparams['energy']['gamma'] = -1.04 + vparams['energy']['luminosity_function'] = 2 + + vparams['FRBdemo']['alpha_method'] = 1 + vparams['FRBdemo']['source_evolution'] = 0 + vparams['FRBdemo']['sfr_n'] = 0.21 + vparams['FRBdemo']['lC'] = -7.4 + + vparams['host']['lmean'] = 2.18 + vparams['host']['lsigma'] = 0.42 + + vparams['MW']['DMhalo']=50 + vparams['MW']['halo_method']=0 + vparams['MW']['sigmaHalo']=0 + vparams['MW']['sigmaDMG']=0 
+ + vparams['IGM']['logF'] = np.log10(0.32) + + vparams['cosmo']['H0'] = 70.23 + + + vparams['energy']['luminosity_function'] = 2 + elif case == "HoffmannHalo25": + # this case is for H0 fixed to a small range, + # and including P(N) + vparams['energy']['lEmin'] = 38.22 + vparams['energy']['lEmax'] = 40.9 + vparams['energy']['alpha'] = 1.55 + vparams['energy']['gamma'] = -1.12 + vparams['energy']['luminosity_function'] = 2 + + vparams['FRBdemo']['alpha_method'] = 1 + vparams['FRBdemo']['source_evolution'] = 0 + vparams['FRBdemo']['sfr_n'] = 2.88 + #vparams['FRBdemo']['lC'] = 3.15 # incorrect, check + + vparams['host']['lmean'] = 2.13 + vparams['host']['lsigma'] = 0.46 + + vparams['MW']['DMhalo']=68 + vparams['MW']['halo_method']=0 + vparams['MW']['sigmaHalo']=15 + vparams['MW']['sigmaDMG']=0 + + vparams['IGM']['logF'] = np.log10(0.32) + + vparams['cosmo']['H0'] = 70.63 + + + vparams['energy']['luminosity_function'] = 2 + else: + raise ValueError("Unrecognised case. Please select one of ",cases) + + return vparams + +def set_orig_scat(vparams): + """ + Set width/scattering method to original version from zDM + This introduces no scattering, and treats both width and + scattering as a single combined distribution. + Note that currently width method is given in the surveys. + This will be altered. 
+ + + Args: + vparams: existing dict containing state variables + + Returns: + vparams: updated dict with width and scattering methods added + """ + + + + if not 'width' in vparams: + vparams['width'] = {} + vparams['width']['Wlogmean'] = 0.74 + vparams['width']['Wlogsigma'] = 1.07 # expressed as 2.46 in natural log space + vparams['width']['WNbins'] = 5 + vparams['width']['WidthFunction'] = 1 # lognormal + vparams['width']['Wthresh'] = 0.5 + # approximately the same width treatment - + # the functionality is changed, so it's not *quite* + # identical + vparams['width']['WMin'] = 0.1 + vparams['width']['WMax'] = 100 + + #scattering was not actually included, but this gets set to zero + # to ensure it doesn't contribute by accident + if not 'scat' in vparams: + vparams['width'] = {} + vparams['scat'] = {} + vparams['scat']['Slogmean'] = -2 + vparams['scat']['Slogsigma'] = 0.2 + vparams['scat']['ScatFunction'] = 1 # lognormal + vparams['scat']['Sfnorm'] = 600 + vparams['scat']['Sfpower'] = 0. 
+
+    return vparams
+
+
+def set_chime_scat(vparams):
+    """
+    Sets the width and scattering variables to those from CHIME,
+    which were used in zDM between James22H0 and HoffmannEmin25
+
+    Args:
+        vparams: existing dict containing state variables
+
+    Returns:
+        vparams: updated dict with width and scattering methods added
+    """
+
+    if not 'width' in vparams:
+        vparams['width'] = {}
+    vparams['width']['Wlogmean'] = 0
+    vparams['width']['Wlogsigma'] = 0.42
+    vparams['width']['WNbins'] = 5
+    vparams['width']['WidthFunction'] = 1 # lognormal
+    vparams['width']['Wthresh'] = 0.5
+    # approximately the same width treatment -
+    # the functionality is changed, so it's not *quite*
+    # identical
+    vparams['width']['WMin'] = 0.1
+    vparams['width']['WMax'] = 100
+    vparams['width']['WNInternalBins'] = 100
+
+
+    if not 'scat' in vparams:
+        # BUG FIX: this branch previously also did vparams['width'] = {},
+        # which wiped the width settings populated just above. Only the
+        # 'scat' sub-dict should be created here.
+        vparams['scat'] = {}
+    vparams['scat']['Slogmean'] = 0.305
+    vparams['scat']['Slogsigma'] = 0.75
+    vparams['scat']['ScatFunction'] = 1 # lognormal
+    vparams['scat']['Sfnorm'] = 600
+    vparams['scat']['Sfpower'] = -4.
+    vparams['scat']['Smaxsigma'] = 3
+
+    return vparams
+
+def set_updated_scat(vparams):
+    """
+    Sets the width and scattering variables to those from
+    James et al 2025, which focusses on width and scattering
+    """
+
+    if not 'width' in vparams:
+        vparams['width'] = {}
+    vparams['width']['Wlogmean'] = -0.29
+    vparams['width']['Wlogsigma'] = 0.65
+    vparams['width']['WNbins'] = 12
+    vparams['width']['WidthFunction'] = 2 # half-lognormal
+    vparams['width']['Wthresh'] = 0.5
+    # approximately the same width treatment -
+    # the functionality is changed, so it's not *quite*
+    # identical
+    vparams['width']['WMin'] = 0.01
+    vparams['width']['WMax'] = 100
+    vparams['width']['WNInternalBins'] = 1000
+
+
+    if not 'scat' in vparams:
+        # BUG FIX: this branch previously also did vparams['width'] = {},
+        # which wiped the width settings populated just above. Only the
+        # 'scat' sub-dict should be created here.
+        vparams['scat'] = {}
+    vparams['scat']['Slogmean'] = -1.3
+    vparams['scat']['Slogsigma'] = 0.2
+    vparams['scat']['ScatFunction'] = 2 # half-lognormal
+    vparams['scat']['Sfnorm'] = 1000
+    vparams['scat']['Sfpower'] = -4.
+    vparams['scat']['Smaxsigma'] = 3
+    vparams['scat']['Sbackproject'] = False
+
+    return vparams
diff --git a/zdm/survey.py b/zdm/survey.py
index a75337bb..440ca81c 100644
--- a/zdm/survey.py
+++ b/zdm/survey.py
@@ -6,10 +6,10 @@
 import numpy as np
 import os
-from pkg_resources import resource_filename
+
 from scipy.integrate import quad
 from dataclasses import dataclass, fields
-
+import importlib.resources as resources
 import pandas
 from astropy.table import Table
 import json
@@ -96,6 +96,9 @@ def __init__(self, state, survey_name:str,
 
         # initialise scattering/width and resulting efficiences
         self.init_widths()
+        self.init_frb_bvals() # initialise weights for FRBs with known beam values
+        self.init_frb_wvals()
+        self.calc_max_dm()
 
     def init_widths(self,state=None):
@@ -453,6 +456,72 @@ def get_internal_coeffs(self,wlist):
 
         return iws1, iws2, dkws1, dkws2
 
+
+    def init_frb_bvals(self):
+        """
+        Initialise frb-by-frb weights for each value of the beam histogram, based on
+        the value of the beam that the FRB was
+        detected in.
+
+        Simply does this by linear interpolation
+        """
+
+        # contains beam-dependent weights for each FRB
+        frb_bweights = np.zeros([self.NFRB,self.meta["NBINS"]])
+
+        lbs = np.log(self.beam_b)
+
+        for i,B in enumerate(self.frbs["B"]):
+            # BUG FIX: the FRB.B field documents *any* negative value as
+            # "unknown" (default -1.), so test B < 0 rather than B == -1;
+            # otherwise e.g. B = -2 would fall into the B < beam_b[0] branch.
+            if B < 0: # code for "ignore it"
+                # still have to decide what to do here. Likely give equal weights?
+                frb_bweights[i,:] = 1./self.meta["NBINS"]
+            elif B > self.beam_b[-1]: # greater value than max, just use max
+                frb_bweights[i,-1] = 1.
+            elif B < self.beam_b[0]:
+                frb_bweights[i,0] = 1.
+            else:
+                # at least one value of beam_b will be greater and one lesser than B
+                iB2 = np.where(self.beam_b > B)[0][0]
+                iB1 = iB2 - 1
+                # do log-scaling
+                lB = np.log(B)
+                kB2 = (lB- lbs[iB1])/(lbs[iB2]-lbs[iB1])
+                kB1 = 1.-kB2
+                frb_bweights[i,iB1] = kB1
+                frb_bweights[i,iB2] = kB2
+        self.frb_bweights = frb_bweights
+
+        # speedups when iterating through 1D and 2D likelihoods
+        self.frb_zbweights = frb_bweights[self.zlist,:]
+        self.frb_nozbweights = frb_bweights[self.nozlist,:]
+
+    def init_frb_wvals(self):
+        """
+        Initialises frb width coefficients for linear interpolation
+
+        This is for a slightly different purpose than the init widths routine
+        """
+
+
+        #wlist = survey.WIDTHs # measured total widths
+        nw = self.wlist.size
+        frb_wweights = np.zeros([self.NFRB,nw])
+
+        OKw = np.where(self.WIDTHs > 0.)
+        notOKw = np.where(self.WIDTHs <= 0.)
+
+        # equal weights for all FRBs with no measured width
+        # BUG FIX: OKw/notOKw are the tuples returned by np.where; combining a
+        # raw where-tuple with a second index (a[tuple, idx]) is nested-tuple
+        # advanced indexing, which is removed in modern NumPy. Index with the
+        # extracted integer arrays instead.
+        frb_wweights[notOKw[0],:] = 1./nw
+
+        # iterate through the list
+        iws1,iws2,dkws1,dkws2 = self.get_w_coeffs(self.WIDTHs[OKw])
+        frb_wweights[OKw[0],iws1] = dkws1
+        frb_wweights[OKw[0],iws2] = dkws2
+        self.frb_wweights = frb_wweights
+
+        # speedups when iterating through 1D and 2D likelihoods
+        self.frb_zwweights = self.frb_wweights[self.zlist,:]
+        self.frb_nozwweights = self.frb_wweights[self.nozlist,:]
+
     def get_w_coeffs(self,wlist):
         """
         Returns indices and coefficients for linear interpolation between width values
@@ -470,6 +539,17 @@ def get_w_coeffs(self,wlist):
 
         # convert to log-widths - the bins are in log10 space
         logwlist = np.log10(wlist)
 
+        if self.NWbins == 1:
+            # only when there is a single width bin
+            nfrb = logwlist.size
+            iws1 = np.full([nfrb],0,dtype='int')
+            iws2 = iws1
+            dkws1 = np.full([nfrb],1.,dtype='float')
+            dkws2 = dkws1
+            # dkws2 is identical to 1. This over-writes 1, but ensures
+            # that order of implementation of 1 and 2 does not matter
+            return iws1, iws2, dkws1, dkws2
+
         # the below assumes that kws=(logwlist-np.log10(self.WMin))/self.dlogw
         # now will assume it begins at Wmin+dlogw + 0.5
         # forces any low values to zero
@@ -991,7 +1071,7 @@ def init_beam(self,plot=False,
         else:
             print("No beam found to initialise...")
 
-
+
     def calc_max_dm(self):
         '''
         Calculates the maximum searched DM.
@@ -1208,7 +1288,7 @@ def calc_relative_sensitivity(DM_frb,DM,w,fbar,t_res,nu_res,Nchan=336,max_idt=No
     # If model not CHIME, Quadrature or Sammons assume it is a filename
     else:
         if edir is None:
-            edir = resource_filename('zdm', 'data/Efficiencies')
+            edir = str(resources.files('zdm').joinpath('data/Efficiencies'))
 
         filename = os.path.expanduser(os.path.join(edir, model + ".npy"))
         if not os.path.exists(filename):
@@ -1265,8 +1345,7 @@ def load_survey(survey_name:str, state:parameters.State,
     print(f"Loading survey: {survey_name}")
 
     if sdir is None:
-        sdir = os.path.join(
-            resource_filename('zdm', 'data'), 'Surveys')
+        sdir = str(resources.files('zdm').joinpath('data/Surveys'))
 
     # Hard code real surveys
     if survey_name == 'CRAFT/FE':
diff --git a/zdm/survey_data.py b/zdm/survey_data.py
index d84fd60d..fed9ac34 100644
--- a/zdm/survey_data.py
+++ b/zdm/survey_data.py
@@ -97,7 +97,7 @@ class FRB(data_class.myDataClass):
             'Notation': '',
             })
     WIDTH: float = field(
-        default=0.1,
+        default=-1,
         metadata={'help': "Width of the event",
             'unit': 'ms',
             'Notation': '',
@@ -114,6 +114,12 @@ class FRB(data_class.myDataClass):
             'unit': '',
             'Notation': '',
             })
+    B: float = field(
+        default=-1.,
+        metadata={'help': "Beam value B at point of detection. Negative means unknown.",
+            'unit': '',
+            'Notation': '',
+            })
 
 @dataclass
 class Telescope(data_class.myDataClass):
diff --git a/zdm/tests/test_chime.py b/zdm/tests/test_chime.py
index fd7edfcf..2f8d6545 100644
--- a/zdm/tests/test_chime.py
+++ b/zdm/tests/test_chime.py
@@ -5,9 +5,6 @@
 import os
 import pytest
-from pkg_resources import resource_filename
-import pandas
-
 from zdm.tests import tstutils
 from zdm import loading